amba-pl08x.c

  1. /*
  2. * Copyright (c) 2006 ARM Ltd.
  3. * Copyright (c) 2010 ST-Ericsson SA
  4. *
  5. * Author: Peter Pearse <peter.pearse@arm.com>
  6. * Author: Linus Walleij <linus.walleij@stericsson.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License as published by the Free
  10. * Software Foundation; either version 2 of the License, or (at your option)
  11. * any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful, but WITHOUT
  14. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  15. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  16. * more details.
  17. *
  18. * The full GNU General Public License is in this distribution in the file
  19. * called COPYING.
  20. *
  21. * Documentation: ARM DDI 0196G == PL080
  22. * Documentation: ARM DDI 0218E == PL081
  23. * Documentation: S3C6410 User's Manual == PL080S
  24. *
  25. * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
  26. * channel.
  27. *
  28. * The PL080 has 8 channels available for simultaneous use, and the PL081
  29. * has only two channels. So on these DMA controllers the number of channels
  30. * and the number of incoming DMA signals are two totally different things.
  31. * It is usually not possible to handle all physical signals simultaneously,
  32. * so a multiplexing scheme with possible denial of use is necessary.
  33. *
  34. * The PL080 has a dual bus master, PL081 has a single master.
  35. *
  36. * PL080S is a version modified by Samsung and used in S3C64xx SoCs.
  37. * It differs in following aspects:
  38. * - CH_CONFIG register at different offset,
  39. * - separate CH_CONTROL2 register for transfer size,
  40. * - bigger maximum transfer size,
  41. * - 8-word aligned LLI, instead of 4-word, due to extra CCTL2 word,
  42. * - no support for peripheral flow control.
  43. *
  44. * Memory to peripheral transfer may be visualized as
  45. *     Get data from memory to DMAC
  46. *     Until no data left
  47. *         On burst request from peripheral
  48. *             Destination burst from DMAC to peripheral
  49. *             Clear burst request
  50. *     Raise terminal count interrupt
  51. *
  52. * For peripherals with a FIFO:
  53. * Source burst size == half the depth of the peripheral FIFO
  54. * Destination burst size == the depth of the peripheral FIFO
  55. *
  56. * (Bursts are irrelevant for mem to mem transfers - there are no burst
  57. * signals, the DMA controller will simply facilitate its AHB master.)
  58. *
  59. * ASSUMES default (little) endianness for DMA transfers
  60. *
  61. * The PL08x has two flow control settings:
  62. * - DMAC flow control: the transfer size defines the number of transfers
  63. * which occur for the current LLI entry, and the DMAC raises TC at the
  64. * end of every LLI entry. Observed behaviour shows the DMAC listening
  65. * to both the BREQ and SREQ signals (contrary to documented),
  66. * transferring data if either is active. The LBREQ and LSREQ signals
  67. * are ignored.
  68. *
  69. * - Peripheral flow control: the transfer size is ignored (and should be
  70. * zero). The data is transferred from the current LLI entry, until
  71. * after the final transfer signalled by LBREQ or LSREQ. The DMAC
  72. * will then move to the next LLI entry. Unsupported by PL080S.
  73. */
  74. #include <linux/amba/bus.h>
  75. #include <linux/amba/pl08x.h>
  76. #include <linux/debugfs.h>
  77. #include <linux/delay.h>
  78. #include <linux/device.h>
  79. #include <linux/dmaengine.h>
  80. #include <linux/dmapool.h>
  81. #include <linux/dma-mapping.h>
  82. #include <linux/export.h>
  83. #include <linux/init.h>
  84. #include <linux/interrupt.h>
  85. #include <linux/module.h>
  86. #include <linux/of.h>
  87. #include <linux/of_dma.h>
  88. #include <linux/pm_runtime.h>
  89. #include <linux/seq_file.h>
  90. #include <linux/slab.h>
  91. #include <linux/amba/pl080.h>
  92. #include "dmaengine.h"
  93. #include "virt-dma.h"
  94. #define DRIVER_NAME "pl08xdmac"
  95. #define PL80X_DMA_BUSWIDTHS \
  96. BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
  97. BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
  98. BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
  99. BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
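/*
 * Editor's illustrative sketch (not part of this driver): how a client
 * driver might set up a MEM_TO_DEV slave transfer on a PL08x channel via
 * the generic dmaengine API, following the burst-size guidance in the
 * header comment (source burst == half the peripheral FIFO depth,
 * destination burst == the FIFO depth). The request name "tx", the FIFO
 * address and the 32-word FIFO depth are hypothetical.
 */
static int __maybe_unused pl08x_example_client_tx(struct device *dev,
		dma_addr_t fifo_addr, struct scatterlist *sgl,
		unsigned int sg_len)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = fifo_addr,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = 16,	/* half the (hypothetical) FIFO depth */
		.dst_maxburst = 32,	/* the (hypothetical) FIFO depth */
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	dmaengine_slave_config(chan, &cfg);
	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -EBUSY;
	}
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}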
  100. static struct amba_driver pl08x_amba_driver;
  101. struct pl08x_driver_data;
  102. /**
  103. * struct vendor_data - vendor-specific config parameters for PL08x derivatives
  104. * @channels: the number of channels available in this variant
  105. * @signals: the number of request signals available from the hardware
  106. * @dualmaster: whether this version supports dual AHB masters or not.
  107. * @nomadik: whether the channels have Nomadik security extension bits
  108. * that need to be checked for permission before use and some registers are
  109. * missing
  110. * @pl080s: whether this version is a PL080S, which has separate register and
  111. * LLI word for transfer size.
  112. * @max_transfer_size: the maximum single element transfer size for this
  113. * PL08x variant.
  114. */
  115. struct vendor_data {
  116. u8 config_offset;
  117. u8 channels;
  118. u8 signals;
  119. bool dualmaster;
  120. bool nomadik;
  121. bool pl080s;
  122. u32 max_transfer_size;
  123. };
  124. /**
  125. * struct pl08x_bus_data - information of source or destination
  126. * busses for a transfer
  127. * @addr: current address
  128. * @maxwidth: the maximum width of a transfer on this bus
  129. * @buswidth: the width of this bus in bytes: 1, 2 or 4
  130. */
  131. struct pl08x_bus_data {
  132. dma_addr_t addr;
  133. u8 maxwidth;
  134. u8 buswidth;
  135. };
  136. #define IS_BUS_ALIGNED(bus) IS_ALIGNED((bus)->addr, (bus)->buswidth)
  137. /**
  138. * struct pl08x_phy_chan - holder for the physical channels
  139. * @id: physical index to this channel
  140. * @lock: a lock to use when altering an instance of this struct
  141. * @serving: the virtual channel currently being served by this physical
  142. * channel
  143. * @locked: channel unavailable for the system, e.g. dedicated to secure
  144. * world
  145. */
  146. struct pl08x_phy_chan {
  147. unsigned int id;
  148. void __iomem *base;
  149. void __iomem *reg_config;
  150. spinlock_t lock;
  151. struct pl08x_dma_chan *serving;
  152. bool locked;
  153. };
  154. /**
  155. * struct pl08x_sg - structure containing data per sg
  156. * @src_addr: src address of sg
  157. * @dst_addr: dst address of sg
  158. * @len: transfer len in bytes
  159. * @node: node for txd's dsg_list
  160. */
  161. struct pl08x_sg {
  162. dma_addr_t src_addr;
  163. dma_addr_t dst_addr;
  164. size_t len;
  165. struct list_head node;
  166. };
  167. /**
  168. * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
  169. * @vd: virtual DMA descriptor
  170. * @dsg_list: list of children sg's
  171. * @llis_bus: DMA memory address (physical) start for the LLIs
  172. * @llis_va: virtual memory address start for the LLIs
  173. * @cctl: control reg values for current txd
  174. * @ccfg: config reg values for current txd
  175. * @done: this marks completed descriptors, which should not have their
  176. * mux released.
  177. * @cyclic: indicate cyclic transfers
  178. */
  179. struct pl08x_txd {
  180. struct virt_dma_desc vd;
  181. struct list_head dsg_list;
  182. dma_addr_t llis_bus;
  183. u32 *llis_va;
  184. /* Default cctl value for LLIs */
  185. u32 cctl;
  186. /*
  187. * Settings to be put into the physical channel when we
  188. * trigger this txd. Other registers are in llis_va[0].
  189. */
  190. u32 ccfg;
  191. bool done;
  192. bool cyclic;
  193. };
  194. /**
  195. * enum pl08x_dma_chan_state - holds the PL08x specific virtual channel
  196. * states
  197. * @PL08X_CHAN_IDLE: the channel is idle
  198. * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
  199. * channel and is running a transfer on it
  200. * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
  201. * channel, but the transfer is currently paused
  202. * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
  203. * channel to become available (only pertains to memcpy channels)
  204. */
  205. enum pl08x_dma_chan_state {
  206. PL08X_CHAN_IDLE,
  207. PL08X_CHAN_RUNNING,
  208. PL08X_CHAN_PAUSED,
  209. PL08X_CHAN_WAITING,
  210. };
  211. /**
  212. * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
  213. * @vc: wrapped virtual channel
  214. * @phychan: the physical channel utilized by this channel, if there is one
  215. * @name: name of channel
  216. * @cd: channel platform data
  217. * @cfg: the runtime slave configuration (addresses, widths, bursts)
  218. * @at: active transaction on this channel
  220. * @host: a pointer to the host (internal use)
  221. * @state: whether the channel is idle, paused, running etc
  222. * @slave: whether this channel is a device (slave) or for memcpy
  223. * @signal: the physical DMA request signal which this channel is using
  224. * @mux_use: count of descriptors using this DMA request signal setting
  225. */
  226. struct pl08x_dma_chan {
  227. struct virt_dma_chan vc;
  228. struct pl08x_phy_chan *phychan;
  229. const char *name;
  230. struct pl08x_channel_data *cd;
  231. struct dma_slave_config cfg;
  232. struct pl08x_txd *at;
  233. struct pl08x_driver_data *host;
  234. enum pl08x_dma_chan_state state;
  235. bool slave;
  236. int signal;
  237. unsigned mux_use;
  238. };
  239. /**
  240. * struct pl08x_driver_data - the local state holder for the PL08x
  241. * @slave: slave engine for this instance
  242. * @memcpy: memcpy engine for this instance
  243. * @base: virtual memory base (remapped) for the PL08x
  244. * @adev: the corresponding AMBA (PrimeCell) bus entry
  245. * @vd: vendor data for this PL08x variant
  246. * @pd: platform data passed in from the platform/machine
  247. * @phy_chans: array of data for the physical channels
  248. * @pool: a pool for the LLI descriptors
  249. * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
  250. * fetches
  251. * @mem_buses: set to indicate memory transfers on AHB2.
  252. * @lli_words: how many words are used in each LLI item for this variant
  253. */
  254. struct pl08x_driver_data {
  255. struct dma_device slave;
  256. struct dma_device memcpy;
  257. void __iomem *base;
  258. struct amba_device *adev;
  259. const struct vendor_data *vd;
  260. struct pl08x_platform_data *pd;
  261. struct pl08x_phy_chan *phy_chans;
  262. struct dma_pool *pool;
  263. u8 lli_buses;
  264. u8 mem_buses;
  265. u8 lli_words;
  266. };
  267. /*
  268. * PL08X specific defines
  269. */
  270. /* The order of words in an LLI. */
  271. #define PL080_LLI_SRC 0
  272. #define PL080_LLI_DST 1
  273. #define PL080_LLI_LLI 2
  274. #define PL080_LLI_CCTL 3
  275. #define PL080S_LLI_CCTL2 4
  276. /* Total words in an LLI. */
  277. #define PL080_LLI_WORDS 4
  278. #define PL080S_LLI_WORDS 8
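/*
 * Editor's illustrative view of the hardware LLI word layout indexed by the
 * constants above (not used by the driver itself, which works on raw u32
 * arrays):
 */
struct pl08x_lli_layout_example {
	u32 src;	/* PL080_LLI_SRC: source address */
	u32 dst;	/* PL080_LLI_DST: destination address */
	u32 lli;	/* PL080_LLI_LLI: bus address of next LLI, 0 == last */
	u32 cctl;	/* PL080_LLI_CCTL: control word (widths, bursts, size) */
	u32 cctl2;	/* PL080S_LLI_CCTL2: transfer size, PL080S only; PL080S
			 * LLIs are padded out to 8 words for alignment */
};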
  279. /*
  280. * Number of LLIs in each LLI buffer allocated for one transfer
  281. * (maximum times we call dma_pool_alloc on this pool without freeing)
  282. */
  283. #define MAX_NUM_TSFR_LLIS 512
  284. #define PL08X_ALIGN 8
  285. static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
  286. {
  287. return container_of(chan, struct pl08x_dma_chan, vc.chan);
  288. }
  289. static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
  290. {
  291. return container_of(tx, struct pl08x_txd, vd.tx);
  292. }
  293. /*
  294. * Mux handling.
  295. *
  296. * This gives us the DMA request input to the PL08x primecell which the
  297. * peripheral described by the channel data will be routed to, possibly
  298. * via a board/SoC specific external MUX. One important point to note
  299. * here is that this does not depend on the physical channel.
  300. */
  301. static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
  302. {
  303. const struct pl08x_platform_data *pd = plchan->host->pd;
  304. int ret;
  305. if (plchan->mux_use++ == 0 && pd->get_xfer_signal) {
  306. ret = pd->get_xfer_signal(plchan->cd);
  307. if (ret < 0) {
  308. plchan->mux_use = 0;
  309. return ret;
  310. }
  311. plchan->signal = ret;
  312. }
  313. return 0;
  314. }
  315. static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
  316. {
  317. const struct pl08x_platform_data *pd = plchan->host->pd;
  318. if (plchan->signal >= 0) {
  319. WARN_ON(plchan->mux_use == 0);
  320. if (--plchan->mux_use == 0 && pd->put_xfer_signal) {
  321. pd->put_xfer_signal(plchan->cd, plchan->signal);
  322. plchan->signal = -1;
  323. }
  324. }
  325. }
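/*
 * Editor's sketch of the platform hooks used above (hypothetical board code,
 * not part of this driver): a trivial mux that hands out a fixed DMA request
 * line per channel and needs no teardown. A real board would typically
 * program an external request multiplexer here and could return a negative
 * error code to deny the request. These would be wired into struct
 * pl08x_platform_data as .get_xfer_signal and .put_xfer_signal.
 */
static int __maybe_unused board_get_xfer_signal(const struct pl08x_channel_data *cd)
{
	/* Hypothetical: the request line number is kept in the mux value */
	return cd->muxval;
}

static void __maybe_unused board_put_xfer_signal(const struct pl08x_channel_data *cd,
						 int signal)
{
	/* Nothing to undo for a fixed routing */
}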
  326. /*
  327. * Physical channel handling
  328. */
  329. /* Whether a certain channel is busy or not */
  330. static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
  331. {
  332. unsigned int val;
  333. val = readl(ch->reg_config);
  334. return val & PL080_CONFIG_ACTIVE;
  335. }
  336. static void pl08x_write_lli(struct pl08x_driver_data *pl08x,
  337. struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg)
  338. {
  339. if (pl08x->vd->pl080s)
  340. dev_vdbg(&pl08x->adev->dev,
  341. "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
  342. "clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n",
  343. phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
  344. lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL],
  345. lli[PL080S_LLI_CCTL2], ccfg);
  346. else
  347. dev_vdbg(&pl08x->adev->dev,
  348. "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
  349. "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
  350. phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
  351. lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg);
  352. writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR);
  353. writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR);
  354. writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI);
  355. writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL);
  356. if (pl08x->vd->pl080s)
  357. writel_relaxed(lli[PL080S_LLI_CCTL2],
  358. phychan->base + PL080S_CH_CONTROL2);
  359. writel(ccfg, phychan->reg_config);
  360. }
  361. /*
  362. * Set the initial DMA register values i.e. those for the first LLI
  363. * The next LLI pointer and the configuration interrupt bit have
  364. * been set when the LLIs were constructed. Poke them into the hardware
  365. * and start the transfer.
  366. */
  367. static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
  368. {
  369. struct pl08x_driver_data *pl08x = plchan->host;
  370. struct pl08x_phy_chan *phychan = plchan->phychan;
  371. struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
  372. struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
  373. u32 val;
  374. list_del(&txd->vd.node);
  375. plchan->at = txd;
  376. /* Wait for channel inactive */
  377. while (pl08x_phy_channel_busy(phychan))
  378. cpu_relax();
  379. pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg);
  380. /* Enable the DMA channel */
  381. /* Do not access config register until channel shows as disabled */
  382. while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
  383. cpu_relax();
  384. /* Do not access config register until channel shows as inactive */
  385. val = readl(phychan->reg_config);
  386. while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
  387. val = readl(phychan->reg_config);
  388. writel(val | PL080_CONFIG_ENABLE, phychan->reg_config);
  389. }
  390. /*
  391. * Pause the channel by setting the HALT bit.
  392. *
  393. * For M->P transfers, pause the DMAC first and then stop the peripheral -
  394. * the FIFO can only drain if the peripheral is still requesting data.
  395. * (note: this can still timeout if the DMAC FIFO never drains of data.)
  396. *
  397. * For P->M transfers, disable the peripheral first to stop it filling
  398. * the DMAC FIFO, and then pause the DMAC.
  399. */
  400. static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
  401. {
  402. u32 val;
  403. int timeout;
  404. /* Set the HALT bit and wait for the FIFO to drain */
  405. val = readl(ch->reg_config);
  406. val |= PL080_CONFIG_HALT;
  407. writel(val, ch->reg_config);
  408. /* Wait for channel inactive */
  409. for (timeout = 1000; timeout; timeout--) {
  410. if (!pl08x_phy_channel_busy(ch))
  411. break;
  412. udelay(1);
  413. }
  414. if (pl08x_phy_channel_busy(ch))
  415. pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
  416. }
  417. static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
  418. {
  419. u32 val;
  420. /* Clear the HALT bit */
  421. val = readl(ch->reg_config);
  422. val &= ~PL080_CONFIG_HALT;
  423. writel(val, ch->reg_config);
  424. }
  425. /*
  426. * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
  427. * clears any pending interrupt status. This should not be used for
  428. * an on-going transfer, but as a method of shutting down a channel
  429. * (eg, when it's no longer used) or terminating a transfer.
  430. */
  431. static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
  432. struct pl08x_phy_chan *ch)
  433. {
  434. u32 val = readl(ch->reg_config);
  435. val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
  436. PL080_CONFIG_TC_IRQ_MASK);
  437. writel(val, ch->reg_config);
  438. writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
  439. writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
  440. }
  441. static inline u32 get_bytes_in_cctl(u32 cctl)
  442. {
  443. /* The source width defines the number of bytes */
  444. u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;
  445. cctl &= PL080_CONTROL_SWIDTH_MASK;
  446. switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
  447. case PL080_WIDTH_8BIT:
  448. break;
  449. case PL080_WIDTH_16BIT:
  450. bytes *= 2;
  451. break;
  452. case PL080_WIDTH_32BIT:
  453. bytes *= 4;
  454. break;
  455. }
  456. return bytes;
  457. }
  458. static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1)
  459. {
  460. /* The source width defines the number of bytes */
  461. u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK;
  462. cctl &= PL080_CONTROL_SWIDTH_MASK;
  463. switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
  464. case PL080_WIDTH_8BIT:
  465. break;
  466. case PL080_WIDTH_16BIT:
  467. bytes *= 2;
  468. break;
  469. case PL080_WIDTH_32BIT:
  470. bytes *= 4;
  471. break;
  472. }
  473. return bytes;
  474. }
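/*
 * Worked example (editor's note, both helpers above follow the same rule):
 * the transfer size field counts source-width transfers, so a control word
 * with transfer size 128 and a 16-bit source width yields 128 * 2 = 256
 * bytes remaining. On the PL080S the size comes from CCTL2 rather than
 * CCTL, but the source width is still taken from CCTL.
 */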
  475. /* The channel should be paused when calling this */
  476. static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
  477. {
  478. struct pl08x_driver_data *pl08x = plchan->host;
  479. const u32 *llis_va, *llis_va_limit;
  480. struct pl08x_phy_chan *ch;
  481. dma_addr_t llis_bus;
  482. struct pl08x_txd *txd;
  483. u32 llis_max_words;
  484. size_t bytes;
  485. u32 clli;
  486. ch = plchan->phychan;
  487. txd = plchan->at;
  488. if (!ch || !txd)
  489. return 0;
  490. /*
  491. * Follow the LLIs to get the number of remaining
  492. * bytes in the currently active transaction.
  493. */
  494. clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
  495. /* First get the remaining bytes in the active transfer */
  496. if (pl08x->vd->pl080s)
  497. bytes = get_bytes_in_cctl_pl080s(
  498. readl(ch->base + PL080_CH_CONTROL),
  499. readl(ch->base + PL080S_CH_CONTROL2));
  500. else
  501. bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
  502. if (!clli)
  503. return bytes;
  504. llis_va = txd->llis_va;
  505. llis_bus = txd->llis_bus;
  506. llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS;
  507. BUG_ON(clli < llis_bus || clli >= llis_bus +
  508. sizeof(u32) * llis_max_words);
  509. /*
  510. * Locate the next LLI - as this is an array,
  511. * it's simple maths to find.
  512. */
  513. llis_va += (clli - llis_bus) / sizeof(u32);
  514. llis_va_limit = llis_va + llis_max_words;
  515. for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) {
  516. if (pl08x->vd->pl080s)
  517. bytes += get_bytes_in_cctl_pl080s(
  518. llis_va[PL080_LLI_CCTL],
  519. llis_va[PL080S_LLI_CCTL2]);
  520. else
  521. bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]);
  522. /*
  523. * A LLI pointer going backward terminates the LLI list
  524. */
  525. if (llis_va[PL080_LLI_LLI] <= clli)
  526. break;
  527. }
  528. return bytes;
  529. }
  530. /*
  531. * Allocate a physical channel for a virtual channel
  532. *
  533. * Try to locate a physical channel to be used for this transfer. If all
  534. * are taken return NULL and the requester will have to cope by using
  535. * some fallback PIO mode or retrying later.
  536. */
  537. static struct pl08x_phy_chan *
  538. pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
  539. struct pl08x_dma_chan *virt_chan)
  540. {
  541. struct pl08x_phy_chan *ch = NULL;
  542. unsigned long flags;
  543. int i;
  544. for (i = 0; i < pl08x->vd->channels; i++) {
  545. ch = &pl08x->phy_chans[i];
  546. spin_lock_irqsave(&ch->lock, flags);
  547. if (!ch->locked && !ch->serving) {
  548. ch->serving = virt_chan;
  549. spin_unlock_irqrestore(&ch->lock, flags);
  550. break;
  551. }
  552. spin_unlock_irqrestore(&ch->lock, flags);
  553. }
  554. if (i == pl08x->vd->channels) {
  555. /* No physical channel available, cope with it */
  556. return NULL;
  557. }
  558. return ch;
  559. }
  560. /* Mark the physical channel as free. Note, this write is atomic. */
  561. static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
  562. struct pl08x_phy_chan *ch)
  563. {
  564. ch->serving = NULL;
  565. }
  566. /*
  567. * Try to allocate a physical channel. When successful, assign it to
  568. * this virtual channel, and initiate the next descriptor. The
  569. * virtual channel lock must be held at this point.
  570. */
  571. static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
  572. {
  573. struct pl08x_driver_data *pl08x = plchan->host;
  574. struct pl08x_phy_chan *ch;
  575. ch = pl08x_get_phy_channel(pl08x, plchan);
  576. if (!ch) {
  577. dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
  578. plchan->state = PL08X_CHAN_WAITING;
  579. return;
  580. }
  581. dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
  582. ch->id, plchan->name);
  583. plchan->phychan = ch;
  584. plchan->state = PL08X_CHAN_RUNNING;
  585. pl08x_start_next_txd(plchan);
  586. }
  587. static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
  588. struct pl08x_dma_chan *plchan)
  589. {
  590. struct pl08x_driver_data *pl08x = plchan->host;
  591. dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
  592. ch->id, plchan->name);
  593. /*
  594. * We do this without taking the lock; we're really only concerned
  595. * about whether this pointer is NULL or not, and we're guaranteed
  596. * that this will only be called when it _already_ is non-NULL.
  597. */
  598. ch->serving = plchan;
  599. plchan->phychan = ch;
  600. plchan->state = PL08X_CHAN_RUNNING;
  601. pl08x_start_next_txd(plchan);
  602. }
  603. /*
  604. * Free a physical DMA channel, potentially reallocating it to another
  605. * virtual channel if we have any pending.
  606. */
  607. static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
  608. {
  609. struct pl08x_driver_data *pl08x = plchan->host;
  610. struct pl08x_dma_chan *p, *next;
  611. retry:
  612. next = NULL;
  613. /* Find a waiting virtual channel for the next transfer. */
  614. list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
  615. if (p->state == PL08X_CHAN_WAITING) {
  616. next = p;
  617. break;
  618. }
  619. if (!next) {
  620. list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
  621. if (p->state == PL08X_CHAN_WAITING) {
  622. next = p;
  623. break;
  624. }
  625. }
  626. /* Ensure that the physical channel is stopped */
  627. pl08x_terminate_phy_chan(pl08x, plchan->phychan);
  628. if (next) {
  629. bool success;
  630. /*
  631. * Eww. We know this isn't going to deadlock
  632. * but lockdep probably doesn't.
  633. */
  634. spin_lock(&next->vc.lock);
  635. /* Re-check the state now that we have the lock */
  636. success = next->state == PL08X_CHAN_WAITING;
  637. if (success)
  638. pl08x_phy_reassign_start(plchan->phychan, next);
  639. spin_unlock(&next->vc.lock);
  640. /* If the state changed, try to find another channel */
  641. if (!success)
  642. goto retry;
  643. } else {
  644. /* No more jobs, so free up the physical channel */
  645. pl08x_put_phy_channel(pl08x, plchan->phychan);
  646. }
  647. plchan->phychan = NULL;
  648. plchan->state = PL08X_CHAN_IDLE;
  649. }
  650. /*
  651. * LLI handling
  652. */
  653. static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
  654. {
  655. switch (coded) {
  656. case PL080_WIDTH_8BIT:
  657. return 1;
  658. case PL080_WIDTH_16BIT:
  659. return 2;
  660. case PL080_WIDTH_32BIT:
  661. return 4;
  662. default:
  663. break;
  664. }
  665. BUG();
  666. return 0;
  667. }
  668. static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
  669. size_t tsize)
  670. {
  671. u32 retbits = cctl;
  672. /* Remove all src, dst and transfer size bits */
  673. retbits &= ~PL080_CONTROL_DWIDTH_MASK;
  674. retbits &= ~PL080_CONTROL_SWIDTH_MASK;
  675. retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;
  676. /* Then set the bits according to the parameters */
  677. switch (srcwidth) {
  678. case 1:
  679. retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
  680. break;
  681. case 2:
  682. retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
  683. break;
  684. case 4:
  685. retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
  686. break;
  687. default:
  688. BUG();
  689. break;
  690. }
  691. switch (dstwidth) {
  692. case 1:
  693. retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
  694. break;
  695. case 2:
  696. retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
  697. break;
  698. case 4:
  699. retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
  700. break;
  701. default:
  702. BUG();
  703. break;
  704. }
  705. tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK;
  706. retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
  707. return retbits;
  708. }
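/*
 * Worked example (editor's note): pl08x_cctl_bits(cctl, 4, 2, 64) clears any
 * previous width and size fields in cctl and encodes a 32-bit source width,
 * a 16-bit destination width and a transfer size of 64, i.e. 64 source-width
 * transfers = 256 bytes for that LLI.
 */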
  709. struct pl08x_lli_build_data {
  710. struct pl08x_txd *txd;
  711. struct pl08x_bus_data srcbus;
  712. struct pl08x_bus_data dstbus;
  713. size_t remainder;
  714. u32 lli_bus;
  715. };
  716. /*
  717. * Autoselect a master bus to use for the transfer. The slave bus is chosen
  718. * as the victim in case src & dst are not similarly aligned, i.e. if, after
  719. * aligning the master's address to the transfer width (by sending a few
  720. * bytes byte by byte), the slave is still not aligned, then its width will
  721. * be reduced to BYTE.
  722. * - prefers the destination bus if both available
  723. * - prefers bus with fixed address (i.e. peripheral)
  724. */
  725. static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
  726. struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
  727. {
  728. if (!(cctl & PL080_CONTROL_DST_INCR)) {
  729. *mbus = &bd->dstbus;
  730. *sbus = &bd->srcbus;
  731. } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
  732. *mbus = &bd->srcbus;
  733. *sbus = &bd->dstbus;
  734. } else {
  735. if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
  736. *mbus = &bd->dstbus;
  737. *sbus = &bd->srcbus;
  738. } else {
  739. *mbus = &bd->srcbus;
  740. *sbus = &bd->dstbus;
  741. }
  742. }
  743. }
  744. /*
  745. * Fills in one LLI for a certain transfer descriptor and advances the counter
  746. */
  747. static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
  748. struct pl08x_lli_build_data *bd,
  749. int num_llis, int len, u32 cctl, u32 cctl2)
  750. {
  751. u32 offset = num_llis * pl08x->lli_words;
  752. u32 *llis_va = bd->txd->llis_va + offset;
  753. dma_addr_t llis_bus = bd->txd->llis_bus;
  754. BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
  755. /* Advance the offset to next LLI. */
  756. offset += pl08x->lli_words;
  757. llis_va[PL080_LLI_SRC] = bd->srcbus.addr;
  758. llis_va[PL080_LLI_DST] = bd->dstbus.addr;
  759. llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset);
  760. llis_va[PL080_LLI_LLI] |= bd->lli_bus;
  761. llis_va[PL080_LLI_CCTL] = cctl;
  762. if (pl08x->vd->pl080s)
  763. llis_va[PL080S_LLI_CCTL2] = cctl2;
  764. if (cctl & PL080_CONTROL_SRC_INCR)
  765. bd->srcbus.addr += len;
  766. if (cctl & PL080_CONTROL_DST_INCR)
  767. bd->dstbus.addr += len;
  768. BUG_ON(bd->remainder < len);
  769. bd->remainder -= len;
  770. }
  771. static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x,
  772. struct pl08x_lli_build_data *bd, u32 *cctl, u32 len,
  773. int num_llis, size_t *total_bytes)
  774. {
  775. *cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
  776. pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len);
  777. (*total_bytes) += len;
  778. }
  779. #ifdef VERBOSE_DEBUG
  780. static void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
  781. const u32 *llis_va, int num_llis)
  782. {
  783. int i;
  784. if (pl08x->vd->pl080s) {
  785. dev_vdbg(&pl08x->adev->dev,
  786. "%-3s %-9s %-10s %-10s %-10s %-10s %s\n",
  787. "lli", "", "csrc", "cdst", "clli", "cctl", "cctl2");
  788. for (i = 0; i < num_llis; i++) {
  789. dev_vdbg(&pl08x->adev->dev,
  790. "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
  791. i, llis_va, llis_va[PL080_LLI_SRC],
  792. llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
  793. llis_va[PL080_LLI_CCTL],
  794. llis_va[PL080S_LLI_CCTL2]);
  795. llis_va += pl08x->lli_words;
  796. }
  797. } else {
  798. dev_vdbg(&pl08x->adev->dev,
  799. "%-3s %-9s %-10s %-10s %-10s %s\n",
  800. "lli", "", "csrc", "cdst", "clli", "cctl");
  801. for (i = 0; i < num_llis; i++) {
  802. dev_vdbg(&pl08x->adev->dev,
  803. "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
  804. i, llis_va, llis_va[PL080_LLI_SRC],
  805. llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
  806. llis_va[PL080_LLI_CCTL]);
  807. llis_va += pl08x->lli_words;
  808. }
  809. }
  810. }
  811. #else
  812. static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
  813. const u32 *llis_va, int num_llis) {}
  814. #endif
  815. /*
  816. * This fills in the table of LLIs for the transfer descriptor
  817. * Note that we assume we never have to change the burst sizes
  818. * Return 0 for error
  819. */
  820. static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
  821. struct pl08x_txd *txd)
  822. {
  823. struct pl08x_bus_data *mbus, *sbus;
  824. struct pl08x_lli_build_data bd;
  825. int num_llis = 0;
  826. u32 cctl, early_bytes = 0;
  827. size_t max_bytes_per_lli, total_bytes;
  828. u32 *llis_va, *last_lli;
  829. struct pl08x_sg *dsg;
  830. txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
  831. if (!txd->llis_va) {
  832. dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
  833. return 0;
  834. }
  835. bd.txd = txd;
  836. bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
  837. cctl = txd->cctl;
  838. /* Find maximum width of the source bus */
  839. bd.srcbus.maxwidth =
  840. pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
  841. PL080_CONTROL_SWIDTH_SHIFT);
  842. /* Find maximum width of the destination bus */
  843. bd.dstbus.maxwidth =
  844. pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
  845. PL080_CONTROL_DWIDTH_SHIFT);
  846. list_for_each_entry(dsg, &txd->dsg_list, node) {
  847. total_bytes = 0;
  848. cctl = txd->cctl;
  849. bd.srcbus.addr = dsg->src_addr;
  850. bd.dstbus.addr = dsg->dst_addr;
  851. bd.remainder = dsg->len;
  852. bd.srcbus.buswidth = bd.srcbus.maxwidth;
  853. bd.dstbus.buswidth = bd.dstbus.maxwidth;
  854. pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
  855. dev_vdbg(&pl08x->adev->dev,
  856. "src=0x%08llx%s/%u dst=0x%08llx%s/%u len=%zu\n",
  857. (u64)bd.srcbus.addr,
  858. cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
  859. bd.srcbus.buswidth,
  860. (u64)bd.dstbus.addr,
  861. cctl & PL080_CONTROL_DST_INCR ? "+" : "",
  862. bd.dstbus.buswidth,
  863. bd.remainder);
  864. dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
  865. mbus == &bd.srcbus ? "src" : "dst",
  866. sbus == &bd.srcbus ? "src" : "dst");
  867. /*
  868. * Zero length is only allowed if all these requirements are
  869. * met:
  870. * - flow controller is peripheral.
  871. * - src.addr is aligned to src.width
  872. * - dst.addr is aligned to dst.width
  873. *
  874. * sg_len == 1 should be true, as there can be two cases here:
  875. *
  876. * - Memory addresses are contiguous and are not scattered.
  877. * Here, Only one sg will be passed by user driver, with
  878. * memory address and zero length. We pass this to controller
  879. * and after the transfer it will receive the last burst
  880. * request from peripheral and so transfer finishes.
  881. *
  882. * - Memory addresses are scattered and are not contiguous.
  883. * Here, Obviously as DMA controller doesn't know when a lli's
  884. * transfer gets over, it can't load next lli. So in this
  885. * case, there has to be an assumption that only one lli is
  886. * supported. Thus, we can't have scattered addresses.
  887. */
  888. if (!bd.remainder) {
  889. u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
  890. PL080_CONFIG_FLOW_CONTROL_SHIFT;
  891. if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
  892. (fc <= PL080_FLOW_SRC2DST_SRC))) {
  893. dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
  894. __func__);
  895. return 0;
  896. }
  897. if (!IS_BUS_ALIGNED(&bd.srcbus) ||
  898. !IS_BUS_ALIGNED(&bd.dstbus)) {
  899. dev_err(&pl08x->adev->dev,
  900. "%s src & dst address must be aligned to src"
  901. " & dst width if peripheral is flow controller",
  902. __func__);
  903. return 0;
  904. }
  905. cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
  906. bd.dstbus.buswidth, 0);
  907. pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
  908. 0, cctl, 0);
  909. break;
  910. }
  911. /*
  912. * Send byte by byte for following cases
  913. * - Less than a bus width available
  914. * - until master bus is aligned
  915. */
  916. if (bd.remainder < mbus->buswidth)
  917. early_bytes = bd.remainder;
  918. else if (!IS_BUS_ALIGNED(mbus)) {
  919. early_bytes = mbus->buswidth -
  920. (mbus->addr & (mbus->buswidth - 1));
  921. if ((bd.remainder - early_bytes) < mbus->buswidth)
  922. early_bytes = bd.remainder;
  923. }
  924. if (early_bytes) {
  925. dev_vdbg(&pl08x->adev->dev,
  926. "%s byte width LLIs (remain 0x%08zx)\n",
  927. __func__, bd.remainder);
  928. prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes,
  929. num_llis++, &total_bytes);
  930. }
  931. if (bd.remainder) {
  932. /*
  933. * Master now aligned
  934. * - if slave is not then we must set its width down
  935. */
  936. if (!IS_BUS_ALIGNED(sbus)) {
  937. dev_dbg(&pl08x->adev->dev,
  938. "%s set down bus width to one byte\n",
  939. __func__);
  940. sbus->buswidth = 1;
  941. }
  942. /*
  943. * Bytes transferred = tsize * src width, not
  944. * MIN(buswidths)
  945. */
  946. max_bytes_per_lli = bd.srcbus.buswidth *
  947. pl08x->vd->max_transfer_size;
  948. dev_vdbg(&pl08x->adev->dev,
  949. "%s max bytes per lli = %zu\n",
  950. __func__, max_bytes_per_lli);
  951. /*
  952. * Make largest possible LLIs until less than one bus
  953. * width left
  954. */
  955. while (bd.remainder > (mbus->buswidth - 1)) {
  956. size_t lli_len, tsize, width;
  957. /*
  958. * If enough left try to send max possible,
  959. * otherwise try to send the remainder
  960. */
  961. lli_len = min(bd.remainder, max_bytes_per_lli);
  962. /*
  963. * Check against maximum bus alignment:
  964. * Calculate actual transfer size in relation to
  965. * bus width an get a maximum remainder of the
  966. * highest bus width - 1
  967. */
  968. width = max(mbus->buswidth, sbus->buswidth);
  969. lli_len = (lli_len / width) * width;
  970. tsize = lli_len / bd.srcbus.buswidth;
  971. dev_vdbg(&pl08x->adev->dev,
  972. "%s fill lli with single lli chunk of "
  973. "size 0x%08zx (remainder 0x%08zx)\n",
  974. __func__, lli_len, bd.remainder);
  975. cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
  976. bd.dstbus.buswidth, tsize);
  977. pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
  978. lli_len, cctl, tsize);
  979. total_bytes += lli_len;
  980. }
  981. /*
  982. * Send any odd bytes
  983. */
  984. if (bd.remainder) {
  985. dev_vdbg(&pl08x->adev->dev,
  986. "%s align with boundary, send odd bytes (remain %zu)\n",
  987. __func__, bd.remainder);
  988. prep_byte_width_lli(pl08x, &bd, &cctl,
  989. bd.remainder, num_llis++, &total_bytes);
  990. }
  991. }
  992. if (total_bytes != dsg->len) {
  993. dev_err(&pl08x->adev->dev,
  994. "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
  995. __func__, total_bytes, dsg->len);
  996. return 0;
  997. }
  998. if (num_llis >= MAX_NUM_TSFR_LLIS) {
  999. dev_err(&pl08x->adev->dev,
  1000. "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
  1001. __func__, MAX_NUM_TSFR_LLIS);
  1002. return 0;
  1003. }
  1004. }
  1005. llis_va = txd->llis_va;
  1006. last_lli = llis_va + (num_llis - 1) * pl08x->lli_words;
  1007. if (txd->cyclic) {
  1008. /* Link back to the first LLI. */
  1009. last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus;
  1010. } else {
  1011. /* The final LLI terminates the LLI list. */
  1012. last_lli[PL080_LLI_LLI] = 0;
  1013. /* The final LLI element shall also fire an interrupt. */
  1014. last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN;
  1015. }
  1016. pl08x_dump_lli(pl08x, llis_va, num_llis);
  1017. return num_llis;
  1018. }
  1019. static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
  1020. struct pl08x_txd *txd)
  1021. {
  1022. struct pl08x_sg *dsg, *_dsg;
  1023. if (txd->llis_va)
  1024. dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
  1025. list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
  1026. list_del(&dsg->node);
  1027. kfree(dsg);
  1028. }
  1029. kfree(txd);
  1030. }
  1031. static void pl08x_desc_free(struct virt_dma_desc *vd)
  1032. {
  1033. struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
  1034. struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
  1035. dma_descriptor_unmap(&vd->tx);
  1036. if (!txd->done)
  1037. pl08x_release_mux(plchan);
  1038. pl08x_free_txd(plchan->host, txd);
  1039. }
  1040. static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
  1041. struct pl08x_dma_chan *plchan)
  1042. {
  1043. LIST_HEAD(head);
  1044. vchan_get_all_descriptors(&plchan->vc, &head);
  1045. vchan_dma_desc_free_list(&plchan->vc, &head);
  1046. }
  1047. /*
  1048. * The DMA ENGINE API
  1049. */
  1050. static void pl08x_free_chan_resources(struct dma_chan *chan)
  1051. {
  1052. /* Ensure all queued descriptors are freed */
  1053. vchan_free_chan_resources(to_virt_chan(chan));
  1054. }
  1055. static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
  1056. struct dma_chan *chan, unsigned long flags)
  1057. {
  1058. struct dma_async_tx_descriptor *retval = NULL;
  1059. return retval;
  1060. }
  1061. /*
  1062. * Code accessing dma_async_is_complete() in a tight loop may give problems.
  1063. * If slaves are relying on interrupts to signal completion this function
  1064. * must not be called with interrupts disabled.
  1065. */
  1066. static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
  1067. dma_cookie_t cookie, struct dma_tx_state *txstate)
  1068. {
  1069. struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
  1070. struct virt_dma_desc *vd;
  1071. unsigned long flags;
  1072. enum dma_status ret;
  1073. size_t bytes = 0;
  1074. ret = dma_cookie_status(chan, cookie, txstate);
  1075. if (ret == DMA_COMPLETE)
  1076. return ret;
  1077. /*
  1078. * There's no point calculating the residue if there's
  1079. * no txstate to store the value.
  1080. */
  1081. if (!txstate) {
  1082. if (plchan->state == PL08X_CHAN_PAUSED)
  1083. ret = DMA_PAUSED;
  1084. return ret;
  1085. }
  1086. spin_lock_irqsave(&plchan->vc.lock, flags);
  1087. ret = dma_cookie_status(chan, cookie, txstate);
  1088. if (ret != DMA_COMPLETE) {
  1089. vd = vchan_find_desc(&plchan->vc, cookie);
  1090. if (vd) {
  1091. /* On the issued list, so hasn't been processed yet */
  1092. struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
  1093. struct pl08x_sg *dsg;
  1094. list_for_each_entry(dsg, &txd->dsg_list, node)
  1095. bytes += dsg->len;
  1096. } else {
  1097. bytes = pl08x_getbytes_chan(plchan);
  1098. }
  1099. }
  1100. spin_unlock_irqrestore(&plchan->vc.lock, flags);
  1101. /*
  1102. * This cookie is not complete yet
  1103. * Get number of bytes left in the active transactions and queue
  1104. */
  1105. dma_set_residue(txstate, bytes);
  1106. if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS)
  1107. ret = DMA_PAUSED;
  1108. /* Whether waiting or running, we're in progress */
  1109. return ret;
  1110. }
  1111. /* PrimeCell DMA extension */
  1112. struct burst_table {
  1113. u32 burstwords;
  1114. u32 reg;
  1115. };
  1116. static const struct burst_table burst_sizes[] = {
  1117. {
  1118. .burstwords = 256,
  1119. .reg = PL080_BSIZE_256,
  1120. },
  1121. {
  1122. .burstwords = 128,
  1123. .reg = PL080_BSIZE_128,
  1124. },
  1125. {
  1126. .burstwords = 64,
  1127. .reg = PL080_BSIZE_64,
  1128. },
  1129. {
  1130. .burstwords = 32,
  1131. .reg = PL080_BSIZE_32,
  1132. },
  1133. {
  1134. .burstwords = 16,
  1135. .reg = PL080_BSIZE_16,
  1136. },
  1137. {
  1138. .burstwords = 8,
  1139. .reg = PL080_BSIZE_8,
  1140. },
  1141. {
  1142. .burstwords = 4,
  1143. .reg = PL080_BSIZE_4,
  1144. },
  1145. {
  1146. .burstwords = 0,
  1147. .reg = PL080_BSIZE_1,
  1148. },
  1149. };
  1150. /*
  1151. * Given the source and destination available bus masks, select which
  1152. * will be routed to each port. We try to have source and destination
  1153. * on separate ports, but always respect the allowable settings.
  1154. */
  1155. static u32 pl08x_select_bus(u8 src, u8 dst)
  1156. {
  1157. u32 cctl = 0;
  1158. if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
  1159. cctl |= PL080_CONTROL_DST_AHB2;
  1160. if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
  1161. cctl |= PL080_CONTROL_SRC_AHB2;
  1162. return cctl;
  1163. }
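/*
 * Worked example (editor's note): with src = PL08X_AHB1 | PL08X_AHB2 and
 * dst = PL08X_AHB2 only, the destination is routed to AHB2 (it cannot use
 * AHB1) while the source stays on AHB1, so the two ends land on separate
 * masters as intended.
 */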
  1164. static u32 pl08x_cctl(u32 cctl)
  1165. {
  1166. cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
  1167. PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
  1168. PL080_CONTROL_PROT_MASK);
  1169. /* Access the cell in privileged mode, non-bufferable, non-cacheable */
  1170. return cctl | PL080_CONTROL_PROT_SYS;
  1171. }
  1172. static u32 pl08x_width(enum dma_slave_buswidth width)
  1173. {
  1174. switch (width) {
  1175. case DMA_SLAVE_BUSWIDTH_1_BYTE:
  1176. return PL080_WIDTH_8BIT;
  1177. case DMA_SLAVE_BUSWIDTH_2_BYTES:
  1178. return PL080_WIDTH_16BIT;
  1179. case DMA_SLAVE_BUSWIDTH_4_BYTES:
  1180. return PL080_WIDTH_32BIT;
  1181. default:
  1182. return ~0;
  1183. }
  1184. }
  1185. static u32 pl08x_burst(u32 maxburst)
  1186. {
  1187. int i;
  1188. for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
  1189. if (burst_sizes[i].burstwords <= maxburst)
  1190. break;
  1191. return burst_sizes[i].reg;
  1192. }
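/*
 * Worked example (editor's note): maxburst = 20 walks the descending table
 * until 16 <= 20 matches, so the hardware burst is PL080_BSIZE_16; a
 * maxburst of 0 falls through to the final entry and selects PL080_BSIZE_1.
 */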
  1193. static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
  1194. enum dma_slave_buswidth addr_width, u32 maxburst)
  1195. {
  1196. u32 width, burst, cctl = 0;
  1197. width = pl08x_width(addr_width);
  1198. if (width == ~0)
  1199. return ~0;
  1200. cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
  1201. cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
  1202. /*
  1203. * If this channel will only request single transfers, set this
  1204. * down to ONE element. Also select one element if no maxburst
  1205. * is specified.
  1206. */
  1207. if (plchan->cd->single)
  1208. maxburst = 1;
  1209. burst = pl08x_burst(maxburst);
  1210. cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
  1211. cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
  1212. return pl08x_cctl(cctl);
  1213. }
  1214. /*
  1215. * Slave transactions callback to the slave device to allow
  1216. * synchronization of slave DMA signals with the DMAC enable
  1217. */
  1218. static void pl08x_issue_pending(struct dma_chan *chan)
  1219. {
  1220. struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
  1221. unsigned long flags;
  1222. spin_lock_irqsave(&plchan->vc.lock, flags);
  1223. if (vchan_issue_pending(&plchan->vc)) {
  1224. if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
  1225. pl08x_phy_alloc_and_start(plchan);
  1226. }
  1227. spin_unlock_irqrestore(&plchan->vc.lock, flags);
  1228. }
  1229. static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
  1230. {
  1231. struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
  1232. if (txd) {
  1233. INIT_LIST_HEAD(&txd->dsg_list);
  1234. /* Always enable error and terminal interrupts */
  1235. txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
  1236. PL080_CONFIG_TC_IRQ_MASK;
  1237. }
  1238. return txd;
  1239. }
  1240. /*
  1241. * Initialize a descriptor to be used by memcpy submit
  1242. */
  1243. static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
  1244. struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
  1245. size_t len, unsigned long flags)
  1246. {
  1247. struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
  1248. struct pl08x_driver_data *pl08x = plchan->host;
  1249. struct pl08x_txd *txd;
  1250. struct pl08x_sg *dsg;
  1251. int ret;
  1252. txd = pl08x_get_txd(plchan);
  1253. if (!txd) {
  1254. dev_err(&pl08x->adev->dev,
  1255. "%s no memory for descriptor\n", __func__);
  1256. return NULL;
  1257. }
  1258. dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
  1259. if (!dsg) {
  1260. pl08x_free_txd(pl08x, txd);
  1261. return NULL;
  1262. }
  1263. list_add_tail(&dsg->node, &txd->dsg_list);
  1264. dsg->src_addr = src;
  1265. dsg->dst_addr = dest;
  1266. dsg->len = len;
  1267. /* Set platform data for m2m */
  1268. txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
  1269. txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy &
  1270. ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
  1271. /* Both to be incremented or the code will break */
  1272. txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
  1273. if (pl08x->vd->dualmaster)
  1274. txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
  1275. pl08x->mem_buses);
  1276. ret = pl08x_fill_llis_for_desc(plchan->host, txd);
  1277. if (!ret) {
  1278. pl08x_free_txd(pl08x, txd);
  1279. return NULL;
  1280. }
  1281. return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
  1282. }
static struct pl08x_txd *pl08x_init_txd(
		struct dma_chan *chan,
		enum dma_transfer_direction direction,
		dma_addr_t *slave_addr)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	enum dma_slave_buswidth addr_width;
	int ret, tmp;
	u8 src_buses, dst_buses;
	u32 maxburst, cctl;

	txd = pl08x_get_txd(plchan);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will take precedence since this may configure the
	 * channel target address dynamically at runtime.
	 */
	if (direction == DMA_MEM_TO_DEV) {
		cctl = PL080_CONTROL_SRC_INCR;
		*slave_addr = plchan->cfg.dst_addr;
		addr_width = plchan->cfg.dst_addr_width;
		maxburst = plchan->cfg.dst_maxburst;
		src_buses = pl08x->mem_buses;
		dst_buses = plchan->cd->periph_buses;
	} else if (direction == DMA_DEV_TO_MEM) {
		cctl = PL080_CONTROL_DST_INCR;
		*slave_addr = plchan->cfg.src_addr;
		addr_width = plchan->cfg.src_addr_width;
		maxburst = plchan->cfg.src_maxburst;
		src_buses = plchan->cd->periph_buses;
		dst_buses = pl08x->mem_buses;
	} else {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"%s direction unsupported\n", __func__);
		return NULL;
	}

	cctl |= pl08x_get_cctl(plchan, addr_width, maxburst);
	if (cctl == ~0) {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"DMA slave configuration botched?\n");
		return NULL;
	}

	txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses);

	if (plchan->cfg.device_fc)
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
			PL080_FLOW_PER2MEM_PER;
	else
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
			PL080_FLOW_PER2MEM;

	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;

	ret = pl08x_request_mux(plchan);
	if (ret < 0) {
		pl08x_free_txd(pl08x, txd);
		dev_dbg(&pl08x->adev->dev,
			"unable to mux for transfer on %s due to platform restrictions\n",
			plchan->name);
		return NULL;
	}

	dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n",
		 plchan->signal, plchan->name);

	/* Assign the flow control signal to this channel */
	if (direction == DMA_MEM_TO_DEV)
		txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
	else
		txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;

	return txd;
}
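/*
 * Append one scatter/gather element to the descriptor, mapping buffer and
 * slave addresses according to the transfer direction.
 */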
static int pl08x_tx_add_sg(struct pl08x_txd *txd,
			   enum dma_transfer_direction direction,
			   dma_addr_t slave_addr,
			   dma_addr_t buf_addr,
			   unsigned int len)
{
	struct pl08x_sg *dsg;

	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
	if (!dsg)
		return -ENOMEM;
	list_add_tail(&dsg->node, &txd->dsg_list);

	dsg->len = len;
	if (direction == DMA_MEM_TO_DEV) {
		dsg->src_addr = buf_addr;
		dsg->dst_addr = slave_addr;
	} else {
		dsg->src_addr = slave_addr;
		dsg->dst_addr = buf_addr;
	}

	return 0;
}
static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct scatterlist *sg;
	int ret, tmp;
	dma_addr_t slave_addr;

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
			__func__, sg_dma_len(sgl), plchan->name);

	txd = pl08x_init_txd(chan, direction, &slave_addr);
	if (!txd)
		return NULL;

	for_each_sg(sgl, sg, sg_len, tmp) {
		ret = pl08x_tx_add_sg(txd, direction, slave_addr,
				      sg_dma_address(sg),
				      sg_dma_len(sg));
		if (ret) {
			pl08x_release_mux(plchan);
			pl08x_free_txd(pl08x, txd);
			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
					__func__);
			return NULL;
		}
	}

	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_release_mux(plchan);
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
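/*
 * Prepare a cyclic (circular) transfer: the buffer is split into
 * period_len sized chunks and the descriptor is marked cyclic so the LLI
 * chain keeps looping, raising a terminal-count callback once per period.
 */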
static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	int ret, tmp;
	dma_addr_t slave_addr;

	dev_dbg(&pl08x->adev->dev,
		"%s prepare cyclic transaction of %zd/%zd bytes %s %s\n",
		__func__, period_len, buf_len,
		direction == DMA_MEM_TO_DEV ? "to" : "from",
		plchan->name);

	txd = pl08x_init_txd(chan, direction, &slave_addr);
	if (!txd)
		return NULL;

	txd->cyclic = true;
	txd->cctl |= PL080_CONTROL_TC_IRQ_EN;
	for (tmp = 0; tmp < buf_len; tmp += period_len) {
		ret = pl08x_tx_add_sg(txd, direction, slave_addr,
				      buf_addr + tmp, period_len);
		if (ret) {
			pl08x_release_mux(plchan);
			pl08x_free_txd(pl08x, txd);
			return NULL;
		}
	}

	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_release_mux(plchan);
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
static int pl08x_config(struct dma_chan *chan,
			struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;

	if (!plchan->slave)
		return -EINVAL;

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	if (config->device_fc && pl08x->vd->pl080s) {
		dev_err(&pl08x->adev->dev,
			"%s: PL080S does not support peripheral flow control\n",
			__func__);
		return -EINVAL;
	}

	plchan->cfg = *config;

	return 0;
}
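/*
 * Illustrative only (not part of this driver): a client driver would
 * typically fill in a struct dma_slave_config and hand it down through
 * dmaengine_slave_config() before preparing transfers, e.g.
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys,	(hypothetical device FIFO address)
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 4,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */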
static int pl08x_terminate_all(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;

	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->vc.lock, flags);
		return 0;
	}

	plchan->state = PL08X_CHAN_IDLE;

	if (plchan->phychan) {
		/*
		 * Mark physical channel as free and free any slave
		 * signal
		 */
		pl08x_phy_free(plchan);
	}
	/* Dequeue jobs and free LLIs */
	if (plchan->at) {
		pl08x_desc_free(&plchan->at->vd);
		plchan->at = NULL;
	}
	/* Dequeue jobs not yet fired as well */
	pl08x_free_txd_list(pl08x, plchan);

	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	return 0;
}
static int pl08x_pause(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->vc.lock, flags);
		return 0;
	}

	pl08x_pause_phy_chan(plchan->phychan);
	plchan->state = PL08X_CHAN_PAUSED;

	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	return 0;
}
static int pl08x_resume(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->vc.lock, flags);
		return 0;
	}

	pl08x_resume_phy_chan(plchan->phychan);
	plchan->state = PL08X_CHAN_RUNNING;

	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	return 0;
}
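/*
 * dmaengine filter function: match a PL08x virtual channel by its name
 * (the platform data bus_id), for use with dma_request_channel().
 */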
bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan;
	char *name = chan_id;

	/* Reject channels for devices not bound to this driver */
	if (chan->device->dev->driver != &pl08x_amba_driver.drv)
		return false;

	plchan = to_pl08x_chan(chan);

	/* Check that the channel is not taken! */
	if (!strcmp(plchan->name, name))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(pl08x_filter_id);
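/*
 * Illustrative use from a client (the channel name "uart0_tx" is purely
 * hypothetical):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
 */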
/*
 * Just check that the device is there and active
 * TODO: turn this bit on/off depending on the number of physical channels
 * actually used, if it is zero... well shut it off. That will save some
 * power. Cut the clock at the same time.
 */
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
	/* The Nomadik variant does not have the config register */
	if (pl08x->vd->nomadik)
		return;
	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
}
static irqreturn_t pl08x_irq(int irq, void *dev)
{
	struct pl08x_driver_data *pl08x = dev;
	u32 mask = 0, err, tc, i;

	/* check & clear - ERR & TC interrupts */
	err = readl(pl08x->base + PL080_ERR_STATUS);
	if (err) {
		dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
			__func__, err);
		writel(err, pl08x->base + PL080_ERR_CLEAR);
	}
	tc = readl(pl08x->base + PL080_TC_STATUS);
	if (tc)
		writel(tc, pl08x->base + PL080_TC_CLEAR);

	if (!err && !tc)
		return IRQ_NONE;

	for (i = 0; i < pl08x->vd->channels; i++) {
		if (((1 << i) & err) || ((1 << i) & tc)) {
			/* Locate physical channel */
			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
			struct pl08x_dma_chan *plchan = phychan->serving;
			struct pl08x_txd *tx;

			if (!plchan) {
				dev_err(&pl08x->adev->dev,
					"%s Error TC interrupt on unused channel: 0x%08x\n",
					__func__, i);
				continue;
			}

			spin_lock(&plchan->vc.lock);
			tx = plchan->at;
			if (tx && tx->cyclic) {
				vchan_cyclic_callback(&tx->vd);
			} else if (tx) {
				plchan->at = NULL;
				/*
				 * This descriptor is done, release its mux
				 * reservation.
				 */
				pl08x_release_mux(plchan);
				tx->done = true;
				vchan_cookie_complete(&tx->vd);

				/*
				 * And start the next descriptor (if any),
				 * otherwise free this channel.
				 */
				if (vchan_next_desc(&plchan->vc))
					pl08x_start_next_txd(plchan);
				else
					pl08x_phy_free(plchan);
			}
			spin_unlock(&plchan->vc.lock);

			mask |= (1 << i);
		}
	}

	return mask ? IRQ_HANDLED : IRQ_NONE;
}
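/* Seed a slave channel's name and addresses from its platform channel data */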
static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
{
	chan->slave = true;
	chan->name = chan->cd->bus_id;
	chan->cfg.src_addr = chan->cd->addr;
	chan->cfg.dst_addr = chan->cd->addr;
}
/*
 * Initialise the DMAC memcpy/slave channels.
 * Make a local wrapper to hold required data
 */
static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
		struct dma_device *dmadev, unsigned int channels, bool slave)
{
	struct pl08x_dma_chan *chan;
	int i;

	INIT_LIST_HEAD(&dmadev->channels);

	/*
	 * Register as many memcpy channels as we have physical channels,
	 * we won't always be able to use all but the code will have
	 * to cope with that situation.
	 */
	for (i = 0; i < channels; i++) {
		chan = kzalloc(sizeof(*chan), GFP_KERNEL);
		if (!chan)
			return -ENOMEM;

		chan->host = pl08x;
		chan->state = PL08X_CHAN_IDLE;
		chan->signal = -1;

		if (slave) {
			chan->cd = &pl08x->pd->slave_channels[i];
			/*
			 * Some implementations have muxed signals, whereas some
			 * use a mux in front of the signals and need dynamic
			 * assignment of signals.
			 */
			chan->signal = i;
			pl08x_dma_slave_init(chan);
		} else {
			chan->cd = &pl08x->pd->memcpy_channel;
			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
			if (!chan->name) {
				kfree(chan);
				return -ENOMEM;
			}
		}
		dev_dbg(&pl08x->adev->dev,
			 "initialize virtual channel \"%s\"\n",
			 chan->name);

		chan->vc.desc_free = pl08x_desc_free;
		vchan_init(&chan->vc, dmadev);
	}
	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
		 i, slave ? "slave" : "memcpy");
	return i;
}
static void pl08x_free_virtual_channels(struct dma_device *dmadev)
{
	struct pl08x_dma_chan *chan = NULL;
	struct pl08x_dma_chan *next;

	list_for_each_entry_safe(chan,
				 next, &dmadev->channels, vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		kfree(chan);
	}
}
#ifdef CONFIG_DEBUG_FS
static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
{
	switch (state) {
	case PL08X_CHAN_IDLE:
		return "idle";
	case PL08X_CHAN_RUNNING:
		return "running";
	case PL08X_CHAN_PAUSED:
		return "paused";
	case PL08X_CHAN_WAITING:
		return "waiting";
	default:
		break;
	}
	return "UNKNOWN STATE";
}
static int pl08x_debugfs_show(struct seq_file *s, void *data)
{
	struct pl08x_driver_data *pl08x = s->private;
	struct pl08x_dma_chan *chan;
	struct pl08x_phy_chan *ch;
	unsigned long flags;
	int i;

	seq_printf(s, "PL08x physical channels:\n");
	seq_printf(s, "CHANNEL:\tUSER:\n");
	seq_printf(s, "--------\t-----\n");
	for (i = 0; i < pl08x->vd->channels; i++) {
		struct pl08x_dma_chan *virt_chan;

		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);
		virt_chan = ch->serving;

		seq_printf(s, "%d\t\t%s%s\n",
			   ch->id,
			   virt_chan ? virt_chan->name : "(none)",
			   ch->locked ? " LOCKED" : "");

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	seq_printf(s, "\nPL08x virtual memcpy channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	seq_printf(s, "\nPL08x virtual slave channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	return 0;
}
static int pl08x_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pl08x_debugfs_show, inode->i_private);
}

static const struct file_operations pl08x_debugfs_operations = {
	.open = pl08x_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
	/* Expose a simple debugfs interface to view all clocks */
	(void) debugfs_create_file(dev_name(&pl08x->adev->dev),
			S_IFREG | S_IRUGO, NULL, pl08x,
			&pl08x_debugfs_operations);
}

#else
static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
}
#endif
#ifdef CONFIG_OF
static struct dma_chan *pl08x_find_chan_id(struct pl08x_driver_data *pl08x,
					 u32 id)
{
	struct pl08x_dma_chan *chan;

	list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
		if (chan->signal == id)
			return &chan->vc.chan;
	}

	return NULL;
}
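/*
 * Translate a two-cell devicetree DMA specifier into a channel: cell 0 is
 * the request signal number, cell 1 is the AHB master mask applied to the
 * peripheral side of the channel. An illustrative (hypothetical) consumer
 * entry:
 *
 *	dmas = <&dmac 13 1>;	(signal 13, peripheral on AHB master 1)
 *	dma-names = "tx";
 */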
static struct dma_chan *pl08x_of_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *ofdma)
{
	struct pl08x_driver_data *pl08x = ofdma->of_dma_data;
	struct dma_chan *dma_chan;
	struct pl08x_dma_chan *plchan;

	if (!pl08x)
		return NULL;

	if (dma_spec->args_count != 2) {
		dev_err(&pl08x->adev->dev,
			"DMA channel translation requires two cells\n");
		return NULL;
	}

	dma_chan = pl08x_find_chan_id(pl08x, dma_spec->args[0]);
	if (!dma_chan) {
		dev_err(&pl08x->adev->dev,
			"DMA slave channel not found\n");
		return NULL;
	}

	plchan = to_pl08x_chan(dma_chan);
	dev_dbg(&pl08x->adev->dev,
		"translated channel for signal %d\n",
		dma_spec->args[0]);

	/* Augment channel data for applicable AHB buses */
	plchan->cd->periph_buses = dma_spec->args[1];
	return dma_get_slave_channel(dma_chan);
}
static int pl08x_of_probe(struct amba_device *adev,
			  struct pl08x_driver_data *pl08x,
			  struct device_node *np)
{
	struct pl08x_platform_data *pd;
	struct pl08x_channel_data *chanp = NULL;
	u32 cctl_memcpy = 0;
	u32 val;
	int ret;
	int i;

	pd = devm_kzalloc(&adev->dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	/* Eligible bus masters for fetching LLIs */
	if (of_property_read_bool(np, "lli-bus-interface-ahb1"))
		pd->lli_buses |= PL08X_AHB1;
	if (of_property_read_bool(np, "lli-bus-interface-ahb2"))
		pd->lli_buses |= PL08X_AHB2;
	if (!pd->lli_buses) {
		dev_info(&adev->dev, "no bus masters for LLIs stated, assume all\n");
		pd->lli_buses |= PL08X_AHB1 | PL08X_AHB2;
	}

	/* Eligible bus masters for memory access */
	if (of_property_read_bool(np, "mem-bus-interface-ahb1"))
		pd->mem_buses |= PL08X_AHB1;
	if (of_property_read_bool(np, "mem-bus-interface-ahb2"))
		pd->mem_buses |= PL08X_AHB2;
	if (!pd->mem_buses) {
		dev_info(&adev->dev, "no bus masters for memory stated, assume all\n");
		pd->mem_buses |= PL08X_AHB1 | PL08X_AHB2;
	}

	/* Parse the memcpy channel properties */
	ret = of_property_read_u32(np, "memcpy-burst-size", &val);
	if (ret) {
		dev_info(&adev->dev, "no memcpy burst size specified, using 1 byte\n");
		val = 1;
	}
	switch (val) {
	default:
		dev_err(&adev->dev, "illegal burst size for memcpy, set to 1\n");
		/* Fall through */
	case 1:
		cctl_memcpy |= PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 4:
		cctl_memcpy |= PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 8:
		cctl_memcpy |= PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 16:
		cctl_memcpy |= PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 32:
		cctl_memcpy |= PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 64:
		cctl_memcpy |= PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 128:
		cctl_memcpy |= PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 256:
		cctl_memcpy |= PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	}

	ret = of_property_read_u32(np, "memcpy-bus-width", &val);
	if (ret) {
		dev_info(&adev->dev, "no memcpy bus width specified, using 8 bits\n");
		val = 8;
	}
	switch (val) {
	default:
		dev_err(&adev->dev, "illegal bus width for memcpy, set to 8 bits\n");
		/* Fall through */
	case 8:
		cctl_memcpy |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT |
			       PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 16:
		cctl_memcpy |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT |
			       PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 32:
		cctl_memcpy |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT |
			       PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	}

	/* This is currently the only thing making sense */
	cctl_memcpy |= PL080_CONTROL_PROT_SYS;

	/* Set up memcpy channel */
	pd->memcpy_channel.bus_id = "memcpy";
	pd->memcpy_channel.cctl_memcpy = cctl_memcpy;
	/* Use the buses that can access memory, obviously */
	pd->memcpy_channel.periph_buses = pd->mem_buses;

	/*
	 * Allocate channel data for all possible slave channels (one
	 * for each possible signal), channels will then be allocated
	 * for a device and have its AHB interfaces set up at
	 * translation time.
	 */
	chanp = devm_kcalloc(&adev->dev,
			pl08x->vd->signals,
			sizeof(struct pl08x_channel_data),
			GFP_KERNEL);
	if (!chanp)
		return -ENOMEM;

	pd->slave_channels = chanp;
	for (i = 0; i < pl08x->vd->signals; i++) {
		/* chanp->periph_buses will be assigned at translation */
		chanp->bus_id = kasprintf(GFP_KERNEL, "slave%d", i);
		chanp++;
	}
	pd->num_slave_channels = pl08x->vd->signals;

	pl08x->pd = pd;

	return of_dma_controller_register(adev->dev.of_node, pl08x_of_xlate,
					  pl08x);
}
#else
static inline int pl08x_of_probe(struct amba_device *adev,
				 struct pl08x_driver_data *pl08x,
				 struct device_node *np)
{
	return -EINVAL;
}
#endif
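/*
 * Probe: claim the AMBA resources, set up the memcpy and slave dmaengine
 * devices, initialise the physical and virtual channels, and register both
 * DMA devices with the dmaengine core.
 */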
static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct pl08x_driver_data *pl08x;
	const struct vendor_data *vd = id->data;
	struct device_node *np = adev->dev.of_node;
	u32 tsfr_size;
	int ret = 0;
	int i;

	ret = amba_request_regions(adev, NULL);
	if (ret)
		return ret;

	/* Ensure that we can do DMA */
	ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto out_no_pl08x;

	/* Create the driver state holder */
	pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
	if (!pl08x) {
		ret = -ENOMEM;
		goto out_no_pl08x;
	}

	/* Assign useful pointers to the driver state */
	pl08x->adev = adev;
	pl08x->vd = vd;

	/* Initialize memcpy engine */
	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
	pl08x->memcpy.dev = &adev->dev;
	pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
	pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
	pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
	pl08x->memcpy.device_config = pl08x_config;
	pl08x->memcpy.device_pause = pl08x_pause;
	pl08x->memcpy.device_resume = pl08x_resume;
	pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
	pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS;
	pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
	pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
	pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

	/* Initialize slave engine */
	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
	pl08x->slave.dev = &adev->dev;
	pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->slave.device_tx_status = pl08x_dma_tx_status;
	pl08x->slave.device_issue_pending = pl08x_issue_pending;
	pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
	pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
	pl08x->slave.device_config = pl08x_config;
	pl08x->slave.device_pause = pl08x_pause;
	pl08x->slave.device_resume = pl08x_resume;
	pl08x->slave.device_terminate_all = pl08x_terminate_all;
	pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
	pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
	pl08x->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	pl08x->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

	/* Get the platform data */
	pl08x->pd = dev_get_platdata(&adev->dev);
	if (!pl08x->pd) {
		if (np) {
			ret = pl08x_of_probe(adev, pl08x, np);
			if (ret)
				goto out_no_platdata;
		} else {
			dev_err(&adev->dev, "no platform data supplied\n");
			ret = -EINVAL;
			goto out_no_platdata;
		}
	}
	/* By default, AHB1 only. If dualmaster, from platform */
	pl08x->lli_buses = PL08X_AHB1;
	pl08x->mem_buses = PL08X_AHB1;
	if (pl08x->vd->dualmaster) {
		pl08x->lli_buses = pl08x->pd->lli_buses;
		pl08x->mem_buses = pl08x->pd->mem_buses;
	}

	if (vd->pl080s)
		pl08x->lli_words = PL080S_LLI_WORDS;
	else
		pl08x->lli_words = PL080_LLI_WORDS;
	tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32);

	/* A DMA memory pool for LLIs, align on 1-byte boundary */
	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
						tsfr_size, PL08X_ALIGN, 0);
	if (!pl08x->pool) {
		ret = -ENOMEM;
		goto out_no_lli_pool;
	}

	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
	if (!pl08x->base) {
		ret = -ENOMEM;
		goto out_no_ioremap;
	}

	/* Turn on the PL08x */
	pl08x_ensure_on(pl08x);

	/* Attach the interrupt handler */
	writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);

	ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x);
	if (ret) {
		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
			__func__, adev->irq[0]);
		goto out_no_irq;
	}

	/* Initialize physical channels */
	pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
			GFP_KERNEL);
	if (!pl08x->phy_chans) {
		ret = -ENOMEM;
		goto out_no_phychans;
	}

	for (i = 0; i < vd->channels; i++) {
		struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];

		ch->id = i;
		ch->base = pl08x->base + PL080_Cx_BASE(i);
		ch->reg_config = ch->base + vd->config_offset;
		spin_lock_init(&ch->lock);

		/*
		 * Nomadik variants can have channels that are locked
		 * down for the secure world only. Lock up these channels
		 * by perpetually serving a dummy virtual channel.
		 */
		if (vd->nomadik) {
			u32 val;

			val = readl(ch->reg_config);
			if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
				dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
				ch->locked = true;
			}
		}

		dev_dbg(&adev->dev, "physical channel %d is %s\n",
			i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
	}
	/* Register as many memcpy channels as there are physical channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
					      pl08x->vd->channels, false);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate memcpy channels - %d\n",
			 __func__, ret);
		goto out_no_memcpy;
	}

	/* Register slave channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
			pl08x->pd->num_slave_channels, true);
	if (ret < 0) {
		dev_warn(&pl08x->adev->dev,
			"%s failed to enumerate slave channels - %d\n",
			__func__, ret);
		goto out_no_slave;
	}

	ret = dma_async_device_register(&pl08x->memcpy);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			"%s failed to register memcpy as an async device - %d\n",
			__func__, ret);
		goto out_no_memcpy_reg;
	}

	ret = dma_async_device_register(&pl08x->slave);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			"%s failed to register slave as an async device - %d\n",
			__func__, ret);
		goto out_no_slave_reg;
	}

	amba_set_drvdata(adev, pl08x);
	init_pl08x_debugfs(pl08x);
	dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n",
		 amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev),
		 (unsigned long long)adev->res.start, adev->irq[0]);
	return 0;

out_no_slave_reg:
	dma_async_device_unregister(&pl08x->memcpy);
out_no_memcpy_reg:
	pl08x_free_virtual_channels(&pl08x->slave);
out_no_slave:
	pl08x_free_virtual_channels(&pl08x->memcpy);
out_no_memcpy:
	kfree(pl08x->phy_chans);
out_no_phychans:
	free_irq(adev->irq[0], pl08x);
out_no_irq:
	iounmap(pl08x->base);
out_no_ioremap:
	dma_pool_destroy(pl08x->pool);
out_no_lli_pool:
out_no_platdata:
	kfree(pl08x);
out_no_pl08x:
	amba_release_regions(adev);
	return ret;
}
/* PL080 has 8 channels and the PL081 has just 2 */
static struct vendor_data vendor_pl080 = {
	.config_offset = PL080_CH_CONFIG,
	.channels = 8,
	.signals = 16,
	.dualmaster = true,
	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};

static struct vendor_data vendor_nomadik = {
	.config_offset = PL080_CH_CONFIG,
	.channels = 8,
	.signals = 32,
	.dualmaster = true,
	.nomadik = true,
	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};

static struct vendor_data vendor_pl080s = {
	.config_offset = PL080S_CH_CONFIG,
	.channels = 8,
	.signals = 32,
	.pl080s = true,
	.max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK,
};

static struct vendor_data vendor_pl081 = {
	.config_offset = PL080_CH_CONFIG,
	.channels = 2,
	.signals = 16,
	.dualmaster = false,
	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};
static struct amba_id pl08x_ids[] = {
	/* Samsung PL080S variant */
	{
		.id	= 0x0a141080,
		.mask	= 0xffffffff,
		.data	= &vendor_pl080s,
	},
	/* PL080 */
	{
		.id	= 0x00041080,
		.mask	= 0x000fffff,
		.data	= &vendor_pl080,
	},
	/* PL081 */
	{
		.id	= 0x00041081,
		.mask	= 0x000fffff,
		.data	= &vendor_pl081,
	},
	/* Nomadik 8815 PL080 variant */
	{
		.id	= 0x00280080,
		.mask	= 0x00ffffff,
		.data	= &vendor_nomadik,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl08x_ids);
static struct amba_driver pl08x_amba_driver = {
	.drv.name	= DRIVER_NAME,
	.id_table	= pl08x_ids,
	.probe		= pl08x_probe,
};

static int __init pl08x_init(void)
{
	int retval;

	retval = amba_driver_register(&pl08x_amba_driver);
	if (retval)
		printk(KERN_WARNING DRIVER_NAME
		       " failed to register as an AMBA device (%d)\n",
		       retval);
	return retval;
}
subsys_initcall(pl08x_init);