- /*
- * DMA driver for Xilinx Video DMA Engine
- *
- * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
- *
- * Based on the Freescale DMA driver.
- *
- * Description:
- * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
- * core that provides high-bandwidth direct memory access between memory
- * and AXI4-Stream type video target peripherals. The core provides efficient
- * two dimensional DMA operations with independent asynchronous read (MM2S)
- * and write (S2MM) channel operation. It can be configured to have either
- * one channel or two channels. If configured as two channels, one is used
- * to transmit to the video device (MM2S) and the other is used to receive
- * from the video device (S2MM). Initialization, status, interrupt and management
- * registers are accessed through an AXI4-Lite slave interface.
- *
- * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
- * provides high-bandwidth one dimensional direct memory access between memory
- * and AXI4-Stream target peripherals. It supports one receive and one
- * transmit channel, both of them optional at synthesis time.
- *
- * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
- * Access (DMA) between a memory-mapped source address and a memory-mapped
- * destination address.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- */
- #include <linux/bitops.h>
- #include <linux/dmapool.h>
- #include <linux/dma/xilinx_dma.h>
- #include <linux/init.h>
- #include <linux/interrupt.h>
- #include <linux/io.h>
- #include <linux/iopoll.h>
- #include <linux/module.h>
- #include <linux/of_address.h>
- #include <linux/of_dma.h>
- #include <linux/of_platform.h>
- #include <linux/of_irq.h>
- #include <linux/slab.h>
- #include <linux/clk.h>
- #include <linux/io-64-nonatomic-lo-hi.h>
- #include "../dmaengine.h"
- /* Register/Descriptor Offsets */
- #define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000
- #define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030
- #define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
- #define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0
- /* Control Registers */
- #define XILINX_DMA_REG_DMACR 0x0000
- #define XILINX_DMA_DMACR_DELAY_MAX 0xff
- #define XILINX_DMA_DMACR_DELAY_SHIFT 24
- #define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
- #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
- #define XILINX_DMA_DMACR_ERR_IRQ BIT(14)
- #define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13)
- #define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12)
- #define XILINX_DMA_DMACR_MASTER_SHIFT 8
- #define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
- #define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4)
- #define XILINX_DMA_DMACR_GENLOCK_EN BIT(3)
- #define XILINX_DMA_DMACR_RESET BIT(2)
- #define XILINX_DMA_DMACR_CIRC_EN BIT(1)
- #define XILINX_DMA_DMACR_RUNSTOP BIT(0)
- #define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
- #define XILINX_DMA_REG_DMASR 0x0004
- #define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
- #define XILINX_DMA_DMASR_ERR_IRQ BIT(14)
- #define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13)
- #define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12)
- #define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11)
- #define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10)
- #define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9)
- #define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8)
- #define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7)
- #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
- #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
- #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
- #define XILINX_DMA_DMASR_IDLE BIT(1)
- #define XILINX_DMA_DMASR_HALTED BIT(0)
- #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
- #define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)
- #define XILINX_DMA_REG_CURDESC 0x0008
- #define XILINX_DMA_REG_TAILDESC 0x0010
- #define XILINX_DMA_REG_REG_INDEX 0x0014
- #define XILINX_DMA_REG_FRMSTORE 0x0018
- #define XILINX_DMA_REG_THRESHOLD 0x001c
- #define XILINX_DMA_REG_FRMPTR_STS 0x0024
- #define XILINX_DMA_REG_PARK_PTR 0x0028
- #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
- #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
- #define XILINX_DMA_REG_VDMA_VERSION 0x002c
- /* Register Direct Mode Registers */
- #define XILINX_DMA_REG_VSIZE 0x0000
- #define XILINX_DMA_REG_HSIZE 0x0004
- #define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
- #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
- #define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0
- #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
- #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))
- /* HW specific definitions */
- #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20
- #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
- (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
- XILINX_DMA_DMASR_DLY_CNT_IRQ | \
- XILINX_DMA_DMASR_ERR_IRQ)
- #define XILINX_DMA_DMASR_ALL_ERR_MASK \
- (XILINX_DMA_DMASR_EOL_LATE_ERR | \
- XILINX_DMA_DMASR_SOF_LATE_ERR | \
- XILINX_DMA_DMASR_SG_DEC_ERR | \
- XILINX_DMA_DMASR_SG_SLV_ERR | \
- XILINX_DMA_DMASR_EOF_EARLY_ERR | \
- XILINX_DMA_DMASR_SOF_EARLY_ERR | \
- XILINX_DMA_DMASR_DMA_DEC_ERR | \
- XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
- XILINX_DMA_DMASR_DMA_INT_ERR)
- /*
- * Recoverable errors are DMA Internal error, SOF Early, EOF Early
- * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
- * is enabled in the h/w system.
- */
- #define XILINX_DMA_DMASR_ERR_RECOVER_MASK \
- (XILINX_DMA_DMASR_SOF_LATE_ERR | \
- XILINX_DMA_DMASR_EOF_EARLY_ERR | \
- XILINX_DMA_DMASR_SOF_EARLY_ERR | \
- XILINX_DMA_DMASR_DMA_INT_ERR)
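- /*
- * Illustrative helper, not part of the driver: distilled from the
- * interrupt handler further below, an error status is treated as
- * recoverable when every error bit that is set falls inside the
- * recover mask (and, per the comment above, only when C_FLUSH_ON_FSYNC
- * is enabled in hardware).
- */
- static inline bool xilinx_dma_errors_recoverable(u32 status)
- {
- u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
- return !(errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK);
- }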
- /* Axi VDMA Flush on Fsync bits */
- #define XILINX_DMA_FLUSH_S2MM 3
- #define XILINX_DMA_FLUSH_MM2S 2
- #define XILINX_DMA_FLUSH_BOTH 1
- /* Loop count used to time out register polls if the hardware fails */
- #define XILINX_DMA_LOOP_COUNT 1000000
- /* AXI DMA Specific Registers/Offsets */
- #define XILINX_DMA_REG_SRCDSTADDR 0x18
- #define XILINX_DMA_REG_BTT 0x28
- /* AXI DMA Specific Masks/Bit fields */
- #define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0)
- #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
- #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
- #define XILINX_DMA_CR_COALESCE_SHIFT 16
- #define XILINX_DMA_BD_SOP BIT(27)
- #define XILINX_DMA_BD_EOP BIT(26)
- #define XILINX_DMA_COALESCE_MAX 255
- #define XILINX_DMA_NUM_APP_WORDS 5
- /* Multi-Channel DMA Descriptor offsets */
- #define XILINX_DMA_MCRX_CDESC(x) (0x40 + ((x) - 1) * 0x20)
- #define XILINX_DMA_MCRX_TDESC(x) (0x48 + ((x) - 1) * 0x20)
- /* Multi-Channel DMA Masks/Shifts */
- #define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0)
- #define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0)
- #define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19)
- #define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0)
- #define XILINX_DMA_BD_STRIDE_SHIFT 0
- #define XILINX_DMA_BD_VSIZE_SHIFT 19
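- /*
- * Illustrative only, not part of the driver: how a 2D multi-channel
- * buffer descriptor would pack vsize and stride using the masks and
- * shifts above.
- */
- static inline u32 xilinx_dma_example_pack_vsize_stride(u32 vsize, u32 stride)
- {
- return ((vsize << XILINX_DMA_BD_VSIZE_SHIFT) & XILINX_DMA_BD_VSIZE_MASK) |
- ((stride << XILINX_DMA_BD_STRIDE_SHIFT) & XILINX_DMA_BD_STRIDE_MASK);
- }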
- /* AXI CDMA Specific Registers/Offsets */
- #define XILINX_CDMA_REG_SRCADDR 0x18
- #define XILINX_CDMA_REG_DSTADDR 0x20
- /* AXI CDMA Specific Masks */
- #define XILINX_CDMA_CR_SGMODE BIT(3)
- /**
- * struct xilinx_vdma_desc_hw - Hardware Descriptor
- * @next_desc: Next Descriptor Pointer @0x00
- * @pad1: Reserved @0x04
- * @buf_addr: Buffer address @0x08
- * @buf_addr_msb: MSB of Buffer address @0x0C
- * @vsize: Vertical Size @0x10
- * @hsize: Horizontal Size @0x14
- * @stride: Number of bytes between the first
- * pixels of each horizontal line @0x18
- */
- struct xilinx_vdma_desc_hw {
- u32 next_desc;
- u32 pad1;
- u32 buf_addr;
- u32 buf_addr_msb;
- u32 vsize;
- u32 hsize;
- u32 stride;
- } __aligned(64);
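- /*
- * Illustrative only, not part of the driver: the @0x.. offsets noted in
- * the kernel-doc above can be verified at build time, assuming
- * BUILD_BUG_ON() and offsetof() are reachable through the includes.
- */
- static inline void xilinx_vdma_desc_hw_layout_check(void)
- {
- BUILD_BUG_ON(offsetof(struct xilinx_vdma_desc_hw, buf_addr) != 0x08);
- BUILD_BUG_ON(offsetof(struct xilinx_vdma_desc_hw, vsize) != 0x10);
- BUILD_BUG_ON(offsetof(struct xilinx_vdma_desc_hw, stride) != 0x18);
- }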
- /**
- * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
- * @next_desc: Next Descriptor Pointer @0x00
- * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
- * @buf_addr: Buffer address @0x08
- * @buf_addr_msb: MSB of Buffer address @0x0C
- * @mcdma_control: Control field for mcdma @0x10
- * @vsize_stride: Vsize and Stride field for mcdma @0x14
- * @control: Control field @0x18
- * @status: Status field @0x1C
- * @app: APP Fields @0x20 - 0x30
- */
- struct xilinx_axidma_desc_hw {
- u32 next_desc;
- u32 next_desc_msb;
- u32 buf_addr;
- u32 buf_addr_msb;
- u32 mcdma_control;
- u32 vsize_stride;
- u32 control;
- u32 status;
- u32 app[XILINX_DMA_NUM_APP_WORDS];
- } __aligned(64);
- /**
- * struct xilinx_cdma_desc_hw - Hardware Descriptor
- * @next_desc: Next Descriptor Pointer @0x00
- * @next_desc_msb: Next Descriptor Pointer MSB @0x04
- * @src_addr: Source address @0x08
- * @src_addr_msb: Source address MSB @0x0C
- * @dest_addr: Destination address @0x10
- * @dest_addr_msb: Destination address MSB @0x14
- * @control: Control field @0x18
- * @status: Status field @0x1C
- */
- struct xilinx_cdma_desc_hw {
- u32 next_desc;
- u32 next_desc_msb;
- u32 src_addr;
- u32 src_addr_msb;
- u32 dest_addr;
- u32 dest_addr_msb;
- u32 control;
- u32 status;
- } __aligned(64);
- /**
- * struct xilinx_vdma_tx_segment - Descriptor segment
- * @hw: Hardware descriptor
- * @node: Node in the descriptor segments list
- * @phys: Physical address of segment
- */
- struct xilinx_vdma_tx_segment {
- struct xilinx_vdma_desc_hw hw;
- struct list_head node;
- dma_addr_t phys;
- } __aligned(64);
- /**
- * struct xilinx_axidma_tx_segment - Descriptor segment
- * @hw: Hardware descriptor
- * @node: Node in the descriptor segments list
- * @phys: Physical address of segment
- */
- struct xilinx_axidma_tx_segment {
- struct xilinx_axidma_desc_hw hw;
- struct list_head node;
- dma_addr_t phys;
- } __aligned(64);
- /**
- * struct xilinx_cdma_tx_segment - Descriptor segment
- * @hw: Hardware descriptor
- * @node: Node in the descriptor segments list
- * @phys: Physical address of segment
- */
- struct xilinx_cdma_tx_segment {
- struct xilinx_cdma_desc_hw hw;
- struct list_head node;
- dma_addr_t phys;
- } __aligned(64);
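- /*
- * Note: the segment wrappers above are 64-byte aligned so that each
- * embedded hardware descriptor satisfies the hardware's alignment
- * requirement; the per-channel dma_pool (see
- * xilinx_dma_alloc_chan_resources()) is created with the same alignment.
- */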
- /**
- * struct xilinx_dma_tx_descriptor - Per Transaction structure
- * @async_tx: Async transaction descriptor
- * @segments: TX segments list
- * @node: Node in the channel descriptors list
- * @cyclic: True if this descriptor describes a cyclic transfer
- */
- struct xilinx_dma_tx_descriptor {
- struct dma_async_tx_descriptor async_tx;
- struct list_head segments;
- struct list_head node;
- bool cyclic;
- };
- /**
- * struct xilinx_dma_chan - Driver specific DMA channel structure
- * @xdev: Driver specific device structure
- * @ctrl_offset: Control registers offset
- * @desc_offset: TX descriptor registers offset
- * @lock: Descriptor operation lock
- * @pending_list: Descriptors waiting
- * @active_list: Descriptors ready to submit
- * @done_list: Complete descriptors
- * @common: DMA common channel
- * @desc_pool: Descriptors pool
- * @dev: The dma device
- * @irq: Channel IRQ
- * @id: Channel ID
- * @direction: Transfer direction
- * @num_frms: Number of frames
- * @has_sg: Support scatter transfers
- * @cyclic: True if a cyclic transfer is active on the channel
- * @genlock: Support genlock mode
- * @err: Channel has errors
- * @tasklet: Cleanup work after irq
- * @config: Device configuration info
- * @flush_on_fsync: Flush on Frame sync
- * @desc_pendingcount: Descriptor pending count
- * @ext_addr: Indicates 64 bit addressing is supported by dma channel
- * @desc_submitcount: Descriptor h/w submitted count
- * @residue: Residue for AXI DMA
- * @seg_v: Statically allocated segments base
- * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
- * @start_transfer: IP specific routine that starts a transfer
- * @tdest: TDEST value for mcdma
- */
- struct xilinx_dma_chan {
- struct xilinx_dma_device *xdev;
- u32 ctrl_offset;
- u32 desc_offset;
- spinlock_t lock;
- struct list_head pending_list;
- struct list_head active_list;
- struct list_head done_list;
- struct dma_chan common;
- struct dma_pool *desc_pool;
- struct device *dev;
- int irq;
- int id;
- enum dma_transfer_direction direction;
- int num_frms;
- bool has_sg;
- bool cyclic;
- bool genlock;
- bool err;
- struct tasklet_struct tasklet;
- struct xilinx_vdma_config config;
- bool flush_on_fsync;
- u32 desc_pendingcount;
- bool ext_addr;
- u32 desc_submitcount;
- u32 residue;
- struct xilinx_axidma_tx_segment *seg_v;
- struct xilinx_axidma_tx_segment *cyclic_seg_v;
- void (*start_transfer)(struct xilinx_dma_chan *chan);
- u16 tdest;
- };
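- /**
- * struct xilinx_dma_config - DMA IP configuration
- * @dmatype: DMA IP type (AXI DMA, AXI CDMA or AXI VDMA)
- * @clk_init: IP specific clock initialization callback
- */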
- struct xilinx_dma_config {
- enum xdma_ip_type dmatype;
- int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
- struct clk **tx_clk, struct clk **txs_clk,
- struct clk **rx_clk, struct clk **rxs_clk);
- };
- /**
- * struct xilinx_dma_device - DMA device structure
- * @regs: I/O mapped base address
- * @dev: Device Structure
- * @common: DMA device structure
- * @chan: Driver specific DMA channel
- * @has_sg: Specifies whether Scatter-Gather is present or not
- * @mcdma: Specifies whether Multi-Channel is present or not
- * @flush_on_fsync: Flush on frame sync
- * @ext_addr: Indicates 64 bit addressing is supported by dma device
- * @pdev: Platform device structure pointer
- * @dma_config: DMA config structure
- * @axi_clk: DMA AXI4-Lite interface clock
- * @tx_clk: DMA mm2s clock
- * @txs_clk: DMA mm2s stream clock
- * @rx_clk: DMA s2mm clock
- * @rxs_clk: DMA s2mm stream clock
- * @nr_channels: Number of channels DMA device supports
- * @chan_id: DMA channel identifier
- */
- struct xilinx_dma_device {
- void __iomem *regs;
- struct device *dev;
- struct dma_device common;
- struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
- bool has_sg;
- bool mcdma;
- u32 flush_on_fsync;
- bool ext_addr;
- struct platform_device *pdev;
- const struct xilinx_dma_config *dma_config;
- struct clk *axi_clk;
- struct clk *tx_clk;
- struct clk *txs_clk;
- struct clk *rx_clk;
- struct clk *rxs_clk;
- u32 nr_channels;
- u32 chan_id;
- };
- /* Macros */
- #define to_xilinx_chan(chan) \
- container_of(chan, struct xilinx_dma_chan, common)
- #define to_dma_tx_descriptor(tx) \
- container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
- #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
- readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
- cond, delay_us, timeout_us)
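- /*
- * Illustrative only, not part of the driver: waiting for the channel to
- * report idle using the polling helper above.
- */
- static inline int xilinx_dma_example_wait_idle(struct xilinx_dma_chan *chan)
- {
- u32 val;
- return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
- val & XILINX_DMA_DMASR_IDLE, 0,
- XILINX_DMA_LOOP_COUNT);
- }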
- /* IO accessors */
- static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
- {
- return ioread32(chan->xdev->regs + reg);
- }
- static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
- {
- iowrite32(value, chan->xdev->regs + reg);
- }
- static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
- u32 value)
- {
- dma_write(chan, chan->desc_offset + reg, value);
- }
- static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
- {
- return dma_read(chan, chan->ctrl_offset + reg);
- }
- static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
- u32 value)
- {
- dma_write(chan, chan->ctrl_offset + reg, value);
- }
- static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
- u32 clr)
- {
- dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
- }
- static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
- u32 set)
- {
- dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
- }
- /**
- * vdma_desc_write_64 - 64-bit descriptor write
- * @chan: Driver specific VDMA channel
- * @reg: Register to write
- * @value_lsb: lower 32 bits of the descriptor address
- * @value_msb: upper 32 bits of the descriptor address
- *
- * Since the VDMA driver writes to register offsets that are not 64-bit
- * aligned (e.g. 0x5c), the value is written as two separate 32-bit
- * writes instead of a single 64-bit register write.
- */
- static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
- u32 value_lsb, u32 value_msb)
- {
- /* Write the lsb 32 bits */
- writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
- /* Write the msb 32 bits */
- writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
- }
- static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
- {
- lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
- }
- static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
- dma_addr_t addr)
- {
- if (chan->ext_addr)
- dma_writeq(chan, reg, addr);
- else
- dma_ctrl_write(chan, reg, addr);
- }
- static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
- struct xilinx_axidma_desc_hw *hw,
- dma_addr_t buf_addr, size_t sg_used,
- size_t period_len)
- {
- if (chan->ext_addr) {
- hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
- hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
- period_len);
- } else {
- hw->buf_addr = buf_addr + sg_used + period_len;
- }
- }
- /* -----------------------------------------------------------------------------
- * Descriptors and segments alloc and free
- */
- /**
- * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
- * @chan: Driver specific DMA channel
- *
- * Return: The allocated segment on success and NULL on failure.
- */
- static struct xilinx_vdma_tx_segment *
- xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
- {
- struct xilinx_vdma_tx_segment *segment;
- dma_addr_t phys;
- segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
- if (!segment)
- return NULL;
- segment->phys = phys;
- return segment;
- }
- /**
- * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
- * @chan: Driver specific DMA channel
- *
- * Return: The allocated segment on success and NULL on failure.
- */
- static struct xilinx_cdma_tx_segment *
- xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
- {
- struct xilinx_cdma_tx_segment *segment;
- dma_addr_t phys;
- segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
- if (!segment)
- return NULL;
- segment->phys = phys;
- return segment;
- }
- /**
- * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
- * @chan: Driver specific DMA channel
- *
- * Return: The allocated segment on success and NULL on failure.
- */
- static struct xilinx_axidma_tx_segment *
- xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
- {
- struct xilinx_axidma_tx_segment *segment;
- dma_addr_t phys;
- segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
- if (!segment)
- return NULL;
- segment->phys = phys;
- return segment;
- }
- /**
- * xilinx_dma_free_tx_segment - Free transaction segment
- * @chan: Driver specific DMA channel
- * @segment: DMA transaction segment
- */
- static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
- struct xilinx_axidma_tx_segment *segment)
- {
- dma_pool_free(chan->desc_pool, segment, segment->phys);
- }
- /**
- * xilinx_cdma_free_tx_segment - Free transaction segment
- * @chan: Driver specific DMA channel
- * @segment: DMA transaction segment
- */
- static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
- struct xilinx_cdma_tx_segment *segment)
- {
- dma_pool_free(chan->desc_pool, segment, segment->phys);
- }
- /**
- * xilinx_vdma_free_tx_segment - Free transaction segment
- * @chan: Driver specific DMA channel
- * @segment: DMA transaction segment
- */
- static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
- struct xilinx_vdma_tx_segment *segment)
- {
- dma_pool_free(chan->desc_pool, segment, segment->phys);
- }
- /**
- * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
- * @chan: Driver specific DMA channel
- *
- * Return: The allocated descriptor on success and NULL on failure.
- */
- static struct xilinx_dma_tx_descriptor *
- xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
- {
- struct xilinx_dma_tx_descriptor *desc;
- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return NULL;
- INIT_LIST_HEAD(&desc->segments);
- return desc;
- }
- /**
- * xilinx_dma_free_tx_descriptor - Free transaction descriptor
- * @chan: Driver specific DMA channel
- * @desc: DMA transaction descriptor
- */
- static void
- xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
- struct xilinx_dma_tx_descriptor *desc)
- {
- struct xilinx_vdma_tx_segment *segment, *next;
- struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
- struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
- if (!desc)
- return;
- if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
- list_for_each_entry_safe(segment, next, &desc->segments, node) {
- list_del(&segment->node);
- xilinx_vdma_free_tx_segment(chan, segment);
- }
- } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
- list_for_each_entry_safe(cdma_segment, cdma_next,
- &desc->segments, node) {
- list_del(&cdma_segment->node);
- xilinx_cdma_free_tx_segment(chan, cdma_segment);
- }
- } else {
- list_for_each_entry_safe(axidma_segment, axidma_next,
- &desc->segments, node) {
- list_del(&axidma_segment->node);
- xilinx_dma_free_tx_segment(chan, axidma_segment);
- }
- }
- kfree(desc);
- }
- /* Required functions */
- /**
- * xilinx_dma_free_desc_list - Free descriptors list
- * @chan: Driver specific DMA channel
- * @list: List to parse and delete the descriptor
- */
- static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
- struct list_head *list)
- {
- struct xilinx_dma_tx_descriptor *desc, *next;
- list_for_each_entry_safe(desc, next, list, node) {
- list_del(&desc->node);
- xilinx_dma_free_tx_descriptor(chan, desc);
- }
- }
- /**
- * xilinx_dma_free_descriptors - Free channel descriptors
- * @chan: Driver specific DMA channel
- */
- static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
- {
- unsigned long flags;
- spin_lock_irqsave(&chan->lock, flags);
- xilinx_dma_free_desc_list(chan, &chan->pending_list);
- xilinx_dma_free_desc_list(chan, &chan->done_list);
- xilinx_dma_free_desc_list(chan, &chan->active_list);
- spin_unlock_irqrestore(&chan->lock, flags);
- }
- /**
- * xilinx_dma_free_chan_resources - Free channel resources
- * @dchan: DMA channel
- */
- static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- dev_dbg(chan->dev, "Free all channel resources.\n");
- xilinx_dma_free_descriptors(chan);
- if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v);
- xilinx_dma_free_tx_segment(chan, chan->seg_v);
- }
- dma_pool_destroy(chan->desc_pool);
- chan->desc_pool = NULL;
- }
- /**
- * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
- * @chan: Driver specific dma channel
- * @desc: dma transaction descriptor
- * @flags: flags for spin lock
- */
- static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
- struct xilinx_dma_tx_descriptor *desc,
- unsigned long *flags)
- {
- dma_async_tx_callback callback;
- void *callback_param;
- callback = desc->async_tx.callback;
- callback_param = desc->async_tx.callback_param;
- if (callback) {
- spin_unlock_irqrestore(&chan->lock, *flags);
- callback(callback_param);
- spin_lock_irqsave(&chan->lock, *flags);
- }
- }
- /**
- * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
- * @chan: Driver specific DMA channel
- */
- static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
- {
- struct xilinx_dma_tx_descriptor *desc, *next;
- unsigned long flags;
- spin_lock_irqsave(&chan->lock, flags);
- list_for_each_entry_safe(desc, next, &chan->done_list, node) {
- struct dmaengine_desc_callback cb;
- if (desc->cyclic) {
- xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
- break;
- }
- /* Remove from the list of running transactions */
- list_del(&desc->node);
- /* Run the link descriptor callback function */
- dmaengine_desc_get_callback(&desc->async_tx, &cb);
- if (dmaengine_desc_callback_valid(&cb)) {
- spin_unlock_irqrestore(&chan->lock, flags);
- dmaengine_desc_callback_invoke(&cb, NULL);
- spin_lock_irqsave(&chan->lock, flags);
- }
- /* Run any dependencies, then free the descriptor */
- dma_run_dependencies(&desc->async_tx);
- xilinx_dma_free_tx_descriptor(chan, desc);
- }
- spin_unlock_irqrestore(&chan->lock, flags);
- }
- /**
- * xilinx_dma_do_tasklet - Schedule completion tasklet
- * @data: Pointer to the Xilinx DMA channel structure
- */
- static void xilinx_dma_do_tasklet(unsigned long data)
- {
- struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
- xilinx_dma_chan_desc_cleanup(chan);
- }
- /**
- * xilinx_dma_alloc_chan_resources - Allocate channel resources
- * @dchan: DMA channel
- *
- * Return: '0' on success and failure value on error
- */
- static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- /* Has this channel already been allocated? */
- if (chan->desc_pool)
- return 0;
- /*
- * The descriptors must be 64-byte aligned to meet the Xilinx DMA
- * hardware specification requirement.
- */
- if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
- chan->dev,
- sizeof(struct xilinx_axidma_tx_segment),
- __alignof__(struct xilinx_axidma_tx_segment),
- 0);
- } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
- chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
- chan->dev,
- sizeof(struct xilinx_cdma_tx_segment),
- __alignof__(struct xilinx_cdma_tx_segment),
- 0);
- } else {
- chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
- chan->dev,
- sizeof(struct xilinx_vdma_tx_segment),
- __alignof__(struct xilinx_vdma_tx_segment),
- 0);
- }
- if (!chan->desc_pool) {
- dev_err(chan->dev,
- "unable to allocate channel %d descriptor pool\n",
- chan->id);
- return -ENOMEM;
- }
- if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- /*
- * For AXI DMA case after submitting a pending_list, keep
- * an extra segment allocated so that the "next descriptor"
- * pointer on the tail descriptor always points to a
- * valid descriptor, even when paused after reaching taildesc.
- * This way, it is possible to issue additional
- * transfers without halting and restarting the channel.
- */
- chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);
- /*
- * For cyclic DMA mode, the tail descriptor register must be
- * programmed with a value that is not part of the BD chain, so a
- * spare segment is allocated here, at channel-allocation time,
- * for that purpose.
- */
- chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan);
- }
- dma_cookie_init(dchan);
- if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- /*
- * For AXI DMA, resetting one channel resets the other channel as
- * well, so enable the interrupts here.
- */
- dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
- XILINX_DMA_DMAXR_ALL_IRQ_MASK);
- }
- if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
- dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
- XILINX_CDMA_CR_SGMODE);
- return 0;
- }
- /**
- * xilinx_dma_tx_status - Get DMA transaction status
- * @dchan: DMA channel
- * @cookie: Transaction identifier
- * @txstate: Transaction state
- *
- * Return: DMA transaction status
- */
- static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
- dma_cookie_t cookie,
- struct dma_tx_state *txstate)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_axidma_tx_segment *segment;
- struct xilinx_axidma_desc_hw *hw;
- enum dma_status ret;
- unsigned long flags;
- u32 residue = 0;
- ret = dma_cookie_status(dchan, cookie, txstate);
- if (ret == DMA_COMPLETE || !txstate)
- return ret;
- if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- spin_lock_irqsave(&chan->lock, flags);
- desc = list_last_entry(&chan->active_list,
- struct xilinx_dma_tx_descriptor, node);
- if (chan->has_sg) {
- list_for_each_entry(segment, &desc->segments, node) {
- hw = &segment->hw;
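- /*
- * hw->control carries the programmed length and hw->status
- * the bytes the engine has completed for this BD, so the
- * masked difference is the segment's outstanding byte count.
- */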
- residue += (hw->control - hw->status) &
- XILINX_DMA_MAX_TRANS_LEN;
- }
- }
- spin_unlock_irqrestore(&chan->lock, flags);
- chan->residue = residue;
- dma_set_residue(txstate, chan->residue);
- }
- return ret;
- }
- /**
- * xilinx_dma_is_running - Check if DMA channel is running
- * @chan: Driver specific DMA channel
- *
- * Return: 'true' if running, 'false' if not.
- */
- static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan)
- {
- return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
- XILINX_DMA_DMASR_HALTED) &&
- (dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) &
- XILINX_DMA_DMACR_RUNSTOP);
- }
- /**
- * xilinx_dma_is_idle - Check if DMA channel is idle
- * @chan: Driver specific DMA channel
- *
- * Return: 'true' if idle, 'false' if not.
- */
- static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
- {
- return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
- XILINX_DMA_DMASR_IDLE;
- }
- /**
- * xilinx_dma_halt - Halt DMA channel
- * @chan: Driver specific DMA channel
- */
- static void xilinx_dma_halt(struct xilinx_dma_chan *chan)
- {
- int err;
- u32 val;
- dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
- /* Wait for the hardware to halt */
- err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
- (val & XILINX_DMA_DMASR_HALTED), 0,
- XILINX_DMA_LOOP_COUNT);
- if (err) {
- dev_err(chan->dev, "Cannot stop channel %p: %x\n",
- chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
- chan->err = true;
- }
- }
- /**
- * xilinx_dma_start - Start DMA channel
- * @chan: Driver specific DMA channel
- */
- static void xilinx_dma_start(struct xilinx_dma_chan *chan)
- {
- int err;
- u32 val;
- dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
- /* Wait for the hardware to start */
- err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
- !(val & XILINX_DMA_DMASR_HALTED), 0,
- XILINX_DMA_LOOP_COUNT);
- if (err) {
- dev_err(chan->dev, "Cannot start channel %p: %x\n",
- chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
- chan->err = true;
- }
- }
- /**
- * xilinx_vdma_start_transfer - Starts VDMA transfer
- * @chan: Driver specific channel struct pointer
- */
- static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
- {
- struct xilinx_vdma_config *config = &chan->config;
- struct xilinx_dma_tx_descriptor *desc, *tail_desc;
- u32 reg;
- struct xilinx_vdma_tx_segment *tail_segment;
- /* This function is called with the channel lock held */
- if (chan->err)
- return;
- if (list_empty(&chan->pending_list))
- return;
- desc = list_first_entry(&chan->pending_list,
- struct xilinx_dma_tx_descriptor, node);
- tail_desc = list_last_entry(&chan->pending_list,
- struct xilinx_dma_tx_descriptor, node);
- tail_segment = list_last_entry(&tail_desc->segments,
- struct xilinx_vdma_tx_segment, node);
- /* If it is SG mode and hardware is busy, cannot submit */
- if (chan->has_sg && xilinx_dma_is_running(chan) &&
- !xilinx_dma_is_idle(chan)) {
- dev_dbg(chan->dev, "DMA controller still busy\n");
- return;
- }
- /*
- * If hardware is idle, then all descriptors on the running lists are
- * done; start new transfers
- */
- if (chan->has_sg)
- dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
- desc->async_tx.phys);
- /* Configure the hardware using info in the config structure */
- reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
- if (config->frm_cnt_en)
- reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
- else
- reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
- /* Configure the channel to allow the number of pending frame buffers */
- dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
- chan->desc_pendingcount);
- /*
- * With SG, start with circular mode, so that BDs can be fetched.
- * In direct register mode, if not parking, enable circular mode
- */
- if (chan->has_sg || !config->park)
- reg |= XILINX_DMA_DMACR_CIRC_EN;
- if (config->park)
- reg &= ~XILINX_DMA_DMACR_CIRC_EN;
- dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
- if (config->park && (config->park_frm >= 0) &&
- (config->park_frm < chan->num_frms)) {
- if (chan->direction == DMA_MEM_TO_DEV)
- dma_write(chan, XILINX_DMA_REG_PARK_PTR,
- config->park_frm <<
- XILINX_DMA_PARK_PTR_RD_REF_SHIFT);
- else
- dma_write(chan, XILINX_DMA_REG_PARK_PTR,
- config->park_frm <<
- XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
- }
- /* Start the hardware */
- xilinx_dma_start(chan);
- if (chan->err)
- return;
- /* Start the transfer */
- if (chan->has_sg) {
- dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
- tail_segment->phys);
- } else {
- struct xilinx_vdma_tx_segment *segment, *last = NULL;
- int i = 0;
- if (chan->desc_submitcount < chan->num_frms)
- i = chan->desc_submitcount;
- list_for_each_entry(segment, &desc->segments, node) {
- if (chan->ext_addr)
- vdma_desc_write_64(chan,
- XILINX_VDMA_REG_START_ADDRESS_64(i++),
- segment->hw.buf_addr,
- segment->hw.buf_addr_msb);
- else
- vdma_desc_write(chan,
- XILINX_VDMA_REG_START_ADDRESS(i++),
- segment->hw.buf_addr);
- last = segment;
- }
- if (!last)
- return;
- /* HW expects these parameters to be the same for one transaction */
- vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
- vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
- last->hw.stride);
- vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
- }
- if (!chan->has_sg) {
- list_del(&desc->node);
- list_add_tail(&desc->node, &chan->active_list);
- chan->desc_submitcount++;
- chan->desc_pendingcount--;
- if (chan->desc_submitcount == chan->num_frms)
- chan->desc_submitcount = 0;
- } else {
- list_splice_tail_init(&chan->pending_list, &chan->active_list);
- chan->desc_pendingcount = 0;
- }
- }
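- /*
- * Illustrative client-side sketch, not part of this driver: parking the
- * channel on frame 0 through the configuration interface exported in
- * <linux/dma/xilinx_dma.h>, which feeds the park logic above.
- */
- static int xilinx_vdma_example_park_frame0(struct dma_chan *dchan)
- {
- struct xilinx_vdma_config cfg = {
- .park = 1,
- .park_frm = 0,
- };
- return xilinx_vdma_channel_set_config(dchan, &cfg);
- }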
- /**
- * xilinx_cdma_start_transfer - Starts cdma transfer
- * @chan: Driver specific channel struct pointer
- */
- static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
- {
- struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
- struct xilinx_cdma_tx_segment *tail_segment;
- u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
- if (chan->err)
- return;
- if (list_empty(&chan->pending_list))
- return;
- head_desc = list_first_entry(&chan->pending_list,
- struct xilinx_dma_tx_descriptor, node);
- tail_desc = list_last_entry(&chan->pending_list,
- struct xilinx_dma_tx_descriptor, node);
- tail_segment = list_last_entry(&tail_desc->segments,
- struct xilinx_cdma_tx_segment, node);
- if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
- ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
- ctrl_reg |= chan->desc_pendingcount <<
- XILINX_DMA_CR_COALESCE_SHIFT;
- dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
- }
- if (chan->has_sg) {
- xilinx_write(chan, XILINX_DMA_REG_CURDESC,
- head_desc->async_tx.phys);
- /* Update tail ptr register which will start the transfer */
- xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
- tail_segment->phys);
- } else {
- /* In simple mode */
- struct xilinx_cdma_tx_segment *segment;
- struct xilinx_cdma_desc_hw *hw;
- segment = list_first_entry(&head_desc->segments,
- struct xilinx_cdma_tx_segment,
- node);
- hw = &segment->hw;
- xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
- xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);
- /* Start the transfer */
- dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
- hw->control & XILINX_DMA_MAX_TRANS_LEN);
- }
- list_splice_tail_init(&chan->pending_list, &chan->active_list);
- chan->desc_pendingcount = 0;
- }
- /**
- * xilinx_dma_start_transfer - Starts DMA transfer
- * @chan: Driver specific channel struct pointer
- */
- static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
- {
- struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
- struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head;
- u32 reg;
- if (chan->err)
- return;
- if (list_empty(&chan->pending_list))
- return;
- /* If it is SG mode and hardware is busy, cannot submit */
- if (chan->has_sg && xilinx_dma_is_running(chan) &&
- !xilinx_dma_is_idle(chan)) {
- dev_dbg(chan->dev, "DMA controller still busy\n");
- return;
- }
- head_desc = list_first_entry(&chan->pending_list,
- struct xilinx_dma_tx_descriptor, node);
- tail_desc = list_last_entry(&chan->pending_list,
- struct xilinx_dma_tx_descriptor, node);
- tail_segment = list_last_entry(&tail_desc->segments,
- struct xilinx_axidma_tx_segment, node);
- if (chan->has_sg && !chan->xdev->mcdma) {
- old_head = list_first_entry(&head_desc->segments,
- struct xilinx_axidma_tx_segment, node);
- new_head = chan->seg_v;
- /* Copy Buffer Descriptor fields. */
- new_head->hw = old_head->hw;
- /* Swap the nodes and keep the old head as the new reserve */
- list_replace_init(&old_head->node, &new_head->node);
- chan->seg_v = old_head;
- tail_segment->hw.next_desc = chan->seg_v->phys;
- head_desc->async_tx.phys = new_head->phys;
- }
- reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
- if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
- reg &= ~XILINX_DMA_CR_COALESCE_MAX;
- reg |= chan->desc_pendingcount <<
- XILINX_DMA_CR_COALESCE_SHIFT;
- dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
- }
- if (chan->has_sg && !chan->xdev->mcdma)
- xilinx_write(chan, XILINX_DMA_REG_CURDESC,
- head_desc->async_tx.phys);
- if (chan->has_sg && chan->xdev->mcdma) {
- if (chan->direction == DMA_MEM_TO_DEV) {
- dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
- head_desc->async_tx.phys);
- } else {
- if (!chan->tdest) {
- dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
- head_desc->async_tx.phys);
- } else {
- dma_ctrl_write(chan,
- XILINX_DMA_MCRX_CDESC(chan->tdest),
- head_desc->async_tx.phys);
- }
- }
- }
- xilinx_dma_start(chan);
- if (chan->err)
- return;
- /* Start the transfer */
- if (chan->has_sg && !chan->xdev->mcdma) {
- if (chan->cyclic)
- xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
- chan->cyclic_seg_v->phys);
- else
- xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
- tail_segment->phys);
- } else if (chan->has_sg && chan->xdev->mcdma) {
- if (chan->direction == DMA_MEM_TO_DEV) {
- dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
- tail_segment->phys);
- } else {
- if (!chan->tdest) {
- dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
- tail_segment->phys);
- } else {
- dma_ctrl_write(chan,
- XILINX_DMA_MCRX_TDESC(chan->tdest),
- tail_segment->phys);
- }
- }
- } else {
- struct xilinx_axidma_tx_segment *segment;
- struct xilinx_axidma_desc_hw *hw;
- segment = list_first_entry(&head_desc->segments,
- struct xilinx_axidma_tx_segment,
- node);
- hw = &segment->hw;
- xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
- /* Start the transfer */
- dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
- hw->control & XILINX_DMA_MAX_TRANS_LEN);
- }
- list_splice_tail_init(&chan->pending_list, &chan->active_list);
- chan->desc_pendingcount = 0;
- }
- /**
- * xilinx_dma_issue_pending - Issue pending transactions
- * @dchan: DMA channel
- */
- static void xilinx_dma_issue_pending(struct dma_chan *dchan)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- unsigned long flags;
- spin_lock_irqsave(&chan->lock, flags);
- chan->start_transfer(chan);
- spin_unlock_irqrestore(&chan->lock, flags);
- }
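- /*
- * Illustrative client-side sketch, not part of this driver: the standard
- * dmaengine submit flow that ends in xilinx_dma_issue_pending() above.
- */
- static int xilinx_dma_example_submit_single(struct dma_chan *chan,
- dma_addr_t buf, size_t len)
- {
- struct dma_async_tx_descriptor *tx;
- dma_cookie_t cookie;
- tx = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT);
- if (!tx)
- return -ENOMEM;
- cookie = dmaengine_submit(tx);
- if (dma_submit_error(cookie))
- return -EINVAL;
- dma_async_issue_pending(chan);
- return 0;
- }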
- /**
- * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
- * @chan: Driver specific DMA channel
- *
- * CONTEXT: hardirq
- */
- static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
- {
- struct xilinx_dma_tx_descriptor *desc, *next;
- /* This function is called with the channel lock held */
- if (list_empty(&chan->active_list))
- return;
- list_for_each_entry_safe(desc, next, &chan->active_list, node) {
- list_del(&desc->node);
- if (!desc->cyclic)
- dma_cookie_complete(&desc->async_tx);
- list_add_tail(&desc->node, &chan->done_list);
- }
- }
- /**
- * xilinx_dma_reset - Reset DMA channel
- * @chan: Driver specific DMA channel
- *
- * Return: '0' on success and failure value on error
- */
- static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
- {
- int err;
- u32 tmp;
- dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
- /* Wait for the hardware to finish reset */
- err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
- !(tmp & XILINX_DMA_DMACR_RESET), 0,
- XILINX_DMA_LOOP_COUNT);
- if (err) {
- dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
- dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
- dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
- return -ETIMEDOUT;
- }
- chan->err = false;
- return err;
- }
- /**
- * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
- * @chan: Driver specific DMA channel
- *
- * Return: '0' on success and failure value on error
- */
- static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
- {
- int err;
- /* Reset VDMA */
- err = xilinx_dma_reset(chan);
- if (err)
- return err;
- /* Enable interrupts */
- dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
- XILINX_DMA_DMAXR_ALL_IRQ_MASK);
- return 0;
- }
- /**
- * xilinx_dma_irq_handler - DMA Interrupt handler
- * @irq: IRQ number
- * @data: Pointer to the Xilinx DMA channel structure
- *
- * Return: IRQ_HANDLED/IRQ_NONE
- */
- static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
- {
- struct xilinx_dma_chan *chan = data;
- u32 status;
- /* Read the status and ack the interrupts. */
- status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
- if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
- return IRQ_NONE;
- dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
- status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
- if (status & XILINX_DMA_DMASR_ERR_IRQ) {
- /*
- * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
- * error is recoverable, ignore it. Otherwise flag the error.
- *
- * Only recoverable errors can be cleared in the DMASR register, so
- * take care not to write 1 to any other error bits.
- */
- u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
- dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
- errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
- if (!chan->flush_on_fsync ||
- (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
- dev_err(chan->dev,
- "Channel %p has errors %x, cdr %x tdr %x\n",
- chan, errors,
- dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
- dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
- chan->err = true;
- }
- }
- if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
- /*
- * The device took longer than the configured delay between
- * packets, which matters to users that require responsiveness.
- */
- dev_dbg(chan->dev, "Inter-packet latency too long\n");
- }
- if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
- spin_lock(&chan->lock);
- xilinx_dma_complete_descriptor(chan);
- chan->start_transfer(chan);
- spin_unlock(&chan->lock);
- }
- tasklet_schedule(&chan->tasklet);
- return IRQ_HANDLED;
- }
- /**
- * append_desc_queue - Queue a descriptor on the channel's pending list
- * @chan: Driver specific dma channel
- * @desc: dma transaction descriptor
- */
- static void append_desc_queue(struct xilinx_dma_chan *chan,
- struct xilinx_dma_tx_descriptor *desc)
- {
- struct xilinx_vdma_tx_segment *tail_segment;
- struct xilinx_dma_tx_descriptor *tail_desc;
- struct xilinx_axidma_tx_segment *axidma_tail_segment;
- struct xilinx_cdma_tx_segment *cdma_tail_segment;
- if (list_empty(&chan->pending_list))
- goto append;
- /*
- * Add the hardware descriptor to the chain of hardware descriptors
- * that already exists in memory.
- */
- tail_desc = list_last_entry(&chan->pending_list,
- struct xilinx_dma_tx_descriptor, node);
- if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
- tail_segment = list_last_entry(&tail_desc->segments,
- struct xilinx_vdma_tx_segment,
- node);
- tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
- } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
- cdma_tail_segment = list_last_entry(&tail_desc->segments,
- struct xilinx_cdma_tx_segment,
- node);
- cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
- } else {
- axidma_tail_segment = list_last_entry(&tail_desc->segments,
- struct xilinx_axidma_tx_segment,
- node);
- axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
- }
- /*
- * Add the software descriptor and all children to the list
- * of pending transactions
- */
- append:
- list_add_tail(&desc->node, &chan->pending_list);
- chan->desc_pendingcount++;
- if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
- && unlikely(chan->desc_pendingcount > chan->num_frms)) {
- dev_dbg(chan->dev, "desc pendingcount is too high\n");
- chan->desc_pendingcount = chan->num_frms;
- }
- }
- /**
- * xilinx_dma_tx_submit - Submit DMA transaction
- * @tx: Async transaction descriptor
- *
- * Return: cookie value on success and failure value on error
- */
- static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
- {
- struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
- struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
- dma_cookie_t cookie;
- unsigned long flags;
- int err;
- if (chan->cyclic) {
- xilinx_dma_free_tx_descriptor(chan, desc);
- return -EBUSY;
- }
- if (chan->err) {
- /*
- * If reset fails, the channel is no longer functional and the
- * system needs a hard reset.
- */
- err = xilinx_dma_chan_reset(chan);
- if (err < 0)
- return err;
- }
- spin_lock_irqsave(&chan->lock, flags);
- cookie = dma_cookie_assign(tx);
- /* Put this transaction onto the tail of the pending queue */
- append_desc_queue(chan, desc);
- if (desc->cyclic)
- chan->cyclic = true;
- spin_unlock_irqrestore(&chan->lock, flags);
- return cookie;
- }
- /**
- * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
- * DMA_SLAVE transaction
- * @dchan: DMA channel
- * @xt: Interleaved template pointer
- * @flags: transfer ack flags
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
- static struct dma_async_tx_descriptor *
- xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
- struct dma_interleaved_template *xt,
- unsigned long flags)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_vdma_tx_segment *segment, *prev = NULL;
- struct xilinx_vdma_desc_hw *hw;
- if (!is_slave_direction(xt->dir))
- return NULL;
- if (!xt->numf || !xt->sgl[0].size)
- return NULL;
- if (xt->frame_size != 1)
- return NULL;
- /* Allocate a transaction descriptor. */
- desc = xilinx_dma_alloc_tx_descriptor(chan);
- if (!desc)
- return NULL;
- dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
- desc->async_tx.tx_submit = xilinx_dma_tx_submit;
- async_tx_ack(&desc->async_tx);
- /* Allocate the link descriptor from DMA pool */
- segment = xilinx_vdma_alloc_tx_segment(chan);
- if (!segment)
- goto error;
- /* Fill in the hardware descriptor */
- hw = &segment->hw;
- hw->vsize = xt->numf;
- hw->hsize = xt->sgl[0].size;
- hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
- XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
- hw->stride |= chan->config.frm_dly <<
- XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
- if (xt->dir != DMA_MEM_TO_DEV) {
- if (chan->ext_addr) {
- hw->buf_addr = lower_32_bits(xt->dst_start);
- hw->buf_addr_msb = upper_32_bits(xt->dst_start);
- } else {
- hw->buf_addr = xt->dst_start;
- }
- } else {
- if (chan->ext_addr) {
- hw->buf_addr = lower_32_bits(xt->src_start);
- hw->buf_addr_msb = upper_32_bits(xt->src_start);
- } else {
- hw->buf_addr = xt->src_start;
- }
- }
- /* Insert the segment into the descriptor segments list. */
- list_add_tail(&segment->node, &desc->segments);
- prev = segment;
- /* Link the last hardware descriptor with the first. */
- segment = list_first_entry(&desc->segments,
- struct xilinx_vdma_tx_segment, node);
- desc->async_tx.phys = segment->phys;
- return &desc->async_tx;
- error:
- xilinx_dma_free_tx_descriptor(chan, desc);
- return NULL;
- }
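- /*
- * Illustrative client-side sketch, not part of this driver: building the
- * single-chunk interleaved template that the callback above expects for
- * one video frame. The parameter names are hypothetical.
- */
- static struct dma_async_tx_descriptor *
- xilinx_vdma_example_prep_frame(struct dma_chan *chan, dma_addr_t frame,
- size_t height, size_t bytes_per_line, size_t stride)
- {
- struct dma_interleaved_template *xt;
- struct dma_async_tx_descriptor *tx;
- xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
- if (!xt)
- return NULL;
- xt->dir = DMA_MEM_TO_DEV;
- xt->src_start = frame;
- xt->numf = height;
- xt->frame_size = 1;
- xt->sgl[0].size = bytes_per_line;
- xt->sgl[0].icg = stride - bytes_per_line;
- tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
- kfree(xt);
- return tx;
- }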
/**
 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: destination address
 * @dma_src: source address
 * @len: transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
			dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_cdma_tx_segment *segment, *prev;
	struct xilinx_cdma_desc_hw *hw;

	if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
		return NULL;

	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_cdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;
	hw->control = len;
	hw->src_addr = dma_src;
	hw->dest_addr = dma_dst;
	if (chan->ext_addr) {
		hw->src_addr_msb = upper_32_bits(dma_src);
		hw->dest_addr_msb = upper_32_bits(dma_dst);
	}

	/* Fill the previous next descriptor with current */
	prev = list_last_entry(&desc->segments,
			       struct xilinx_cdma_tx_segment, node);
	prev->hw.next_desc = segment->phys;

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	prev = segment;

	/* Link the last hardware descriptor with the first. */
	segment = list_first_entry(&desc->segments,
				   struct xilinx_cdma_tx_segment, node);
	desc->async_tx.phys = segment->phys;
	prev->hw.next_desc = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
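
/*
 * Illustrative client-side sketch (not part of this driver): driving the
 * CDMA memcpy path through the generic dmaengine API. "chan", "dst_phys",
 * "src_phys" and "len" are placeholders:
 *
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len,
 *					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!txd)
 *		return -ENOMEM;
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *
 * Completion is signalled through txd->callback, if the client set one
 * before submitting.
 */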
/**
 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL;
	u32 *app_w = (u32 *)context;
	struct scatterlist *sg;
	size_t copy;
	size_t sg_used;
	unsigned int i;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Build transactions using information in the scatter gather list */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
				     XILINX_DMA_MAX_TRANS_LEN);
			hw = &segment->hw;

			/* Fill in the descriptor */
			xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
					  sg_used, 0);

			hw->control = copy;

			if (chan->direction == DMA_MEM_TO_DEV) {
				if (app_w)
					memcpy(hw->app, app_w, sizeof(u32) *
					       XILINX_DMA_NUM_APP_WORDS);
			}

			if (prev)
				prev->hw.next_desc = segment->phys;

			prev = segment;
			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = segment->phys;
	prev->hw.next_desc = segment->phys;

	/* For DMA_MEM_TO_DEV, set SOP on the first BD and EOP on the last */
	if (chan->direction == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
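
/*
 * Illustrative client-side sketch (not part of this driver): mapping a
 * scatterlist and handing it to an AXI DMA channel through the generic
 * dmaengine API. "dev", "chan" and "sgt" (a struct sg_table the client
 * built) are placeholders:
 *
 *	struct dma_async_tx_descriptor *txd;
 *	int nents;
 *
 *	nents = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	if (!nents)
 *		return -EIO;
 *	txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!txd)
 *		return -ENOMEM;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */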
/**
 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA_SLAVE
 *	transaction
 * @dchan: DMA channel
 * @buf_addr: Physical address of the buffer
 * @buf_len: Total length of the cyclic buffers
 * @period_len: length of individual cyclic buffer
 * @direction: DMA direction
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
	struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
	size_t copy, sg_used;
	unsigned int num_periods;
	int i;
	u32 reg;

	if (!period_len)
		return NULL;

	num_periods = buf_len / period_len;

	if (!num_periods)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = direction;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	for (i = 0; i < num_periods; ++i) {
		sg_used = 0;

		while (sg_used < period_len) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = min_t(size_t, period_len - sg_used,
				     XILINX_DMA_MAX_TRANS_LEN);
			hw = &segment->hw;
			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
					  period_len * i);
			hw->control = copy;

			if (prev)
				prev->hw.next_desc = segment->phys;

			prev = segment;
			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	head_segment = list_first_entry(&desc->segments,
					struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = head_segment->phys;

	desc->cyclic = true;
	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
	reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	segment = list_last_entry(&desc->segments,
				  struct xilinx_axidma_tx_segment,
				  node);
	segment->hw.next_desc = (u32) head_segment->phys;

	/* For DMA_MEM_TO_DEV, set SOP on the first BD and EOP on the last */
	if (direction == DMA_MEM_TO_DEV) {
		head_segment->hw.control |= XILINX_DMA_BD_SOP;
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
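
/*
 * Illustrative client-side sketch (not part of this driver): a cyclic
 * ring-buffer transfer split into equal periods; the descriptor callback
 * fires once per completed period. "chan", "buf_phys", "my_period_done"
 * and "my_ctx" are placeholders, and the sizes are assumptions:
 *
 *	struct dma_async_tx_descriptor *txd;
 *	size_t buf_len = 64 * 1024, period_len = 8 * 1024;
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
 *					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!txd)
 *		return -ENOMEM;
 *	txd->callback = my_period_done;
 *	txd->callback_param = my_ctx;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */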
/**
 * xilinx_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_dma_prep_interleaved(struct dma_chan *dchan,
			    struct dma_interleaved_template *xt,
			    unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment;
	struct xilinx_axidma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = xt->dir;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Get a free segment */
	segment = xilinx_axidma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;

	/* Fill in the descriptor */
	if (xt->dir != DMA_MEM_TO_DEV)
		hw->buf_addr = xt->dst_start;
	else
		hw->buf_addr = xt->src_start;

	hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
	hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
			   XILINX_DMA_BD_VSIZE_MASK;
	hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
			    XILINX_DMA_BD_STRIDE_MASK;
	hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;

	/*
	 * Insert the segment into the descriptor segments
	 * list.
	 */
	list_add_tail(&segment->node, &desc->segments);

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	/* For DMA_MEM_TO_DEV, set SOP on the first BD and EOP on the last */
	if (xt->dir == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
/**
 * xilinx_dma_terminate_all - Halt the channel and free descriptors
 * @dchan: DMA Channel pointer
 *
 * Return: '0' always
 */
static int xilinx_dma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 reg;

	if (chan->cyclic)
		xilinx_dma_chan_reset(chan);

	/* Halt the DMA engine */
	xilinx_dma_halt(chan);

	/* Remove and free all of the descriptors in the lists */
	xilinx_dma_free_descriptors(chan);

	if (chan->cyclic) {
		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
		reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
		chan->cyclic = false;
	}

	return 0;
}
/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for AXI VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . enable genlock
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
				   struct xilinx_vdma_config *cfg)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 dmacr;

	if (cfg->reset)
		return xilinx_dma_chan_reset(chan);

	dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	chan->config.frm_dly = cfg->frm_dly;
	chan->config.park = cfg->park;

	/* genlock settings */
	chan->config.gen_lock = cfg->gen_lock;
	chan->config.master = cfg->master;

	if (cfg->gen_lock && chan->genlock) {
		dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
		dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
	}

	chan->config.frm_cnt_en = cfg->frm_cnt_en;
	if (cfg->park)
		chan->config.park_frm = cfg->park_frm;
	else
		chan->config.park_frm = -1;

	chan->config.coalesc = cfg->coalesc;
	chan->config.delay = cfg->delay;

	if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
		dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
		chan->config.coalesc = cfg->coalesc;
	}

	if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
		dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
		chan->config.delay = cfg->delay;
	}

	/* FSync Source selection */
	dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
	dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);

	return 0;
}
EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
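
/*
 * Illustrative client-side sketch (not part of this driver): run-time
 * reconfiguration of a VDMA channel through the exported helper. The
 * config struct comes from include/linux/dma/xilinx_dma.h; the values
 * chosen below are assumptions:
 *
 *	struct xilinx_vdma_config cfg = { 0 };
 *
 *	cfg.frm_dly = 0;
 *	cfg.coalesc = 1;	(raise an interrupt after every frame)
 *	cfg.park = 0;		(circular mode, no parking)
 *	cfg.gen_lock = 0;
 *	if (xilinx_vdma_channel_set_config(chan, &cfg))
 *		dev_err(dev, "VDMA channel configuration failed\n");
 */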
/* -----------------------------------------------------------------------------
 * Probe and remove
 */

/**
 * xilinx_dma_chan_remove - Per Channel remove function
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
{
	/* Disable all interrupts */
	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (chan->irq > 0)
		free_irq(chan->irq, chan);

	tasklet_kill(&chan->tasklet);

	list_del(&chan->common.device_node);
}
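
/**
 * axidma_clk_init - Look up and enable the AXI DMA clocks
 * @pdev: Pointer to the platform_device structure
 * @axi_clk: "s_axi_lite_aclk", the register interface clock (required)
 * @tx_clk: "m_axi_mm2s_aclk", the MM2S data clock (optional)
 * @rx_clk: "m_axi_s2mm_aclk", the S2MM data clock (optional)
 * @sg_clk: "m_axi_sg_aclk", the scatter-gather engine clock (optional)
 * @tmp_clk: unused for AXI DMA, returned as NULL
 *
 * Return: '0' on success and failure value on error
 */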
static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			   struct clk **tx_clk, struct clk **rx_clk,
			   struct clk **sg_clk, struct clk **tmp_clk)
{
	int err;

	*tmp_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
	if (IS_ERR(*sg_clk))
		*sg_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*sg_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}
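
/**
 * axicdma_clk_init - Look up and enable the AXI CDMA clocks
 * @pdev: Pointer to the platform_device structure
 * @axi_clk: "s_axi_lite_aclk", the register interface clock (required)
 * @dev_clk: "m_axi_aclk", the data interface clock (required)
 * @tmp_clk: unused for AXI CDMA, returned as NULL
 * @tmp1_clk: unused for AXI CDMA, returned as NULL
 * @tmp2_clk: unused for AXI CDMA, returned as NULL
 *
 * Return: '0' on success and failure value on error
 */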
static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **dev_clk, struct clk **tmp_clk,
			    struct clk **tmp1_clk, struct clk **tmp2_clk)
{
	int err;

	*tmp_clk = NULL;
	*tmp1_clk = NULL;
	*tmp2_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
		return err;
	}

	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
	if (IS_ERR(*dev_clk)) {
		err = PTR_ERR(*dev_clk);
		dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*dev_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	return 0;

err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}
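
/**
 * axivdma_clk_init - Look up and enable the AXI VDMA clocks
 * @pdev: Pointer to the platform_device structure
 * @axi_clk: "s_axi_lite_aclk", the register interface clock (required)
 * @tx_clk: "m_axi_mm2s_aclk", the MM2S memory-side clock (optional)
 * @txs_clk: "m_axis_mm2s_aclk", the MM2S stream-side clock (optional)
 * @rx_clk: "m_axi_s2mm_aclk", the S2MM memory-side clock (optional)
 * @rxs_clk: "s_axis_s2mm_aclk", the S2MM stream-side clock (optional)
 *
 * Return: '0' on success and failure value on error
 */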
static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **tx_clk, struct clk **txs_clk,
			    struct clk **rx_clk, struct clk **rxs_clk)
{
	int err;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
	if (IS_ERR(*txs_clk))
		*txs_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
	if (IS_ERR(*rxs_clk))
		*rxs_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*txs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txsclk;
	}

	err = clk_prepare_enable(*rxs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txsclk:
	clk_disable_unprepare(*txs_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}
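
/**
 * xdma_disable_allclks - Disable and unprepare all clocks
 * @xdev: Driver specific device structure
 *
 * Clocks a given configuration never requested are NULL here;
 * clk_disable_unprepare() treats a NULL clock as a no-op.
 */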
static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
{
	clk_disable_unprepare(xdev->rxs_clk);
	clk_disable_unprepare(xdev->rx_clk);
	clk_disable_unprepare(xdev->txs_clk);
	clk_disable_unprepare(xdev->tx_clk);
	clk_disable_unprepare(xdev->axi_clk);
}
/**
 * xilinx_dma_chan_probe - Per Channel Probing
 * It gets the channel features from the device tree entry and
 * initializes special channel handling routines
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 * @chan_id: DMA Channel id
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
				 struct device_node *node, int chan_id)
{
	struct xilinx_dma_chan *chan;
	bool has_dre = false;
	u32 value, width;
	int err;

	/* Allocate and initialize the channel structure */
	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = xdev->dev;
	chan->xdev = xdev;
	chan->has_sg = xdev->has_sg;
	chan->desc_pendingcount = 0x0;
	chan->ext_addr = xdev->ext_addr;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	INIT_LIST_HEAD(&chan->active_list);

	/* Retrieve the channel properties from the device tree */
	has_dre = of_property_read_bool(node, "xlnx,include-dre");

	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");

	err = of_property_read_u32(node, "xlnx,datawidth", &value);
	if (err) {
		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
		return err;
	}
	width = value >> 3; /* Convert bits to bytes */

	/* If data width is greater than 8 bytes, DRE is not in hw */
	if (width > 8)
		has_dre = false;

	if (!has_dre)
		xdev->common.copy_align = fls(width - 1);

	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
		chan->direction = DMA_MEM_TO_DEV;
		chan->id = chan_id;
		chan->tdest = chan_id;

		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
				chan->flush_on_fsync = true;
		}
	} else if (of_device_is_compatible(node,
					   "xlnx,axi-vdma-s2mm-channel") ||
		   of_device_is_compatible(node,
					   "xlnx,axi-dma-s2mm-channel")) {
		chan->direction = DMA_DEV_TO_MEM;
		chan->id = chan_id;
		chan->tdest = chan_id - xdev->nr_channels;

		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
				chan->flush_on_fsync = true;
		}
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}

	/* Request the interrupt */
	chan->irq = irq_of_parse_and_map(node, 0);
	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
			  "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		chan->start_transfer = xilinx_dma_start_transfer;
	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
		chan->start_transfer = xilinx_cdma_start_transfer;
	else
		chan->start_transfer = xilinx_vdma_start_transfer;

	/* Initialize the tasklet */
	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
		     (unsigned long)chan);

	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */
	chan->common.device = &xdev->common;

	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;

	/* Reset the channel */
	err = xilinx_dma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}
/**
 * xilinx_dma_child_probe - Per child node probe
 * It gets the number of dma-channels per child node from
 * the device tree and initializes all the channels.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: 0 always.
 */
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
				  struct device_node *node)
{
	u32 nr_channels = 1;
	int ret, i;

	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
	if ((ret < 0) && xdev->mcdma)
		dev_warn(xdev->dev, "missing dma-channels property\n");

	for (i = 0; i < nr_channels; i++)
		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);

	xdev->nr_channels += nr_channels;

	return 0;
}
/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}
static const struct xilinx_dma_config axidma_config = {
	.dmatype = XDMA_TYPE_AXIDMA,
	.clk_init = axidma_clk_init,
};

static const struct xilinx_dma_config axicdma_config = {
	.dmatype = XDMA_TYPE_CDMA,
	.clk_init = axicdma_clk_init,
};

static const struct xilinx_dma_config axivdma_config = {
	.dmatype = XDMA_TYPE_VDMA,
	.clk_init = axivdma_clk_init,
};

static const struct of_device_id xilinx_dma_of_ids[] = {
	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
	{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
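
/*
 * Illustrative device tree sketch for the compatibles above. Addresses and
 * interrupt specifiers are placeholders; see the Xilinx VDMA devicetree
 * binding document for the authoritative description:
 *
 *	axi_vdma_0: dma@40030000 {
 *		compatible = "xlnx,axi-vdma-1.00.a";
 *		reg = <0x40030000 0x10000>;
 *		#dma-cells = <1>;
 *		xlnx,num-fstores = <0x8>;
 *		xlnx,flush-fsync = <0x1>;
 *		xlnx,addrwidth = <0x20>;
 *		dma-channel@40030000 {
 *			compatible = "xlnx,axi-vdma-mm2s-channel";
 *			interrupts = <0 54 4>;
 *			xlnx,datawidth = <0x40>;
 *		};
 *	};
 *
 * A client then references channel 0 as "dmas = <&axi_vdma_0 0>;" together
 * with a "dma-names" entry, and obtains it with dma_request_chan(); the
 * single cell is the chan_id consumed by of_dma_xilinx_xlate() above.
 */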
/**
 * xilinx_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
			struct clk **, struct clk **, struct clk **) =
					axivdma_clk_init;
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_dma_device *xdev;
	struct device_node *child, *np = pdev->dev.of_node;
	struct resource *io;
	u32 num_frames, addr_width;
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	if (np) {
		const struct of_device_id *match;

		match = of_match_node(xilinx_dma_of_ids, np);
		if (match && match->data) {
			xdev->dma_config = match->data;
			clk_init = xdev->dma_config->clk_init;
		}
	}

	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
		       &xdev->rx_clk, &xdev->rxs_clk);
	if (err)
		return err;

	/* Request and map I/O memory */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(xdev->regs))
		return PTR_ERR(xdev->regs);

	/* Retrieve the DMA engine properties from the device tree */
	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		err = of_property_read_u32(node, "xlnx,num-fstores",
					   &num_frames);
		if (err < 0) {
			dev_err(xdev->dev,
				"missing xlnx,num-fstores property\n");
			return err;
		}

		err = of_property_read_u32(node, "xlnx,flush-fsync",
					   &xdev->flush_on_fsync);
		if (err < 0)
			dev_warn(xdev->dev,
				 "missing xlnx,flush-fsync property\n");
	}

	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");

	if (addr_width > 32)
		xdev->ext_addr = true;
	else
		xdev->ext_addr = false;

	/* Set the dma mask bits */
	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));

	/* Initialize the DMA engine */
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
	}

	xdev->common.device_alloc_chan_resources =
				xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_dma_free_chan_resources;
	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
	xdev->common.device_tx_status = xilinx_dma_tx_status;
	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_prep_dma_cyclic =
					  xilinx_dma_prep_dma_cyclic;
		xdev->common.device_prep_interleaved_dma =
					  xilinx_dma_prep_interleaved;
		/* Residue calculation is supported by only AXI DMA */
		xdev->common.residue_granularity =
					  DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
	} else {
		xdev->common.device_prep_interleaved_dma =
					  xilinx_vdma_dma_prep_interleaved;
	}

	platform_set_drvdata(pdev, xdev);

	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_dma_child_probe(xdev, child);
		if (err < 0)
			goto disable_clks;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		for (i = 0; i < xdev->nr_channels; i++)
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
	}

	/* Register the DMA engine with the core */
	dma_async_device_register(&xdev->common);

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

	dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

error:
	/* Tear down any channels that were set up before failing */
	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);
disable_clks:
	xdma_disable_allclks(xdev);

	return err;
}
/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);

	return 0;
}

static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_probe,
	.remove = xilinx_dma_remove,
};

module_platform_driver(xilinx_vdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");