/*
 * Copyright (C) 2013 Boris BREZILLON <b.brezillon.dev@gmail.com>
 *
 * Derived from:
 *	https://github.com/yuq/sunxi-nfc-mtd
 *	Copyright (C) 2013 Qiang Yu <yuq825@gmail.com>
 *
 *	https://github.com/hno/Allwinner-Info
 *	Copyright (C) 2013 Henrik Nordström <Henrik Nordström>
 *
 *	Copyright (C) 2013 Dmitriy B. <rzk333@gmail.com>
 *	Copyright (C) 2013 Sergey Lapin <slapin@ossfans.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/reset.h>

#define NFC_REG_CTL 0x0000
#define NFC_REG_ST 0x0004
#define NFC_REG_INT 0x0008
#define NFC_REG_TIMING_CTL 0x000C
#define NFC_REG_TIMING_CFG 0x0010
#define NFC_REG_ADDR_LOW 0x0014
#define NFC_REG_ADDR_HIGH 0x0018
#define NFC_REG_SECTOR_NUM 0x001C
#define NFC_REG_CNT 0x0020
#define NFC_REG_CMD 0x0024
#define NFC_REG_RCMD_SET 0x0028
#define NFC_REG_WCMD_SET 0x002C
#define NFC_REG_IO_DATA 0x0030
#define NFC_REG_ECC_CTL 0x0034
#define NFC_REG_ECC_ST 0x0038
#define NFC_REG_DEBUG 0x003C
#define NFC_REG_ECC_ERR_CNT(x) ((0x0040 + (x)) & ~0x3)
#define NFC_REG_USER_DATA(x) (0x0050 + ((x) * 4))
#define NFC_REG_SPARE_AREA 0x00A0
#define NFC_REG_PAT_ID 0x00A4
#define NFC_RAM0_BASE 0x0400
#define NFC_RAM1_BASE 0x0800

/* define bit use in NFC_CTL */
#define NFC_EN BIT(0)
#define NFC_RESET BIT(1)
#define NFC_BUS_WIDTH_MSK BIT(2)
#define NFC_BUS_WIDTH_8 (0 << 2)
#define NFC_BUS_WIDTH_16 (1 << 2)
#define NFC_RB_SEL_MSK BIT(3)
#define NFC_RB_SEL(x) ((x) << 3)
#define NFC_CE_SEL_MSK GENMASK(26, 24)
#define NFC_CE_SEL(x) ((x) << 24)
#define NFC_CE_CTL BIT(6)
#define NFC_PAGE_SHIFT_MSK GENMASK(11, 8)
#define NFC_PAGE_SHIFT(x) (((x) < 10 ? 0 : (x) - 10) << 8)
#define NFC_SAM BIT(12)
#define NFC_RAM_METHOD BIT(14)
#define NFC_DEBUG_CTL BIT(31)

/* define bit use in NFC_ST */
#define NFC_RB_B2R BIT(0)
#define NFC_CMD_INT_FLAG BIT(1)
#define NFC_DMA_INT_FLAG BIT(2)
#define NFC_CMD_FIFO_STATUS BIT(3)
#define NFC_STA BIT(4)
#define NFC_NATCH_INT_FLAG BIT(5)
#define NFC_RB_STATE(x) BIT(x + 8)

/* define bit use in NFC_INT */
#define NFC_B2R_INT_ENABLE BIT(0)
#define NFC_CMD_INT_ENABLE BIT(1)
#define NFC_DMA_INT_ENABLE BIT(2)
#define NFC_INT_MASK (NFC_B2R_INT_ENABLE | \
		      NFC_CMD_INT_ENABLE | \
		      NFC_DMA_INT_ENABLE)

/* define bit use in NFC_TIMING_CTL */
#define NFC_TIMING_CTL_EDO BIT(8)

/* define NFC_TIMING_CFG register layout */
#define NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD) \
	(((tWB) & 0x3) | (((tADL) & 0x3) << 2) | \
	 (((tWHR) & 0x3) << 4) | (((tRHW) & 0x3) << 6) | \
	 (((tCAD) & 0x7) << 8))
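
/*
 * Illustrative layout of the value built by NFC_TIMING_CFG() above (derived
 * from the shifts in the macro itself, not from a datasheet):
 *
 *	bits [1:0]	tWB
 *	bits [3:2]	tADL
 *	bits [5:4]	tWHR
 *	bits [7:6]	tRHW
 *	bits [10:8]	tCAD
 */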

/* define bit use in NFC_CMD */
#define NFC_CMD_LOW_BYTE_MSK GENMASK(7, 0)
#define NFC_CMD_HIGH_BYTE_MSK GENMASK(15, 8)
#define NFC_CMD(x) (x)
#define NFC_ADR_NUM_MSK GENMASK(18, 16)
#define NFC_ADR_NUM(x) (((x) - 1) << 16)
#define NFC_SEND_ADR BIT(19)
#define NFC_ACCESS_DIR BIT(20)
#define NFC_DATA_TRANS BIT(21)
#define NFC_SEND_CMD1 BIT(22)
#define NFC_WAIT_FLAG BIT(23)
#define NFC_SEND_CMD2 BIT(24)
#define NFC_SEQ BIT(25)
#define NFC_DATA_SWAP_METHOD BIT(26)
#define NFC_ROW_AUTO_INC BIT(27)
#define NFC_SEND_CMD3 BIT(28)
#define NFC_SEND_CMD4 BIT(29)
#define NFC_CMD_TYPE_MSK GENMASK(31, 30)
#define NFC_NORMAL_OP (0 << 30)
#define NFC_ECC_OP (1 << 30)
#define NFC_PAGE_OP (2 << 30)

/* define bit use in NFC_RCMD_SET */
#define NFC_READ_CMD_MSK GENMASK(7, 0)
#define NFC_RND_READ_CMD0_MSK GENMASK(15, 8)
#define NFC_RND_READ_CMD1_MSK GENMASK(23, 16)

/* define bit use in NFC_WCMD_SET */
#define NFC_PROGRAM_CMD_MSK GENMASK(7, 0)
#define NFC_RND_WRITE_CMD_MSK GENMASK(15, 8)
#define NFC_READ_CMD0_MSK GENMASK(23, 16)
#define NFC_READ_CMD1_MSK GENMASK(31, 24)

/* define bit use in NFC_ECC_CTL */
#define NFC_ECC_EN BIT(0)
#define NFC_ECC_PIPELINE BIT(3)
#define NFC_ECC_EXCEPTION BIT(4)
#define NFC_ECC_BLOCK_SIZE_MSK BIT(5)
#define NFC_RANDOM_EN BIT(9)
#define NFC_RANDOM_DIRECTION BIT(10)
#define NFC_ECC_MODE_MSK GENMASK(15, 12)
#define NFC_ECC_MODE(x) ((x) << 12)
#define NFC_RANDOM_SEED_MSK GENMASK(30, 16)
#define NFC_RANDOM_SEED(x) ((x) << 16)

/* define bit use in NFC_ECC_ST */
#define NFC_ECC_ERR(x) BIT(x)
#define NFC_ECC_ERR_MSK GENMASK(15, 0)
#define NFC_ECC_PAT_FOUND(x) BIT(x + 16)
#define NFC_ECC_ERR_CNT(b, x) (((x) >> (((b) % 4) * 8)) & 0xff)

#define NFC_DEFAULT_TIMEOUT_MS 1000

#define NFC_SRAM_SIZE 1024

#define NFC_MAX_CS 7

/*
 * Ready/Busy detection type: describes the Ready/Busy detection modes
 *
 * @RB_NONE:	no external detection available, rely on STATUS command
 *		and software timeouts
 * @RB_NATIVE:	use sunxi NAND controller Ready/Busy support. The Ready/Busy
 *		pin of the NAND flash chip must be connected to one of the
 *		native NAND R/B pins (those which can be muxed to the NAND
 *		Controller)
 * @RB_GPIO:	use a simple GPIO to handle Ready/Busy status. The Ready/Busy
 *		pin of the NAND flash chip must be connected to a GPIO capable
 *		pin.
 */
enum sunxi_nand_rb_type {
	RB_NONE,
	RB_NATIVE,
	RB_GPIO,
};

/*
 * Ready/Busy structure: stores information related to Ready/Busy detection
 *
 * @type:	the Ready/Busy detection mode
 * @info:	information related to the R/B detection mode. Either a gpio
 *		id or a native R/B id (those supported by the NAND controller).
 */
struct sunxi_nand_rb {
	enum sunxi_nand_rb_type type;
	union {
		int gpio;
		int nativeid;
	} info;
};

/*
 * Chip Select structure: stores information related to NAND Chip Select
 *
 * @cs:		the NAND CS id used to communicate with a NAND Chip
 * @rb:		the Ready/Busy description
 */
struct sunxi_nand_chip_sel {
	u8 cs;
	struct sunxi_nand_rb rb;
};

/*
 * sunxi HW ECC infos: stores information related to HW ECC support
 *
 * @mode:	the sunxi ECC mode field deduced from ECC requirements
 */
struct sunxi_nand_hw_ecc {
	int mode;
};

/*
 * NAND chip structure: stores NAND chip device related information
 *
 * @node:	used to store NAND chips into a list
 * @nand:	base NAND chip structure
 * @mtd:	base MTD structure
 * @clk_rate:	clk_rate required for this NAND chip
 * @timing_cfg:	TIMING_CFG register value for this NAND chip
 * @selected:	current active CS
 * @nsels:	number of CS lines required by the NAND chip
 * @sels:	array of CS lines descriptions
 */
struct sunxi_nand_chip {
	struct list_head node;
	struct nand_chip nand;
	unsigned long clk_rate;
	u32 timing_cfg;
	u32 timing_ctl;
	int selected;
	int addr_cycles;
	u32 addr[2];
	int cmd_cycles;
	u8 cmd[2];
	int nsels;
	struct sunxi_nand_chip_sel sels[0];
};
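
/*
 * Note: sels[] above is a zero-length (flexible) array, so a sunxi_nand_chip
 * must be allocated with room for @nsels trailing entries. A minimal sketch
 * of such an allocation ("dev" and "nsels" are placeholders here, this is not
 * the driver's actual probe code):
 *
 *	chip = devm_kzalloc(dev,
 *			    sizeof(*chip) + nsels * sizeof(*chip->sels),
 *			    GFP_KERNEL);
 */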

static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
{
	return container_of(nand, struct sunxi_nand_chip, nand);
}

/*
 * NAND Controller structure: stores sunxi NAND controller information
 *
 * @controller:		base controller structure
 * @dev:		parent device (used to print error messages)
 * @regs:		NAND controller registers
 * @ahb_clk:		NAND Controller AHB clock
 * @mod_clk:		NAND Controller mod clock
 * @assigned_cs:	bitmask describing already assigned CS lines
 * @clk_rate:		NAND controller current clock rate
 * @chips:		a list containing all the NAND chips attached to
 *			this NAND controller
 * @complete:		a completion object used to wait for NAND
 *			controller events
 */
struct sunxi_nfc {
	struct nand_hw_control controller;
	struct device *dev;
	void __iomem *regs;
	struct clk *ahb_clk;
	struct clk *mod_clk;
	struct reset_control *reset;
	unsigned long assigned_cs;
	unsigned long clk_rate;
	struct list_head chips;
	struct completion complete;
	struct dma_chan *dmac;
};

static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_hw_control *ctrl)
{
	return container_of(ctrl, struct sunxi_nfc, controller);
}

static irqreturn_t sunxi_nfc_interrupt(int irq, void *dev_id)
{
	struct sunxi_nfc *nfc = dev_id;
	u32 st = readl(nfc->regs + NFC_REG_ST);
	u32 ien = readl(nfc->regs + NFC_REG_INT);

	if (!(ien & st))
		return IRQ_NONE;

	if ((ien & st) == ien)
		complete(&nfc->complete);

	writel(st & NFC_INT_MASK, nfc->regs + NFC_REG_ST);
	writel(~st & ien & NFC_INT_MASK, nfc->regs + NFC_REG_INT);

	return IRQ_HANDLED;
}

static int sunxi_nfc_wait_events(struct sunxi_nfc *nfc, u32 events,
				 bool use_polling, unsigned int timeout_ms)
{
	int ret;

	if (events & ~NFC_INT_MASK)
		return -EINVAL;

	if (!timeout_ms)
		timeout_ms = NFC_DEFAULT_TIMEOUT_MS;

	if (!use_polling) {
		init_completion(&nfc->complete);

		writel(events, nfc->regs + NFC_REG_INT);

		ret = wait_for_completion_timeout(&nfc->complete,
						  msecs_to_jiffies(timeout_ms));
		if (!ret)
			ret = -ETIMEDOUT;
		else
			ret = 0;

		writel(0, nfc->regs + NFC_REG_INT);
	} else {
		u32 status;

		ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
					 (status & events) == events, 1,
					 timeout_ms * 1000);
	}

	writel(events & NFC_INT_MASK, nfc->regs + NFC_REG_ST);

	if (ret)
		dev_err(nfc->dev, "wait interrupt timedout\n");

	return ret;
}

static int sunxi_nfc_wait_cmd_fifo_empty(struct sunxi_nfc *nfc)
{
	u32 status;
	int ret;

	ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
				 !(status & NFC_CMD_FIFO_STATUS), 1,
				 NFC_DEFAULT_TIMEOUT_MS * 1000);
	if (ret)
		dev_err(nfc->dev, "wait for empty cmd FIFO timedout\n");

	return ret;
}

static int sunxi_nfc_rst(struct sunxi_nfc *nfc)
{
	u32 ctl;
	int ret;

	writel(0, nfc->regs + NFC_REG_ECC_CTL);
	writel(NFC_RESET, nfc->regs + NFC_REG_CTL);

	ret = readl_poll_timeout(nfc->regs + NFC_REG_CTL, ctl,
				 !(ctl & NFC_RESET), 1,
				 NFC_DEFAULT_TIMEOUT_MS * 1000);
	if (ret)
		dev_err(nfc->dev, "wait for NAND controller reset timedout\n");

	return ret;
}

static int sunxi_nfc_dma_op_prepare(struct mtd_info *mtd, const void *buf,
				    int chunksize, int nchunks,
				    enum dma_data_direction ddir,
				    struct scatterlist *sg)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	struct dma_async_tx_descriptor *dmad;
	enum dma_transfer_direction tdir;
	dma_cookie_t dmat;
	int ret;

	if (ddir == DMA_FROM_DEVICE)
		tdir = DMA_DEV_TO_MEM;
	else
		tdir = DMA_MEM_TO_DEV;

	sg_init_one(sg, buf, nchunks * chunksize);
	ret = dma_map_sg(nfc->dev, sg, 1, ddir);
	if (!ret)
		return -ENOMEM;

	dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK);
	if (!dmad) {
		ret = -EINVAL;
		goto err_unmap_buf;
	}

	writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
	       nfc->regs + NFC_REG_CTL);
	writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM);
	writel(chunksize, nfc->regs + NFC_REG_CNT);
	dmat = dmaengine_submit(dmad);

	ret = dma_submit_error(dmat);
	if (ret)
		goto err_clr_dma_flag;

	return 0;

err_clr_dma_flag:
	writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
	       nfc->regs + NFC_REG_CTL);

err_unmap_buf:
	dma_unmap_sg(nfc->dev, sg, 1, ddir);
	return ret;
}

static void sunxi_nfc_dma_op_cleanup(struct mtd_info *mtd,
				     enum dma_data_direction ddir,
				     struct scatterlist *sg)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);

	dma_unmap_sg(nfc->dev, sg, 1, ddir);
	writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
	       nfc->regs + NFC_REG_CTL);
}

static int sunxi_nfc_dev_ready(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
	struct sunxi_nand_rb *rb;
	int ret;

	if (sunxi_nand->selected < 0)
		return 0;

	rb = &sunxi_nand->sels[sunxi_nand->selected].rb;

	switch (rb->type) {
	case RB_NATIVE:
		ret = !!(readl(nfc->regs + NFC_REG_ST) &
			 NFC_RB_STATE(rb->info.nativeid));
		break;
	case RB_GPIO:
		ret = gpio_get_value(rb->info.gpio);
		break;
	case RB_NONE:
	default:
		ret = 0;
		dev_err(nfc->dev, "cannot check R/B NAND status!\n");
		break;
	}

	return ret;
}

static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
	struct sunxi_nand_chip_sel *sel;
	u32 ctl;

	if (chip > 0 && chip >= sunxi_nand->nsels)
		return;

	if (chip == sunxi_nand->selected)
		return;

	ctl = readl(nfc->regs + NFC_REG_CTL) &
	      ~(NFC_PAGE_SHIFT_MSK | NFC_CE_SEL_MSK | NFC_RB_SEL_MSK | NFC_EN);

	if (chip >= 0) {
		sel = &sunxi_nand->sels[chip];

		ctl |= NFC_CE_SEL(sel->cs) | NFC_EN |
		       NFC_PAGE_SHIFT(nand->page_shift);
		if (sel->rb.type == RB_NONE) {
			nand->dev_ready = NULL;
		} else {
			nand->dev_ready = sunxi_nfc_dev_ready;
			if (sel->rb.type == RB_NATIVE)
				ctl |= NFC_RB_SEL(sel->rb.info.nativeid);
		}

		writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);

		if (nfc->clk_rate != sunxi_nand->clk_rate) {
			clk_set_rate(nfc->mod_clk, sunxi_nand->clk_rate);
			nfc->clk_rate = sunxi_nand->clk_rate;
		}
	}

	writel(sunxi_nand->timing_ctl, nfc->regs + NFC_REG_TIMING_CTL);
	writel(sunxi_nand->timing_cfg, nfc->regs + NFC_REG_TIMING_CFG);
	writel(ctl, nfc->regs + NFC_REG_CTL);

	sunxi_nand->selected = chip;
}

static void sunxi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
	int ret;
	int cnt;
	int offs = 0;
	u32 tmp;

	while (len > offs) {
		cnt = min(len - offs, NFC_SRAM_SIZE);

		ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
		if (ret)
			break;

		writel(cnt, nfc->regs + NFC_REG_CNT);
		tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
		writel(tmp, nfc->regs + NFC_REG_CMD);

		ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
		if (ret)
			break;

		if (buf)
			memcpy_fromio(buf + offs, nfc->regs + NFC_RAM0_BASE,
				      cnt);
		offs += cnt;
	}
}

static void sunxi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
				int len)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
	int ret;
	int cnt;
	int offs = 0;
	u32 tmp;

	while (len > offs) {
		cnt = min(len - offs, NFC_SRAM_SIZE);

		ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
		if (ret)
			break;

		writel(cnt, nfc->regs + NFC_REG_CNT);
		memcpy_toio(nfc->regs + NFC_RAM0_BASE, buf + offs, cnt);
		tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
		      NFC_ACCESS_DIR;
		writel(tmp, nfc->regs + NFC_REG_CMD);

		ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
		if (ret)
			break;

		offs += cnt;
	}
}

static uint8_t sunxi_nfc_read_byte(struct mtd_info *mtd)
{
	uint8_t ret;

	sunxi_nfc_read_buf(mtd, &ret, 1);

	return ret;
}

static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat,
			       unsigned int ctrl)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
	int ret;

	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
	if (ret)
		return;

	if (dat == NAND_CMD_NONE && (ctrl & NAND_NCE) &&
	    !(ctrl & (NAND_CLE | NAND_ALE))) {
		u32 cmd = 0;

		if (!sunxi_nand->addr_cycles && !sunxi_nand->cmd_cycles)
			return;

		if (sunxi_nand->cmd_cycles--)
			cmd |= NFC_SEND_CMD1 | sunxi_nand->cmd[0];

		if (sunxi_nand->cmd_cycles--) {
			cmd |= NFC_SEND_CMD2;
			writel(sunxi_nand->cmd[1],
			       nfc->regs + NFC_REG_RCMD_SET);
		}

		sunxi_nand->cmd_cycles = 0;

		if (sunxi_nand->addr_cycles) {
			cmd |= NFC_SEND_ADR |
			       NFC_ADR_NUM(sunxi_nand->addr_cycles);
			writel(sunxi_nand->addr[0],
			       nfc->regs + NFC_REG_ADDR_LOW);
		}

		if (sunxi_nand->addr_cycles > 4)
			writel(sunxi_nand->addr[1],
			       nfc->regs + NFC_REG_ADDR_HIGH);

		writel(cmd, nfc->regs + NFC_REG_CMD);
		sunxi_nand->addr[0] = 0;
		sunxi_nand->addr[1] = 0;
		sunxi_nand->addr_cycles = 0;
		sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
	}

	if (ctrl & NAND_CLE) {
		sunxi_nand->cmd[sunxi_nand->cmd_cycles++] = dat;
	} else if (ctrl & NAND_ALE) {
		sunxi_nand->addr[sunxi_nand->addr_cycles / 4] |=
				dat << ((sunxi_nand->addr_cycles % 4) * 8);
		sunxi_nand->addr_cycles++;
	}
}
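
/*
 * Note on sunxi_nfc_cmd_ctrl(): CLE/ALE cycles are not forwarded to the
 * controller immediately. Command opcodes and address bytes are buffered in
 * cmd[]/addr[] and only pushed to the hardware (NFC_REG_CMD and
 * NFC_REG_ADDR_LOW/HIGH, with the NFC_SEND_CMD1/NFC_SEND_CMD2/NFC_SEND_ADR
 * flags) when the NAND core issues NAND_CMD_NONE with only NCE asserted,
 * i.e. at the end of a command/address sequence. This lets the NFC emit the
 * whole sequence as a single controller operation.
 */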

/* These seed values have been extracted from Allwinner's BSP */
static const u16 sunxi_nfc_randomizer_page_seeds[] = {
	0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72,
	0x0d67, 0x67f9, 0x1be7, 0x077d, 0x032f, 0x0dac, 0x2716, 0x2436,
	0x7922, 0x1510, 0x3860, 0x5287, 0x480f, 0x4252, 0x1789, 0x5a2d,
	0x2a49, 0x5e10, 0x437f, 0x4b4e, 0x2f45, 0x216e, 0x5cb7, 0x7130,
	0x2a3f, 0x60e4, 0x4dc9, 0x0ef0, 0x0f52, 0x1bb9, 0x6211, 0x7a56,
	0x226d, 0x4ea7, 0x6f36, 0x3692, 0x38bf, 0x0c62, 0x05eb, 0x4c55,
	0x60f4, 0x728c, 0x3b6f, 0x2037, 0x7f69, 0x0936, 0x651a, 0x4ceb,
	0x6218, 0x79f3, 0x383f, 0x18d9, 0x4f05, 0x5c82, 0x2912, 0x6f17,
	0x6856, 0x5938, 0x1007, 0x61ab, 0x3e7f, 0x57c2, 0x542f, 0x4f62,
	0x7454, 0x2eac, 0x7739, 0x42d4, 0x2f90, 0x435a, 0x2e52, 0x2064,
	0x637c, 0x66ad, 0x2c90, 0x0bad, 0x759c, 0x0029, 0x0986, 0x7126,
	0x1ca7, 0x1605, 0x386a, 0x27f5, 0x1380, 0x6d75, 0x24c3, 0x0f8e,
	0x2b7a, 0x1418, 0x1fd1, 0x7dc1, 0x2d8e, 0x43af, 0x2267, 0x7da3,
	0x4e3d, 0x1338, 0x50db, 0x454d, 0x764d, 0x40a3, 0x42e6, 0x262b,
	0x2d2e, 0x1aea, 0x2e17, 0x173d, 0x3a6e, 0x71bf, 0x25f9, 0x0a5d,
	0x7c57, 0x0fbe, 0x46ce, 0x4939, 0x6b17, 0x37bb, 0x3e91, 0x76db,
};

/*
 * sunxi_nfc_randomizer_ecc512_seeds and sunxi_nfc_randomizer_ecc1024_seeds
 * have been generated using
 * sunxi_nfc_randomizer_step(seed, (step_size * 8) + 15), which is what
 * the randomizer engine does internally before de/scrambling OOB data.
 *
 * Those tables are statically defined to avoid calculating randomizer state
 * at runtime.
 */
static const u16 sunxi_nfc_randomizer_ecc512_seeds[] = {
	0x3346, 0x367f, 0x1f18, 0x769a, 0x4f64, 0x068c, 0x2ef1, 0x6b64,
	0x28a9, 0x15d7, 0x30f8, 0x3659, 0x53db, 0x7c5f, 0x71d4, 0x4409,
	0x26eb, 0x03cc, 0x655d, 0x47d4, 0x4daa, 0x0877, 0x712d, 0x3617,
	0x3264, 0x49aa, 0x7f9e, 0x588e, 0x4fbc, 0x7176, 0x7f91, 0x6c6d,
	0x4b95, 0x5fb7, 0x3844, 0x4037, 0x0184, 0x081b, 0x0ee8, 0x5b91,
	0x293d, 0x1f71, 0x0e6f, 0x402b, 0x5122, 0x1e52, 0x22be, 0x3d2d,
	0x75bc, 0x7c60, 0x6291, 0x1a2f, 0x61d4, 0x74aa, 0x4140, 0x29ab,
	0x472d, 0x2852, 0x017e, 0x15e8, 0x5ec2, 0x17cf, 0x7d0f, 0x06b8,
	0x117a, 0x6b94, 0x789b, 0x3126, 0x6ac5, 0x5be7, 0x150f, 0x51f8,
	0x7889, 0x0aa5, 0x663d, 0x77e8, 0x0b87, 0x3dcb, 0x360d, 0x218b,
	0x512f, 0x7dc9, 0x6a4d, 0x630a, 0x3547, 0x1dd2, 0x5aea, 0x69a5,
	0x7bfa, 0x5e4f, 0x1519, 0x6430, 0x3a0e, 0x5eb3, 0x5425, 0x0c7a,
	0x5540, 0x3670, 0x63c1, 0x31e9, 0x5a39, 0x2de7, 0x5979, 0x2891,
	0x1562, 0x014b, 0x5b05, 0x2756, 0x5a34, 0x13aa, 0x6cb5, 0x2c36,
	0x5e72, 0x1306, 0x0861, 0x15ef, 0x1ee8, 0x5a37, 0x7ac4, 0x45dd,
	0x44c4, 0x7266, 0x2f41, 0x3ccc, 0x045e, 0x7d40, 0x7c66, 0x0fa0,
};

static const u16 sunxi_nfc_randomizer_ecc1024_seeds[] = {
	0x2cf5, 0x35f1, 0x63a4, 0x5274, 0x2bd2, 0x778b, 0x7285, 0x32b6,
	0x6a5c, 0x70d6, 0x757d, 0x6769, 0x5375, 0x1e81, 0x0cf3, 0x3982,
	0x6787, 0x042a, 0x6c49, 0x1925, 0x56a8, 0x40a9, 0x063e, 0x7bd9,
	0x4dbf, 0x55ec, 0x672e, 0x7334, 0x5185, 0x4d00, 0x232a, 0x7e07,
	0x445d, 0x6b92, 0x528f, 0x4255, 0x53ba, 0x7d82, 0x2a2e, 0x3a4e,
	0x75eb, 0x450c, 0x6844, 0x1b5d, 0x581a, 0x4cc6, 0x0379, 0x37b2,
	0x419f, 0x0e92, 0x6b27, 0x5624, 0x01e3, 0x07c1, 0x44a5, 0x130c,
	0x13e8, 0x5910, 0x0876, 0x60c5, 0x54e3, 0x5b7f, 0x2269, 0x509f,
	0x7665, 0x36fd, 0x3e9a, 0x0579, 0x6295, 0x14ef, 0x0a81, 0x1bcc,
	0x4b16, 0x64db, 0x0514, 0x4f07, 0x0591, 0x3576, 0x6853, 0x0d9e,
	0x259f, 0x38b7, 0x64fb, 0x3094, 0x4693, 0x6ddd, 0x29bb, 0x0bc8,
	0x3f47, 0x490e, 0x0c0e, 0x7933, 0x3c9e, 0x5840, 0x398d, 0x3e68,
	0x4af1, 0x71f5, 0x57cf, 0x1121, 0x64eb, 0x3579, 0x15ac, 0x584d,
	0x5f2a, 0x47e2, 0x6528, 0x6eac, 0x196e, 0x6b96, 0x0450, 0x0179,
	0x609c, 0x06e1, 0x4626, 0x42c7, 0x273e, 0x486f, 0x0705, 0x1601,
	0x145b, 0x407e, 0x062b, 0x57a5, 0x53f9, 0x5659, 0x4410, 0x3ccd,
};

static u16 sunxi_nfc_randomizer_step(u16 state, int count)
{
	state &= 0x7fff;

	/*
	 * This loop is just a simple implementation of a Fibonacci LFSR using
	 * the x16 + x15 + 1 polynomial.
	 */
	while (count--)
		state = ((state >> 1) |
			 (((state ^ (state >> 1)) & 1) << 14)) & 0x7fff;

	return state;
}
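
/*
 * Illustrative sketch (not used by the driver): as described in the comment
 * preceding the tables, the ECC seed tables could presumably be regenerated
 * from the page seeds with the step function above, e.g. for the 512-byte
 * step size:
 *
 *	for (i = 0; i < ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds); i++)
 *		ecc512_seeds[i] = sunxi_nfc_randomizer_step(
 *					sunxi_nfc_randomizer_page_seeds[i],
 *					(512 * 8) + 15);
 *
 * Keeping the tables static avoids recomputing that LFSR state at runtime.
 */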
  620. static u16 sunxi_nfc_randomizer_state(struct mtd_info *mtd, int page, bool ecc)
  621. {
  622. const u16 *seeds = sunxi_nfc_randomizer_page_seeds;
  623. int mod = mtd_div_by_ws(mtd->erasesize, mtd);
  624. if (mod > ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds))
  625. mod = ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds);
  626. if (ecc) {
  627. if (mtd->ecc_step_size == 512)
  628. seeds = sunxi_nfc_randomizer_ecc512_seeds;
  629. else
  630. seeds = sunxi_nfc_randomizer_ecc1024_seeds;
  631. }
  632. return seeds[page % mod];
  633. }
  634. static void sunxi_nfc_randomizer_config(struct mtd_info *mtd,
  635. int page, bool ecc)
  636. {
  637. struct nand_chip *nand = mtd_to_nand(mtd);
  638. struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
  639. u32 ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
  640. u16 state;
  641. if (!(nand->options & NAND_NEED_SCRAMBLING))
  642. return;
  643. ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
  644. state = sunxi_nfc_randomizer_state(mtd, page, ecc);
  645. ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_SEED_MSK;
  646. writel(ecc_ctl | NFC_RANDOM_SEED(state), nfc->regs + NFC_REG_ECC_CTL);
  647. }
  648. static void sunxi_nfc_randomizer_enable(struct mtd_info *mtd)
  649. {
  650. struct nand_chip *nand = mtd_to_nand(mtd);
  651. struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
  652. if (!(nand->options & NAND_NEED_SCRAMBLING))
  653. return;
  654. writel(readl(nfc->regs + NFC_REG_ECC_CTL) | NFC_RANDOM_EN,
  655. nfc->regs + NFC_REG_ECC_CTL);
  656. }
  657. static void sunxi_nfc_randomizer_disable(struct mtd_info *mtd)
  658. {
  659. struct nand_chip *nand = mtd_to_nand(mtd);
  660. struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
  661. if (!(nand->options & NAND_NEED_SCRAMBLING))
  662. return;
  663. writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_EN,
  664. nfc->regs + NFC_REG_ECC_CTL);
  665. }
  666. static void sunxi_nfc_randomize_bbm(struct mtd_info *mtd, int page, u8 *bbm)
  667. {
  668. u16 state = sunxi_nfc_randomizer_state(mtd, page, true);
  669. bbm[0] ^= state;
  670. bbm[1] ^= sunxi_nfc_randomizer_step(state, 8);
  671. }
  672. static void sunxi_nfc_randomizer_write_buf(struct mtd_info *mtd,
  673. const uint8_t *buf, int len,
  674. bool ecc, int page)
  675. {
  676. sunxi_nfc_randomizer_config(mtd, page, ecc);
  677. sunxi_nfc_randomizer_enable(mtd);
  678. sunxi_nfc_write_buf(mtd, buf, len);
  679. sunxi_nfc_randomizer_disable(mtd);
  680. }
  681. static void sunxi_nfc_randomizer_read_buf(struct mtd_info *mtd, uint8_t *buf,
  682. int len, bool ecc, int page)
  683. {
  684. sunxi_nfc_randomizer_config(mtd, page, ecc);
  685. sunxi_nfc_randomizer_enable(mtd);
  686. sunxi_nfc_read_buf(mtd, buf, len);
  687. sunxi_nfc_randomizer_disable(mtd);
  688. }
  689. static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd)
  690. {
  691. struct nand_chip *nand = mtd_to_nand(mtd);
  692. struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
  693. struct sunxi_nand_hw_ecc *data = nand->ecc.priv;
  694. u32 ecc_ctl;
  695. ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
  696. ecc_ctl &= ~(NFC_ECC_MODE_MSK | NFC_ECC_PIPELINE |
  697. NFC_ECC_BLOCK_SIZE_MSK);
  698. ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION |
  699. NFC_ECC_PIPELINE;
  700. writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL);
  701. }
  702. static void sunxi_nfc_hw_ecc_disable(struct mtd_info *mtd)
  703. {
  704. struct nand_chip *nand = mtd_to_nand(mtd);
  705. struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
  706. writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_ECC_EN,
  707. nfc->regs + NFC_REG_ECC_CTL);
  708. }
  709. static inline void sunxi_nfc_user_data_to_buf(u32 user_data, u8 *buf)
  710. {
  711. buf[0] = user_data;
  712. buf[1] = user_data >> 8;
  713. buf[2] = user_data >> 16;
  714. buf[3] = user_data >> 24;
  715. }
  716. static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf)
  717. {
  718. return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
  719. }
  720. static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct mtd_info *mtd, u8 *oob,
  721. int step, bool bbm, int page)
  722. {
  723. struct nand_chip *nand = mtd_to_nand(mtd);
  724. struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
  725. sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(step)),
  726. oob);
  727. /* De-randomize the Bad Block Marker. */
  728. if (bbm && (nand->options & NAND_NEED_SCRAMBLING))
  729. sunxi_nfc_randomize_bbm(mtd, page, oob);
  730. }
  731. static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct mtd_info *mtd,
  732. const u8 *oob, int step,
  733. bool bbm, int page)
  734. {
  735. struct nand_chip *nand = mtd_to_nand(mtd);
  736. struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
  737. u8 user_data[4];
  738. /* Randomize the Bad Block Marker. */
  739. if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) {
  740. memcpy(user_data, oob, sizeof(user_data));
  741. sunxi_nfc_randomize_bbm(mtd, page, user_data);
  742. oob = user_data;
  743. }
  744. writel(sunxi_nfc_buf_to_user_data(oob),
  745. nfc->regs + NFC_REG_USER_DATA(step));
  746. }
  747. static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd,
  748. unsigned int *max_bitflips, int ret)
  749. {
  750. if (ret < 0) {
  751. mtd->ecc_stats.failed++;
  752. } else {
  753. mtd->ecc_stats.corrected += ret;
  754. *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
  755. }
  756. }
  757. static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob,
  758. int step, u32 status, bool *erased)
  759. {
  760. struct nand_chip *nand = mtd_to_nand(mtd);
  761. struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
  762. struct nand_ecc_ctrl *ecc = &nand->ecc;
  763. u32 tmp;
  764. *erased = false;
  765. if (status & NFC_ECC_ERR(step))
  766. return -EBADMSG;
  767. if (status & NFC_ECC_PAT_FOUND(step)) {
  768. u8 pattern;
  769. if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1))) {
  770. pattern = 0x0;
  771. } else {
  772. pattern = 0xff;
  773. *erased = true;
  774. }
  775. if (data)
  776. memset(data, pattern, ecc->size);
  777. if (oob)
  778. memset(oob, pattern, ecc->bytes + 4);
  779. return 0;
  780. }
  781. tmp = readl(nfc->regs + NFC_REG_ECC_ERR_CNT(step));
  782. return NFC_ECC_ERR_CNT(step, tmp);
  783. }
  784. static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
  785. u8 *data, int data_off,
  786. u8 *oob, int oob_off,
  787. int *cur_off,
  788. unsigned int *max_bitflips,
  789. bool bbm, bool oob_required, int page)
  790. {
  791. struct nand_chip *nand = mtd_to_nand(mtd);
  792. struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
  793. struct nand_ecc_ctrl *ecc = &nand->ecc;
  794. int raw_mode = 0;
  795. bool erased;
  796. int ret;
  797. if (*cur_off != data_off)
  798. nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
  799. sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page);
  800. if (data_off + ecc->size != oob_off)
  801. nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
  802. ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
  803. if (ret)
  804. return ret;
  805. sunxi_nfc_randomizer_enable(mtd);
  806. writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
  807. nfc->regs + NFC_REG_CMD);
  808. ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
  809. sunxi_nfc_randomizer_disable(mtd);
  810. if (ret)
  811. return ret;
  812. *cur_off = oob_off + ecc->bytes + 4;
  813. ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? oob : NULL, 0,
  814. readl(nfc->regs + NFC_REG_ECC_ST),
  815. &erased);
  816. if (erased)
  817. return 1;
  818. if (ret < 0) {
  819. /*
  820. * Re-read the data with the randomizer disabled to identify
  821. * bitflips in erased pages.
  822. */
  823. if (nand->options & NAND_NEED_SCRAMBLING) {
  824. nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
  825. nand->read_buf(mtd, data, ecc->size);
  826. } else {
  827. memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE,
  828. ecc->size);
  829. }
  830. nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
  831. nand->read_buf(mtd, oob, ecc->bytes + 4);
  832. ret = nand_check_erased_ecc_chunk(data, ecc->size,
  833. oob, ecc->bytes + 4,
  834. NULL, 0, ecc->strength);
  835. if (ret >= 0)
  836. raw_mode = 1;
  837. } else {
  838. memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);
  839. if (oob_required) {
  840. nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
  841. sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4,
  842. true, page);
  843. sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, 0,
  844. bbm, page);
  845. }
  846. }
  847. sunxi_nfc_hw_ecc_update_stats(mtd, max_bitflips, ret);
  848. return raw_mode;
  849. }
  850. static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
  851. u8 *oob, int *cur_off,
  852. bool randomize, int page)
  853. {
  854. struct nand_chip *nand = mtd_to_nand(mtd);
  855. struct nand_ecc_ctrl *ecc = &nand->ecc;
  856. int offset = ((ecc->bytes + 4) * ecc->steps);
  857. int len = mtd->oobsize - offset;
  858. if (len <= 0)
  859. return;
  860. if (!cur_off || *cur_off != offset)
  861. nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
  862. offset + mtd->writesize, -1);
  863. if (!randomize)
  864. sunxi_nfc_read_buf(mtd, oob + offset, len);
  865. else
  866. sunxi_nfc_randomizer_read_buf(mtd, oob + offset, len,
  867. false, page);
  868. if (cur_off)
  869. *cur_off = mtd->oobsize + mtd->writesize;
  870. }
  871. static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
  872. int oob_required, int page,
  873. int nchunks)
  874. {
  875. struct nand_chip *nand = mtd_to_nand(mtd);
  876. bool randomized = nand->options & NAND_NEED_SCRAMBLING;
  877. struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
  878. struct nand_ecc_ctrl *ecc = &nand->ecc;
  879. unsigned int max_bitflips = 0;
  880. int ret, i, raw_mode = 0;
  881. struct scatterlist sg;
  882. u32 status;
  883. ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
  884. if (ret)
  885. return ret;
  886. ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, nchunks,
  887. DMA_FROM_DEVICE, &sg);
  888. if (ret)
  889. return ret;
  890. sunxi_nfc_hw_ecc_enable(mtd);
  891. sunxi_nfc_randomizer_config(mtd, page, false);
  892. sunxi_nfc_randomizer_enable(mtd);
  893. writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) |
  894. NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET);
  895. dma_async_issue_pending(nfc->dmac);
  896. writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS,
  897. nfc->regs + NFC_REG_CMD);
  898. ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
  899. if (ret)
  900. dmaengine_terminate_all(nfc->dmac);
  901. sunxi_nfc_randomizer_disable(mtd);
  902. sunxi_nfc_hw_ecc_disable(mtd);
  903. sunxi_nfc_dma_op_cleanup(mtd, DMA_FROM_DEVICE, &sg);
  904. if (ret)
  905. return ret;
  906. status = readl(nfc->regs + NFC_REG_ECC_ST);
  907. for (i = 0; i < nchunks; i++) {
  908. int data_off = i * ecc->size;
  909. int oob_off = i * (ecc->bytes + 4);
  910. u8 *data = buf + data_off;
  911. u8 *oob = nand->oob_poi + oob_off;
  912. bool erased;
  913. ret = sunxi_nfc_hw_ecc_correct(mtd, randomized ? data : NULL,
  914. oob_required ? oob : NULL,
  915. i, status, &erased);
  916. /* ECC errors are handled in the second loop. */
  917. if (ret < 0)
  918. continue;
  919. if (oob_required && !erased) {
  920. /* TODO: use DMA to retrieve OOB */
  921. nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
  922. mtd->writesize + oob_off, -1);
  923. nand->read_buf(mtd, oob, ecc->bytes + 4);
  924. sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i,
  925. !i, page);
  926. }
  927. if (erased)
  928. raw_mode = 1;
  929. sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
  930. }
  931. if (status & NFC_ECC_ERR_MSK) {
  932. for (i = 0; i < nchunks; i++) {
  933. int data_off = i * ecc->size;
  934. int oob_off = i * (ecc->bytes + 4);
  935. u8 *data = buf + data_off;
  936. u8 *oob = nand->oob_poi + oob_off;
  937. if (!(status & NFC_ECC_ERR(i)))
  938. continue;
  939. /*
  940. * Re-read the data with the randomizer disabled to
  941. * identify bitflips in erased pages.
  942. */
  943. if (randomized) {
  944. /* TODO: use DMA to read page in raw mode */
  945. nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
  946. data_off, -1);
  947. nand->read_buf(mtd, data, ecc->size);
  948. }
  949. /* TODO: use DMA to retrieve OOB */
  950. nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
  951. mtd->writesize + oob_off, -1);
  952. nand->read_buf(mtd, oob, ecc->bytes + 4);
  953. ret = nand_check_erased_ecc_chunk(data, ecc->size,
  954. oob, ecc->bytes + 4,
  955. NULL, 0,
  956. ecc->strength);
  957. if (ret >= 0)
  958. raw_mode = 1;
  959. sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
  960. }
  961. }
  962. if (oob_required)
  963. sunxi_nfc_hw_ecc_read_extra_oob(mtd, nand->oob_poi,
  964. NULL, !raw_mode,
  965. page);
  966. return max_bitflips;
  967. }
  968. static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
  969. const u8 *data, int data_off,
  970. const u8 *oob, int oob_off,
  971. int *cur_off, bool bbm,
  972. int page)
  973. {
  974. struct nand_chip *nand = mtd_to_nand(mtd);
  975. struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
  976. struct nand_ecc_ctrl *ecc = &nand->ecc;
  977. int ret;
  978. if (data_off != *cur_off)
  979. nand->cmdfunc(mtd, NAND_CMD_RNDIN, data_off, -1);
  980. sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page);
  981. if (data_off + ecc->size != oob_off)
  982. nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1);
  983. ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
  984. if (ret)
  985. return ret;
  986. sunxi_nfc_randomizer_enable(mtd);
  987. sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, 0, bbm, page);
  988. writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
  989. NFC_ACCESS_DIR | NFC_ECC_OP,
  990. nfc->regs + NFC_REG_CMD);
  991. ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
  992. sunxi_nfc_randomizer_disable(mtd);
  993. if (ret)
  994. return ret;
  995. *cur_off = oob_off + ecc->bytes + 4;
  996. return 0;
  997. }
  998. static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
  999. u8 *oob, int *cur_off,
  1000. int page)
  1001. {
  1002. struct nand_chip *nand = mtd_to_nand(mtd);
  1003. struct nand_ecc_ctrl *ecc = &nand->ecc;
  1004. int offset = ((ecc->bytes + 4) * ecc->steps);
  1005. int len = mtd->oobsize - offset;
  1006. if (len <= 0)
  1007. return;
  1008. if (!cur_off || *cur_off != offset)
  1009. nand->cmdfunc(mtd, NAND_CMD_RNDIN,
  1010. offset + mtd->writesize, -1);
  1011. sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page);
  1012. if (cur_off)
  1013. *cur_off = mtd->oobsize + mtd->writesize;
  1014. }
  1015. static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
  1016. struct nand_chip *chip, uint8_t *buf,
  1017. int oob_required, int page)
  1018. {
  1019. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1020. unsigned int max_bitflips = 0;
  1021. int ret, i, cur_off = 0;
  1022. bool raw_mode = false;
  1023. sunxi_nfc_hw_ecc_enable(mtd);
  1024. for (i = 0; i < ecc->steps; i++) {
  1025. int data_off = i * ecc->size;
  1026. int oob_off = i * (ecc->bytes + 4);
  1027. u8 *data = buf + data_off;
  1028. u8 *oob = chip->oob_poi + oob_off;
  1029. ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
  1030. oob_off + mtd->writesize,
  1031. &cur_off, &max_bitflips,
  1032. !i, oob_required, page);
  1033. if (ret < 0)
  1034. return ret;
  1035. else if (ret)
  1036. raw_mode = true;
  1037. }
  1038. if (oob_required)
  1039. sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
  1040. !raw_mode, page);
  1041. sunxi_nfc_hw_ecc_disable(mtd);
  1042. return max_bitflips;
  1043. }
  1044. static int sunxi_nfc_hw_ecc_read_page_dma(struct mtd_info *mtd,
  1045. struct nand_chip *chip, u8 *buf,
  1046. int oob_required, int page)
  1047. {
  1048. int ret;
  1049. ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page,
  1050. chip->ecc.steps);
  1051. if (ret >= 0)
  1052. return ret;
  1053. /* Fallback to PIO mode */
  1054. chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
  1055. return sunxi_nfc_hw_ecc_read_page(mtd, chip, buf, oob_required, page);
  1056. }
  1057. static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
  1058. struct nand_chip *chip,
  1059. u32 data_offs, u32 readlen,
  1060. u8 *bufpoi, int page)
  1061. {
  1062. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1063. int ret, i, cur_off = 0;
  1064. unsigned int max_bitflips = 0;
  1065. sunxi_nfc_hw_ecc_enable(mtd);
  1066. chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
  1067. for (i = data_offs / ecc->size;
  1068. i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) {
  1069. int data_off = i * ecc->size;
  1070. int oob_off = i * (ecc->bytes + 4);
  1071. u8 *data = bufpoi + data_off;
  1072. u8 *oob = chip->oob_poi + oob_off;
  1073. ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off,
  1074. oob,
  1075. oob_off + mtd->writesize,
  1076. &cur_off, &max_bitflips, !i,
  1077. false, page);
  1078. if (ret < 0)
  1079. return ret;
  1080. }
  1081. sunxi_nfc_hw_ecc_disable(mtd);
  1082. return max_bitflips;
  1083. }
  1084. static int sunxi_nfc_hw_ecc_read_subpage_dma(struct mtd_info *mtd,
  1085. struct nand_chip *chip,
  1086. u32 data_offs, u32 readlen,
  1087. u8 *buf, int page)
  1088. {
  1089. int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
  1090. int ret;
  1091. ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks);
  1092. if (ret >= 0)
  1093. return ret;
  1094. /* Fallback to PIO mode */
  1095. chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
  1096. return sunxi_nfc_hw_ecc_read_subpage(mtd, chip, data_offs, readlen,
  1097. buf, page);
  1098. }
  1099. static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
  1100. struct nand_chip *chip,
  1101. const uint8_t *buf, int oob_required,
  1102. int page)
  1103. {
  1104. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1105. int ret, i, cur_off = 0;
  1106. sunxi_nfc_hw_ecc_enable(mtd);
  1107. for (i = 0; i < ecc->steps; i++) {
  1108. int data_off = i * ecc->size;
  1109. int oob_off = i * (ecc->bytes + 4);
  1110. const u8 *data = buf + data_off;
  1111. const u8 *oob = chip->oob_poi + oob_off;
  1112. ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
  1113. oob_off + mtd->writesize,
  1114. &cur_off, !i, page);
  1115. if (ret)
  1116. return ret;
  1117. }
  1118. if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
  1119. sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
  1120. &cur_off, page);
  1121. sunxi_nfc_hw_ecc_disable(mtd);
  1122. return 0;
  1123. }
  1124. static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
  1125. struct nand_chip *chip,
  1126. u32 data_offs, u32 data_len,
  1127. const u8 *buf, int oob_required,
  1128. int page)
  1129. {
  1130. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1131. int ret, i, cur_off = 0;
  1132. sunxi_nfc_hw_ecc_enable(mtd);
  1133. for (i = data_offs / ecc->size;
  1134. i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) {
  1135. int data_off = i * ecc->size;
  1136. int oob_off = i * (ecc->bytes + 4);
  1137. const u8 *data = buf + data_off;
  1138. const u8 *oob = chip->oob_poi + oob_off;
  1139. ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
  1140. oob_off + mtd->writesize,
  1141. &cur_off, !i, page);
  1142. if (ret)
  1143. return ret;
  1144. }
  1145. sunxi_nfc_hw_ecc_disable(mtd);
  1146. return 0;
  1147. }
  1148. static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
  1149. struct nand_chip *chip,
  1150. const u8 *buf,
  1151. int oob_required,
  1152. int page)
  1153. {
  1154. struct nand_chip *nand = mtd_to_nand(mtd);
  1155. struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
  1156. struct nand_ecc_ctrl *ecc = &nand->ecc;
  1157. struct scatterlist sg;
  1158. int ret, i;
  1159. ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
  1160. if (ret)
  1161. return ret;
  1162. ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, ecc->steps,
  1163. DMA_TO_DEVICE, &sg);
  1164. if (ret)
  1165. goto pio_fallback;
  1166. for (i = 0; i < ecc->steps; i++) {
  1167. const u8 *oob = nand->oob_poi + (i * (ecc->bytes + 4));
  1168. sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page);
  1169. }
  1170. sunxi_nfc_hw_ecc_enable(mtd);
  1171. sunxi_nfc_randomizer_config(mtd, page, false);
  1172. sunxi_nfc_randomizer_enable(mtd);
  1173. writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG,
  1174. nfc->regs + NFC_REG_RCMD_SET);
  1175. dma_async_issue_pending(nfc->dmac);
  1176. writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD |
  1177. NFC_DATA_TRANS | NFC_ACCESS_DIR,
  1178. nfc->regs + NFC_REG_CMD);
  1179. ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
  1180. if (ret)
  1181. dmaengine_terminate_all(nfc->dmac);
  1182. sunxi_nfc_randomizer_disable(mtd);
  1183. sunxi_nfc_hw_ecc_disable(mtd);
  1184. sunxi_nfc_dma_op_cleanup(mtd, DMA_TO_DEVICE, &sg);
  1185. if (ret)
  1186. return ret;
  1187. if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
  1188. /* TODO: use DMA to transfer extra OOB bytes ? */
  1189. sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
  1190. NULL, page);
  1191. return 0;
  1192. pio_fallback:
  1193. return sunxi_nfc_hw_ecc_write_page(mtd, chip, buf, oob_required, page);
  1194. }
  1195. static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
  1196. struct nand_chip *chip,
  1197. uint8_t *buf, int oob_required,
  1198. int page)
  1199. {
  1200. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1201. unsigned int max_bitflips = 0;
  1202. int ret, i, cur_off = 0;
  1203. bool raw_mode = false;
  1204. sunxi_nfc_hw_ecc_enable(mtd);
  1205. for (i = 0; i < ecc->steps; i++) {
  1206. int data_off = i * (ecc->size + ecc->bytes + 4);
  1207. int oob_off = data_off + ecc->size;
  1208. u8 *data = buf + (i * ecc->size);
  1209. u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4));
  1210. ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
  1211. oob_off, &cur_off,
  1212. &max_bitflips, !i,
  1213. oob_required,
  1214. page);
  1215. if (ret < 0)
  1216. return ret;
  1217. else if (ret)
  1218. raw_mode = true;
  1219. }
  1220. if (oob_required)
  1221. sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
  1222. !raw_mode, page);
  1223. sunxi_nfc_hw_ecc_disable(mtd);
  1224. return max_bitflips;
  1225. }
  1226. static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
  1227. struct nand_chip *chip,
  1228. const uint8_t *buf,
  1229. int oob_required, int page)
  1230. {
  1231. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1232. int ret, i, cur_off = 0;
  1233. sunxi_nfc_hw_ecc_enable(mtd);
  1234. for (i = 0; i < ecc->steps; i++) {
  1235. int data_off = i * (ecc->size + ecc->bytes + 4);
  1236. int oob_off = data_off + ecc->size;
  1237. const u8 *data = buf + (i * ecc->size);
  1238. const u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4));
  1239. ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off,
  1240. oob, oob_off, &cur_off,
  1241. false, page);
  1242. if (ret)
  1243. return ret;
  1244. }
  1245. if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
  1246. sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
  1247. &cur_off, page);
  1248. sunxi_nfc_hw_ecc_disable(mtd);
  1249. return 0;
  1250. }
  1251. static int sunxi_nfc_hw_common_ecc_read_oob(struct mtd_info *mtd,
  1252. struct nand_chip *chip,
  1253. int page)
  1254. {
  1255. chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
  1256. chip->pagebuf = -1;
  1257. return chip->ecc.read_page(mtd, chip, chip->buffers->databuf, 1, page);
  1258. }
  1259. static int sunxi_nfc_hw_common_ecc_write_oob(struct mtd_info *mtd,
  1260. struct nand_chip *chip,
  1261. int page)
  1262. {
  1263. int ret, status;
  1264. chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
  1265. chip->pagebuf = -1;
  1266. memset(chip->buffers->databuf, 0xff, mtd->writesize);
  1267. ret = chip->ecc.write_page(mtd, chip, chip->buffers->databuf, 1, page);
  1268. if (ret)
  1269. return ret;
  1270. /* Send command to program the OOB data */
  1271. chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
  1272. status = chip->waitfunc(mtd, chip);
  1273. return status & NAND_STATUS_FAIL ? -EIO : 0;
  1274. }
static const s32 tWB_lut[] = {6, 12, 16, 20};
static const s32 tRHW_lut[] = {4, 8, 12, 20};

static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration,
                                     u32 clk_period)
{
        u32 clk_cycles = DIV_ROUND_UP(duration, clk_period);
        int i;

        for (i = 0; i < lut_size; i++) {
                if (clk_cycles <= lut[i])
                        return i;
        }

        /* Doesn't fit */
        return -EINVAL;
}

#define sunxi_nand_lookup_timing(l, p, c) \
                _sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c)
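
/*
 * ->setup_data_interface() implementation: derive the smallest clock
 * period satisfying all SDR timings of the chip, encode tWB, tADL,
 * tWHR, tRHW and tCAD into the per-chip timing_cfg value, then pick the
 * mod clock rate (and EDO mode when the resulting tRC drops below
 * 30 ns).
 */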
static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd,
                                          const struct nand_data_interface *conf,
                                          bool check_only)
{
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct sunxi_nand_chip *chip = to_sunxi_nand(nand);
        struct sunxi_nfc *nfc = to_sunxi_nfc(chip->nand.controller);
        const struct nand_sdr_timings *timings;
        u32 min_clk_period = 0;
        s32 tWB, tADL, tWHR, tRHW, tCAD;
        long real_clk_rate;

        timings = nand_get_sdr_timings(conf);
        if (IS_ERR(timings))
                return -ENOTSUPP;

        /* T1 <=> tCLS */
        if (timings->tCLS_min > min_clk_period)
                min_clk_period = timings->tCLS_min;

        /* T2 <=> tCLH */
        if (timings->tCLH_min > min_clk_period)
                min_clk_period = timings->tCLH_min;

        /* T3 <=> tCS */
        if (timings->tCS_min > min_clk_period)
                min_clk_period = timings->tCS_min;

        /* T4 <=> tCH */
        if (timings->tCH_min > min_clk_period)
                min_clk_period = timings->tCH_min;

        /* T5 <=> tWP */
        if (timings->tWP_min > min_clk_period)
                min_clk_period = timings->tWP_min;

        /* T6 <=> tWH */
        if (timings->tWH_min > min_clk_period)
                min_clk_period = timings->tWH_min;

        /* T7 <=> tALS */
        if (timings->tALS_min > min_clk_period)
                min_clk_period = timings->tALS_min;

        /* T8 <=> tDS */
        if (timings->tDS_min > min_clk_period)
                min_clk_period = timings->tDS_min;

        /* T9 <=> tDH */
        if (timings->tDH_min > min_clk_period)
                min_clk_period = timings->tDH_min;

        /* T10 <=> tRR */
        if (timings->tRR_min > (min_clk_period * 3))
                min_clk_period = DIV_ROUND_UP(timings->tRR_min, 3);

        /* T11 <=> tALH */
        if (timings->tALH_min > min_clk_period)
                min_clk_period = timings->tALH_min;

        /* T12 <=> tRP */
        if (timings->tRP_min > min_clk_period)
                min_clk_period = timings->tRP_min;

        /* T13 <=> tREH */
        if (timings->tREH_min > min_clk_period)
                min_clk_period = timings->tREH_min;

        /* T14 <=> tRC */
        if (timings->tRC_min > (min_clk_period * 2))
                min_clk_period = DIV_ROUND_UP(timings->tRC_min, 2);

        /* T15 <=> tWC */
        if (timings->tWC_min > (min_clk_period * 2))
                min_clk_period = DIV_ROUND_UP(timings->tWC_min, 2);

        /* T16 - T19 + tCAD */
        if (timings->tWB_max > (min_clk_period * 20))
                min_clk_period = DIV_ROUND_UP(timings->tWB_max, 20);

        if (timings->tADL_min > (min_clk_period * 32))
                min_clk_period = DIV_ROUND_UP(timings->tADL_min, 32);

        if (timings->tWHR_min > (min_clk_period * 32))
                min_clk_period = DIV_ROUND_UP(timings->tWHR_min, 32);

        if (timings->tRHW_min > (min_clk_period * 20))
                min_clk_period = DIV_ROUND_UP(timings->tRHW_min, 20);
        tWB = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max,
                                       min_clk_period);
        if (tWB < 0) {
                dev_err(nfc->dev, "unsupported tWB\n");
                return tWB;
        }

        tADL = DIV_ROUND_UP(timings->tADL_min, min_clk_period) >> 3;
        if (tADL > 3) {
                dev_err(nfc->dev, "unsupported tADL\n");
                return -EINVAL;
        }

        tWHR = DIV_ROUND_UP(timings->tWHR_min, min_clk_period) >> 3;
        if (tWHR > 3) {
                dev_err(nfc->dev, "unsupported tWHR\n");
                return -EINVAL;
        }

        tRHW = sunxi_nand_lookup_timing(tRHW_lut, timings->tRHW_min,
                                        min_clk_period);
        if (tRHW < 0) {
                dev_err(nfc->dev, "unsupported tRHW\n");
                return tRHW;
        }

        if (check_only)
                return 0;

        /*
         * TODO: according to ONFI specs this value only applies for DDR NAND,
         * but Allwinner seems to set this to 0x7. Mimic them for now.
         */
        tCAD = 0x7;

        /* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */
        chip->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD);

        /* Convert min_clk_period from picoseconds to nanoseconds */
        min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);

        /*
         * Unlike what is stated in Allwinner datasheet, the clk_rate should
         * be set to (1 / min_clk_period), and not (2 / min_clk_period).
         * This new formula was verified with a scope and validated by
         * Allwinner engineers.
         */
        chip->clk_rate = NSEC_PER_SEC / min_clk_period;
        real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate);

        /*
         * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
         * output cycle timings shall be used if the host drives tRC less than
         * 30 ns.
         */
        min_clk_period = NSEC_PER_SEC / real_clk_rate;
        chip->timing_ctl = ((min_clk_period * 2) < 30) ?
                           NFC_TIMING_CTL_EDO : 0;

        return 0;
}
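
/*
 * OOB layout exposed to MTD for the HW ECC modes: each ECC step owns
 * ecc->bytes ECC bytes preceded by 4 free bytes, and whatever is left
 * at the end of the OOB area forms a last free region. In NAND_ECC_HW
 * mode the first free region is shortened to 2 bytes to preserve the
 * bad block markers. Roughly:
 *
 *   | BBM (2) | free (2) | ECC0 | free (4) | ECC1 | ... | free (rest) |
 */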
static int sunxi_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
                                    struct mtd_oob_region *oobregion)
{
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct nand_ecc_ctrl *ecc = &nand->ecc;

        if (section >= ecc->steps)
                return -ERANGE;

        oobregion->offset = section * (ecc->bytes + 4) + 4;
        oobregion->length = ecc->bytes;

        return 0;
}
static int sunxi_nand_ooblayout_free(struct mtd_info *mtd, int section,
                                     struct mtd_oob_region *oobregion)
{
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct nand_ecc_ctrl *ecc = &nand->ecc;

        if (section > ecc->steps)
                return -ERANGE;

        /*
         * The first 2 bytes are used for BB markers, hence we
         * only have 2 bytes available in the first user data
         * section.
         */
        if (!section && ecc->mode == NAND_ECC_HW) {
                oobregion->offset = 2;
                oobregion->length = 2;

                return 0;
        }

        oobregion->offset = section * (ecc->bytes + 4);

        if (section < ecc->steps)
                oobregion->length = 4;
        else
                oobregion->length = mtd->oobsize - oobregion->offset;

        return 0;
}
static const struct mtd_ooblayout_ops sunxi_nand_ooblayout_ops = {
        .ecc = sunxi_nand_ooblayout_ecc,
        .free = sunxi_nand_ooblayout_free,
};
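
/*
 * Common ECC controller setup: round the requested strength up to the
 * closest value supported by the engine (strengths[]) and derive the
 * number of ECC bytes per step. As an illustrative example, a chip
 * requiring 40-bit correction over 1024-byte steps needs
 * DIV_ROUND_UP(40 * fls(8 * 1024), 8) = 70 ECC bytes per step.
 */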
static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
                                              struct nand_ecc_ctrl *ecc,
                                              struct device_node *np)
{
        static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 };
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
        struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
        struct sunxi_nand_hw_ecc *data;
        int nsectors;
        int ret;
        int i;

        if (ecc->options & NAND_ECC_MAXIMIZE) {
                int bytes;

                ecc->size = 1024;
                nsectors = mtd->writesize / ecc->size;

                /* Reserve 2 bytes for the BBM */
                bytes = (mtd->oobsize - 2) / nsectors;

                /* 4 non-ECC bytes are added before each ECC bytes section */
                bytes -= 4;

                /* and bytes has to be even. */
                if (bytes % 2)
                        bytes--;

                ecc->strength = bytes * 8 / fls(8 * ecc->size);

                for (i = 0; i < ARRAY_SIZE(strengths); i++) {
                        if (strengths[i] > ecc->strength)
                                break;
                }

                if (!i)
                        ecc->strength = 0;
                else
                        ecc->strength = strengths[i - 1];
        }
        if (ecc->size != 512 && ecc->size != 1024)
                return -EINVAL;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        /* Prefer 1k ECC chunks over 512-byte ones */
        if (ecc->size == 512 && mtd->writesize > 512) {
                ecc->size = 1024;
                ecc->strength *= 2;
        }

        /* Add ECC info retrieval from DT */
        for (i = 0; i < ARRAY_SIZE(strengths); i++) {
                if (ecc->strength <= strengths[i]) {
                        /*
                         * Update ecc->strength value with the actual strength
                         * that will be used by the ECC engine.
                         */
                        ecc->strength = strengths[i];
                        break;
                }
        }

        if (i >= ARRAY_SIZE(strengths)) {
                dev_err(nfc->dev, "unsupported strength\n");
                ret = -ENOTSUPP;
                goto err;
        }

        data->mode = i;

        /* HW ECC always requests ECC bytes for 1024-byte blocks */
        ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * 1024), 8);

        /* HW ECC always works with even numbers of ECC bytes */
        ecc->bytes = ALIGN(ecc->bytes, 2);

        nsectors = mtd->writesize / ecc->size;

        if (mtd->oobsize < ((ecc->bytes + 4) * nsectors)) {
                ret = -EINVAL;
                goto err;
        }

        ecc->read_oob = sunxi_nfc_hw_common_ecc_read_oob;
        ecc->write_oob = sunxi_nfc_hw_common_ecc_write_oob;
        mtd_set_ooblayout(mtd, &sunxi_nand_ooblayout_ops);
        ecc->priv = data;

        return 0;

err:
        kfree(data);

        return ret;
}
static void sunxi_nand_hw_common_ecc_ctrl_cleanup(struct nand_ecc_ctrl *ecc)
{
        kfree(ecc->priv);
}
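
/*
 * NAND_ECC_HW setup: use the DMA-assisted page accessors when a DMA
 * channel could be requested at probe time, and fall back to the PIO
 * implementations otherwise.
 */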
static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
                                       struct nand_ecc_ctrl *ecc,
                                       struct device_node *np)
{
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
        struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
        int ret;

        ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
        if (ret)
                return ret;

        if (nfc->dmac) {
                ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma;
                ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma;
                ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma;
                nand->options |= NAND_USE_BOUNCE_BUFFER;
        } else {
                ecc->read_page = sunxi_nfc_hw_ecc_read_page;
                ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
                ecc->write_page = sunxi_nfc_hw_ecc_write_page;
        }

        /* TODO: support DMA for raw accesses and subpage write */
        ecc->write_subpage = sunxi_nfc_hw_ecc_write_subpage;
        ecc->read_oob_raw = nand_read_oob_std;
        ecc->write_oob_raw = nand_write_oob_std;

        return 0;
}
static int sunxi_nand_hw_syndrome_ecc_ctrl_init(struct mtd_info *mtd,
                                                struct nand_ecc_ctrl *ecc,
                                                struct device_node *np)
{
        int ret;

        ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
        if (ret)
                return ret;

        ecc->prepad = 4;
        ecc->read_page = sunxi_nfc_hw_syndrome_ecc_read_page;
        ecc->write_page = sunxi_nfc_hw_syndrome_ecc_write_page;
        ecc->read_oob_raw = nand_read_oob_syndrome;
        ecc->write_oob_raw = nand_write_oob_syndrome;

        return 0;
}

static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc)
{
        switch (ecc->mode) {
        case NAND_ECC_HW:
        case NAND_ECC_HW_SYNDROME:
                sunxi_nand_hw_common_ecc_ctrl_cleanup(ecc);
                break;
        case NAND_ECC_NONE:
        default:
                break;
        }
}
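
/*
 * Dispatch ECC setup based on the mode selected in the DT (or the
 * NAND_ECC_HW default). When ecc->size/strength are left unset, the
 * requirements reported by the chip at detection time are used instead.
 */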
static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
                               struct device_node *np)
{
        struct nand_chip *nand = mtd_to_nand(mtd);
        int ret;

        if (!ecc->size) {
                ecc->size = nand->ecc_step_ds;
                ecc->strength = nand->ecc_strength_ds;
        }

        if (!ecc->size || !ecc->strength)
                return -EINVAL;

        switch (ecc->mode) {
        case NAND_ECC_HW:
                ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc, np);
                if (ret)
                        return ret;
                break;
        case NAND_ECC_HW_SYNDROME:
                ret = sunxi_nand_hw_syndrome_ecc_ctrl_init(mtd, ecc, np);
                if (ret)
                        return ret;
                break;
        case NAND_ECC_NONE:
        case NAND_ECC_SOFT:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
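
/*
 * Per-chip init: each NAND chip child node describes one chip select
 * per "reg" cell, optionally paired with a native ready/busy line
 * ("allwinner,rb") or a GPIO one ("rb-gpios"). An illustrative (not
 * authoritative) child node could look like:
 *
 *      nand@0 {
 *              reg = <0>;
 *              allwinner,rb = <0>;
 *      };
 */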
static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
                                struct device_node *np)
{
        struct sunxi_nand_chip *chip;
        struct mtd_info *mtd;
        struct nand_chip *nand;
        int nsels;
        int ret;
        int i;
        u32 tmp;

        if (!of_get_property(np, "reg", &nsels))
                return -EINVAL;

        nsels /= sizeof(u32);
        if (!nsels) {
                dev_err(dev, "invalid reg property size\n");
                return -EINVAL;
        }

        chip = devm_kzalloc(dev,
                            sizeof(*chip) +
                            (nsels * sizeof(struct sunxi_nand_chip_sel)),
                            GFP_KERNEL);
        if (!chip) {
                dev_err(dev, "could not allocate chip\n");
                return -ENOMEM;
        }

        chip->nsels = nsels;
        chip->selected = -1;

        for (i = 0; i < nsels; i++) {
                ret = of_property_read_u32_index(np, "reg", i, &tmp);
                if (ret) {
                        dev_err(dev, "could not retrieve reg property: %d\n",
                                ret);
                        return ret;
                }

                if (tmp > NFC_MAX_CS) {
                        dev_err(dev,
                                "invalid reg value: %u (max CS = 7)\n",
                                tmp);
                        return -EINVAL;
                }

                if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
                        dev_err(dev, "CS %d already assigned\n", tmp);
                        return -EINVAL;
                }

                chip->sels[i].cs = tmp;

                if (!of_property_read_u32_index(np, "allwinner,rb", i, &tmp) &&
                    tmp < 2) {
                        chip->sels[i].rb.type = RB_NATIVE;
                        chip->sels[i].rb.info.nativeid = tmp;
                } else {
                        ret = of_get_named_gpio(np, "rb-gpios", i);
                        if (ret >= 0) {
                                tmp = ret;
                                chip->sels[i].rb.type = RB_GPIO;
                                chip->sels[i].rb.info.gpio = tmp;
                                ret = devm_gpio_request(dev, tmp, "nand-rb");
                                if (ret)
                                        return ret;

                                ret = gpio_direction_input(tmp);
                                if (ret)
                                        return ret;
                        } else {
                                chip->sels[i].rb.type = RB_NONE;
                        }
                }
        }
        nand = &chip->nand;
        /* Default tR value specified in the ONFI spec (chapter 4.15.1) */
        nand->chip_delay = 200;
        nand->controller = &nfc->controller;
        /*
         * Set the ECC mode to the default value in case nothing is specified
         * in the DT.
         */
        nand->ecc.mode = NAND_ECC_HW;
        nand_set_flash_node(nand, np);
        nand->select_chip = sunxi_nfc_select_chip;
        nand->cmd_ctrl = sunxi_nfc_cmd_ctrl;
        nand->read_buf = sunxi_nfc_read_buf;
        nand->write_buf = sunxi_nfc_write_buf;
        nand->read_byte = sunxi_nfc_read_byte;
        nand->setup_data_interface = sunxi_nfc_setup_data_interface;

        mtd = nand_to_mtd(nand);
        mtd->dev.parent = dev;

        ret = nand_scan_ident(mtd, nsels, NULL);
        if (ret)
                return ret;

        if (nand->bbt_options & NAND_BBT_USE_FLASH)
                nand->bbt_options |= NAND_BBT_NO_OOB;

        if (nand->options & NAND_NEED_SCRAMBLING)
                nand->options |= NAND_NO_SUBPAGE_WRITE;

        nand->options |= NAND_SUBPAGE_READ;

        ret = sunxi_nand_ecc_init(mtd, &nand->ecc, np);
        if (ret) {
                dev_err(dev, "ECC init failed: %d\n", ret);
                return ret;
        }

        ret = nand_scan_tail(mtd);
        if (ret) {
                dev_err(dev, "nand_scan_tail failed: %d\n", ret);
                return ret;
        }

        ret = mtd_device_register(mtd, NULL, 0);
        if (ret) {
                dev_err(dev, "failed to register mtd device: %d\n", ret);
                nand_release(mtd);
                return ret;
        }

        list_add_tail(&chip->node, &nfc->chips);

        return 0;
}
static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc)
{
        struct device_node *np = dev->of_node;
        struct device_node *nand_np;
        int nchips = of_get_child_count(np);
        int ret;

        if (nchips > 8) {
                dev_err(dev, "too many NAND chips: %d (max = 8)\n", nchips);
                return -EINVAL;
        }

        for_each_child_of_node(np, nand_np) {
                ret = sunxi_nand_chip_init(dev, nfc, nand_np);
                if (ret) {
                        of_node_put(nand_np);
                        return ret;
                }
        }

        return 0;
}
static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
{
        struct sunxi_nand_chip *chip;

        while (!list_empty(&nfc->chips)) {
                chip = list_first_entry(&nfc->chips, struct sunxi_nand_chip,
                                        node);
                nand_release(nand_to_mtd(&chip->nand));
                sunxi_nand_ecc_cleanup(&chip->nand.ecc);
                list_del(&chip->node);
        }
}
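
/*
 * Probe sequence: map the registers, grab the IRQ and the "ahb"/"mod"
 * clocks, optionally deassert the "ahb" reset line, reset the
 * controller, then try to get the "rxtx" DMA channel before scanning
 * the chips described in the DT. The DMA channel is optional; the
 * driver falls back to PIO page accessors when the request fails.
 */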
static int sunxi_nfc_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct resource *r;
        struct sunxi_nfc *nfc;
        int irq;
        int ret;

        nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
        if (!nfc)
                return -ENOMEM;

        nfc->dev = dev;
        nand_hw_control_init(&nfc->controller);
        INIT_LIST_HEAD(&nfc->chips);

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        nfc->regs = devm_ioremap_resource(dev, r);
        if (IS_ERR(nfc->regs))
                return PTR_ERR(nfc->regs);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(dev, "failed to retrieve irq\n");
                return irq;
        }

        nfc->ahb_clk = devm_clk_get(dev, "ahb");
        if (IS_ERR(nfc->ahb_clk)) {
                dev_err(dev, "failed to retrieve ahb clk\n");
                return PTR_ERR(nfc->ahb_clk);
        }

        ret = clk_prepare_enable(nfc->ahb_clk);
        if (ret)
                return ret;

        nfc->mod_clk = devm_clk_get(dev, "mod");
        if (IS_ERR(nfc->mod_clk)) {
                dev_err(dev, "failed to retrieve mod clk\n");
                ret = PTR_ERR(nfc->mod_clk);
                goto out_ahb_clk_unprepare;
        }

        ret = clk_prepare_enable(nfc->mod_clk);
        if (ret)
                goto out_ahb_clk_unprepare;

        nfc->reset = devm_reset_control_get_optional(dev, "ahb");
        if (!IS_ERR(nfc->reset)) {
                ret = reset_control_deassert(nfc->reset);
                if (ret) {
                        dev_err(dev, "reset err %d\n", ret);
                        goto out_mod_clk_unprepare;
                }
        } else if (PTR_ERR(nfc->reset) != -ENOENT) {
                ret = PTR_ERR(nfc->reset);
                goto out_mod_clk_unprepare;
        }
        ret = sunxi_nfc_rst(nfc);
        if (ret)
                goto out_ahb_reset_reassert;

        writel(0, nfc->regs + NFC_REG_INT);
        ret = devm_request_irq(dev, irq, sunxi_nfc_interrupt,
                               0, "sunxi-nand", nfc);
        if (ret)
                goto out_ahb_reset_reassert;

        nfc->dmac = dma_request_slave_channel(dev, "rxtx");
        if (nfc->dmac) {
                struct dma_slave_config dmac_cfg = { };

                dmac_cfg.src_addr = r->start + NFC_REG_IO_DATA;
                dmac_cfg.dst_addr = dmac_cfg.src_addr;
                dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width;
                dmac_cfg.src_maxburst = 4;
                dmac_cfg.dst_maxburst = 4;
                dmaengine_slave_config(nfc->dmac, &dmac_cfg);
        } else {
                dev_warn(dev, "failed to request rxtx DMA channel\n");
        }

        platform_set_drvdata(pdev, nfc);

        ret = sunxi_nand_chips_init(dev, nfc);
        if (ret) {
                dev_err(dev, "failed to init nand chips\n");
                goto out_release_dmac;
        }

        return 0;

out_release_dmac:
        if (nfc->dmac)
                dma_release_channel(nfc->dmac);
out_ahb_reset_reassert:
        if (!IS_ERR(nfc->reset))
                reset_control_assert(nfc->reset);
out_mod_clk_unprepare:
        clk_disable_unprepare(nfc->mod_clk);
out_ahb_clk_unprepare:
        clk_disable_unprepare(nfc->ahb_clk);

        return ret;
}
static int sunxi_nfc_remove(struct platform_device *pdev)
{
        struct sunxi_nfc *nfc = platform_get_drvdata(pdev);

        sunxi_nand_chips_cleanup(nfc);

        if (!IS_ERR(nfc->reset))
                reset_control_assert(nfc->reset);

        if (nfc->dmac)
                dma_release_channel(nfc->dmac);
        clk_disable_unprepare(nfc->mod_clk);
        clk_disable_unprepare(nfc->ahb_clk);

        return 0;
}

static const struct of_device_id sunxi_nfc_ids[] = {
        { .compatible = "allwinner,sun4i-a10-nand" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sunxi_nfc_ids);

static struct platform_driver sunxi_nfc_driver = {
        .driver = {
                .name = "sunxi_nand",
                .of_match_table = sunxi_nfc_ids,
        },
        .probe = sunxi_nfc_probe,
        .remove = sunxi_nfc_remove,
};
module_platform_driver(sunxi_nfc_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Boris BREZILLON");
MODULE_DESCRIPTION("Allwinner NAND Flash Controller driver");
MODULE_ALIAS("platform:sunxi_nand");