mtk_nand.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530
  1. /*
  2. * MTK NAND Flash controller driver.
  3. * Copyright (C) 2016 MediaTek Inc.
  4. * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
  5. * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. */
  16. #include <linux/platform_device.h>
  17. #include <linux/dma-mapping.h>
  18. #include <linux/interrupt.h>
  19. #include <linux/delay.h>
  20. #include <linux/clk.h>
  21. #include <linux/mtd/nand.h>
  22. #include <linux/mtd/mtd.h>
  23. #include <linux/module.h>
  24. #include <linux/iopoll.h>
  25. #include <linux/of.h>
  26. #include "mtk_ecc.h"
  27. /* NAND controller register definition */
  28. #define NFI_CNFG (0x00)
  29. #define CNFG_AHB BIT(0)
  30. #define CNFG_READ_EN BIT(1)
  31. #define CNFG_DMA_BURST_EN BIT(2)
  32. #define CNFG_BYTE_RW BIT(6)
  33. #define CNFG_HW_ECC_EN BIT(8)
  34. #define CNFG_AUTO_FMT_EN BIT(9)
  35. #define CNFG_OP_CUST (6 << 12)
  36. #define NFI_PAGEFMT (0x04)
  37. #define PAGEFMT_FDM_ECC_SHIFT (12)
  38. #define PAGEFMT_FDM_SHIFT (8)
  39. #define PAGEFMT_SPARE_16 (0)
  40. #define PAGEFMT_SPARE_26 (1)
  41. #define PAGEFMT_SPARE_27 (2)
  42. #define PAGEFMT_SPARE_28 (3)
  43. #define PAGEFMT_SPARE_32 (4)
  44. #define PAGEFMT_SPARE_36 (5)
  45. #define PAGEFMT_SPARE_40 (6)
  46. #define PAGEFMT_SPARE_44 (7)
  47. #define PAGEFMT_SPARE_48 (8)
  48. #define PAGEFMT_SPARE_49 (9)
  49. #define PAGEFMT_SPARE_50 (0xa)
  50. #define PAGEFMT_SPARE_51 (0xb)
  51. #define PAGEFMT_SPARE_52 (0xc)
  52. #define PAGEFMT_SPARE_62 (0xd)
  53. #define PAGEFMT_SPARE_63 (0xe)
  54. #define PAGEFMT_SPARE_64 (0xf)
  55. #define PAGEFMT_SPARE_SHIFT (4)
  56. #define PAGEFMT_SEC_SEL_512 BIT(2)
  57. #define PAGEFMT_512_2K (0)
  58. #define PAGEFMT_2K_4K (1)
  59. #define PAGEFMT_4K_8K (2)
  60. #define PAGEFMT_8K_16K (3)
  61. /* NFI control */
  62. #define NFI_CON (0x08)
  63. #define CON_FIFO_FLUSH BIT(0)
  64. #define CON_NFI_RST BIT(1)
  65. #define CON_BRD BIT(8) /* burst read */
  66. #define CON_BWR BIT(9) /* burst write */
  67. #define CON_SEC_SHIFT (12)
  68. /* Timming control register */
  69. #define NFI_ACCCON (0x0C)
  70. #define NFI_INTR_EN (0x10)
  71. #define INTR_AHB_DONE_EN BIT(6)
  72. #define NFI_INTR_STA (0x14)
  73. #define NFI_CMD (0x20)
  74. #define NFI_ADDRNOB (0x30)
  75. #define NFI_COLADDR (0x34)
  76. #define NFI_ROWADDR (0x38)
  77. #define NFI_STRDATA (0x40)
  78. #define STAR_EN (1)
  79. #define STAR_DE (0)
  80. #define NFI_CNRNB (0x44)
  81. #define NFI_DATAW (0x50)
  82. #define NFI_DATAR (0x54)
  83. #define NFI_PIO_DIRDY (0x58)
  84. #define PIO_DI_RDY (0x01)
  85. #define NFI_STA (0x60)
  86. #define STA_CMD BIT(0)
  87. #define STA_ADDR BIT(1)
  88. #define STA_BUSY BIT(8)
  89. #define STA_EMP_PAGE BIT(12)
  90. #define NFI_FSM_CUSTDATA (0xe << 16)
  91. #define NFI_FSM_MASK (0xf << 16)
  92. #define NFI_ADDRCNTR (0x70)
  93. #define CNTR_MASK GENMASK(16, 12)
  94. #define ADDRCNTR_SEC_SHIFT (12)
  95. #define ADDRCNTR_SEC(val) \
  96. (((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
  97. #define NFI_STRADDR (0x80)
  98. #define NFI_BYTELEN (0x84)
  99. #define NFI_CSEL (0x90)
  100. #define NFI_FDML(x) (0xA0 + (x) * sizeof(u32) * 2)
  101. #define NFI_FDMM(x) (0xA4 + (x) * sizeof(u32) * 2)
  102. #define NFI_FDM_MAX_SIZE (8)
  103. #define NFI_FDM_MIN_SIZE (1)
  104. #define NFI_MASTER_STA (0x224)
  105. #define MASTER_STA_MASK (0x0FFF)
  106. #define NFI_EMPTY_THRESH (0x23C)
  107. #define MTK_NAME "mtk-nand"
  108. #define KB(x) ((x) * 1024UL)
  109. #define MB(x) (KB(x) * 1024UL)
  110. #define MTK_TIMEOUT (500000)
  111. #define MTK_RESET_TIMEOUT (1000000)
  112. #define MTK_MAX_SECTOR (16)
  113. #define MTK_NAND_MAX_NSELS (2)
/*
 * Bad-block-mark relocation control.  In the controller's interleaved raw
 * layout the factory bad mark byte falls inside a data sector, so reads and
 * writes must swap it with the first OOB byte.
 */
struct mtk_nfc_bad_mark_ctl {
	/* swap routine installed for this chip (real swap or nop stub) */
	void (*bm_swap)(struct mtd_info *, u8 *buf, int raw);
	u32 sec;	/* sector that contains the bad mark byte */
	u32 pos;	/* byte offset of the mark within that sector */
};
/*
 * FDM: region used to store free OOB data
 */
struct mtk_nfc_fdm {
	u32 reg_size;	/* FDM bytes per sector exposed as free OOB */
	u32 ecc_size;	/* leading FDM bytes covered by the ECC engine */
};
  126. struct mtk_nfc_nand_chip {
  127. struct list_head node;
  128. struct nand_chip nand;
  129. struct mtk_nfc_bad_mark_ctl bad_mark;
  130. struct mtk_nfc_fdm fdm;
  131. u32 spare_per_sector;
  132. int nsels;
  133. u8 sels[0];
  134. /* nothing after this field */
  135. };
/* Clocks consumed by the controller. */
struct mtk_nfc_clk {
	struct clk *nfi_clk;	/* NFI core clock */
	struct clk *pad_clk;	/* pad/interface clock */
};
/* Controller-wide state shared by all attached chips. */
struct mtk_nfc {
	struct nand_hw_control controller;	/* serializes chip access */
	struct mtk_ecc_config ecc_cfg;		/* current ECC engine setup */
	struct mtk_nfc_clk clk;
	struct mtk_ecc *ecc;			/* companion ECC engine */

	struct device *dev;
	void __iomem *regs;			/* NFI register base */

	struct completion done;			/* AHB DMA done signalling */
	struct list_head chips;			/* list of mtk_nfc_nand_chip */

	u8 *buffer;	/* bounce buffer, writesize + oobsize of largest chip */
};
/* Map a generic nand_chip back to its enclosing mtk_nfc_nand_chip. */
static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
{
	return container_of(nand, struct mtk_nfc_nand_chip, nand);
}
  155. static inline u8 *data_ptr(struct nand_chip *chip, const u8 *p, int i)
  156. {
  157. return (u8 *)p + i * chip->ecc.size;
  158. }
  159. static inline u8 *oob_ptr(struct nand_chip *chip, int i)
  160. {
  161. struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
  162. u8 *poi;
  163. /* map the sector's FDM data to free oob:
  164. * the beginning of the oob area stores the FDM data of bad mark sectors
  165. */
  166. if (i < mtk_nand->bad_mark.sec)
  167. poi = chip->oob_poi + (i + 1) * mtk_nand->fdm.reg_size;
  168. else if (i == mtk_nand->bad_mark.sec)
  169. poi = chip->oob_poi;
  170. else
  171. poi = chip->oob_poi + i * mtk_nand->fdm.reg_size;
  172. return poi;
  173. }
  174. static inline int mtk_data_len(struct nand_chip *chip)
  175. {
  176. struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
  177. return chip->ecc.size + mtk_nand->spare_per_sector;
  178. }
  179. static inline u8 *mtk_data_ptr(struct nand_chip *chip, int i)
  180. {
  181. struct mtk_nfc *nfc = nand_get_controller_data(chip);
  182. return nfc->buffer + i * mtk_data_len(chip);
  183. }
  184. static inline u8 *mtk_oob_ptr(struct nand_chip *chip, int i)
  185. {
  186. struct mtk_nfc *nfc = nand_get_controller_data(chip);
  187. return nfc->buffer + i * mtk_data_len(chip) + chip->ecc.size;
  188. }
/* 32-bit write to an NFI register (offset from the register base). */
static inline void nfi_writel(struct mtk_nfc *nfc, u32 val, u32 reg)
{
	writel(val, nfc->regs + reg);
}
/* 16-bit write to an NFI register. */
static inline void nfi_writew(struct mtk_nfc *nfc, u16 val, u32 reg)
{
	writew(val, nfc->regs + reg);
}
/* 8-bit write to an NFI register. */
static inline void nfi_writeb(struct mtk_nfc *nfc, u8 val, u32 reg)
{
	writeb(val, nfc->regs + reg);
}
/* 32-bit read of an NFI register (relaxed: no memory-barrier ordering). */
static inline u32 nfi_readl(struct mtk_nfc *nfc, u32 reg)
{
	return readl_relaxed(nfc->regs + reg);
}
/* 16-bit read of an NFI register (relaxed). */
static inline u16 nfi_readw(struct mtk_nfc *nfc, u32 reg)
{
	return readw_relaxed(nfc->regs + reg);
}
/* 8-bit read of an NFI register (relaxed). */
static inline u8 nfi_readb(struct mtk_nfc *nfc, u32 reg)
{
	return readb_relaxed(nfc->regs + reg);
}
/*
 * Reset the NFI core.  Issued before every command cycle; only warns (does
 * not fail) if the bus master does not quiesce within MTK_RESET_TIMEOUT.
 */
static void mtk_nfc_hw_reset(struct mtk_nfc *nfc)
{
	struct device *dev = nfc->dev;
	u32 val;
	int ret;

	/* reset all registers and force the NFI master to terminate */
	nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);

	/* wait for the master to finish the last transaction */
	ret = readl_poll_timeout(nfc->regs + NFI_MASTER_STA, val,
				 !(val & MASTER_STA_MASK), 50,
				 MTK_RESET_TIMEOUT);
	if (ret)
		dev_warn(dev, "master active in reset [0x%x] = 0x%x\n",
			 NFI_MASTER_STA, val);

	/* ensure any status register affected by the NFI master is reset */
	nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
	nfi_writew(nfc, STAR_DE, NFI_STRDATA);
}
/*
 * Issue a NAND opcode through NFI_CMD and wait for the controller to leave
 * the command state.  Returns 0 on success, -EIO on timeout.
 */
static int mtk_nfc_send_command(struct mtk_nfc *nfc, u8 command)
{
	struct device *dev = nfc->dev;
	u32 val;
	int ret;

	nfi_writel(nfc, command, NFI_CMD);

	/* STA_CMD clears once the command cycle has been accepted */
	ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
					!(val & STA_CMD), 10, MTK_TIMEOUT);
	if (ret) {
		dev_warn(dev, "nfi core timed out entering command mode\n");
		return -EIO;
	}

	return 0;
}
/*
 * Issue one address cycle: @addr goes out as a column address byte (row
 * address forced to 0, one address byte via NFI_ADDRNOB).
 * Returns 0 on success, -EIO on timeout.
 */
static int mtk_nfc_send_address(struct mtk_nfc *nfc, int addr)
{
	struct device *dev = nfc->dev;
	u32 val;
	int ret;

	nfi_writel(nfc, addr, NFI_COLADDR);
	nfi_writel(nfc, 0, NFI_ROWADDR);
	nfi_writew(nfc, 1, NFI_ADDRNOB);

	/* STA_ADDR clears once the address cycle has been accepted */
	ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
					!(val & STA_ADDR), 10, MTK_TIMEOUT);
	if (ret) {
		dev_warn(dev, "nfi core timed out entering address mode\n");
		return -EIO;
	}

	return 0;
}
  261. static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd)
  262. {
  263. struct nand_chip *chip = mtd_to_nand(mtd);
  264. struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
  265. struct mtk_nfc *nfc = nand_get_controller_data(chip);
  266. u32 fmt, spare;
  267. if (!mtd->writesize)
  268. return 0;
  269. spare = mtk_nand->spare_per_sector;
  270. switch (mtd->writesize) {
  271. case 512:
  272. fmt = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
  273. break;
  274. case KB(2):
  275. if (chip->ecc.size == 512)
  276. fmt = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
  277. else
  278. fmt = PAGEFMT_512_2K;
  279. break;
  280. case KB(4):
  281. if (chip->ecc.size == 512)
  282. fmt = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
  283. else
  284. fmt = PAGEFMT_2K_4K;
  285. break;
  286. case KB(8):
  287. if (chip->ecc.size == 512)
  288. fmt = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
  289. else
  290. fmt = PAGEFMT_4K_8K;
  291. break;
  292. case KB(16):
  293. fmt = PAGEFMT_8K_16K;
  294. break;
  295. default:
  296. dev_err(nfc->dev, "invalid page len: %d\n", mtd->writesize);
  297. return -EINVAL;
  298. }
  299. /*
  300. * the hardware will double the value for this eccsize, so we need to
  301. * halve it
  302. */
  303. if (chip->ecc.size == 1024)
  304. spare >>= 1;
  305. switch (spare) {
  306. case 16:
  307. fmt |= (PAGEFMT_SPARE_16 << PAGEFMT_SPARE_SHIFT);
  308. break;
  309. case 26:
  310. fmt |= (PAGEFMT_SPARE_26 << PAGEFMT_SPARE_SHIFT);
  311. break;
  312. case 27:
  313. fmt |= (PAGEFMT_SPARE_27 << PAGEFMT_SPARE_SHIFT);
  314. break;
  315. case 28:
  316. fmt |= (PAGEFMT_SPARE_28 << PAGEFMT_SPARE_SHIFT);
  317. break;
  318. case 32:
  319. fmt |= (PAGEFMT_SPARE_32 << PAGEFMT_SPARE_SHIFT);
  320. break;
  321. case 36:
  322. fmt |= (PAGEFMT_SPARE_36 << PAGEFMT_SPARE_SHIFT);
  323. break;
  324. case 40:
  325. fmt |= (PAGEFMT_SPARE_40 << PAGEFMT_SPARE_SHIFT);
  326. break;
  327. case 44:
  328. fmt |= (PAGEFMT_SPARE_44 << PAGEFMT_SPARE_SHIFT);
  329. break;
  330. case 48:
  331. fmt |= (PAGEFMT_SPARE_48 << PAGEFMT_SPARE_SHIFT);
  332. break;
  333. case 49:
  334. fmt |= (PAGEFMT_SPARE_49 << PAGEFMT_SPARE_SHIFT);
  335. break;
  336. case 50:
  337. fmt |= (PAGEFMT_SPARE_50 << PAGEFMT_SPARE_SHIFT);
  338. break;
  339. case 51:
  340. fmt |= (PAGEFMT_SPARE_51 << PAGEFMT_SPARE_SHIFT);
  341. break;
  342. case 52:
  343. fmt |= (PAGEFMT_SPARE_52 << PAGEFMT_SPARE_SHIFT);
  344. break;
  345. case 62:
  346. fmt |= (PAGEFMT_SPARE_62 << PAGEFMT_SPARE_SHIFT);
  347. break;
  348. case 63:
  349. fmt |= (PAGEFMT_SPARE_63 << PAGEFMT_SPARE_SHIFT);
  350. break;
  351. case 64:
  352. fmt |= (PAGEFMT_SPARE_64 << PAGEFMT_SPARE_SHIFT);
  353. break;
  354. default:
  355. dev_err(nfc->dev, "invalid spare per sector %d\n", spare);
  356. return -EINVAL;
  357. }
  358. fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT;
  359. fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT;
  360. nfi_writew(nfc, fmt, NFI_PAGEFMT);
  361. nfc->ecc_cfg.strength = chip->ecc.strength;
  362. nfc->ecc_cfg.len = chip->ecc.size + mtk_nand->fdm.ecc_size;
  363. return 0;
  364. }
/*
 * select_chip hook: reprogram the controller for this chip's geometry and
 * drive the matching chip-select line.  A negative @chip deselects and
 * leaves the hardware untouched.
 */
static void mtk_nfc_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mtk_nfc *nfc = nand_get_controller_data(nand);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand);

	if (chip < 0)
		return;

	mtk_nfc_hw_runtime_config(mtd);

	nfi_writel(nfc, mtk_nand->sels[chip], NFI_CSEL);
}
  375. static int mtk_nfc_dev_ready(struct mtd_info *mtd)
  376. {
  377. struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
  378. if (nfi_readl(nfc, NFI_STA) & STA_BUSY)
  379. return 0;
  380. return 1;
  381. }
/*
 * cmd_ctrl hook.  Address cycles (NAND_ALE) go straight to the address
 * engine; command cycles (NAND_CLE) first reset the NFI core and switch it
 * into custom-operation mode before the opcode is issued.
 */
static void mtk_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
	struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));

	if (ctrl & NAND_ALE) {
		mtk_nfc_send_address(nfc, dat);
	} else if (ctrl & NAND_CLE) {
		mtk_nfc_hw_reset(nfc);

		nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG);
		mtk_nfc_send_command(nfc, dat);
	}
}
/*
 * Busy-wait until the PIO data port signals a byte can be transferred.
 * On timeout only logs an error; callers proceed regardless.
 */
static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc)
{
	int rc;
	u8 val;

	rc = readb_poll_timeout_atomic(nfc->regs + NFI_PIO_DIRDY, val,
				       val & PIO_DI_RDY, 10, MTK_TIMEOUT);
	if (rc < 0)
		dev_err(nfc->dev, "data not ready\n");
}
/*
 * read_byte hook: fetch one byte through the PIO data port.  If the NFI
 * state machine is not already in custom-data mode, first configure
 * byte-wide reads and kick off a maximal burst so later calls can keep
 * draining it.
 */
static inline u8 mtk_nfc_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	u32 reg;

	/* after each byte read, the NFI_STA reg is reset by the hardware */
	reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
	if (reg != NFI_FSM_CUSTDATA) {
		reg = nfi_readw(nfc, NFI_CNFG);
		reg |= CNFG_BYTE_RW | CNFG_READ_EN;
		nfi_writew(nfc, reg, NFI_CNFG);

		/*
		 * set to max sector to allow the HW to continue reading over
		 * unaligned accesses
		 */
		reg = (MTK_MAX_SECTOR << CON_SEC_SHIFT) | CON_BRD;
		nfi_writel(nfc, reg, NFI_CON);

		/* trigger to fetch data */
		nfi_writew(nfc, STAR_EN, NFI_STRDATA);
	}

	mtk_nfc_wait_ioready(nfc);

	return nfi_readb(nfc, NFI_DATAR);
}
  425. static void mtk_nfc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
  426. {
  427. int i;
  428. for (i = 0; i < len; i++)
  429. buf[i] = mtk_nfc_read_byte(mtd);
  430. }
/*
 * write_byte hook: push one byte through the PIO data port, entering
 * byte-wide custom-data write mode first if the FSM is not already there.
 */
static void mtk_nfc_write_byte(struct mtd_info *mtd, u8 byte)
{
	struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
	u32 reg;

	reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
	if (reg != NFI_FSM_CUSTDATA) {
		reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
		nfi_writew(nfc, reg, NFI_CNFG);

		/* max sector count keeps the burst open over unaligned sizes */
		reg = MTK_MAX_SECTOR << CON_SEC_SHIFT | CON_BWR;
		nfi_writel(nfc, reg, NFI_CON);

		nfi_writew(nfc, STAR_EN, NFI_STRDATA);
	}

	mtk_nfc_wait_ioready(nfc);
	nfi_writeb(nfc, byte, NFI_DATAW);
}
  446. static void mtk_nfc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
  447. {
  448. int i;
  449. for (i = 0; i < len; i++)
  450. mtk_nfc_write_byte(mtd, buf[i]);
  451. }
/*
 * Run the ECC engine (DMA mode, encode) over one raw sector: data plus its
 * FDM bytes.
 * NOTE(review): mtk_ecc_encode() is assumed to deposit the parity back into
 * the sector's spare area — confirm against mtk_ecc.c.
 * Returns mtk_ecc_encode()'s result (0 or negative error).
 */
static int mtk_nfc_sector_encode(struct nand_chip *chip, u8 *data)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	int size = chip->ecc.size + mtk_nand->fdm.reg_size;

	nfc->ecc_cfg.mode = ECC_DMA_MODE;
	nfc->ecc_cfg.op = ECC_ENCODE;

	return mtk_ecc_encode(nfc->ecc, &nfc->ecc_cfg, data, size);
}
/*
 * Stub bad-mark swap for layouts where the bad-block mark needs no
 * relocation; installed as bad_mark.bm_swap so callers never branch.
 */
static void mtk_nfc_no_bad_mark_swap(struct mtd_info *a, u8 *b, int c)
{
	/* nop */
}
  465. static void mtk_nfc_bad_mark_swap(struct mtd_info *mtd, u8 *buf, int raw)
  466. {
  467. struct nand_chip *chip = mtd_to_nand(mtd);
  468. struct mtk_nfc_nand_chip *nand = to_mtk_nand(chip);
  469. u32 bad_pos = nand->bad_mark.pos;
  470. if (raw)
  471. bad_pos += nand->bad_mark.sec * mtk_data_len(chip);
  472. else
  473. bad_pos += nand->bad_mark.sec * chip->ecc.size;
  474. swap(chip->oob_poi[0], buf[bad_pos]);
  475. }
/*
 * Build a raw page image in nfc->buffer for a partial-page program: every
 * sector's data is copied in, but only sectors covering [offset, offset+len)
 * get their FDM bytes and ECC parity generated; the rest stay 0xff.
 * Returns 0 on success or a negative error from the ECC encoder.
 */
static int mtk_nfc_format_subpage(struct mtd_info *mtd, u32 offset,
				  u32 len, const u8 *buf)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
	u32 start, end;
	int i, ret;

	/* first and one-past-last sector touched by the write */
	start = offset / chip->ecc.size;
	end = DIV_ROUND_UP(offset + len, chip->ecc.size);

	memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
	for (i = 0; i < chip->ecc.steps; i++) {
		memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
		       chip->ecc.size);

		/* sectors outside the written range keep 0xff and no ECC */
		if (start > i || i >= end)
			continue;

		if (i == mtk_nand->bad_mark.sec)
			mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);

		memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);

		/* program the CRC back to the OOB */
		ret = mtk_nfc_sector_encode(chip, mtk_data_ptr(chip, i));
		if (ret < 0)
			return ret;
	}

	return 0;
}
/*
 * Build a full raw page image in nfc->buffer: interleave each sector's data
 * (or 0xff when @buf is NULL, e.g. OOB-only writes) with its FDM bytes,
 * applying the bad-mark swap on the affected sector.  No ECC is generated
 * here — this feeds the raw write path.
 */
static void mtk_nfc_format_page(struct mtd_info *mtd, const u8 *buf)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
	u32 i;

	memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
	for (i = 0; i < chip->ecc.steps; i++) {
		if (buf)
			memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
			       chip->ecc.size);

		if (i == mtk_nand->bad_mark.sec)
			mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);

		memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
	}
}
/*
 * Copy per-sector FDM (free OOB) bytes out of the controller's FDM register
 * pairs into chip->oob_poi.  The register banks are indexed from 0 for the
 * first sector of the current transfer, hence FDML(i)/FDMM(i) paired with
 * oob_ptr(start + i).  FDML holds bytes 0-3, FDMM bytes 4-7.
 */
static inline void mtk_nfc_read_fdm(struct nand_chip *chip, u32 start,
				    u32 sectors)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
	u32 vall, valm;
	u8 *oobptr;
	int i, j;

	for (i = 0; i < sectors; i++) {
		oobptr = oob_ptr(chip, start + i);
		vall = nfi_readl(nfc, NFI_FDML(i));
		valm = nfi_readl(nfc, NFI_FDMM(i));

		/* unpack little-endian: byte j from the matching 32-bit half */
		for (j = 0; j < fdm->reg_size; j++)
			oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
	}
}
/*
 * Load every sector's FDM register pair from chip->oob_poi ahead of a
 * HW-ECC page program; register bytes beyond fdm->reg_size are padded
 * with 0xff (erased value).
 */
static inline void mtk_nfc_write_fdm(struct nand_chip *chip)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
	u32 vall, valm;
	u8 *oobptr;
	int i, j;

	for (i = 0; i < chip->ecc.steps; i++) {
		oobptr = oob_ptr(chip, i);
		vall = 0;
		valm = 0;
		/* pack bytes 0-3 into FDML, 4-7 into FDMM (little-endian) */
		for (j = 0; j < 8; j++) {
			if (j < 4)
				vall |= (j < fdm->reg_size ? oobptr[j] : 0xff)
						<< (j * 8);
			else
				valm |= (j < fdm->reg_size ? oobptr[j] : 0xff)
						<< ((j - 4) * 8);
		}
		nfi_writel(nfc, vall, NFI_FDML(i));
		nfi_writel(nfc, valm, NFI_FDMM(i));
	}
}
/*
 * DMA @len bytes of @buf to the chip: map the buffer, arm the AHB-done
 * interrupt, start a burst write, then wait first for the DMA completion
 * and afterwards for the controller to have clocked out all sectors.
 * @page is unused here; the page address was already programmed by the
 * caller's command sequence.
 * Returns 0 on success or a negative error code.
 */
static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
				 const u8 *buf, int page, int len)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct device *dev = nfc->dev;
	dma_addr_t addr;
	u32 reg;
	int ret;

	addr = dma_map_single(dev, (void *)buf, len, DMA_TO_DEVICE);
	ret = dma_mapping_error(nfc->dev, addr);
	if (ret) {
		dev_err(nfc->dev, "dma mapping error\n");
		return -EINVAL;
	}

	reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AHB | CNFG_DMA_BURST_EN;
	nfi_writew(nfc, reg, NFI_CNFG);

	nfi_writel(nfc, chip->ecc.steps << CON_SEC_SHIFT, NFI_CON);
	nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
	nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);

	init_completion(&nfc->done);

	/* start the burst write */
	reg = nfi_readl(nfc, NFI_CON) | CON_BWR;
	nfi_writel(nfc, reg, NFI_CON);
	nfi_writew(nfc, STAR_EN, NFI_STRDATA);

	ret = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
	if (!ret) {
		dev_err(dev, "program ahb done timeout\n");
		nfi_writew(nfc, 0, NFI_INTR_EN);
		ret = -ETIMEDOUT;
		goto timeout;
	}

	/* wait until all sectors have been clocked out to the device */
	ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
					ADDRCNTR_SEC(reg) >= chip->ecc.steps,
					10, MTK_TIMEOUT);
	if (ret)
		dev_err(dev, "hwecc write timeout\n");

timeout:

	dma_unmap_single(nfc->dev, addr, len, DMA_TO_DEVICE);
	nfi_writel(nfc, 0, NFI_CON);

	return ret;
}
/*
 * Common page-program path.  In ECC mode (@raw == 0) the FDM comes from the
 * FDM registers and the parity from the hardware engine, so only the data
 * area is DMAed (via the bounce buffer, bad mark swapped).  In raw mode the
 * caller's buffer already holds the full interleaved page.
 * Returns 0 on success or a negative error code.
 */
static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
			      const u8 *buf, int page, int raw)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	size_t len;
	const u8 *bufpoi;
	u32 reg;
	int ret;

	if (!raw) {
		/* OOB => FDM: from register, ECC: from HW */
		reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
		nfi_writew(nfc, reg | CNFG_HW_ECC_EN, NFI_CNFG);

		nfc->ecc_cfg.op = ECC_ENCODE;
		nfc->ecc_cfg.mode = ECC_NFI_MODE;
		ret = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
		if (ret) {
			/* clear NFI config */
			reg = nfi_readw(nfc, NFI_CNFG);
			reg &= ~(CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
			nfi_writew(nfc, reg, NFI_CNFG);

			return ret;
		}

		memcpy(nfc->buffer, buf, mtd->writesize);
		mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, raw);
		bufpoi = nfc->buffer;

		/* write OOB into the FDM registers (OOB area in MTK NAND) */
		mtk_nfc_write_fdm(chip);
	} else {
		bufpoi = buf;
	}

	/* raw transfers include the spare area in the DMA length */
	len = mtd->writesize + (raw ? mtd->oobsize : 0);
	ret = mtk_nfc_do_write_page(mtd, chip, bufpoi, page, len);

	if (!raw)
		mtk_ecc_disable(nfc->ecc);

	return ret;
}
/* ecc.write_page hook: program a full page with hardware ECC. */
static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd,
				    struct nand_chip *chip, const u8 *buf,
				    int oob_on, int page)
{
	return mtk_nfc_write_page(mtd, chip, buf, page, 0);
}
/*
 * ecc.write_page_raw hook: lay out data + FDM in the bounce buffer in the
 * controller's interleaved raw format, then program it with ECC bypassed.
 */
static int mtk_nfc_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				  const u8 *buf, int oob_on, int pg)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);

	mtk_nfc_format_page(mtd, buf);
	return mtk_nfc_write_page(mtd, chip, nfc->buffer, pg, 1);
}
/*
 * ecc.write_subpage hook: build a raw image where only the written sectors
 * carry FDM + software-generated ECC, then program it through the raw path.
 */
static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd,
				       struct nand_chip *chip, u32 offset,
				       u32 data_len, const u8 *buf,
				       int oob_on, int page)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	int ret;

	ret = mtk_nfc_format_subpage(mtd, offset, data_len, buf);
	if (ret < 0)
		return ret;

	/* use the data in the private buffer (now with FDM and CRC) */
	return mtk_nfc_write_page(mtd, chip, nfc->buffer, page, 1);
}
/*
 * ecc.write_oob hook: issue SEQIN, program a raw page whose data area stays
 * 0xff (buf == NULL), then PAGEPROG and a status check.
 * Returns 0 on success, -EIO on program failure.
 */
static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
				 int page)
{
	int ret;

	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);

	ret = mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
	if (ret < 0)
		return -EIO;

	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
	ret = chip->waitfunc(mtd, chip);

	return ret & NAND_STATUS_FAIL ? -EIO : 0;
}
/*
 * Fold the ECC engine's statistics for the last read into mtd->ecc_stats.
 * If the controller flagged an empty (erased) page, fake an all-0xff result
 * for both data and FDM instead and report zero bitflips.
 * Returns the bitflip figure reported by the ECC engine.
 */
static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_ecc_stats stats;
	int rc, i;

	rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
	if (rc) {
		memset(buf, 0xff, sectors * chip->ecc.size);
		for (i = 0; i < sectors; i++)
			memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
		return 0;
	}

	mtk_ecc_get_stats(nfc->ecc, &stats, sectors);
	mtd->ecc_stats.corrected += stats.corrected;
	mtd->ecc_stats.failed += stats.failed;

	return stats.bitflips;
}
/*
 * Core read path: DMA one or more consecutive sectors into @bufpoi.
 * In ECC mode the hardware engine corrects the data and the FDM bytes are
 * collected from the FDM registers; in raw mode the interleaved sectors
 * (data + spare) are transferred verbatim.
 * Returns max bitflips (>= 0) on success or a negative error code.
 */
static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
				u32 data_offs, u32 readlen,
				u8 *bufpoi, int page, int raw)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	u32 spare = mtk_nand->spare_per_sector;
	u32 column, sectors, start, end, reg;
	dma_addr_t addr;
	int bitflips;
	size_t len;
	u8 *buf;
	int rc;

	/* sector span covering [data_offs, data_offs + readlen) */
	start = data_offs / chip->ecc.size;
	end = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);

	sectors = end - start;
	column = start * (chip->ecc.size + spare);

	len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);
	buf = bufpoi + start * chip->ecc.size;

	/* seek to the first sector when not starting at column 0 */
	if (column != 0)
		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1);

	addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
	rc = dma_mapping_error(nfc->dev, addr);
	if (rc) {
		dev_err(nfc->dev, "dma mapping error\n");

		return -EINVAL;
	}

	reg = nfi_readw(nfc, NFI_CNFG);
	reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_AHB;
	if (!raw) {
		reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
		nfi_writew(nfc, reg, NFI_CNFG);

		nfc->ecc_cfg.mode = ECC_NFI_MODE;
		nfc->ecc_cfg.sectors = sectors;
		nfc->ecc_cfg.op = ECC_DECODE;
		rc = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
		if (rc) {
			dev_err(nfc->dev, "ecc enable\n");
			/* clear NFI_CNFG */
			reg &= ~(CNFG_DMA_BURST_EN | CNFG_AHB | CNFG_READ_EN |
				CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
			nfi_writew(nfc, reg, NFI_CNFG);
			dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);

			return rc;
		}
	} else {
		nfi_writew(nfc, reg, NFI_CNFG);
	}

	nfi_writel(nfc, sectors << CON_SEC_SHIFT, NFI_CON);
	nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
	nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);

	init_completion(&nfc->done);

	/* start the burst read */
	reg = nfi_readl(nfc, NFI_CON) | CON_BRD;
	nfi_writel(nfc, reg, NFI_CON);
	nfi_writew(nfc, STAR_EN, NFI_STRDATA);

	/* DMA-done timeout is only a warning: the byte counter is polled next */
	rc = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
	if (!rc)
		dev_warn(nfc->dev, "read ahb/dma done timeout\n");

	rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
				       ADDRCNTR_SEC(reg) >= sectors, 10,
				       MTK_TIMEOUT);
	if (rc < 0) {
		dev_err(nfc->dev, "subpage done timeout\n");
		bitflips = -EIO;
	} else {
		bitflips = 0;
		if (!raw) {
			rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
			bitflips = rc < 0 ? -ETIMEDOUT :
				mtk_nfc_update_ecc_stats(mtd, buf, sectors);
			mtk_nfc_read_fdm(chip, start, sectors);
		}
	}

	dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);

	if (raw)
		goto done;

	mtk_ecc_disable(nfc->ecc);

	/* undo the bad-mark swap if its sector was part of this transfer */
	if (clamp(mtk_nand->bad_mark.sec, start, end) == mtk_nand->bad_mark.sec)
		mtk_nand->bad_mark.bm_swap(mtd, bufpoi, raw);
done:
	nfi_writel(nfc, 0, NFI_CON);

	return bitflips;
}
/* ECC-corrected subpage read: delegate to the common subpage reader in
 * non-raw mode (hardware ECC enabled).
 */
static int mtk_nfc_read_subpage_hwecc(struct mtd_info *mtd,
				      struct nand_chip *chip, u32 off,
				      u32 len, u8 *p, int pg)
{
	return mtk_nfc_read_subpage(mtd, chip, off, len, p, pg, 0);
}
/* ECC-corrected full-page read: a subpage read covering offset 0 up to
 * the whole writesize, in non-raw mode.
 */
static int mtk_nfc_read_page_hwecc(struct mtd_info *mtd,
				   struct nand_chip *chip, u8 *p,
				   int oob_on, int pg)
{
	return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0);
}
/*
 * Raw page read: pull the whole page (data + spare, no ECC correction)
 * into the controller bounce buffer, then de-interleave it per sector:
 * FDM bytes go to the OOB area, data bytes (if @buf is non-NULL) to the
 * caller's buffer.
 */
static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				 u8 *buf, int oob_on, int page)
{
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
	int i, ret;

	/* pre-fill with 0xff so bytes the transfer leaves untouched read
	 * back as erased
	 */
	memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
	ret = mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, nfc->buffer,
				   page, 1);
	if (ret < 0)
		return ret;

	for (i = 0; i < chip->ecc.steps; i++) {
		/* copy this sector's FDM bytes out first ... */
		memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);

		/* ... then undo the bad-block-mark swap in the bounce
		 * buffer for the sector that holds the mark, before the
		 * data copy below reads from it
		 */
		if (i == mtk_nand->bad_mark.sec)
			mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);

		if (buf)
			memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i),
			       chip->ecc.size);
	}

	return ret;
}
/* OOB-only read: issue READ0 for the page, then reuse the raw page
 * reader with a NULL data buffer so only the FDM/OOB bytes are copied
 * out.
 */
static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
				int page)
{
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);

	return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page);
}
/*
 * One-time controller setup: program the access timings, the
 * ready/busy polling interval and the page format, then reset the NFI
 * state machine and quiesce interrupts.
 */
static inline void mtk_nfc_hw_init(struct mtk_nfc *nfc)
{
	/*
	 * ACCON: access timing control register
	 * -------------------------------------
	 * 31:28: minimum required time for CS post pulling down after
	 *	accessing the device
	 * 27:22: minimum required time for CS pre pulling down before
	 *	accessing the device
	 * 21:16: minimum required time from NCEB low to NREB low
	 * 15:12: minimum required time from NWEB high to NREB low.
	 * 11:08: write enable hold time
	 * 07:04: write wait states
	 * 03:00: read wait states
	 */
	nfi_writel(nfc, 0x10804211, NFI_ACCCON);

	/*
	 * CNRNB: nand ready/busy register
	 * -------------------------------
	 * 7:4: timeout register for polling the NAND busy/ready signal
	 * 0  : poll the status of the busy/ready signal after [7:4]*16 cycles.
	 */
	nfi_writew(nfc, 0xf1, NFI_CNRNB);
	nfi_writew(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT);

	mtk_nfc_hw_reset(nfc);

	/* drain stale interrupt status, then mask all interrupt sources */
	nfi_readl(nfc, NFI_INTR_STA);
	nfi_writel(nfc, 0, NFI_INTR_EN);
}
/*
 * NFI interrupt handler: if one of the sources we enabled fired, mask
 * it (so it does not retrigger) and wake the waiter blocked on
 * nfc->done; otherwise report the interrupt as not ours.
 */
static irqreturn_t mtk_nfc_irq(int irq, void *id)
{
	struct mtk_nfc *nfc = id;
	u16 sta, ien;

	sta = nfi_readw(nfc, NFI_INTR_STA);
	ien = nfi_readw(nfc, NFI_INTR_EN);

	/* no enabled source pending: shared-IRQ line belongs to someone else */
	if (!(sta & ien))
		return IRQ_NONE;

	/* disable exactly the sources that asserted */
	nfi_writew(nfc, ~sta & ien, NFI_INTR_EN);
	complete(&nfc->done);

	return IRQ_HANDLED;
}
  858. static int mtk_nfc_enable_clk(struct device *dev, struct mtk_nfc_clk *clk)
  859. {
  860. int ret;
  861. ret = clk_prepare_enable(clk->nfi_clk);
  862. if (ret) {
  863. dev_err(dev, "failed to enable nfi clk\n");
  864. return ret;
  865. }
  866. ret = clk_prepare_enable(clk->pad_clk);
  867. if (ret) {
  868. dev_err(dev, "failed to enable pad clk\n");
  869. clk_disable_unprepare(clk->nfi_clk);
  870. return ret;
  871. }
  872. return 0;
  873. }
/* Counterpart of mtk_nfc_enable_clk(): gate and unprepare both clocks. */
static void mtk_nfc_disable_clk(struct mtk_nfc_clk *clk)
{
	clk_disable_unprepare(clk->nfi_clk);
	clk_disable_unprepare(clk->pad_clk);
}
  879. static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
  880. struct mtd_oob_region *oob_region)
  881. {
  882. struct nand_chip *chip = mtd_to_nand(mtd);
  883. struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
  884. struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
  885. u32 eccsteps;
  886. eccsteps = mtd->writesize / chip->ecc.size;
  887. if (section >= eccsteps)
  888. return -ERANGE;
  889. oob_region->length = fdm->reg_size - fdm->ecc_size;
  890. oob_region->offset = section * fdm->reg_size + fdm->ecc_size;
  891. return 0;
  892. }
  893. static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
  894. struct mtd_oob_region *oob_region)
  895. {
  896. struct nand_chip *chip = mtd_to_nand(mtd);
  897. struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
  898. u32 eccsteps;
  899. if (section)
  900. return -ERANGE;
  901. eccsteps = mtd->writesize / chip->ecc.size;
  902. oob_region->offset = mtk_nand->fdm.reg_size * eccsteps;
  903. oob_region->length = mtd->oobsize - oob_region->offset;
  904. return 0;
  905. }
/* OOB layout callbacks registered on every MTD created by this driver. */
static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = {
	.free = mtk_nfc_ooblayout_free,
	.ecc = mtk_nfc_ooblayout_ecc,
};
  910. static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd)
  911. {
  912. struct nand_chip *nand = mtd_to_nand(mtd);
  913. struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
  914. u32 ecc_bytes;
  915. ecc_bytes = DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8);
  916. fdm->reg_size = chip->spare_per_sector - ecc_bytes;
  917. if (fdm->reg_size > NFI_FDM_MAX_SIZE)
  918. fdm->reg_size = NFI_FDM_MAX_SIZE;
  919. /* bad block mark storage */
  920. fdm->ecc_size = 1;
  921. }
  922. static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl,
  923. struct mtd_info *mtd)
  924. {
  925. struct nand_chip *nand = mtd_to_nand(mtd);
  926. if (mtd->writesize == 512) {
  927. bm_ctl->bm_swap = mtk_nfc_no_bad_mark_swap;
  928. } else {
  929. bm_ctl->bm_swap = mtk_nfc_bad_mark_swap;
  930. bm_ctl->sec = mtd->writesize / mtk_data_len(nand);
  931. bm_ctl->pos = mtd->writesize % mtk_data_len(nand);
  932. }
  933. }
  934. static void mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd)
  935. {
  936. struct nand_chip *nand = mtd_to_nand(mtd);
  937. u32 spare[] = {16, 26, 27, 28, 32, 36, 40, 44,
  938. 48, 49, 50, 51, 52, 62, 63, 64};
  939. u32 eccsteps, i;
  940. eccsteps = mtd->writesize / nand->ecc.size;
  941. *sps = mtd->oobsize / eccsteps;
  942. if (nand->ecc.size == 1024)
  943. *sps >>= 1;
  944. for (i = 0; i < ARRAY_SIZE(spare); i++) {
  945. if (*sps <= spare[i]) {
  946. if (!i)
  947. *sps = spare[i];
  948. else if (*sps != spare[i])
  949. *sps = spare[i - 1];
  950. break;
  951. }
  952. }
  953. if (i >= ARRAY_SIZE(spare))
  954. *sps = spare[ARRAY_SIZE(spare) - 1];
  955. if (nand->ecc.size == 1024)
  956. *sps <<= 1;
  957. }
/*
 * Choose the ECC geometry for this chip. When the DT does not specify
 * ecc.size/ecc.strength, fall back to the datasheet values, align them
 * to the 512/1024-byte sector sizes this controller supports, and then
 * trade ECC strength against leftover spare bytes so the FDM region
 * fits its hardware limits.
 */
static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	u32 spare;
	int free;

	/* support only ecc hw mode */
	if (nand->ecc.mode != NAND_ECC_HW) {
		dev_err(dev, "ecc.mode not supported\n");
		return -EINVAL;
	}

	/* if optional dt settings not present */
	if (!nand->ecc.size || !nand->ecc.strength) {
		/* use datasheet requirements */
		nand->ecc.strength = nand->ecc_strength_ds;
		nand->ecc.size = nand->ecc_step_ds;

		/*
		 * align eccstrength and eccsize
		 * this controller only supports 512 and 1024 sizes
		 */
		if (nand->ecc.size < 1024) {
			if (mtd->writesize > 512) {
				/* doubling the step size doubles the data
				 * it covers, so double the strength too
				 */
				nand->ecc.size = 1024;
				nand->ecc.strength <<= 1;
			} else {
				nand->ecc.size = 512;
			}
		} else {
			nand->ecc.size = 1024;
		}

		mtk_nfc_set_spare_per_sector(&spare, mtd);

		/* calculate oob bytes except ecc parity data */
		free = ((nand->ecc.strength * ECC_PARITY_BITS) + 7) >> 3;
		free = spare - free;

		/*
		 * enhance ecc strength if oob left is bigger than max FDM size
		 * or reduce ecc strength if oob size is not enough for ecc
		 * parity data.
		 */
		if (free > NFI_FDM_MAX_SIZE) {
			spare -= NFI_FDM_MAX_SIZE;
			nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
		} else if (free < 0) {
			spare -= NFI_FDM_MIN_SIZE;
			nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
		}
	}

	/* let the ECC engine adjust the strength to a value it implements */
	mtk_ecc_adjust_strength(&nand->ecc.strength);

	dev_info(dev, "eccsize %d eccstrength %d\n",
		 nand->ecc.size, nand->ecc.strength);

	return 0;
}
/*
 * Bring up one NAND chip described by DT node @np: read its
 * chip-select lines from "reg", wire the nand_chip/mtd callbacks,
 * identify the device, derive the ECC/FDM/bad-mark layout, allocate
 * the controller bounce buffer and register the resulting MTD.
 */
static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
				  struct device_node *np)
{
	struct mtk_nfc_nand_chip *chip;
	struct nand_chip *nand;
	struct mtd_info *mtd;
	int nsels, len;
	u32 tmp;
	int ret;
	int i;

	if (!of_get_property(np, "reg", &nsels))
		return -ENODEV;

	/* "reg" carries one u32 chip-select id per entry */
	nsels /= sizeof(u32);
	if (!nsels || nsels > MTK_NAND_MAX_NSELS) {
		dev_err(dev, "invalid reg property size %d\n", nsels);
		return -EINVAL;
	}

	/* room for one CS id byte per select after the struct itself */
	chip = devm_kzalloc(dev, sizeof(*chip) + nsels * sizeof(u8),
			    GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->nsels = nsels;
	for (i = 0; i < nsels; i++) {
		ret = of_property_read_u32_index(np, "reg", i, &tmp);
		if (ret) {
			dev_err(dev, "reg property failure : %d\n", ret);
			return ret;
		}
		chip->sels[i] = tmp;
	}

	nand = &chip->nand;
	nand->controller = &nfc->controller;

	nand_set_flash_node(nand, np);
	nand_set_controller_data(nand, nfc);

	nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ;
	nand->dev_ready = mtk_nfc_dev_ready;
	nand->select_chip = mtk_nfc_select_chip;
	nand->write_byte = mtk_nfc_write_byte;
	nand->write_buf = mtk_nfc_write_buf;
	nand->read_byte = mtk_nfc_read_byte;
	nand->read_buf = mtk_nfc_read_buf;
	nand->cmd_ctrl = mtk_nfc_cmd_ctrl;

	/* set default mode in case dt entry is missing */
	nand->ecc.mode = NAND_ECC_HW;

	nand->ecc.write_subpage = mtk_nfc_write_subpage_hwecc;
	nand->ecc.write_page_raw = mtk_nfc_write_page_raw;
	nand->ecc.write_page = mtk_nfc_write_page_hwecc;
	nand->ecc.write_oob_raw = mtk_nfc_write_oob_std;
	nand->ecc.write_oob = mtk_nfc_write_oob_std;

	nand->ecc.read_subpage = mtk_nfc_read_subpage_hwecc;
	nand->ecc.read_page_raw = mtk_nfc_read_page_raw;
	nand->ecc.read_page = mtk_nfc_read_page_hwecc;
	nand->ecc.read_oob_raw = mtk_nfc_read_oob_std;
	nand->ecc.read_oob = mtk_nfc_read_oob_std;

	mtd = nand_to_mtd(nand);
	mtd->owner = THIS_MODULE;
	mtd->dev.parent = dev;
	mtd->name = MTK_NAME;
	mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops);

	mtk_nfc_hw_init(nfc);

	ret = nand_scan_ident(mtd, nsels, NULL);
	if (ret)
		return -ENODEV;

	/* store bbt magic in page, cause OOB is not protected */
	if (nand->bbt_options & NAND_BBT_USE_FLASH)
		nand->bbt_options |= NAND_BBT_NO_OOB;

	ret = mtk_nfc_ecc_init(dev, mtd);
	if (ret)
		return -EINVAL;

	if (nand->options & NAND_BUSWIDTH_16) {
		dev_err(dev, "16bits buswidth not supported");
		return -EINVAL;
	}

	/* layout depends on the ECC geometry chosen just above */
	mtk_nfc_set_spare_per_sector(&chip->spare_per_sector, mtd);
	mtk_nfc_set_fdm(&chip->fdm, mtd);
	mtk_nfc_set_bad_mark_ctl(&chip->bad_mark, mtd);

	/* bounce buffer sized for a full page plus its OOB area */
	len = mtd->writesize + mtd->oobsize;
	nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL);
	if (!nfc->buffer)
		return -ENOMEM;

	ret = nand_scan_tail(mtd);
	if (ret)
		return -ENODEV;

	ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
	if (ret) {
		dev_err(dev, "mtd parse partition error\n");
		nand_release(mtd);
		return ret;
	}

	list_add_tail(&chip->node, &nfc->chips);

	return 0;
}
  1101. static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
  1102. {
  1103. struct device_node *np = dev->of_node;
  1104. struct device_node *nand_np;
  1105. int ret;
  1106. for_each_child_of_node(np, nand_np) {
  1107. ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np);
  1108. if (ret) {
  1109. of_node_put(nand_np);
  1110. return ret;
  1111. }
  1112. }
  1113. return 0;
  1114. }
/*
 * Bind the controller: acquire the ECC engine (may defer probing),
 * MMIO regs, clocks and IRQ, then enumerate the NAND chips from DT.
 * Non-devm resources (clocks, ECC handle) are unwound via gotos.
 */
static int mtk_nfc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct mtk_nfc *nfc;
	struct resource *res;
	int ret, irq;

	nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
	if (!nfc)
		return -ENOMEM;

	spin_lock_init(&nfc->controller.lock);
	init_waitqueue_head(&nfc->controller.wq);
	INIT_LIST_HEAD(&nfc->chips);

	/* probe defer if not ready */
	nfc->ecc = of_mtk_ecc_get(np);
	if (IS_ERR(nfc->ecc))
		return PTR_ERR(nfc->ecc);
	else if (!nfc->ecc)
		return -ENODEV;

	nfc->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nfc->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(nfc->regs)) {
		ret = PTR_ERR(nfc->regs);
		dev_err(dev, "no nfi base\n");
		goto release_ecc;
	}

	nfc->clk.nfi_clk = devm_clk_get(dev, "nfi_clk");
	if (IS_ERR(nfc->clk.nfi_clk)) {
		dev_err(dev, "no clk\n");
		ret = PTR_ERR(nfc->clk.nfi_clk);
		goto release_ecc;
	}

	nfc->clk.pad_clk = devm_clk_get(dev, "pad_clk");
	if (IS_ERR(nfc->clk.pad_clk)) {
		dev_err(dev, "no pad clk\n");
		ret = PTR_ERR(nfc->clk.pad_clk);
		goto release_ecc;
	}

	ret = mtk_nfc_enable_clk(dev, &nfc->clk);
	if (ret)
		goto release_ecc;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "no nfi irq resource\n");
		ret = -EINVAL;
		goto clk_disable;
	}

	ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc);
	if (ret) {
		dev_err(dev, "failed to request nfi irq\n");
		goto clk_disable;
	}

	/* DMA transfers use 32-bit addresses (NFI_STRADDR) */
	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "failed to set dma mask\n");
		goto clk_disable;
	}

	platform_set_drvdata(pdev, nfc);

	ret = mtk_nfc_nand_chips_init(dev, nfc);
	if (ret) {
		dev_err(dev, "failed to init nand chips\n");
		goto clk_disable;
	}

	return 0;

clk_disable:
	mtk_nfc_disable_clk(&nfc->clk);

release_ecc:
	mtk_ecc_release(nfc->ecc);

	return ret;
}
  1186. static int mtk_nfc_remove(struct platform_device *pdev)
  1187. {
  1188. struct mtk_nfc *nfc = platform_get_drvdata(pdev);
  1189. struct mtk_nfc_nand_chip *chip;
  1190. while (!list_empty(&nfc->chips)) {
  1191. chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip,
  1192. node);
  1193. nand_release(nand_to_mtd(&chip->nand));
  1194. list_del(&chip->node);
  1195. }
  1196. mtk_ecc_release(nfc->ecc);
  1197. mtk_nfc_disable_clk(&nfc->clk);
  1198. return 0;
  1199. }
#ifdef CONFIG_PM_SLEEP
/* Suspend: gate the controller clocks; hardware state is rebuilt on
 * resume.
 */
static int mtk_nfc_suspend(struct device *dev)
{
	struct mtk_nfc *nfc = dev_get_drvdata(dev);

	mtk_nfc_disable_clk(&nfc->clk);

	return 0;
}

/*
 * Resume: re-enable the clocks, reprogram the controller, and reset
 * every attached NAND die in case its VCC was cut during suspend.
 */
static int mtk_nfc_resume(struct device *dev)
{
	struct mtk_nfc *nfc = dev_get_drvdata(dev);
	struct mtk_nfc_nand_chip *chip;
	struct nand_chip *nand;
	struct mtd_info *mtd;
	int ret;
	u32 i;

	/* settling delay before re-enabling the hardware
	 * NOTE(review): 200us looks empirical — confirm against datasheet
	 */
	udelay(200);

	ret = mtk_nfc_enable_clk(dev, &nfc->clk);
	if (ret)
		return ret;

	mtk_nfc_hw_init(nfc);

	/* reset NAND chip if VCC was powered off */
	list_for_each_entry(chip, &nfc->chips, node) {
		nand = &chip->nand;
		mtd = nand_to_mtd(nand);
		for (i = 0; i < chip->nsels; i++) {
			nand->select_chip(mtd, i);
			nand->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
		}
	}

	return 0;
}

static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume);
#endif
/* Devicetree match table; also exported for module autoloading. */
static const struct of_device_id mtk_nfc_id_table[] = {
	{ .compatible = "mediatek,mt2701-nfc" },
	{}
};
MODULE_DEVICE_TABLE(of, mtk_nfc_id_table);
/* Platform driver glue and module registration. */
static struct platform_driver mtk_nfc_driver = {
	.probe = mtk_nfc_probe,
	.remove = mtk_nfc_remove,
	.driver = {
		.name = MTK_NAME,
		.of_match_table = mtk_nfc_id_table,
#ifdef CONFIG_PM_SLEEP
		.pm = &mtk_nfc_pm_ops,
#endif
	},
};

module_platform_driver(mtk_nfc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");