qcom_nandc.c

/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/delay.h>

/* NANDc reg offsets */
#define	NAND_FLASH_CMD			0x00
#define	NAND_ADDR0			0x04
#define	NAND_ADDR1			0x08
#define	NAND_FLASH_CHIP_SELECT		0x0c
#define	NAND_EXEC_CMD			0x10
#define	NAND_FLASH_STATUS		0x14
#define	NAND_BUFFER_STATUS		0x18
#define	NAND_DEV0_CFG0			0x20
#define	NAND_DEV0_CFG1			0x24
#define	NAND_DEV0_ECC_CFG		0x28
#define	NAND_DEV1_ECC_CFG		0x2c
#define	NAND_DEV1_CFG0			0x30
#define	NAND_DEV1_CFG1			0x34
#define	NAND_READ_ID			0x40
#define	NAND_READ_STATUS		0x44
#define	NAND_DEV_CMD0			0xa0
#define	NAND_DEV_CMD1			0xa4
#define	NAND_DEV_CMD2			0xa8
#define	NAND_DEV_CMD_VLD		0xac
#define	SFLASHC_BURST_CFG		0xe0
#define	NAND_ERASED_CW_DETECT_CFG	0xe8
#define	NAND_ERASED_CW_DETECT_STATUS	0xec
#define	NAND_EBI2_ECC_BUF_CFG		0xf0
#define	FLASH_BUF_ACC			0x100

#define	NAND_CTRL			0xf00
#define	NAND_VERSION			0xf08
#define	NAND_READ_LOCATION_0		0xf20
#define	NAND_READ_LOCATION_1		0xf24

/* dummy register offsets, used by write_reg_dma */
#define	NAND_DEV_CMD1_RESTORE		0xdead
#define	NAND_DEV_CMD_VLD_RESTORE	0xbeef

/* NAND_FLASH_CMD bits */
#define	PAGE_ACC			BIT(4)
#define	LAST_PAGE			BIT(5)

/* NAND_FLASH_CHIP_SELECT bits */
#define	NAND_DEV_SEL			0
#define	DM_EN				BIT(2)

/* NAND_FLASH_STATUS bits */
#define	FS_OP_ERR			BIT(4)
#define	FS_READY_BSY_N			BIT(5)
#define	FS_MPU_ERR			BIT(8)
#define	FS_DEVICE_STS_ERR		BIT(16)
#define	FS_DEVICE_WP			BIT(23)

/* NAND_BUFFER_STATUS bits */
#define	BS_UNCORRECTABLE_BIT		BIT(8)
#define	BS_CORRECTABLE_ERR_MSK		0x1f

/* NAND_DEVn_CFG0 bits */
#define	DISABLE_STATUS_AFTER_WRITE	4
#define	CW_PER_PAGE			6
#define	UD_SIZE_BYTES			9
#define	ECC_PARITY_SIZE_BYTES_RS	19
#define	SPARE_SIZE_BYTES		23
#define	NUM_ADDR_CYCLES			27
#define	STATUS_BFR_READ			30
#define	SET_RD_MODE_AFTER_STATUS	31

/* NAND_DEVn_CFG1 bits */
#define	DEV0_CFG1_ECC_DISABLE		0
#define	WIDE_FLASH			1
#define	NAND_RECOVERY_CYCLES		2
#define	CS_ACTIVE_BSY			5
#define	BAD_BLOCK_BYTE_NUM		6
#define	BAD_BLOCK_IN_SPARE_AREA		16
#define	WR_RD_BSY_GAP			17
#define	ENABLE_BCH_ECC			27

/* NAND_DEV0_ECC_CFG bits */
#define	ECC_CFG_ECC_DISABLE		0
#define	ECC_SW_RESET			1
#define	ECC_MODE			4
#define	ECC_PARITY_SIZE_BYTES_BCH	8
#define	ECC_NUM_DATA_BYTES		16
#define	ECC_FORCE_CLK_OPEN		30

/* NAND_DEV_CMD1 bits */
#define	READ_ADDR			0

/* NAND_DEV_CMD_VLD bits */
#define	READ_START_VLD			BIT(0)
#define	READ_STOP_VLD			BIT(1)
#define	WRITE_START_VLD			BIT(2)
#define	ERASE_START_VLD			BIT(3)
#define	SEQ_READ_START_VLD		BIT(4)

/* NAND_EBI2_ECC_BUF_CFG bits */
#define	NUM_STEPS			0

/* NAND_ERASED_CW_DETECT_CFG bits */
#define	ERASED_CW_ECC_MASK		1
#define	AUTO_DETECT_RES			0
#define	MASK_ECC			(1 << ERASED_CW_ECC_MASK)
#define	RESET_ERASED_DET		(1 << AUTO_DETECT_RES)
#define	ACTIVE_ERASED_DET		(0 << AUTO_DETECT_RES)
#define	CLR_ERASED_PAGE_DET		(RESET_ERASED_DET | MASK_ECC)
#define	SET_ERASED_PAGE_DET		(ACTIVE_ERASED_DET | MASK_ECC)

/* NAND_ERASED_CW_DETECT_STATUS bits */
#define	PAGE_ALL_ERASED			BIT(7)
#define	CODEWORD_ALL_ERASED		BIT(6)
#define	PAGE_ERASED			BIT(5)
#define	CODEWORD_ERASED			BIT(4)
#define	ERASED_PAGE			(PAGE_ALL_ERASED | PAGE_ERASED)
#define	ERASED_CW			(CODEWORD_ALL_ERASED | CODEWORD_ERASED)

/* Version Mask */
#define	NAND_VERSION_MAJOR_MASK		0xf0000000
#define	NAND_VERSION_MAJOR_SHIFT	28
#define	NAND_VERSION_MINOR_MASK		0x0fff0000
#define	NAND_VERSION_MINOR_SHIFT	16

/* NAND OP_CMDs */
#define	PAGE_READ			0x2
#define	PAGE_READ_WITH_ECC		0x3
#define	PAGE_READ_WITH_ECC_SPARE	0x4
#define	PROGRAM_PAGE			0x6
#define	PAGE_PROGRAM_WITH_ECC		0x7
#define	PROGRAM_PAGE_SPARE		0x9
#define	BLOCK_ERASE			0xa
#define	FETCH_ID			0xb
#define	RESET_DEVICE			0xd

/* Default Value for NAND_DEV_CMD_VLD */
#define NAND_DEV_CMD_VLD_VAL		(READ_START_VLD | WRITE_START_VLD | \
					 ERASE_START_VLD | SEQ_READ_START_VLD)

/*
 * the NAND controller performs reads/writes with ECC in 516 byte chunks.
 * the driver calls the chunks 'step' or 'codeword' interchangeably
 */
#define NANDC_STEP_SIZE			512

/*
 * the largest page size we support is 8K, this will have 16 steps/codewords
 * of 512 bytes each
 */
#define MAX_NUM_STEPS			(SZ_8K / NANDC_STEP_SIZE)

/* we read at most 3 registers per codeword scan */
#define MAX_REG_RD			(3 * MAX_NUM_STEPS)
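/* e.g. an 8K page has 16 codewords, giving up to 3 * 16 = 48 register reads */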

/* ECC modes supported by the controller */
#define ECC_NONE	BIT(0)
#define ECC_RS_4BIT	BIT(1)
#define ECC_BCH_4BIT	BIT(2)
#define ECC_BCH_8BIT	BIT(3)
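
/*
 * a single DMA transaction: one scatterlist entry plus the prepared
 * descriptor, queued on the controller's desc_list until submit_descs()
 * runs the queue and free_descs() releases it
 */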
struct desc_info {
	struct list_head node;

	enum dma_data_direction dir;
	struct scatterlist sgl;
	struct dma_async_tx_descriptor *dma_desc;
};

/*
 * holds the current register values that we want to write. acts as a
 * contiguous chunk of memory which we use to write the controller registers
 * through DMA.
 */
struct nandc_regs {
	__le32 cmd;
	__le32 addr0;
	__le32 addr1;
	__le32 chip_sel;
	__le32 exec;

	__le32 cfg0;
	__le32 cfg1;
	__le32 ecc_bch_cfg;

	__le32 clrflashstatus;
	__le32 clrreadstatus;

	__le32 cmd1;
	__le32 vld;

	__le32 orig_cmd1;
	__le32 orig_vld;

	__le32 ecc_buf_cfg;
};

/*
 * NAND controller data struct
 *
 * @controller:			base controller structure
 * @host_list:			list containing all the chips attached to the
 *				controller
 * @dev:			parent device
 * @base:			MMIO base
 * @base_dma:			physical base address of controller registers
 * @core_clk:			controller clock
 * @aon_clk:			another controller clock
 *
 * @chan:			dma channel
 * @cmd_crci:			ADM DMA CRCI for command flow control
 * @data_crci:			ADM DMA CRCI for data flow control
 * @desc_list:			DMA descriptor list (list of desc_infos)
 *
 * @data_buffer:		our local DMA buffer for page read/writes,
 *				used when we can't use the buffer provided
 *				by upper layers directly
 * @buf_size/count/start:	markers for chip->read_buf/write_buf functions
 * @reg_read_buf:		local buffer for reading back registers via DMA
 * @reg_read_pos:		marker for data read in reg_read_buf
 *
 * @regs:			a contiguous chunk of memory for DMA register
 *				writes. contains the register values to be
 *				written to controller
 * @cmd1/vld:			some fixed controller register values
 * @ecc_modes:			supported ECC modes by the current controller,
 *				initialized via DT match data
 */
struct qcom_nand_controller {
	struct nand_hw_control controller;
	struct list_head host_list;

	struct device *dev;

	void __iomem *base;
	dma_addr_t base_dma;

	struct clk *core_clk;
	struct clk *aon_clk;

	struct dma_chan *chan;
	unsigned int cmd_crci;
	unsigned int data_crci;
	struct list_head desc_list;

	u8 *data_buffer;
	int buf_size;
	int buf_count;
	int buf_start;

	__le32 *reg_read_buf;
	int reg_read_pos;

	struct nandc_regs *regs;

	u32 cmd1, vld;
	u32 ecc_modes;
};

/*
 * NAND chip structure
 *
 * @chip:			base NAND chip structure
 * @node:			list node to add itself to host_list in
 *				qcom_nand_controller
 *
 * @cs:				chip select value for this chip
 * @cw_size:			the number of bytes in a single step/codeword
 *				of a page, consisting of all data, ecc, spare
 *				and reserved bytes
 * @cw_data:			the number of bytes within a codeword protected
 *				by ECC
 * @use_ecc:			request the controller to use ECC for the
 *				upcoming read/write
 * @bch_enabled:		flag to tell whether BCH ECC mode is used
 * @ecc_bytes_hw:		ECC bytes used by controller hardware for this
 *				chip
 * @status:			value to be returned if NAND_CMD_STATUS command
 *				is executed
 * @last_command:		keeps track of last command on this chip. used
 *				for reading correct status
 *
 * @cfg0, cfg1, cfg0_raw..:	NANDc register configurations needed for
 *				ecc/non-ecc mode for the current nand flash
 *				device
 */
struct qcom_nand_host {
	struct nand_chip chip;
	struct list_head node;

	int cs;
	int cw_size;
	int cw_data;
	bool use_ecc;
	bool bch_enabled;
	int ecc_bytes_hw;
	int spare_bytes;
	int bbm_size;
	u8 status;
	int last_command;

	u32 cfg0, cfg1;
	u32 cfg0_raw, cfg1_raw;
	u32 ecc_buf_cfg;
	u32 ecc_bch_cfg;
	u32 clrflashstatus;
	u32 clrreadstatus;
};
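
/* helpers to fetch our container structures and to access controller registers */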
static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
	return container_of(chip, struct qcom_nand_host, chip);
}

static inline struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
	return container_of(chip->controller, struct qcom_nand_controller,
			    controller);
}

static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
	return ioread32(nandc->base + offset);
}

static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
			       u32 val)
{
	iowrite32(val, nandc->base + offset);
}
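
/*
 * returns the address of the shadow copy of controller register @offset in
 * nandc->regs, or NULL if the register isn't shadowed. write_reg_dma() later
 * DMAs these shadow values into the real registers.
 */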
static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
{
	switch (offset) {
	case NAND_FLASH_CMD:
		return &regs->cmd;
	case NAND_ADDR0:
		return &regs->addr0;
	case NAND_ADDR1:
		return &regs->addr1;
	case NAND_FLASH_CHIP_SELECT:
		return &regs->chip_sel;
	case NAND_EXEC_CMD:
		return &regs->exec;
	case NAND_FLASH_STATUS:
		return &regs->clrflashstatus;
	case NAND_DEV0_CFG0:
		return &regs->cfg0;
	case NAND_DEV0_CFG1:
		return &regs->cfg1;
	case NAND_DEV0_ECC_CFG:
		return &regs->ecc_bch_cfg;
	case NAND_READ_STATUS:
		return &regs->clrreadstatus;
	case NAND_DEV_CMD1:
		return &regs->cmd1;
	case NAND_DEV_CMD1_RESTORE:
		return &regs->orig_cmd1;
	case NAND_DEV_CMD_VLD:
		return &regs->vld;
	case NAND_DEV_CMD_VLD_RESTORE:
		return &regs->orig_vld;
	case NAND_EBI2_ECC_BUF_CFG:
		return &regs->ecc_buf_cfg;
	default:
		return NULL;
	}
}
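
/*
 * stores @val in the shadow copy of register @offset. the value only reaches
 * the hardware once the matching write_reg_dma() descriptor executes.
 */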
static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
			  u32 val)
{
	struct nandc_regs *regs = nandc->regs;
	__le32 *reg;

	reg = offset_to_nandc_reg(regs, offset);

	if (reg)
		*reg = cpu_to_le32(val);
}

/* helper to configure address register values */
static void set_address(struct qcom_nand_host *host, u16 column, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (chip->options & NAND_BUSWIDTH_16)
		column >>= 1;

	nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
	nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
}

/*
 * update_rw_regs:	set up read/write register values, these will be
 *			written to the NAND controller registers via DMA
 *
 * @num_cw:		number of steps for the read/write operation
 * @read:		read or write operation
 */
static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u32 cmd, cfg0, cfg1, ecc_bch_cfg;

	if (read) {
		if (host->use_ecc)
			cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
		else
			cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
	} else {
		cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
	}

	if (host->use_ecc) {
		cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1;
		ecc_bch_cfg = host->ecc_bch_cfg;
	} else {
		cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1_raw;
		ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
	}

	nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
	nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
	nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
}
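
/*
 * maps @vaddr for DMA and queues a slave transfer of @size bytes between
 * memory and the controller address @reg_off. when @flow_control is set,
 * the transfer is paced by the ADM CRCI (data_crci for reads, cmd_crci for
 * writes).
 */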
static int prep_dma_desc(struct qcom_nand_controller *nandc, bool read,
			 int reg_off, const void *vaddr, int size,
			 bool flow_control)
{
	struct desc_info *desc;
	struct dma_async_tx_descriptor *dma_desc;
	struct scatterlist *sgl;
	struct dma_slave_config slave_conf;
	enum dma_transfer_direction dir_eng;
	int ret;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	sgl = &desc->sgl;

	sg_init_one(sgl, vaddr, size);

	if (read) {
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	} else {
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	}

	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
	if (ret == 0) {
		ret = -ENOMEM;
		goto err;
	}

	memset(&slave_conf, 0x00, sizeof(slave_conf));

	slave_conf.device_fc = flow_control;
	if (read) {
		slave_conf.src_maxburst = 16;
		slave_conf.src_addr = nandc->base_dma + reg_off;
		slave_conf.slave_id = nandc->data_crci;
	} else {
		slave_conf.dst_maxburst = 16;
		slave_conf.dst_addr = nandc->base_dma + reg_off;
		slave_conf.slave_id = nandc->cmd_crci;
	}

	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
	if (ret) {
		dev_err(nandc->dev, "failed to configure dma channel\n");
		goto err_unmap;
	}

	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
	if (!dma_desc) {
		dev_err(nandc->dev, "failed to prepare desc\n");
		ret = -EINVAL;
		goto err_unmap;
	}

	desc->dma_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
err_unmap:
	dma_unmap_sg(nandc->dev, sgl, 1, desc->dir);
err:
	kfree(desc);

	return ret;
}

/*
 * read_reg_dma:	prepares a descriptor to read a given number of
 *			contiguous registers to the reg_read_buf pointer
 *
 * @first:		offset of the first register in the contiguous block
 * @num_regs:		number of registers to read
 */
static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
			int num_regs)
{
	bool flow_control = false;
	void *vaddr;
	int size;

	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
		flow_control = true;

	size = num_regs * sizeof(u32);
	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
	nandc->reg_read_pos += num_regs;

	return prep_dma_desc(nandc, true, first, vaddr, size, flow_control);
}

/*
 * write_reg_dma:	prepares a descriptor to write a given number of
 *			contiguous registers
 *
 * @first:		offset of the first register in the contiguous block
 * @num_regs:		number of registers to write
 */
static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
			 int num_regs)
{
	bool flow_control = false;
	struct nandc_regs *regs = nandc->regs;
	void *vaddr;
	int size;

	vaddr = offset_to_nandc_reg(regs, first);

	if (first == NAND_FLASH_CMD)
		flow_control = true;

	if (first == NAND_DEV_CMD1_RESTORE)
		first = NAND_DEV_CMD1;

	if (first == NAND_DEV_CMD_VLD_RESTORE)
		first = NAND_DEV_CMD_VLD;

	size = num_regs * sizeof(u32);

	return prep_dma_desc(nandc, false, first, vaddr, size, flow_control);
}

/*
 * read_data_dma:	prepares a DMA descriptor to transfer data from the
 *			controller's internal buffer to the buffer 'vaddr'
 *
 * @reg_off:		offset within the controller's data buffer
 * @vaddr:		virtual address of the buffer we want to write to
 * @size:		DMA transaction size in bytes
 */
static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			 const u8 *vaddr, int size)
{
	return prep_dma_desc(nandc, true, reg_off, vaddr, size, false);
}

/*
 * write_data_dma:	prepares a DMA descriptor to transfer data from
 *			'vaddr' to the controller's internal buffer
 *
 * @reg_off:		offset within the controller's data buffer
 * @vaddr:		virtual address of the buffer we want to read from
 * @size:		DMA transaction size in bytes
 */
static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			  const u8 *vaddr, int size)
{
	return prep_dma_desc(nandc, false, reg_off, vaddr, size, false);
}
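
/*
 * the multi-register writes below rely on the shadow registers in nandc_regs
 * being laid out in the same order as the controller registers: e.g.
 * write_reg_dma(nandc, NAND_FLASH_CMD, 3) sends the shadow cmd, addr0 and
 * addr1 values to the three consecutive registers starting at NAND_FLASH_CMD
 */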

/*
 * helper to prepare dma descriptors to configure registers needed for reading a
 * codeword/step in a page
 */
static void config_cw_read(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_FLASH_CMD, 3);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);

	write_reg_dma(nandc, NAND_EXEC_CMD, 1);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 2);
	read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1);
}

/*
 * helpers to prepare dma descriptors used to configure registers needed for
 * writing a codeword/step in a page
 */
static void config_cw_write_pre(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_FLASH_CMD, 3);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);
}

static void config_cw_write_post(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_EXEC_CMD, 1);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
	write_reg_dma(nandc, NAND_READ_STATUS, 1);
}

/*
 * the following functions are used within chip->cmdfunc() to perform different
 * NAND_CMD_* commands
 */

/* sets up descriptors for NAND_CMD_PARAM */
static int nandc_param(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/*
	 * NAND_CMD_PARAM is called before we know much about the FLASH chip
	 * in use. we configure the controller to perform a raw read of 512
	 * bytes to read onfi params
	 */
	nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, 0);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
					| 512 << UD_SIZE_BYTES
					| 5 << NUM_ADDR_CYCLES
					| 0 << SPARE_SIZE_BYTES);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
					| 0 << CS_ACTIVE_BSY
					| 17 << BAD_BLOCK_BYTE_NUM
					| 1 << BAD_BLOCK_IN_SPARE_AREA
					| 2 << WR_RD_BSY_GAP
					| 0 << WIDE_FLASH
					| 1 << DEV0_CFG1_ECC_DISABLE);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);

	/* configure CMD1 and VLD for ONFI param probing */
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
		      (nandc->vld & ~READ_START_VLD));
	nandc_set_reg(nandc, NAND_DEV_CMD1,
		      (nandc->cmd1 & ~(0xFF << READ_ADDR))
		      | NAND_CMD_PARAM << READ_ADDR);

	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);

	write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1);
	write_reg_dma(nandc, NAND_DEV_CMD1, 1);

	nandc->buf_count = 512;
	memset(nandc->data_buffer, 0xff, nandc->buf_count);

	config_cw_read(nandc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		      nandc->buf_count);

	/* restore CMD1 and VLD regs */
	write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1);
	write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1);

	return 0;
}

/* sets up descriptors for NAND_CMD_ERASE1 */
static int erase_block(struct qcom_nand_host *host, int page_addr)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD,
		      BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, page_addr);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_DEV0_CFG0,
		      host->cfg0_raw & ~(7 << CW_PER_PAGE));
	nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);

	write_reg_dma(nandc, NAND_FLASH_CMD, 3);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 2);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
	write_reg_dma(nandc, NAND_READ_STATUS, 1);

	return 0;
}

/* sets up descriptors for NAND_CMD_READID */
static int read_id(struct qcom_nand_host *host, int column)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (column == -1)
		return 0;

	nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
	nandc_set_reg(nandc, NAND_ADDR0, column);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 4);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1);

	read_reg_dma(nandc, NAND_READ_ID, 1);

	return 0;
}

/* sets up descriptors for NAND_CMD_RESET */
static int reset(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1);

	return 0;
}

/* helpers to submit/free our list of dma descriptors */
static int submit_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc;
	dma_cookie_t cookie = 0;

	list_for_each_entry(desc, &nandc->desc_list, node)
		cookie = dmaengine_submit(desc->dma_desc);

	if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
		return -ETIMEDOUT;

	return 0;
}

static void free_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc, *n;

	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
		list_del(&desc->node);
		dma_unmap_sg(nandc->dev, &desc->sgl, 1, desc->dir);
		kfree(desc);
	}
}

/* reset the register read buffer for next NAND operation */
static void clear_read_regs(struct qcom_nand_controller *nandc)
{
	nandc->reg_read_pos = 0;
	memset(nandc->reg_read_buf, 0,
	       MAX_REG_RD * sizeof(*nandc->reg_read_buf));
}
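
/*
 * resets the per-command state (buffer markers, ECC flag) and records
 * @command so that read_byte() and post_command() can act on it later
 */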
static void pre_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc->buf_count = 0;
	nandc->buf_start = 0;
	host->use_ecc = false;
	host->last_command = command;

	clear_read_regs(nandc);
}

/*
 * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
 * privately maintained status byte, this status byte can be read after
 * NAND_CMD_STATUS is called
 */
static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int num_cw;
	int i;

	num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;

	for (i = 0; i < num_cw; i++) {
		u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);

		if (flash_status & FS_MPU_ERR)
			host->status &= ~NAND_STATUS_WP;

		if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
						 (flash_status &
						  FS_DEVICE_STS_ERR)))
			host->status |= NAND_STATUS_FAIL;
	}
}

static void post_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	switch (command) {
	case NAND_CMD_READID:
		memcpy(nandc->data_buffer, nandc->reg_read_buf,
		       nandc->buf_count);
		break;
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
		parse_erase_write_errors(host, command);
		break;
	default:
		break;
	}
}

/*
 * Implements chip->cmdfunc. It's only used for a limited set of commands.
 * The rest of the commands wouldn't be called by upper layers. For example,
 * NAND_CMD_READOOB would never be called because we have our own versions
 * of read_oob ops for nand_ecc_ctrl.
 */
static void qcom_nandc_command(struct mtd_info *mtd, unsigned int command,
			       int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	bool wait = false;
	int ret = 0;

	pre_command(host, command);

	switch (command) {
	case NAND_CMD_RESET:
		ret = reset(host);
		wait = true;
		break;

	case NAND_CMD_READID:
		nandc->buf_count = 4;
		ret = read_id(host, column);
		wait = true;
		break;

	case NAND_CMD_PARAM:
		ret = nandc_param(host);
		wait = true;
		break;

	case NAND_CMD_ERASE1:
		ret = erase_block(host, page_addr);
		wait = true;
		break;

	case NAND_CMD_READ0:
		/* we read the entire page for now */
		WARN_ON(column != 0);

		host->use_ecc = true;
		set_address(host, 0, page_addr);
		update_rw_regs(host, ecc->steps, true);
		break;

	case NAND_CMD_SEQIN:
		WARN_ON(column != 0);
		set_address(host, 0, page_addr);
		break;

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_NONE:
	default:
		break;
	}

	if (ret) {
		dev_err(nandc->dev, "failure executing command %d\n",
			command);
		free_descs(nandc);
		return;
	}

	if (wait) {
		ret = submit_descs(nandc);
		if (ret)
			dev_err(nandc->dev,
				"failure submitting descs for command %d\n",
				command);
	}

	free_descs(nandc);

	post_command(host, command);
}

/*
 * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
 * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
 *
 * when using RS ECC, the HW reports the same errors when reading an erased CW,
 * but it notifies that it is an erased CW by placing special characters at
 * certain offsets in the buffer.
 *
 * verify if the page is erased or not, and fix up the page for RS ECC by
 * replacing the special characters with 0xff.
 */
static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
{
	u8 empty1, empty2;

	/*
	 * an erased page flags an error in NAND_FLASH_STATUS, check if the
	 * page is erased by looking for 0x54s at offsets 3 and 175 from the
	 * beginning of each codeword
	 */
	empty1 = data_buf[3];
	empty2 = data_buf[175];

	/*
	 * if the erased codeword markers exist, override them with 0xffs
	 */
	if ((empty1 == 0x54 && empty2 == 0xff) ||
	    (empty1 == 0xff && empty2 == 0x54)) {
		data_buf[3] = 0xff;
		data_buf[175] = 0xff;
	}

	/*
	 * check if the entire chunk contains 0xffs or not. if it doesn't,
	 * then restore the original values at the special offsets
	 */
	if (memchr_inv(data_buf, 0xff, data_len)) {
		data_buf[3] = empty1;
		data_buf[175] = empty2;

		return false;
	}

	return true;
}
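
/*
 * per-codeword status triplet, filled in the order config_cw_read() reads
 * the registers back: NAND_FLASH_STATUS, NAND_BUFFER_STATUS and
 * NAND_ERASED_CW_DETECT_STATUS
 */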
struct read_stats {
	__le32 flash;
	__le32 buffer;
	__le32 erased_cw;
};

/*
 * reads back status registers set by the controller to notify page read
 * errors. this is equivalent to what 'ecc->correct()' would do.
 */
static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
			     u8 *oob_buf)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	unsigned int max_bitflips = 0;
	struct read_stats *buf;
	int i;

	buf = (struct read_stats *)nandc->reg_read_buf;

	for (i = 0; i < ecc->steps; i++, buf++) {
		u32 flash, buffer, erased_cw;
		int data_len, oob_len;

		if (i == (ecc->steps - 1)) {
			data_len = ecc->size - ((ecc->steps - 1) << 2);
			oob_len = ecc->steps << 2;
		} else {
			data_len = host->cw_data;
			oob_len = 0;
		}

		flash = le32_to_cpu(buf->flash);
		buffer = le32_to_cpu(buf->buffer);
		erased_cw = le32_to_cpu(buf->erased_cw);

		if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
			bool erased;

			/* ignore erased codeword errors */
			if (host->bch_enabled) {
				erased = (erased_cw & ERASED_CW) == ERASED_CW ?
					 true : false;
			} else {
				erased = erased_chunk_check_and_fixup(data_buf,
								      data_len);
			}

			if (erased) {
				data_buf += data_len;
				if (oob_buf)
					oob_buf += oob_len + ecc->bytes;
				continue;
			}

			if (buffer & BS_UNCORRECTABLE_BIT) {
				int ret, ecclen, extraooblen;
				void *eccbuf;

				eccbuf = oob_buf ? oob_buf + oob_len : NULL;
				ecclen = oob_buf ? host->ecc_bytes_hw : 0;
				extraooblen = oob_buf ? oob_len : 0;

				/*
				 * make sure it isn't an erased page reported
				 * as not-erased by HW because of a few bitflips
				 */
				ret = nand_check_erased_ecc_chunk(data_buf,
					data_len, eccbuf, ecclen, oob_buf,
					extraooblen, ecc->strength);
				if (ret < 0) {
					mtd->ecc_stats.failed++;
				} else {
					mtd->ecc_stats.corrected += ret;
					max_bitflips =
						max_t(unsigned int,
						      max_bitflips, ret);
				}
			}
		} else {
			unsigned int stat;

			stat = buffer & BS_CORRECTABLE_ERR_MSK;
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max(max_bitflips, stat);
		}

		data_buf += data_len;
		if (oob_buf)
			oob_buf += oob_len + ecc->bytes;
	}

	return max_bitflips;
}

/*
 * helper to perform the actual page read operation, used by ecc->read_page(),
 * ecc->read_oob()
 */
static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
			 u8 *oob_buf)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int i, ret;

	/* queue cmd descs for each codeword */
	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw + host->spare_bytes;
		}

		config_cw_read(nandc);

		if (data_buf)
			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
				      data_size);

		/*
		 * when ecc is enabled, the controller doesn't read the real
		 * or dummy bad block markers in each chunk. To maintain a
		 * consistent layout across RAW and ECC reads, we just
		 * leave the real/dummy BBM offsets empty (i.e, filled with
		 * 0xffs)
		 */
		if (oob_buf) {
			int j;

			for (j = 0; j < host->bbm_size; j++)
				*oob_buf++ = 0xff;

			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
				      oob_buf, oob_size);
		}

		if (data_buf)
			data_buf += data_size;
		if (oob_buf)
			oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to read page/oob\n");

	free_descs(nandc);

	return ret;
}

/*
 * a helper that copies the last step/codeword of a page (containing free oob)
 * into our local buffer
 */
static int copy_last_cw(struct qcom_nand_host *host, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int size;
	int ret;

	clear_read_regs(nandc);

	size = host->use_ecc ? host->cw_data : host->cw_size;

	/* prepare a clean read buffer */
	memset(nandc->data_buffer, 0xff, size);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, true);

	config_cw_read(nandc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size);

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failed to copy last codeword\n");

	free_descs(nandc);

	return ret;
}

/* implements ecc->read_page() */
static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u8 *data_buf, *oob_buf = NULL;
	int ret;

	data_buf = buf;
	oob_buf = oob_required ? chip->oob_poi : NULL;

	ret = read_page_ecc(host, data_buf, oob_buf);
	if (ret) {
		dev_err(nandc->dev, "failure to read page\n");
		return ret;
	}

	return parse_read_errors(host, data_buf, oob_buf);
}

/* implements ecc->read_page_raw() */
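/*
 * with ECC disabled, the (dummy or real) bad block byte(s) land in the middle
 * of each raw codeword, so every codeword is transferred in four pieces: the
 * data before the BBM position (data_size1), the BBM itself (oob_size1), the
 * remaining data (data_size2) and the spare/ECC bytes (oob_size2)
 */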
static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
				    struct nand_chip *chip, uint8_t *buf,
				    int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u8 *data_buf, *oob_buf;
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int i, ret;

	data_buf = buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = false;
	update_rw_regs(host, ecc->steps, true);

	for (i = 0; i < ecc->steps; i++) {
		int data_size1, data_size2, oob_size1, oob_size2;
		int reg_off = FLASH_BUF_ACC;

		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
		oob_size1 = host->bbm_size;

		if (i == (ecc->steps - 1)) {
			data_size2 = ecc->size - data_size1 -
				     ((ecc->steps - 1) << 2);
			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
				    host->spare_bytes;
		} else {
			data_size2 = host->cw_data - data_size1;
			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
		}

		config_cw_read(nandc);

		read_data_dma(nandc, reg_off, data_buf, data_size1);
		reg_off += data_size1;
		data_buf += data_size1;

		read_data_dma(nandc, reg_off, oob_buf, oob_size1);
		reg_off += oob_size1;
		oob_buf += oob_size1;

		read_data_dma(nandc, reg_off, data_buf, data_size2);
		reg_off += data_size2;
		data_buf += data_size2;

		read_data_dma(nandc, reg_off, oob_buf, oob_size2);
		oob_buf += oob_size2;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to read raw page\n");

	free_descs(nandc);

	return ret;
}

/* implements ecc->read_oob() */
static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			       int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret;

	clear_read_regs(nandc);

	host->use_ecc = true;
	set_address(host, 0, page);
	update_rw_regs(host, ecc->steps, true);

	ret = read_page_ecc(host, NULL, chip->oob_poi);
	if (ret)
		dev_err(nandc->dev, "failure to read oob\n");

	return ret;
}

/* implements ecc->write_page() */
static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	clear_read_regs(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = true;
	update_rw_regs(host, ecc->steps, false);

	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = ecc->bytes;
		}

		config_cw_write_pre(nandc);

		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size);

		/*
		 * when ECC is enabled, we don't really need to write anything
		 * to oob for the first n - 1 codewords since these oob regions
		 * just contain ECC bytes that are written by the controller
		 * itself. For the last codeword, we skip the bbm positions and
		 * write to the free oob area.
		 */
		if (i == (ecc->steps - 1)) {
			oob_buf += host->bbm_size;

			write_data_dma(nandc, FLASH_BUF_ACC + data_size,
				       oob_buf, oob_size);
		}

		config_cw_write_post(nandc);

		data_buf += data_size;
		oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write page\n");

	free_descs(nandc);

	return ret;
}

/* implements ecc->write_page_raw() */
static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
				     struct nand_chip *chip, const uint8_t *buf,
				     int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	clear_read_regs(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = false;
	update_rw_regs(host, ecc->steps, false);

	for (i = 0; i < ecc->steps; i++) {
		int data_size1, data_size2, oob_size1, oob_size2;
		int reg_off = FLASH_BUF_ACC;

		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
		oob_size1 = host->bbm_size;

		if (i == (ecc->steps - 1)) {
			data_size2 = ecc->size - data_size1 -
				     ((ecc->steps - 1) << 2);
			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
				    host->spare_bytes;
		} else {
			data_size2 = host->cw_data - data_size1;
			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
		}

		config_cw_write_pre(nandc);

		write_data_dma(nandc, reg_off, data_buf, data_size1);
		reg_off += data_size1;
		data_buf += data_size1;

		write_data_dma(nandc, reg_off, oob_buf, oob_size1);
		reg_off += oob_size1;
		oob_buf += oob_size1;

		write_data_dma(nandc, reg_off, data_buf, data_size2);
		reg_off += data_size2;
		data_buf += data_size2;

		write_data_dma(nandc, reg_off, oob_buf, oob_size2);
		oob_buf += oob_size2;

		config_cw_write_post(nandc);
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write raw page\n");

	free_descs(nandc);

	return ret;
}

/*
 * implements ecc->write_oob()
 *
 * the NAND controller cannot write only data or only oob within a codeword,
 * since ecc is calculated for the combined codeword. we first copy the
 * entire contents for the last codeword (data + oob), replace the old oob
 * with the new one in chip->oob_poi, and then write the entire codeword.
 * this read-copy-write operation results in a slight performance loss.
 */
static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *oob = chip->oob_poi;
	int data_size, oob_size;
	int ret, status = 0;

	host->use_ecc = true;

	ret = copy_last_cw(host, page);
	if (ret)
		return ret;

	clear_read_regs(nandc);

	/* calculate the data and oob size for the last codeword/step */
	data_size = ecc->size - ((ecc->steps - 1) << 2);
	oob_size = mtd->oobavail;

	/* override new oob content to last codeword */
	mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
				    0, mtd->oobavail);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_cw_write_pre(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		       data_size + oob_size);
	config_cw_write_post(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to write oob\n");
		return -EIO;
	}

	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	status = chip->waitfunc(mtd, chip);

	return status & NAND_STATUS_FAIL ? -EIO : 0;
}
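
/*
 * checks whether the block containing @ofs is bad, by doing a raw read of
 * the last codeword and inspecting its bad block marker byte(s)
 */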
static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret, bbpos, bad = 0;
	u32 flash_status;

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/*
	 * configure registers for a raw sub page read, the address is set to
	 * the beginning of the last codeword, we don't care about reading ecc
	 * portion of oob. we just want the first few bytes from this codeword
	 * that contain the BBM
	 */
	host->use_ecc = false;

	ret = copy_last_cw(host, page);
	if (ret)
		goto err;

	flash_status = le32_to_cpu(nandc->reg_read_buf[0]);

	if (flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
		dev_warn(nandc->dev, "error when trying to read BBM\n");
		goto err;
	}

	bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);

	bad = nandc->data_buffer[bbpos] != 0xff;

	if (chip->options & NAND_BUSWIDTH_16)
		bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
err:
	return bad;
}

static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret, status = 0;

	clear_read_regs(nandc);

	/*
	 * to mark the BBM as bad, we flash the entire last codeword with 0s.
	 * we don't care about the rest of the content in the codeword since
	 * we aren't going to use this block again
	 */
	memset(nandc->data_buffer, 0x00, host->cw_size);

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/* prepare write */
	host->use_ecc = false;
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_cw_write_pre(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, host->cw_size);
	config_cw_write_post(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to update BBM\n");
		return -EIO;
	}

	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	status = chip->waitfunc(mtd, chip);

	return status & NAND_STATUS_FAIL ? -EIO : 0;
}
  1285. /*
  1286. * the three functions below implement chip->read_byte(), chip->read_buf()
  1287. * and chip->write_buf() respectively. these aren't used for
  1288. * reading/writing page data, they are used for smaller data like reading
  1289. * id, status etc
  1290. */
  1291. static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd)
  1292. {
  1293. struct nand_chip *chip = mtd_to_nand(mtd);
  1294. struct qcom_nand_host *host = to_qcom_nand_host(chip);
  1295. struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
  1296. u8 *buf = nandc->data_buffer;
  1297. u8 ret = 0x0;
  1298. if (host->last_command == NAND_CMD_STATUS) {
  1299. ret = host->status;
  1300. host->status = NAND_STATUS_READY | NAND_STATUS_WP;
  1301. return ret;
  1302. }
  1303. if (nandc->buf_start < nandc->buf_count)
  1304. ret = buf[nandc->buf_start++];
  1305. return ret;
  1306. }

static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);

	memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
	nandc->buf_start += real_len;
}

static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
				 int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);

	memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
	nandc->buf_start += real_len;
}

/* we support only one external chip for now */
static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (chipnr <= 0)
		return;

	dev_warn(nandc->dev, "invalid chip select\n");
}

/*
 * NAND controller page layout info
 *
 * Layout with ECC enabled:
 *
 * |----------------------|  |---------------------------------|
 * |           xx.......yy|  |             *********xx.......yy|
 * |    DATA   xx..ECC..yy|  |    DATA     **SPARE**xx..ECC..yy|
 * |   (516)   xx.......yy|  |  (516-n*4)  **(n*4)**xx.......yy|
 * |           xx.......yy|  |             *********xx.......yy|
 * |----------------------|  |---------------------------------|
 *     codeword 1,2..n-1                  codeword n
 *  <---(528/532 Bytes)-->    <-------(528/532 Bytes)--------->
 *
 * n = Number of codewords in the page
 * . = ECC bytes
 * * = Spare/free bytes
 * x = Unused byte(s)
 * y = Reserved byte(s)
 *
 * 2K page: n = 4, spare = 16 bytes
 * 4K page: n = 8, spare = 32 bytes
 * 8K page: n = 16, spare = 64 bytes
 *
 * the qcom nand controller operates at a sub page/codeword level. each
 * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
 * the number of ECC bytes varies based on the ECC strength and the bus width.
 *
 * the first n - 1 codewords contain 516 bytes of user data each; the
 * remaining 12/16 bytes consist of ECC and reserved data. The nth codeword
 * contains both user data and spare (oobavail) bytes that sum up to 516
 * bytes.
 *
 * When we access a page with ECC enabled, the reserved byte(s) are not
 * accessible at all. When reading, we fill up these unreadable positions
 * with 0xffs. When writing, the controller skips writing the inaccessible
 * bytes.
 *
 * Layout with ECC disabled:
 *
 * |------------------------------|  |---------------------------------------|
 * |         yy          xx.......|  |         bb          *********xx.......|
 * |  DATA1  yy  DATA2   xx..ECC..|  |  DATA1  bb  DATA2   **SPARE**xx..ECC..|
 * | (size1) yy (size2)  xx.......|  | (size1) bb (size2)  **(n*4)**xx.......|
 * |         yy          xx.......|  |         bb          *********xx.......|
 * |------------------------------|  |---------------------------------------|
 *         codeword 1,2..n-1                        codeword n
 *  <-------(528/532 Bytes)------>    <-----------(528/532 Bytes)----------->
 *
 * n = Number of codewords in the page
 * . = ECC bytes
 * * = Spare/free bytes
 * x = Unused byte(s)
 * y = Dummy Bad Block byte(s)
 * b = Real Bad Block byte(s)
 * size1/size2 = function of codeword size and 'n'
 *
 * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
 * width) is now accessible. For the first n - 1 codewords, these are dummy
 * Bad Block Markers. In the last codeword, this position contains the real
 * BBM.
 *
 * In order to have a consistent layout between RAW and ECC modes, we assume
 * the following OOB layout arrangement:
 *
 * |-----------|  |--------------------|
 * |yyxx.......|  |bb*********xx.......|
 * |yyxx..ECC..|  |bb*FREEOOB*xx..ECC..|
 * |yyxx.......|  |bb*********xx.......|
 * |yyxx.......|  |bb*********xx.......|
 * |-----------|  |--------------------|
 *  first n - 1       nth OOB region
 *  OOB regions
 *
 * n = Number of codewords in the page
 * . = ECC bytes
 * * = FREE OOB bytes
 * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
 * x = Unused byte(s)
 * b = Real bad block byte(s) (inaccessible when ECC enabled)
 *
 * This layout is read as is when ECC is disabled. When ECC is enabled, the
 * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
 * and assumed as 0xffs when we read a page/oob. The ECC, unused and
 * dummy/real bad block bytes are grouped as ecc bytes (i.e., ecc->bytes is
 * the sum of the three).
 */
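
/*
 * Worked example for the layout above (illustrative): a 2K page on an x8
 * bus with RS 4 bit ECC has n = 4 codewords of 528 bytes each. Codewords
 * 1..3 hold 516 data bytes plus a 12 byte tail (10 ECC bytes, 1 spare byte,
 * 1 dummy BBM byte); codeword 4 holds 516 - 4 * 4 = 500 data bytes, 16 free
 * OOB bytes (n * 4) and the same 12 byte tail, with the real BBM in place
 * of the dummy one. 4 * 528 = 2112 = 2048 + 64, i.e. exactly
 * writesize + oobsize.
 */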

static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
				   struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section > 1)
		return -ERANGE;

	if (!section) {
		oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
				    host->bbm_size;
		oobregion->offset = 0;
	} else {
		oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
		oobregion->offset = mtd->oobsize - oobregion->length;
	}

	return 0;
}

static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
				    struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section)
		return -ERANGE;

	oobregion->length = ecc->steps * 4;
	oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;

	return 0;
}
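
/*
 * For the 2K / RS 4 bit / x8 example (illustrative): ecc->bytes = 12,
 * ecc->steps = 4, bbm_size = 1, ecc_bytes_hw = 10, spare_bytes = 1 and
 * oobsize = 64. The callbacks above then report ECC section 0 at offset 0,
 * length 12 * 3 + 1 = 37; the free region at offset 37, length 4 * 4 = 16;
 * and ECC section 1 at offset 64 - 11 = 53, length 10 + 1 = 11. The three
 * regions tile the 64 byte OOB exactly.
 */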

static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
	.ecc = qcom_nand_ooblayout_ecc,
	.free = qcom_nand_ooblayout_free,
};

static int qcom_nand_host_setup(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int cwperpage, bad_block_byte;
	bool wide_bus;
	int ecc_mode = 1;

	/*
	 * the controller requires that each step consist of 512 bytes of
	 * data. bail out if DT has populated a wrong step size.
	 */
	if (ecc->size != NANDC_STEP_SIZE) {
		dev_err(nandc->dev, "invalid ecc size\n");
		return -EINVAL;
	}

	wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;

	if (ecc->strength >= 8) {
		/* 8 bit ECC defaults to BCH ECC on all platforms */
		host->bch_enabled = true;
		ecc_mode = 1;

		if (wide_bus) {
			host->ecc_bytes_hw = 14;
			host->spare_bytes = 0;
			host->bbm_size = 2;
		} else {
			host->ecc_bytes_hw = 13;
			host->spare_bytes = 2;
			host->bbm_size = 1;
		}
	} else {
		/*
		 * if the controller supports BCH for 4 bit ECC, the
		 * controller uses fewer bytes for ECC. If RS is used, the
		 * number of ECC bytes is always 10.
		 */
		if (nandc->ecc_modes & ECC_BCH_4BIT) {
			/* BCH */
			host->bch_enabled = true;
			ecc_mode = 0;

			if (wide_bus) {
				host->ecc_bytes_hw = 8;
				host->spare_bytes = 2;
				host->bbm_size = 2;
			} else {
				host->ecc_bytes_hw = 7;
				host->spare_bytes = 4;
				host->bbm_size = 1;
			}
		} else {
			/* RS */
			host->ecc_bytes_hw = 10;

			if (wide_bus) {
				host->spare_bytes = 0;
				host->bbm_size = 2;
			} else {
				host->spare_bytes = 1;
				host->bbm_size = 1;
			}
		}
	}

	/*
	 * we consider ecc->bytes as the sum of all the non-data content in a
	 * step. It gives us a clean representation of the oob area (even if
	 * all the bytes aren't used for ECC). It is always 16 bytes for 8 bit
	 * ECC and 12 bytes for 4 bit ECC
	 */
	ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;

	ecc->read_page = qcom_nandc_read_page;
	ecc->read_page_raw = qcom_nandc_read_page_raw;
	ecc->read_oob = qcom_nandc_read_oob;
	ecc->write_page = qcom_nandc_write_page;
	ecc->write_page_raw = qcom_nandc_write_page_raw;
	ecc->write_oob = qcom_nandc_write_oob;

	ecc->mode = NAND_ECC_HW;

	mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);

	cwperpage = mtd->writesize / ecc->size;

	/*
	 * DATA_UD_BYTES varies based on whether the read/write command
	 * protects spare data with ECC too. We protect spare data by default,
	 * so we set it to main + spare data, which are 512 and 4 bytes
	 * respectively.
	 */
	host->cw_data = 516;

	/*
	 * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
	 * for 8 bit ECC
	 */
	host->cw_size = host->cw_data + ecc->bytes;
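
	/*
	 * Quick check of the arithmetic above (illustrative): for 8 bit ECC,
	 * ecc->bytes = 13 + 2 + 1 (x8) or 14 + 0 + 2 (x16) = 16, so
	 * cw_size = 516 + 16 = 532. For 4 bit ECC, every BCH/RS and bus
	 * width combination above sums to 12, so cw_size = 516 + 12 = 528.
	 */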

	if (ecc->bytes * (mtd->writesize / ecc->size) > mtd->oobsize) {
		dev_err(nandc->dev, "ecc data doesn't fit in OOB area\n");
		return -EINVAL;
	}

	bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
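
	/*
	 * Illustrative value: for a 2K page with 528 byte codewords this is
	 * 2048 - 528 * 3 + 1 = 465, one past the zero-based offset computed
	 * in qcom_nandc_block_bad(), which suggests the BAD_BLOCK_BYTE_NUM
	 * field counts bytes starting from 1.
	 */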

	host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
			| host->cw_data << UD_SIZE_BYTES
			| 0 << DISABLE_STATUS_AFTER_WRITE
			| 5 << NUM_ADDR_CYCLES
			| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
			| 0 << STATUS_BFR_READ
			| 1 << SET_RD_MODE_AFTER_STATUS
			| host->spare_bytes << SPARE_SIZE_BYTES;

	host->cfg1 = 7 << NAND_RECOVERY_CYCLES
			| 0 << CS_ACTIVE_BSY
			| bad_block_byte << BAD_BLOCK_BYTE_NUM
			| 0 << BAD_BLOCK_IN_SPARE_AREA
			| 2 << WR_RD_BSY_GAP
			| wide_bus << WIDE_FLASH
			| host->bch_enabled << ENABLE_BCH_ECC;

	host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
			| host->cw_size << UD_SIZE_BYTES
			| 5 << NUM_ADDR_CYCLES
			| 0 << SPARE_SIZE_BYTES;

	host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
			| 0 << CS_ACTIVE_BSY
			| 17 << BAD_BLOCK_BYTE_NUM
			| 1 << BAD_BLOCK_IN_SPARE_AREA
			| 2 << WR_RD_BSY_GAP
			| wide_bus << WIDE_FLASH
			| 1 << DEV0_CFG1_ECC_DISABLE;

	host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
			| 0 << ECC_SW_RESET
			| host->cw_data << ECC_NUM_DATA_BYTES
			| 1 << ECC_FORCE_CLK_OPEN
			| ecc_mode << ECC_MODE
			| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;

	host->ecc_buf_cfg = 0x203 << NUM_STEPS;

	host->clrflashstatus = FS_READY_BSY_N;
	host->clrreadstatus = 0xc0;

	dev_dbg(nandc->dev,
		"cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
		host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
		host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
		cwperpage);

	return 0;
}

static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
{
	int ret;

	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(nandc->dev, "failed to set DMA mask\n");
		return ret;
	}

	/*
	 * we use the internal buffer for reading ONFI params, reading small
	 * data like ID and status, and performing read-copy-write operations
	 * when writing to a codeword partially. 532 is the maximum possible
	 * size of a codeword for our nand controller
	 */
	nandc->buf_size = 532;

	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
					  GFP_KERNEL);
	if (!nandc->data_buffer)
		return -ENOMEM;

	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
				   GFP_KERNEL);
	if (!nandc->regs)
		return -ENOMEM;

	nandc->reg_read_buf = devm_kzalloc(nandc->dev,
				MAX_REG_RD * sizeof(*nandc->reg_read_buf),
				GFP_KERNEL);
	if (!nandc->reg_read_buf)
		return -ENOMEM;

	nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
	if (!nandc->chan) {
		dev_err(nandc->dev, "failed to request slave channel\n");
		return -ENODEV;
	}

	INIT_LIST_HEAD(&nandc->desc_list);
	INIT_LIST_HEAD(&nandc->host_list);

	nand_hw_control_init(&nandc->controller);

	return 0;
}

static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
{
	dma_release_channel(nandc->chan);
}

/* one time setup of a few nand controller registers */
static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
{
	/* kill onenand */
	nandc_write(nandc, SFLASHC_BURST_CFG, 0);

	nandc_write(nandc, NAND_DEV_CMD_VLD, NAND_DEV_CMD_VLD_VAL);

	/* enable ADM DMA */
	nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);

	/* save the original values of these registers */
	nandc->cmd1 = nandc_read(nandc, NAND_DEV_CMD1);
	nandc->vld = NAND_DEV_CMD_VLD_VAL;

	return 0;
}

static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
			       struct qcom_nand_host *host,
			       struct device_node *dn)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct device *dev = nandc->dev;
	int ret;

	ret = of_property_read_u32(dn, "reg", &host->cs);
	if (ret) {
		dev_err(dev, "can't get chip-select\n");
		return -ENXIO;
	}

	nand_set_flash_node(chip, dn);
	mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
	mtd->owner = THIS_MODULE;
	mtd->dev.parent = dev;

	chip->cmdfunc = qcom_nandc_command;
	chip->select_chip = qcom_nandc_select_chip;
	chip->read_byte = qcom_nandc_read_byte;
	chip->read_buf = qcom_nandc_read_buf;
	chip->write_buf = qcom_nandc_write_buf;

	/*
	 * the bad block marker is readable only when we read the last codeword
	 * of a page with ECC disabled. currently, the nand_base and nand_bbt
	 * helpers don't allow us to read BB from a nand chip with ECC
	 * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
	 * and block_markbad helpers until we permanently switch to using
	 * MTD_OPS_RAW for all drivers (with the help of badblockbits)
	 */
	chip->block_bad = qcom_nandc_block_bad;
	chip->block_markbad = qcom_nandc_block_markbad;

	chip->controller = &nandc->controller;
	chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
			 NAND_SKIP_BBTSCAN;

	/* set up initial status value */
	host->status = NAND_STATUS_READY | NAND_STATUS_WP;

	ret = nand_scan_ident(mtd, 1, NULL);
	if (ret)
		return ret;

	ret = qcom_nand_host_setup(host);
	if (ret)
		return ret;

	ret = nand_scan_tail(mtd);
	if (ret)
		return ret;

	return mtd_device_register(mtd, NULL, 0);
}

/* parse custom DT properties here */
static int qcom_nandc_parse_dt(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct device_node *np = nandc->dev->of_node;
	int ret;

	ret = of_property_read_u32(np, "qcom,cmd-crci", &nandc->cmd_crci);
	if (ret) {
		dev_err(nandc->dev, "command CRCI unspecified\n");
		return ret;
	}

	ret = of_property_read_u32(np, "qcom,data-crci", &nandc->data_crci);
	if (ret) {
		dev_err(nandc->dev, "data CRCI unspecified\n");
		return ret;
	}

	return 0;
}
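
/*
 * A minimal sketch of the DT nodes this driver binds against, pieced
 * together from the properties read above and in probe() (the unit address,
 * clock/DMA specifiers and CRCI numbers are made-up placeholders):
 *
 *	nand-controller@1ac00000 {
 *		compatible = "qcom,ipq806x-nand";
 *		reg = <0x1ac00000 0x800>;
 *		clocks = <&gcc 123>, <&gcc 124>;
 *		clock-names = "core", "aon";
 *		dmas = <&adm_dma 3>;
 *		dma-names = "rxtx";
 *		qcom,cmd-crci = <15>;
 *		qcom,data-crci = <3>;
 *
 *		nand@0 {
 *			compatible = "qcom,nandcs";
 *			reg = <0>;
 *			nand-ecc-strength = <4>;
 *			nand-ecc-step-size = <512>;
 *			nand-bus-width = <8>;
 *		};
 *	};
 */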

static int qcom_nandc_probe(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc;
	struct qcom_nand_host *host;
	const void *dev_data;
	struct device *dev = &pdev->dev;
	struct device_node *dn = dev->of_node, *child;
	struct resource *res;
	int ret;

	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
	if (!nandc)
		return -ENOMEM;

	platform_set_drvdata(pdev, nandc);
	nandc->dev = dev;

	dev_data = of_device_get_match_data(dev);
	if (!dev_data) {
		dev_err(&pdev->dev, "failed to get device data\n");
		return -ENODEV;
	}

	nandc->ecc_modes = (unsigned long)dev_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nandc->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(nandc->base))
		return PTR_ERR(nandc->base);

	nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);

	nandc->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(nandc->core_clk))
		return PTR_ERR(nandc->core_clk);

	nandc->aon_clk = devm_clk_get(dev, "aon");
	if (IS_ERR(nandc->aon_clk))
		return PTR_ERR(nandc->aon_clk);

	ret = qcom_nandc_parse_dt(pdev);
	if (ret)
		return ret;

	ret = qcom_nandc_alloc(nandc);
	if (ret)
		return ret;

	ret = clk_prepare_enable(nandc->core_clk);
	if (ret)
		goto err_core_clk;

	ret = clk_prepare_enable(nandc->aon_clk);
	if (ret)
		goto err_aon_clk;

	ret = qcom_nandc_setup(nandc);
	if (ret)
		goto err_setup;

	for_each_available_child_of_node(dn, child) {
		if (of_device_is_compatible(child, "qcom,nandcs")) {
			host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
			if (!host) {
				of_node_put(child);
				ret = -ENOMEM;
				goto err_cs_init;
			}

			ret = qcom_nand_host_init(nandc, host, child);
			if (ret) {
				devm_kfree(dev, host);
				continue;
			}

			list_add_tail(&host->node, &nandc->host_list);
		}
	}

	if (list_empty(&nandc->host_list)) {
		ret = -ENODEV;
		goto err_cs_init;
	}

	return 0;

err_cs_init:
	list_for_each_entry(host, &nandc->host_list, node)
		nand_release(nand_to_mtd(&host->chip));
err_setup:
	clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
	clk_disable_unprepare(nandc->core_clk);
err_core_clk:
	qcom_nandc_unalloc(nandc);

	return ret;
}

static int qcom_nandc_remove(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct qcom_nand_host *host;

	list_for_each_entry(host, &nandc->host_list, node)
		nand_release(nand_to_mtd(&host->chip));

	qcom_nandc_unalloc(nandc);

	clk_disable_unprepare(nandc->aon_clk);
	clk_disable_unprepare(nandc->core_clk);

	return 0;
}

#define EBI2_NANDC_ECC_MODES	(ECC_RS_4BIT | ECC_BCH_8BIT)

/*
 * data will hold a struct pointer containing more differences once we support
 * more controller variants
 */
static const struct of_device_id qcom_nandc_of_match[] = {
	{
		.compatible = "qcom,ipq806x-nand",
		.data = (void *)EBI2_NANDC_ECC_MODES,
	},
	{}
};
MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);

static struct platform_driver qcom_nandc_driver = {
	.driver = {
		.name = "qcom-nandc",
		.of_match_table = qcom_nandc_of_match,
	},
	.probe = qcom_nandc_probe,
	.remove = qcom_nandc_remove,
};
module_platform_driver(qcom_nandc_driver);

MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
MODULE_LICENSE("GPL v2");