msm_qpic_nand.c

/*
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/crc16.h>
#include <linux/bitrev.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/ctype.h>
#include <mach/sps.h>
#include <mach/msm_smem.h>

#define PAGE_SIZE_2K 2048
#define PAGE_SIZE_4K 4096
#define WRITE 1
#define READ 0
/*
 * The maximum number of descriptors per transfer (page read/write) won't be
 * more than 64. For more details on what those commands are, please refer to
 * the page read and page write functions in the driver.
 */
#define SPS_MAX_DESC_NUM 64
#define SPS_DATA_CONS_PIPE_INDEX 0
#define SPS_DATA_PROD_PIPE_INDEX 1
#define SPS_CMD_CONS_PIPE_INDEX 2

#define msm_virt_to_dma(chip, vaddr) \
	((chip)->dma_phys_addr + \
	 ((uint8_t *)(vaddr) - (chip)->dma_virt_addr))
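/*
 * For illustration (hypothetical addresses, not real mappings): if
 * dma_virt_addr is 0xffff0000 and dma_phys_addr is 0x80000000, then
 * msm_virt_to_dma(chip, 0xffff0040) yields 0x80000040. The macro simply
 * translates an offset within the preallocated DMA pool from its kernel
 * virtual mapping to the bus address handed to the BAM.
 */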
/*
 * A single page read/write request typically needs about 1K of DMA memory,
 * so for a single request this pool is more than enough.
 *
 * But to accommodate multiple clients, we allocate 8K of memory. Though only
 * one client request can be submitted to the NANDc at any time, other clients
 * can still prepare their descriptors while waiting for the current request
 * to be done. Thus, with a total of 8K, the driver can currently support a
 * maximum of 7 or 8 clients at a time. A client for which there is no free
 * DMA memory shall wait on the wait queue until other clients free up the
 * required memory.
 */
#define MSM_NAND_DMA_BUFFER_SIZE SZ_8K
/*
 * This defines the granularity at which the buffer management is done. The
 * total number of slots is based on the size of the atomic_t variable
 * dma_buffer_busy (number of bits) within the structure msm_nand_chip.
 */
#define MSM_NAND_DMA_BUFFER_SLOT_SZ \
	(MSM_NAND_DMA_BUFFER_SIZE / (sizeof(((atomic_t *)0)->counter) * 8))
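/*
 * For illustration (assuming the usual 32-bit atomic_t counter):
 * SZ_8K / (4 * 8) gives a slot size of 256 bytes, i.e. 32 slots, each
 * tracked by one bit of dma_buffer_busy. A request of up to 256 bytes
 * occupies one slot, a 1K request occupies four contiguous slots, etc.
 */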
/* ONFI (Open NAND Flash Interface) parameters */
#define MSM_NAND_CFG0_RAW_ONFI_IDENTIFIER 0x88000800
#define MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO 0x88040000
#define MSM_NAND_CFG1_RAW_ONFI_IDENTIFIER 0x0005045d
#define MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO 0x0005045d
#define ONFI_PARAM_INFO_LENGTH 0x0200
#define ONFI_PARAM_PAGE_LENGTH 0x0100
#define ONFI_PARAMETER_PAGE_SIGNATURE 0x49464E4F
#define FLASH_READ_ONFI_SIGNATURE_ADDRESS 0x20
#define FLASH_READ_ONFI_PARAMETERS_COMMAND 0xEC
#define FLASH_READ_ONFI_PARAMETERS_ADDRESS 0x00
#define FLASH_READ_DEVICE_ID_ADDRESS 0x00

#define MSM_NAND_RESET_FLASH_STS 0x00000020
#define MSM_NAND_RESET_READ_STS 0x000000C0
/* QPIC NANDc (NAND Controller) Register Set */
#define MSM_NAND_REG(info, off) (info->nand_phys + off)
#define MSM_NAND_QPIC_VERSION(info) MSM_NAND_REG(info, 0x20100)
#define MSM_NAND_FLASH_CMD(info) MSM_NAND_REG(info, 0x30000)
#define MSM_NAND_ADDR0(info) MSM_NAND_REG(info, 0x30004)
#define MSM_NAND_ADDR1(info) MSM_NAND_REG(info, 0x30008)
#define MSM_NAND_EXEC_CMD(info) MSM_NAND_REG(info, 0x30010)
#define MSM_NAND_FLASH_STATUS(info) MSM_NAND_REG(info, 0x30014)
#define FS_OP_ERR (1 << 4)
#define FS_MPU_ERR (1 << 8)
#define FS_DEVICE_STS_ERR (1 << 16)
#define FS_DEVICE_WP (1 << 23)
#define MSM_NAND_BUFFER_STATUS(info) MSM_NAND_REG(info, 0x30018)
#define BS_UNCORRECTABLE_BIT (1 << 8)
#define BS_CORRECTABLE_ERR_MSK 0x1F
#define MSM_NAND_DEV0_CFG0(info) MSM_NAND_REG(info, 0x30020)
#define DISABLE_STATUS_AFTER_WRITE 4
#define CW_PER_PAGE 6
#define UD_SIZE_BYTES 9
#define SPARE_SIZE_BYTES 23
#define NUM_ADDR_CYCLES 27
#define MSM_NAND_DEV0_CFG1(info) MSM_NAND_REG(info, 0x30024)
#define DEV0_CFG1_ECC_DISABLE 0
#define WIDE_FLASH 1
#define NAND_RECOVERY_CYCLES 2
#define CS_ACTIVE_BSY 5
#define BAD_BLOCK_BYTE_NUM 6
#define BAD_BLOCK_IN_SPARE_AREA 16
#define WR_RD_BSY_GAP 17
#define ENABLE_BCH_ECC 27
#define MSM_NAND_DEV0_ECC_CFG(info) MSM_NAND_REG(info, 0x30028)
#define ECC_CFG_ECC_DISABLE 0
#define ECC_SW_RESET 1
#define ECC_MODE 4
#define ECC_PARITY_SIZE_BYTES 8
#define ECC_NUM_DATA_BYTES 16
#define ECC_FORCE_CLK_OPEN 30
#define MSM_NAND_READ_ID(info) MSM_NAND_REG(info, 0x30040)
#define MSM_NAND_READ_STATUS(info) MSM_NAND_REG(info, 0x30044)
#define MSM_NAND_DEV_CMD1(info) MSM_NAND_REG(info, 0x300A4)
#define MSM_NAND_DEV_CMD_VLD(info) MSM_NAND_REG(info, 0x300AC)
#define MSM_NAND_EBI2_ECC_BUF_CFG(info) MSM_NAND_REG(info, 0x300F0)
#define MSM_NAND_ERASED_CW_DETECT_CFG(info) MSM_NAND_REG(info, 0x300E8)
#define MSM_NAND_ERASED_CW_DETECT_STATUS(info) MSM_NAND_REG(info, 0x300EC)
#define MSM_NAND_CTRL(info) MSM_NAND_REG(info, 0x30F00)
#define BAM_MODE_EN 0
#define MSM_NAND_VERSION(info) MSM_NAND_REG(info, 0x30F08)
#define MSM_NAND_READ_LOCATION_0(info) MSM_NAND_REG(info, 0x30F20)
#define MSM_NAND_READ_LOCATION_1(info) MSM_NAND_REG(info, 0x30F24)

/* device commands */
#define MSM_NAND_CMD_PAGE_READ 0x32
#define MSM_NAND_CMD_PAGE_READ_ECC 0x33
#define MSM_NAND_CMD_PAGE_READ_ALL 0x34
#define MSM_NAND_CMD_PRG_PAGE 0x36
#define MSM_NAND_CMD_PRG_PAGE_ECC 0x37
#define MSM_NAND_CMD_PRG_PAGE_ALL 0x39
#define MSM_NAND_CMD_BLOCK_ERASE 0x3A
#define MSM_NAND_CMD_FETCH_ID 0x0B

/* Version Mask */
#define MSM_NAND_VERSION_MAJOR_MASK 0xF0000000
#define MSM_NAND_VERSION_MAJOR_SHIFT 28
#define MSM_NAND_VERSION_MINOR_MASK 0x0FFF0000
#define MSM_NAND_VERSION_MINOR_SHIFT 16
/* Structure that defines a NAND SPS command element */
struct msm_nand_sps_cmd {
	struct sps_command_element ce;
	uint32_t flags;
};
/*
 * Structure that defines the NAND controller properties as per the
 * NAND flash device/chip that is attached.
 */
struct msm_nand_chip {
	struct device *dev;
	/*
	 * DMA memory will be allocated only once during probe and this memory
	 * will be used by all NAND clients. This wait queue is needed to
	 * make the applications wait for DMA memory to be freed when the
	 * complete memory is exhausted.
	 */
	wait_queue_head_t dma_wait_queue;
	atomic_t dma_buffer_busy;
	uint8_t *dma_virt_addr;
	dma_addr_t dma_phys_addr;
	uint32_t ecc_parity_bytes;
	uint32_t bch_caps; /* Controller BCH ECC capabilities */
#define MSM_NAND_CAP_4_BIT_BCH (1 << 0)
#define MSM_NAND_CAP_8_BIT_BCH (1 << 1)
	uint32_t cw_size;
	/* NANDc register configurations */
	uint32_t cfg0, cfg1, cfg0_raw, cfg1_raw;
	uint32_t ecc_buf_cfg;
	uint32_t ecc_bch_cfg;
};
/* Structure that defines an SPS end point for a NANDc BAM pipe. */
struct msm_nand_sps_endpt {
	struct sps_pipe *handle;
	struct sps_connect config;
	struct sps_register_event event;
	struct completion completion;
};

/*
 * Structure that defines NANDc SPS data - BAM handle and an end point
 * for each BAM pipe.
 */
struct msm_nand_sps_info {
	uint32_t bam_handle;
	struct msm_nand_sps_endpt data_prod;
	struct msm_nand_sps_endpt data_cons;
	struct msm_nand_sps_endpt cmd_pipe;
};
/*
 * Structure that contains flash device information. This gets updated after
 * the NAND flash device detection.
 */
struct flash_identification {
	uint32_t flash_id;
	uint32_t density;
	uint32_t widebus;
	uint32_t pagesize;
	uint32_t blksize;
	uint32_t oobsize;
	uint32_t ecc_correctability;
};
/* Structure that defines NANDc private data. */
struct msm_nand_info {
	struct mtd_info mtd;
	struct msm_nand_chip nand_chip;
	struct msm_nand_sps_info sps;
	unsigned long bam_phys;
	unsigned long nand_phys;
	void __iomem *bam_base;
	int bam_irq;
	/*
	 * This lock must be acquired before submitting any command or data
	 * descriptors to BAM pipes and must be held until all the submitted
	 * descriptors are processed.
	 *
	 * This is required to ensure that both command and data descriptors
	 * are submitted atomically without interruption from other clients,
	 * when there are requests from more than one client at any time.
	 * Otherwise, data and command descriptors can be submitted out of
	 * order for a request, which can cause data corruption.
	 */
	struct mutex bam_lock;
	struct flash_identification flash_dev;
};
/* Structure that defines an ONFI parameter page (256B) */
struct onfi_param_page {
	uint32_t parameter_page_signature;
	uint16_t revision_number;
	uint16_t features_supported;
	uint16_t optional_commands_supported;
	uint8_t  reserved0[22];
	uint8_t  device_manufacturer[12];
	uint8_t  device_model[20];
	uint8_t  jedec_manufacturer_id;
	uint16_t date_code;
	uint8_t  reserved1[13];
	uint32_t number_of_data_bytes_per_page;
	uint16_t number_of_spare_bytes_per_page;
	uint32_t number_of_data_bytes_per_partial_page;
	uint16_t number_of_spare_bytes_per_partial_page;
	uint32_t number_of_pages_per_block;
	uint32_t number_of_blocks_per_logical_unit;
	uint8_t  number_of_logical_units;
	uint8_t  number_of_address_cycles;
	uint8_t  number_of_bits_per_cell;
	uint16_t maximum_bad_blocks_per_logical_unit;
	uint16_t block_endurance;
	uint8_t  guaranteed_valid_begin_blocks;
	uint16_t guaranteed_valid_begin_blocks_endurance;
	uint8_t  number_of_programs_per_page;
	uint8_t  partial_program_attributes;
	uint8_t  number_of_bits_ecc_correctability;
	uint8_t  number_of_interleaved_address_bits;
	uint8_t  interleaved_operation_attributes;
	uint8_t  reserved2[13];
	uint8_t  io_pin_capacitance;
	uint16_t timing_mode_support;
	uint16_t program_cache_timing_mode_support;
	uint16_t maximum_page_programming_time;
	uint16_t maximum_block_erase_time;
	uint16_t maximum_page_read_time;
	uint16_t maximum_change_column_setup_time;
	uint8_t  reserved3[23];
	uint16_t vendor_specific_revision_number;
	uint8_t  vendor_specific[88];
	uint16_t integrity_crc;
} __attribute__((__packed__));
#define FLASH_PART_MAGIC1 0x55EE73AA
#define FLASH_PART_MAGIC2 0xE35EBDDB
#define FLASH_PTABLE_V3 3
#define FLASH_PTABLE_V4 4
#define FLASH_PTABLE_MAX_PARTS_V3 16
#define FLASH_PTABLE_MAX_PARTS_V4 32
#define FLASH_PTABLE_HDR_LEN (4*sizeof(uint32_t))
#define FLASH_PTABLE_ENTRY_NAME_SIZE 16

struct flash_partition_entry {
	char name[FLASH_PTABLE_ENTRY_NAME_SIZE];
	u32 offset; /* Offset in blocks from beginning of device */
	u32 length; /* Length of the partition in blocks */
	u8 attr; /* Flags for this partition */
};

struct flash_partition_table {
	u32 magic1;
	u32 magic2;
	u32 version;
	u32 numparts;
	struct flash_partition_entry part_entry[FLASH_PTABLE_MAX_PARTS_V4];
};

static struct flash_partition_table ptable;

static struct mtd_partition mtd_part[FLASH_PTABLE_MAX_PARTS_V4];
/*
 * Get DMA memory of the requested size. Returns a pointer to free memory
 * available from the allocated pool, or NULL if there is no free memory.
 */
static void *msm_nand_get_dma_buffer(struct msm_nand_chip *chip, size_t size)
{
	uint32_t bitmask, free_bitmask, old_bitmask;
	uint32_t need_mask, current_need_mask;
	int free_index;

	need_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOT_SZ))
			- 1;
	bitmask = atomic_read(&chip->dma_buffer_busy);
	free_bitmask = ~bitmask;
	do {
		free_index = __ffs(free_bitmask);
		current_need_mask = need_mask << free_index;

		if (size + free_index * MSM_NAND_DMA_BUFFER_SLOT_SZ >=
						MSM_NAND_DMA_BUFFER_SIZE)
			return NULL;

		if ((bitmask & current_need_mask) == 0) {
			old_bitmask =
				atomic_cmpxchg(&chip->dma_buffer_busy,
					       bitmask,
					       bitmask | current_need_mask);
			if (old_bitmask == bitmask)
				return chip->dma_virt_addr +
				free_index * MSM_NAND_DMA_BUFFER_SLOT_SZ;
			free_bitmask = 0; /* force return */
		}
		/*
		 * Current free range was too small; clear all free bits
		 * below the top busy bit within current_need_mask.
		 */
		free_bitmask &=
			~(~0U >> (32 - fls(bitmask & current_need_mask)));
	} while (free_bitmask);

	return NULL;
}
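/*
 * A worked example of the search above (an illustration, not driver
 * output): with 256-byte slots, a 600-byte request needs
 * DIV_ROUND_UP(600, 256) = 3 slots, so need_mask = 0b111. If
 * dma_buffer_busy is 0b0101, the first free slot is index 1 and
 * current_need_mask = 0b1110 overlaps the busy bit at index 2; the loop
 * then clears the free bits at or below that busy bit and retries from
 * index 3, where 0b111000 is free and is claimed via atomic_cmpxchg().
 */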
/*
 * Releases DMA memory back to the free pool and wakes up any user thread
 * waiting on the wait queue for free memory to become available.
 */
static void msm_nand_release_dma_buffer(struct msm_nand_chip *chip,
					void *buffer, size_t size)
{
	int index;
	uint32_t used_mask;

	used_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOT_SZ))
			- 1;
	index = ((uint8_t *)buffer - chip->dma_virt_addr) /
		MSM_NAND_DMA_BUFFER_SLOT_SZ;
	atomic_sub(used_mask << index, &chip->dma_buffer_busy);

	wake_up(&chip->dma_wait_queue);
}
/*
 * Calculates the page address of the buffer passed and the offset of the
 * buffer within that page, then maps it for DMA by calling dma_map_page().
 */
static dma_addr_t msm_nand_dma_map(struct device *dev, void *addr, size_t size,
				   enum dma_data_direction dir)
{
	struct page *page;
	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;

	if (virt_addr_valid(addr))
		page = virt_to_page(addr);
	else {
		if (WARN_ON(size + offset > PAGE_SIZE))
			return ~0;
		page = vmalloc_to_page(addr);
	}
	return dma_map_page(dev, page, offset, size, dir);
}
/*
 * Wrapper function to prepare an SPS command element with the data that is
 * passed to this function.
 *
 * Since every command element must have the SPS_IOVEC_FLAG_CMD flag set,
 * this function sets that flag by default on the command element that is
 * passed in, so the caller need not pass it explicitly. The other flags
 * must be passed based on the need. If a command element doesn't need any
 * other flag, then 0 can be passed as flags.
 */
static inline void msm_nand_prep_ce(struct msm_nand_sps_cmd *sps_cmd,
				    uint32_t addr, uint32_t command,
				    uint32_t data, uint32_t flags)
{
	struct sps_command_element *cmd = &sps_cmd->ce;

	cmd->addr = addr;
	cmd->command = (command & WRITE) ? (uint32_t) SPS_WRITE_COMMAND :
			(uint32_t) SPS_READ_COMMAND;
	cmd->data = data;
	cmd->mask = 0xFFFFFFFF;
	sps_cmd->flags = SPS_IOVEC_FLAG_CMD | flags;
}
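/*
 * For illustration, a typical register write prepared through this helper
 * (the address and value below are as used elsewhere in this driver):
 *
 *	msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE, 1,
 *			 SPS_IOVEC_FLAG_NWD);
 *
 * builds a command element that writes 1 to the EXEC_CMD register, with
 * SPS_IOVEC_FLAG_CMD set implicitly and the NWD flag requested by the
 * caller.
 */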
/*
 * Read a single NANDc register as mentioned by its parameter addr. The return
 * value indicates whether read is successful or not. The register value read
 * is stored in val.
 */
static int msm_nand_flash_rd_reg(struct msm_nand_info *info, uint32_t addr,
				 uint32_t *val)
{
	int ret = 0;
	struct msm_nand_sps_cmd *cmd;
	struct msm_nand_chip *chip = &info->nand_chip;
	struct {
		struct msm_nand_sps_cmd cmd;
		uint32_t data;
	} *dma_buffer;

	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
		    chip, sizeof(*dma_buffer))));
	cmd = &dma_buffer->cmd;
	msm_nand_prep_ce(cmd, addr, READ, msm_virt_to_dma(chip,
			&dma_buffer->data), SPS_IOVEC_FLAG_INT);

	ret = sps_transfer_one(info->sps.cmd_pipe.handle,
			msm_virt_to_dma(chip, &cmd->ce),
			sizeof(struct sps_command_element), NULL, cmd->flags);
	if (ret) {
		pr_err("failed to submit command %x ret %d\n", addr, ret);
		goto out;
	}
	wait_for_completion_io(&info->sps.cmd_pipe.completion);
	*val = dma_buffer->data;
out:
	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
	return ret;
}
/*
 * Read the flash ID from the NAND flash device. A return value < 0
 * indicates failure. When successful, the flash ID is stored in parameter
 * read_id.
 */
static int msm_nand_flash_read_id(struct msm_nand_info *info,
				  bool read_onfi_signature,
				  uint32_t *read_id)
{
	int err = 0, i;
	struct msm_nand_sps_cmd *cmd;
	struct sps_iovec *iovec;
	struct msm_nand_chip *chip = &info->nand_chip;
	uint32_t total_cnt = 4;
	/*
	 * The following 4 commands are required to read id -
	 * write commands - addr0, flash, exec
	 * read commands - read_id
	 */
	struct {
		struct sps_transfer xfer;
		struct sps_iovec cmd_iovec[total_cnt];
		struct msm_nand_sps_cmd cmd[total_cnt];
		uint32_t data[total_cnt];
	} *dma_buffer;

	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer
				(chip, sizeof(*dma_buffer))));
	if (read_onfi_signature)
		dma_buffer->data[0] = FLASH_READ_ONFI_SIGNATURE_ADDRESS;
	else
		dma_buffer->data[0] = FLASH_READ_DEVICE_ID_ADDRESS;
	dma_buffer->data[1] = MSM_NAND_CMD_FETCH_ID;
	dma_buffer->data[2] = 1;
	dma_buffer->data[3] = 0xeeeeeeee;
	cmd = dma_buffer->cmd;

	msm_nand_prep_ce(cmd, MSM_NAND_ADDR0(info), WRITE,
			dma_buffer->data[0], SPS_IOVEC_FLAG_LOCK);
	cmd++;
	msm_nand_prep_ce(cmd, MSM_NAND_FLASH_CMD(info), WRITE,
			dma_buffer->data[1], 0);
	cmd++;
	msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
			dma_buffer->data[2], SPS_IOVEC_FLAG_NWD);
	cmd++;
	msm_nand_prep_ce(cmd, MSM_NAND_READ_ID(info), READ,
			msm_virt_to_dma(chip, &dma_buffer->data[3]),
			SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
	cmd++;

	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
	dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
	dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
	dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
					&dma_buffer->cmd_iovec);
	iovec = dma_buffer->xfer.iovec;
	for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
		iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
		iovec->size = sizeof(struct sps_command_element);
		iovec->flags = dma_buffer->cmd[i].flags;
		iovec++;
	}
	mutex_lock(&info->bam_lock);
	err = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
	if (err) {
		pr_err("Failed to submit commands %d\n", err);
		mutex_unlock(&info->bam_lock);
		goto out;
	}
	wait_for_completion_io(&info->sps.cmd_pipe.completion);
	mutex_unlock(&info->bam_lock);

	pr_debug("Read ID register value 0x%x\n", dma_buffer->data[3]);
	if (!read_onfi_signature)
		pr_debug("nandid: %x maker %02x device %02x\n",
			dma_buffer->data[3], dma_buffer->data[3] & 0xff,
			(dma_buffer->data[3] >> 8) & 0xff);
	*read_id = dma_buffer->data[3];
out:
	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
	return err;
}
/*
 * Contains data for common configuration registers that must be programmed
 * for every NANDc operation.
 */
struct msm_nand_common_cfgs {
	uint32_t cmd;
	uint32_t addr0;
	uint32_t addr1;
	uint32_t cfg0;
	uint32_t cfg1;
};
/*
 * Function to prepare SPS command elements to write into NANDc configuration
 * registers as per the data defined in struct msm_nand_common_cfgs. This is
 * required for the following NANDc operations - Erase, Bad Block checking
 * and for reading ONFI parameter page.
 */
static void msm_nand_prep_cfg_cmd_desc(struct msm_nand_info *info,
				       struct msm_nand_common_cfgs data,
				       struct msm_nand_sps_cmd **curr_cmd)
{
	struct msm_nand_sps_cmd *cmd;

	cmd = *curr_cmd;
	msm_nand_prep_ce(cmd, MSM_NAND_FLASH_CMD(info), WRITE, data.cmd,
			SPS_IOVEC_FLAG_LOCK);
	cmd++;
	msm_nand_prep_ce(cmd, MSM_NAND_ADDR0(info), WRITE, data.addr0, 0);
	cmd++;
	msm_nand_prep_ce(cmd, MSM_NAND_ADDR1(info), WRITE, data.addr1, 0);
	cmd++;
	msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG0(info), WRITE, data.cfg0, 0);
	cmd++;
	msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG1(info), WRITE, data.cfg1, 0);
	cmd++;
	*curr_cmd = cmd;
}
/*
 * Function to perform the CRC integrity check on the ONFI parameter page
 * that was read. For an ONFI parameter page read, the controller ECC is
 * disabled. Hence, it is mandatory to manually compute the CRC and check it
 * against the value stored within the ONFI page.
 */
static uint16_t msm_nand_flash_onfi_crc_check(uint8_t *buffer, uint16_t count)
{
	int i;
	uint16_t result;

	for (i = 0; i < count; i++)
		buffer[i] = bitrev8(buffer[i]);
	result = bitrev16(crc16(bitrev16(0x4f4e), buffer, count));
	for (i = 0; i < count; i++)
		buffer[i] = bitrev8(buffer[i]);
	return result;
}
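/*
 * A note on the bit reversals above (our reading of the code, not original
 * driver documentation): the kernel's crc16() implements the 0x8005
 * polynomial in LSB-first (reflected) form, while the ONFI CRC is defined
 * MSB-first with initial value 0x4F4E. Reversing each input byte, the seed,
 * and the final result lets the reflected crc16() produce the MSB-first
 * CRC that ONFI expects.
 */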
/*
 * Structure that contains NANDc register data for the commands required
 * for reading the ONFI parameter page.
 */
struct msm_nand_flash_onfi_data {
	struct msm_nand_common_cfgs cfg;
	uint32_t exec;
	uint32_t devcmd1_orig;
	uint32_t devcmdvld_orig;
	uint32_t devcmd1_mod;
	uint32_t devcmdvld_mod;
	uint32_t ecc_bch_cfg;
};

struct version {
	uint16_t nand_major;
	uint16_t nand_minor;
	uint16_t qpic_major;
	uint16_t qpic_minor;
};
static int msm_nand_version_check(struct msm_nand_info *info,
			struct version *nandc_version)
{
	uint32_t qpic_ver = 0, nand_ver = 0;
	int err = 0;

	/* Lookup the version to identify supported features */
	err = msm_nand_flash_rd_reg(info, MSM_NAND_VERSION(info),
		&nand_ver);
	if (err) {
		pr_err("Failed to read NAND_VERSION, err=%d\n", err);
		goto out;
	}
	nandc_version->nand_major = (nand_ver & MSM_NAND_VERSION_MAJOR_MASK) >>
		MSM_NAND_VERSION_MAJOR_SHIFT;
	nandc_version->nand_minor = (nand_ver & MSM_NAND_VERSION_MINOR_MASK) >>
		MSM_NAND_VERSION_MINOR_SHIFT;

	err = msm_nand_flash_rd_reg(info, MSM_NAND_QPIC_VERSION(info),
		&qpic_ver);
	if (err) {
		pr_err("Failed to read QPIC_VERSION, err=%d\n", err);
		goto out;
	}
	nandc_version->qpic_major = (qpic_ver & MSM_NAND_VERSION_MAJOR_MASK) >>
		MSM_NAND_VERSION_MAJOR_SHIFT;
	nandc_version->qpic_minor = (qpic_ver & MSM_NAND_VERSION_MINOR_MASK) >>
		MSM_NAND_VERSION_MINOR_SHIFT;
	pr_info("nand_major:%d, nand_minor:%d, qpic_major:%d, qpic_minor:%d\n",
		nandc_version->nand_major, nandc_version->nand_minor,
		nandc_version->qpic_major, nandc_version->qpic_minor);
out:
	return err;
}
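/*
 * For illustration: a NAND_VERSION value of 0x10010000 decodes as
 * major = (0x10010000 & 0xF0000000) >> 28 = 1 and
 * minor = (0x10010000 & 0x0FFF0000) >> 16 = 1, i.e. controller v1.1,
 * which is the only combination msm_nand_flash_onfi_probe() accepts below.
 */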
/*
 * Function to identify whether the attached NAND flash device is
 * compliant with the ONFI spec or not. If it is, this reads the ONFI
 * parameter page to get the device parameters.
 */
static int msm_nand_flash_onfi_probe(struct msm_nand_info *info)
{
	struct msm_nand_chip *chip = &info->nand_chip;
	struct flash_identification *flash = &info->flash_dev;
	uint32_t crc_chk_count = 0, page_address = 0;
	int ret = 0, i;

	/* SPS parameters */
	struct msm_nand_sps_cmd *cmd, *curr_cmd;
	struct sps_iovec *iovec;
	uint32_t rdata;

	/* ONFI Identifier/Parameter Page parameters */
	uint8_t *onfi_param_info_buf = NULL;
	dma_addr_t dma_addr_param_info = 0;
	struct onfi_param_page *onfi_param_page_ptr;
	struct msm_nand_flash_onfi_data data;
	uint32_t onfi_signature = 0;

	/* SPS command/data descriptors */
	uint32_t total_cnt = 13;
	/*
	 * The following 13 commands are required to get onfi parameters -
	 * flash, addr0, addr1, cfg0, cfg1, dev0_ecc_cfg, cmd_vld, dev_cmd1,
	 * read_loc_0, exec, flash_status (read cmd), dev_cmd1, cmd_vld.
	 */
	struct {
		struct sps_transfer xfer;
		struct sps_iovec cmd_iovec[total_cnt];
		struct msm_nand_sps_cmd cmd[total_cnt];
		uint32_t flash_status;
	} *dma_buffer;

	/* Lookup the version to identify supported features */
	struct version nandc_version = {0};

	ret = msm_nand_version_check(info, &nandc_version);
	if (!ret && !(nandc_version.nand_major == 1 &&
			nandc_version.nand_minor == 1 &&
			nandc_version.qpic_major == 1 &&
			nandc_version.qpic_minor == 1)) {
		ret = -EPERM;
		goto out;
	}
	wait_event(chip->dma_wait_queue, (onfi_param_info_buf =
		msm_nand_get_dma_buffer(chip, ONFI_PARAM_INFO_LENGTH)));
	dma_addr_param_info = msm_virt_to_dma(chip, onfi_param_info_buf);

	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer
				(chip, sizeof(*dma_buffer))));

	ret = msm_nand_flash_read_id(info, 1, &onfi_signature);
	if (ret < 0) {
		pr_err("Failed to read ONFI signature\n");
		goto free_dma;
	}
	if (onfi_signature != ONFI_PARAMETER_PAGE_SIGNATURE) {
		pr_info("Found a non ONFI device\n");
		ret = -EIO;
		goto free_dma;
	}

	memset(&data, 0, sizeof(struct msm_nand_flash_onfi_data));
	ret = msm_nand_flash_rd_reg(info, MSM_NAND_DEV_CMD1(info),
				&data.devcmd1_orig);
	if (ret < 0)
		goto free_dma;
	ret = msm_nand_flash_rd_reg(info, MSM_NAND_DEV_CMD_VLD(info),
				&data.devcmdvld_orig);
	if (ret < 0)
		goto free_dma;

	/* Lookup the 'APPS' partition's first page address */
	for (i = 0; i < FLASH_PTABLE_MAX_PARTS_V4; i++) {
		if (!strncmp("apps", mtd_part[i].name,
				strlen(mtd_part[i].name))) {
			page_address = mtd_part[i].offset << 6;
			break;
		}
	}
	data.cfg.cmd = MSM_NAND_CMD_PAGE_READ_ALL;
	data.exec = 1;
	data.cfg.addr0 = (page_address << 16) |
				FLASH_READ_ONFI_PARAMETERS_ADDRESS;
	data.cfg.addr1 = (page_address >> 16) & 0xFF;
	data.cfg.cfg0 = MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO;
	data.cfg.cfg1 = MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO;
	data.devcmd1_mod = (data.devcmd1_orig & 0xFFFFFF00) |
				FLASH_READ_ONFI_PARAMETERS_COMMAND;
	data.devcmdvld_mod = data.devcmdvld_orig & 0xFFFFFFFE;
	data.ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
	dma_buffer->flash_status = 0xeeeeeeee;

	curr_cmd = cmd = dma_buffer->cmd;
	msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);

	cmd = curr_cmd;
	msm_nand_prep_ce(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
			data.ecc_bch_cfg, 0);
	cmd++;
	msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD_VLD(info), WRITE,
			data.devcmdvld_mod, 0);
	cmd++;
	msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD1(info), WRITE,
			data.devcmd1_mod, 0);
	cmd++;
	rdata = (0 << 0) | (ONFI_PARAM_INFO_LENGTH << 16) | (1 << 31);
	msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
			rdata, 0);
	cmd++;
	msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
			data.exec, SPS_IOVEC_FLAG_NWD);
	cmd++;
	msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), READ,
			msm_virt_to_dma(chip, &dma_buffer->flash_status), 0);
	cmd++;
	msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD1(info), WRITE,
			data.devcmd1_orig, 0);
	cmd++;
	msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD_VLD(info), WRITE,
			data.devcmdvld_orig,
			SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
	cmd++;

	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
	dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
	dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
	dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
					&dma_buffer->cmd_iovec);
	iovec = dma_buffer->xfer.iovec;
	for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
		iovec->addr = msm_virt_to_dma(chip,
				&dma_buffer->cmd[i].ce);
		iovec->size = sizeof(struct sps_command_element);
		iovec->flags = dma_buffer->cmd[i].flags;
		iovec++;
	}
	mutex_lock(&info->bam_lock);
	/* Submit data descriptor */
	ret = sps_transfer_one(info->sps.data_prod.handle, dma_addr_param_info,
			ONFI_PARAM_INFO_LENGTH, NULL, SPS_IOVEC_FLAG_INT);
	if (ret) {
		pr_err("Failed to submit data descriptors %d\n", ret);
		mutex_unlock(&info->bam_lock);
		goto free_dma;
	}
	/* Submit command descriptors */
	ret = sps_transfer(info->sps.cmd_pipe.handle,
			&dma_buffer->xfer);
	if (ret) {
		pr_err("Failed to submit commands %d\n", ret);
		mutex_unlock(&info->bam_lock);
		goto free_dma;
	}
	wait_for_completion_io(&info->sps.cmd_pipe.completion);
	wait_for_completion_io(&info->sps.data_prod.completion);
	mutex_unlock(&info->bam_lock);

	/* Check for flash status errors */
	if (dma_buffer->flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
		pr_err("MPU/OP err (0x%x) is set\n", dma_buffer->flash_status);
		ret = -EIO;
		goto free_dma;
	}

	for (crc_chk_count = 0; crc_chk_count < ONFI_PARAM_INFO_LENGTH
			/ ONFI_PARAM_PAGE_LENGTH; crc_chk_count++) {
		onfi_param_page_ptr =
			(struct onfi_param_page *)
			(&(onfi_param_info_buf
			[ONFI_PARAM_PAGE_LENGTH *
			crc_chk_count]));
		if (msm_nand_flash_onfi_crc_check(
			(uint8_t *)onfi_param_page_ptr,
			ONFI_PARAM_PAGE_LENGTH - 2) ==
			onfi_param_page_ptr->integrity_crc) {
			break;
		}
	}
	if (crc_chk_count >= ONFI_PARAM_INFO_LENGTH
			/ ONFI_PARAM_PAGE_LENGTH) {
		pr_err("CRC Check failed on param page\n");
		ret = -EIO;
		goto free_dma;
	}
	ret = msm_nand_flash_read_id(info, 0, &flash->flash_id);
	if (ret < 0) {
		pr_err("Failed to read flash ID\n");
		goto free_dma;
	}
	flash->widebus = onfi_param_page_ptr->features_supported & 0x01;
	flash->pagesize = onfi_param_page_ptr->number_of_data_bytes_per_page;
	flash->blksize = onfi_param_page_ptr->number_of_pages_per_block *
				flash->pagesize;
	flash->oobsize = onfi_param_page_ptr->number_of_spare_bytes_per_page;
	flash->density = onfi_param_page_ptr->number_of_blocks_per_logical_unit
				* flash->blksize;
	flash->ecc_correctability = onfi_param_page_ptr->
				number_of_bits_ecc_correctability;

	pr_info("Found an ONFI compliant device %s\n",
		onfi_param_page_ptr->device_model);
	/*
	 * Temporary hack for the MT29F4G08ABC device. Since the device does
	 * not properly adhere to the ONFI specification, it reports itself
	 * as a 16-bit device even though it is an 8-bit device.
	 */
	if (!strncmp(onfi_param_page_ptr->device_model, "MT29F4G08ABC", 12))
		flash->widebus = 0;
free_dma:
	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
	msm_nand_release_dma_buffer(chip, onfi_param_info_buf,
			ONFI_PARAM_INFO_LENGTH);
out:
	return ret;
}
/*
 * Structure that contains read/write parameters required for reading/writing
 * from/to a page.
 */
struct msm_nand_rw_params {
	uint32_t page;
	uint32_t page_count;
	uint32_t sectordatasize;
	uint32_t sectoroobsize;
	uint32_t cwperpage;
	uint32_t oob_len_cmd;
	uint32_t oob_len_data;
	uint32_t start_sector;
	uint32_t oob_col;
	dma_addr_t data_dma_addr;
	dma_addr_t oob_dma_addr;
	dma_addr_t data_dma_addr_curr;
	dma_addr_t oob_dma_addr_curr;
	bool read;
};

/*
 * Structure that contains NANDc register data required for reading/writing
 * from/to a page.
 */
struct msm_nand_rw_reg_data {
	uint32_t cmd;
	uint32_t addr0;
	uint32_t addr1;
	uint32_t cfg0;
	uint32_t cfg1;
	uint32_t ecc_bch_cfg;
	uint32_t exec;
	uint32_t ecc_cfg;
	uint32_t clrfstatus;
	uint32_t clrrstatus;
};
/*
 * Function that validates page read/write MTD parameters received from upper
 * layers such as MTD/YAFFS2 and returns error for any unsupported operations
 * by the driver. In case of success, it also maps the data and oob buffer
 * received for DMA.
 */
static int msm_nand_validate_mtd_params(struct mtd_info *mtd, bool read,
					loff_t offset,
					struct mtd_oob_ops *ops,
					struct msm_nand_rw_params *args)
{
	struct msm_nand_info *info = mtd->priv;
	struct msm_nand_chip *chip = &info->nand_chip;
	int err = 0;

	pr_debug("========================================================\n");
	pr_debug("offset 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x\n",
			offset, ops->mode, ops->datbuf, ops->len);
	pr_debug("oobbuf 0x%p ooblen 0x%x\n", ops->oobbuf, ops->ooblen);

	if (ops->mode == MTD_OPS_PLACE_OOB) {
		pr_err("MTD_OPS_PLACE_OOB is not supported\n");
		err = -EINVAL;
		goto out;
	}
	if (mtd->writesize == PAGE_SIZE_2K)
		args->page = offset >> 11;
	if (mtd->writesize == PAGE_SIZE_4K)
		args->page = offset >> 12;
	args->oob_len_cmd = ops->ooblen;
	args->oob_len_data = ops->ooblen;
	args->cwperpage = (mtd->writesize >> 9);
	args->read = (read ? true : false);

	if (offset & (mtd->writesize - 1)) {
		pr_err("unsupported offset 0x%llx\n", offset);
		err = -EINVAL;
		goto out;
	}
	if (!read && !ops->datbuf) {
		pr_err("No data buffer provided for write!!\n");
		err = -EINVAL;
		goto out;
	}
	if (ops->mode == MTD_OPS_RAW) {
		if (!ops->datbuf) {
			pr_err("No data buffer provided for RAW mode\n");
			err = -EINVAL;
			goto out;
		} else if ((ops->len % (mtd->writesize +
				mtd->oobsize)) != 0) {
			pr_err("unsupported data len %d for RAW mode\n",
				ops->len);
			err = -EINVAL;
			goto out;
		}
		args->page_count = ops->len / (mtd->writesize + mtd->oobsize);
	} else if (ops->mode == MTD_OPS_AUTO_OOB) {
		if (ops->datbuf && (ops->len % mtd->writesize) != 0) {
			/* when ops->datbuf is NULL, ops->len can be ooblen */
			pr_err("unsupported data len %d for AUTO mode\n",
					ops->len);
			err = -EINVAL;
			goto out;
		}
		if (read && ops->oobbuf && !ops->datbuf) {
			args->start_sector = args->cwperpage - 1;
			args->page_count = ops->ooblen / mtd->oobavail;
			if ((args->page_count == 0) && (ops->ooblen))
				args->page_count = 1;
		} else if (ops->datbuf) {
			args->page_count = ops->len / mtd->writesize;
		}
	}
	if (ops->datbuf) {
		args->data_dma_addr_curr = args->data_dma_addr =
			msm_nand_dma_map(chip->dev, ops->datbuf, ops->len,
				(read ? DMA_FROM_DEVICE : DMA_TO_DEVICE));
		if (dma_mapping_error(chip->dev, args->data_dma_addr)) {
			pr_err("dma mapping failed for 0x%p\n", ops->datbuf);
			err = -EIO;
			goto out;
		}
	}
	if (ops->oobbuf) {
		if (read)
			memset(ops->oobbuf, 0xFF, ops->ooblen);
		args->oob_dma_addr_curr = args->oob_dma_addr =
			msm_nand_dma_map(chip->dev, ops->oobbuf, ops->ooblen,
				(read ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE));
		if (dma_mapping_error(chip->dev, args->oob_dma_addr)) {
			pr_err("dma mapping failed for 0x%p\n", ops->oobbuf);
			err = -EIO;
			goto dma_map_oobbuf_failed;
		}
	}
	goto out;
dma_map_oobbuf_failed:
	if (ops->datbuf)
		dma_unmap_page(chip->dev, args->data_dma_addr, ops->len,
				(read ? DMA_FROM_DEVICE : DMA_TO_DEVICE));
out:
	return err;
}
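/*
 * For illustration: a 2K-page device has mtd->writesize = 2048, so
 * args->cwperpage = 2048 >> 9 = 4 codewords per page, and a page-aligned
 * offset of 0x1000 maps to args->page = 0x1000 >> 11 = page 2.
 */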
/*
 * Function that updates NANDc register data (struct msm_nand_rw_reg_data)
 * required for page read/write.
 */
static void msm_nand_update_rw_reg_data(struct msm_nand_chip *chip,
					struct mtd_oob_ops *ops,
					struct msm_nand_rw_params *args,
					struct msm_nand_rw_reg_data *data)
{
	if (args->read) {
		if (ops->mode != MTD_OPS_RAW) {
			data->cmd = MSM_NAND_CMD_PAGE_READ_ECC;
			data->cfg0 =
			(chip->cfg0 & ~(7U << CW_PER_PAGE)) |
			(((args->cwperpage-1) - args->start_sector)
			 << CW_PER_PAGE);
			data->cfg1 = chip->cfg1;
			data->ecc_bch_cfg = chip->ecc_bch_cfg;
		} else {
			data->cmd = MSM_NAND_CMD_PAGE_READ_ALL;
			data->cfg0 = chip->cfg0_raw;
			data->cfg1 = chip->cfg1_raw;
			data->ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
		}
	} else {
		if (ops->mode != MTD_OPS_RAW) {
			data->cfg0 = chip->cfg0;
			data->cfg1 = chip->cfg1;
			data->ecc_bch_cfg = chip->ecc_bch_cfg;
		} else {
			data->cfg0 = chip->cfg0_raw;
			data->cfg1 = chip->cfg1_raw;
			data->ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
		}
		data->cmd = MSM_NAND_CMD_PRG_PAGE;
		data->clrfstatus = MSM_NAND_RESET_FLASH_STS;
		data->clrrstatus = MSM_NAND_RESET_READ_STS;
	}
	data->exec = 1;
	data->ecc_cfg = chip->ecc_buf_cfg;
}
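/*
 * For illustration: CW_PER_PAGE is a 3-bit field holding "codewords per
 * page minus one". For a full ECC read of a 2K page (cwperpage = 4,
 * start_sector = 0) the field is programmed with 3; for an OOB-only read
 * that starts at the last codeword (start_sector = 3) it is programmed
 * with 0, so the controller transfers a single codeword.
 */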
/*
 * Function to prepare series of SPS command descriptors required for a page
 * read/write operation.
 */
static void msm_nand_prep_rw_cmd_desc(struct mtd_oob_ops *ops,
				      struct msm_nand_rw_params *args,
				      struct msm_nand_rw_reg_data *data,
				      struct msm_nand_info *info,
				      uint32_t curr_cw,
				      struct msm_nand_sps_cmd **curr_cmd)
{
	struct msm_nand_chip *chip = &info->nand_chip;
	struct msm_nand_sps_cmd *cmd;
	uint32_t rdata;
	/* read_location register parameters */
	uint32_t offset, size, last_read;

	cmd = *curr_cmd;
	msm_nand_prep_ce(cmd, MSM_NAND_FLASH_CMD(info), WRITE, data->cmd,
			((curr_cw == args->start_sector) ?
			 SPS_IOVEC_FLAG_LOCK : 0));
	cmd++;

	if (curr_cw == args->start_sector) {
		msm_nand_prep_ce(cmd, MSM_NAND_ADDR0(info), WRITE,
				data->addr0, 0);
		cmd++;
		msm_nand_prep_ce(cmd, MSM_NAND_ADDR1(info), WRITE,
				data->addr1, 0);
		cmd++;
		msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG0(info), WRITE,
				data->cfg0, 0);
		cmd++;
		msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG1(info), WRITE,
				data->cfg1, 0);
		cmd++;
		msm_nand_prep_ce(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
				data->ecc_bch_cfg, 0);
		cmd++;
		msm_nand_prep_ce(cmd, MSM_NAND_EBI2_ECC_BUF_CFG(info),
				WRITE, data->ecc_cfg, 0);
		cmd++;
	}

	if (!args->read)
		goto sub_exec_cmd;

	if (ops->mode == MTD_OPS_RAW) {
		rdata = (0 << 0) | (chip->cw_size << 16) | (1 << 31);
		msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
				rdata, 0);
		cmd++;
	}
	if (ops->mode == MTD_OPS_AUTO_OOB && ops->datbuf) {
		offset = 0;
		size = (curr_cw < (args->cwperpage - 1)) ? 516 :
			(512 - ((args->cwperpage - 1) << 2));
		last_read = (curr_cw < (args->cwperpage - 1)) ? 1 :
			(ops->oobbuf ? 0 : 1);
		rdata = (offset << 0) | (size << 16) | (last_read << 31);
		msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
				rdata, 0);
		cmd++;
	}
	if (ops->mode == MTD_OPS_AUTO_OOB && ops->oobbuf
		&& (curr_cw == (args->cwperpage - 1))) {
		offset = 512 - ((args->cwperpage - 1) << 2);
		size = (args->cwperpage) << 2;
		if (size > args->oob_len_cmd)
			size = args->oob_len_cmd;
		args->oob_len_cmd -= size;
		last_read = 1;
		rdata = (offset << 0) | (size << 16) | (last_read << 31);
		if (ops->datbuf) {
			msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_1(info),
					WRITE, rdata, 0);
		} else {
			msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info),
					WRITE, rdata, 0);
		}
		cmd++;
	}
sub_exec_cmd:
	msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE, data->exec,
			SPS_IOVEC_FLAG_NWD);
	cmd++;
	*curr_cmd = cmd;
}
/*
 * Function to prepare and submit SPS data descriptors required for a page
 * read/write operation.
 */
static int msm_nand_submit_rw_data_desc(struct mtd_oob_ops *ops,
					struct msm_nand_rw_params *args,
					struct msm_nand_info *info,
					uint32_t curr_cw)
{
	struct msm_nand_chip *chip = &info->nand_chip;
	struct sps_pipe *data_pipe_handle;
	uint32_t sectordatasize, sectoroobsize;
	uint32_t sps_flags = 0;
	int err = 0;

	if (args->read)
		data_pipe_handle = info->sps.data_prod.handle;
	else
		data_pipe_handle = info->sps.data_cons.handle;

	if (ops->mode == MTD_OPS_RAW) {
		sectordatasize = chip->cw_size;
		if (!args->read)
			sps_flags = SPS_IOVEC_FLAG_EOT;
		if (curr_cw == (args->cwperpage - 1))
			sps_flags |= SPS_IOVEC_FLAG_INT;

		err = sps_transfer_one(data_pipe_handle,
				args->data_dma_addr_curr,
				sectordatasize, NULL,
				sps_flags);
		if (err)
			goto out;
		args->data_dma_addr_curr += sectordatasize;

	} else if (ops->mode == MTD_OPS_AUTO_OOB) {
		if (ops->datbuf) {
			sectordatasize = (curr_cw < (args->cwperpage - 1))
			? 516 : (512 - ((args->cwperpage - 1) << 2));

			if (!args->read) {
				sps_flags = SPS_IOVEC_FLAG_EOT;
				if (curr_cw == (args->cwperpage - 1) &&
						ops->oobbuf)
					sps_flags = 0;
			}
			if ((curr_cw == (args->cwperpage - 1)) && !ops->oobbuf)
				sps_flags |= SPS_IOVEC_FLAG_INT;

			err = sps_transfer_one(data_pipe_handle,
					args->data_dma_addr_curr,
					sectordatasize, NULL,
					sps_flags);
			if (err)
				goto out;
			args->data_dma_addr_curr += sectordatasize;
		}
		if (ops->oobbuf && (curr_cw == (args->cwperpage - 1))) {
			sectoroobsize = args->cwperpage << 2;
			if (sectoroobsize > args->oob_len_data)
				sectoroobsize = args->oob_len_data;
			if (!args->read)
				sps_flags |= SPS_IOVEC_FLAG_EOT;
			sps_flags |= SPS_IOVEC_FLAG_INT;

			err = sps_transfer_one(data_pipe_handle,
					args->oob_dma_addr_curr,
					sectoroobsize, NULL,
					sps_flags);
			if (err)
				goto out;
			args->oob_dma_addr_curr += sectoroobsize;
			args->oob_len_data -= sectoroobsize;
		}
	}
out:
	return err;
}
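/*
 * For illustration of the 516/512 arithmetic above: on a 2K page with
 * 4 codewords (cwperpage = 4), MTD_OPS_AUTO_OOB reads pull 516 bytes of
 * user data from each of the first three codewords and
 * 512 - (3 << 2) = 500 bytes from the last one, giving
 * 3 * 516 + 500 = 2048 bytes of main data; the remaining
 * cwperpage << 2 = 16 bytes of the last codeword are the "auto" OOB area.
 */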
  1145. /*
  1146. * Function that gets called from upper layers such as MTD/YAFFS2 to read a
  1147. * page with main or/and spare data.
  1148. */
  1149. static int msm_nand_read_oob(struct mtd_info *mtd, loff_t from,
  1150. struct mtd_oob_ops *ops)
  1151. {
  1152. struct msm_nand_info *info = mtd->priv;
  1153. struct msm_nand_chip *chip = &info->nand_chip;
  1154. uint32_t cwperpage = (mtd->writesize >> 9);
  1155. int err, pageerr = 0, rawerr = 0;
  1156. uint32_t n = 0, pages_read = 0;
  1157. uint32_t ecc_errors = 0, total_ecc_errors = 0;
  1158. struct msm_nand_rw_params rw_params;
  1159. struct msm_nand_rw_reg_data data;
  1160. struct msm_nand_sps_cmd *cmd, *curr_cmd;
  1161. struct sps_iovec *iovec;
  1162. /*
  1163. * The following 6 commands will be sent only once for the first
  1164. * codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1,
  1165. * dev0_ecc_cfg, ebi2_ecc_buf_cfg. The following 6 commands will
  1166. * be sent for every CW - flash, read_location_0, read_location_1,
  1167. * exec, flash_status and buffer_status.
  1168. */
  1169. uint32_t total_cnt = (6 * cwperpage) + 6;
  1170. struct {
  1171. struct sps_transfer xfer;
  1172. struct sps_iovec cmd_iovec[total_cnt];
  1173. struct msm_nand_sps_cmd cmd[total_cnt];
  1174. struct {
  1175. uint32_t flash_status;
  1176. uint32_t buffer_status;
  1177. } result[cwperpage];
  1178. } *dma_buffer;
  1179. memset(&rw_params, 0, sizeof(struct msm_nand_rw_params));
  1180. err = msm_nand_validate_mtd_params(mtd, true, from, ops, &rw_params);
  1181. if (err)
  1182. goto validate_mtd_params_failed;
  1183. wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
  1184. chip, sizeof(*dma_buffer))));
  1185. rw_params.oob_col = rw_params.start_sector * chip->cw_size;
  1186. if (chip->cfg1 & (1 << WIDE_FLASH))
  1187. rw_params.oob_col >>= 1;
  1188. memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
  1189. msm_nand_update_rw_reg_data(chip, ops, &rw_params, &data);
  1190. while (rw_params.page_count-- > 0) {
  1191. data.addr0 = (rw_params.page << 16) | rw_params.oob_col;
  1192. data.addr1 = (rw_params.page >> 16) & 0xff;
  1193. cmd = dma_buffer->cmd;
  1194. for (n = rw_params.start_sector; n < cwperpage; n++) {
  1195. dma_buffer->result[n].flash_status = 0xeeeeeeee;
  1196. dma_buffer->result[n].buffer_status = 0xeeeeeeee;
			curr_cmd = cmd;
			msm_nand_prep_rw_cmd_desc(ops, &rw_params,
					&data, info, n, &curr_cmd);
			cmd = curr_cmd;
			msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info),
					READ, msm_virt_to_dma(chip,
					&dma_buffer->result[n].flash_status), 0);
			cmd++;
			msm_nand_prep_ce(cmd, MSM_NAND_BUFFER_STATUS(info),
					READ, msm_virt_to_dma(chip,
					&dma_buffer->result[n].buffer_status),
					((n == (cwperpage - 1)) ?
					(SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT) :
					0));
			cmd++;
		}
		BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
		dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
		dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
		dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
				&dma_buffer->cmd_iovec);
		iovec = dma_buffer->xfer.iovec;
		for (n = 0; n < dma_buffer->xfer.iovec_count; n++) {
			iovec->addr = msm_virt_to_dma(chip,
					&dma_buffer->cmd[n].ce);
			iovec->size = sizeof(struct sps_command_element);
			iovec->flags = dma_buffer->cmd[n].flags;
			iovec++;
		}
		mutex_lock(&info->bam_lock);
		/* Submit data descriptors */
		for (n = rw_params.start_sector; n < cwperpage; n++) {
			err = msm_nand_submit_rw_data_desc(ops,
					&rw_params, info, n);
			if (err) {
				pr_err("Failed to submit data descs %d\n", err);
				mutex_unlock(&info->bam_lock);
				goto free_dma;
			}
		}
		/* Submit command descriptors */
		err = sps_transfer(info->sps.cmd_pipe.handle,
				&dma_buffer->xfer);
		if (err) {
			pr_err("Failed to submit commands %d\n", err);
			mutex_unlock(&info->bam_lock);
			goto free_dma;
		}
		wait_for_completion_io(&info->sps.cmd_pipe.completion);
		wait_for_completion_io(&info->sps.data_prod.completion);
		mutex_unlock(&info->bam_lock);

		/* Check for flash status errors */
		pageerr = rawerr = 0;
		for (n = rw_params.start_sector; n < cwperpage; n++) {
			if (dma_buffer->result[n].flash_status & (FS_OP_ERR |
					FS_MPU_ERR)) {
				rawerr = -EIO;
				break;
			}
		}
		/* Check for ECC correction on an empty (erased) page */
		if (rawerr && ops->datbuf && ops->mode != MTD_OPS_RAW) {
			uint8_t *datbuf = ops->datbuf +
				pages_read * mtd->writesize;

			dma_sync_single_for_cpu(chip->dev,
				rw_params.data_dma_addr_curr - mtd->writesize,
				mtd->writesize, DMA_BIDIRECTIONAL);

			for (n = 0; n < mtd->writesize; n++) {
				/* TODO: check offset for 4bit BCHECC */
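				/*
				 * With the BCH engine enabled, an erased page
				 * does not read back as all 0xFF: bytes 3 and
				 * 175 of each 516-byte chunk are seen as 0x54
				 * on this controller, so mask them before the
				 * all-0xFF test below.
				 */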
				if ((n % 516 == 3 || n % 516 == 175)
						&& datbuf[n] == 0x54)
					datbuf[n] = 0xff;
				if (datbuf[n] != 0xff) {
					pageerr = rawerr;
					break;
				}
			}
			dma_sync_single_for_device(chip->dev,
				rw_params.data_dma_addr_curr - mtd->writesize,
				mtd->writesize, DMA_BIDIRECTIONAL);
		}
		if (rawerr && ops->oobbuf) {
			dma_sync_single_for_cpu(chip->dev,
				rw_params.oob_dma_addr_curr - (ops->ooblen -
				rw_params.oob_len_data),
				ops->ooblen - rw_params.oob_len_data,
				DMA_BIDIRECTIONAL);

			for (n = 0; n < ops->ooblen; n++) {
				if (ops->oobbuf[n] != 0xff) {
					pageerr = rawerr;
					break;
				}
			}
			dma_sync_single_for_device(chip->dev,
				rw_params.oob_dma_addr_curr - (ops->ooblen -
				rw_params.oob_len_data),
				ops->ooblen - rw_params.oob_len_data,
				DMA_BIDIRECTIONAL);
		}
		/* Check for uncorrectable errors */
		if (pageerr) {
			for (n = rw_params.start_sector; n < cwperpage; n++) {
				if (dma_buffer->result[n].buffer_status &
						BS_UNCORRECTABLE_BIT) {
					mtd->ecc_stats.failed++;
					pageerr = -EBADMSG;
					break;
				}
			}
		}
		/* Check for correctable errors */
		if (!rawerr) {
			for (n = rw_params.start_sector; n < cwperpage; n++) {
				ecc_errors =
					dma_buffer->result[n].buffer_status
					& BS_CORRECTABLE_ERR_MSK;
				if (ecc_errors) {
					total_ecc_errors += ecc_errors;
					mtd->ecc_stats.corrected += ecc_errors;
					/*
					 * For Micron devices it is observed
					 * that correctable errors of up to
					 * 3 bits are very common.
					 */
					if (ecc_errors > 3)
						pageerr = -EUCLEAN;
				}
			}
		}
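		/*
		 * -EUCLEAN is the MTD convention for "data recovered, but the
		 * block is degrading"; upper layers such as UBI treat it as a
		 * cue to scrub the block.
		 */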
		if (pageerr && (pageerr != -EUCLEAN || err == 0))
			err = pageerr;

		if (rawerr && !pageerr) {
			pr_debug("%llx %x %x empty page\n",
				(loff_t)rw_params.page * mtd->writesize,
				ops->len, ops->ooblen);
		} else {
			for (n = rw_params.start_sector; n < cwperpage; n++)
				pr_debug("cw %d: flash_sts %x buffr_sts %x\n",
					n, dma_buffer->result[n].flash_status,
					dma_buffer->result[n].buffer_status);
		}
		if (err && err != -EUCLEAN && err != -EBADMSG)
			goto free_dma;
		pages_read++;
		rw_params.page++;
	}
free_dma:
	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
	if (ops->oobbuf)
		dma_unmap_page(chip->dev, rw_params.oob_dma_addr,
				ops->ooblen, DMA_FROM_DEVICE);
	if (ops->datbuf)
		dma_unmap_page(chip->dev, rw_params.data_dma_addr,
				ops->len, DMA_BIDIRECTIONAL);
validate_mtd_params_failed:
	if (ops->mode != MTD_OPS_RAW)
		ops->retlen = mtd->writesize * pages_read;
	else
		ops->retlen = (mtd->writesize + mtd->oobsize) * pages_read;
	ops->oobretlen = ops->ooblen - rw_params.oob_len_data;
	if (err)
		pr_err("0x%llx datalen 0x%x ooblen %x err %d corrected %d\n",
			from, ops->datbuf ? ops->len : 0, ops->ooblen, err,
			total_ecc_errors);
	pr_debug("ret %d, retlen %d oobretlen %d\n",
			err, ops->retlen, ops->oobretlen);
	pr_debug("========================================================\n");
	return err;
}

/*
 * Function that gets called from upper layers such as MTD/YAFFS2 to read a
 * page with only main data.
 */
static int msm_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
			 size_t *retlen, u_char *buf)
{
	int ret;
	struct mtd_oob_ops ops;

	ops.mode = MTD_OPS_AUTO_OOB;
	ops.len = len;
	ops.retlen = 0;
	ops.ooblen = 0;
	ops.datbuf = buf;
	ops.oobbuf = NULL;
	ret = msm_nand_read_oob(mtd, from, &ops);
	*retlen = ops.retlen;
	return ret;
}

/*
 * Function that gets called from upper layers such as MTD/YAFFS2 to write a
 * page with both main and spare data.
 */
static int msm_nand_write_oob(struct mtd_info *mtd, loff_t to,
			      struct mtd_oob_ops *ops)
{
	struct msm_nand_info *info = mtd->priv;
	struct msm_nand_chip *chip = &info->nand_chip;
	uint32_t cwperpage = (mtd->writesize >> 9);
	uint32_t n, flash_sts, pages_written = 0;
	int err = 0;
	struct msm_nand_rw_params rw_params;
	struct msm_nand_rw_reg_data data;
	struct msm_nand_sps_cmd *cmd, *curr_cmd;
	struct sps_iovec *iovec;
	/*
	 * The following 7 commands will be sent only once:
	 * For the first codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1,
	 * dev0_ecc_cfg, ebi2_ecc_buf_cfg.
	 * For the last codeword (CW) - read_status(write).
	 *
	 * The following 4 commands will be sent for every CW:
	 * flash, exec, flash_status (read), flash_status (write).
	 */
	uint32_t total_cnt = (4 * cwperpage) + 7;
	struct {
		struct sps_transfer xfer;
		struct sps_iovec cmd_iovec[total_cnt];
		struct msm_nand_sps_cmd cmd[total_cnt];
		struct {
			uint32_t flash_status[cwperpage];
		} data;
	} *dma_buffer;
	memset(&rw_params, 0, sizeof(struct msm_nand_rw_params));
	err = msm_nand_validate_mtd_params(mtd, false, to, ops, &rw_params);
	if (err)
		goto validate_mtd_params_failed;

	wait_event(chip->dma_wait_queue, (dma_buffer =
			msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer))));

	memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
	msm_nand_update_rw_reg_data(chip, ops, &rw_params, &data);

	while (rw_params.page_count-- > 0) {
		data.addr0 = (rw_params.page << 16);
		data.addr1 = (rw_params.page >> 16) & 0xff;
		cmd = dma_buffer->cmd;

		for (n = 0; n < cwperpage; n++) {
			dma_buffer->data.flash_status[n] = 0xeeeeeeee;
			curr_cmd = cmd;
			msm_nand_prep_rw_cmd_desc(ops, &rw_params,
					&data, info, n, &curr_cmd);
			cmd = curr_cmd;
			msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info),
					READ, msm_virt_to_dma(chip,
					&dma_buffer->data.flash_status[n]), 0);
			cmd++;
			msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info),
					WRITE, data.clrfstatus, 0);
			cmd++;
			if (n == (cwperpage - 1)) {
				msm_nand_prep_ce(cmd,
					MSM_NAND_READ_STATUS(info), WRITE,
					data.clrrstatus, SPS_IOVEC_FLAG_UNLOCK
					| SPS_IOVEC_FLAG_INT);
				cmd++;
			}
		}
		BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
		dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
		dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
		dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
				&dma_buffer->cmd_iovec);
		iovec = dma_buffer->xfer.iovec;
		for (n = 0; n < dma_buffer->xfer.iovec_count; n++) {
			iovec->addr = msm_virt_to_dma(chip,
					&dma_buffer->cmd[n].ce);
			iovec->size = sizeof(struct sps_command_element);
			iovec->flags = dma_buffer->cmd[n].flags;
			iovec++;
		}
		mutex_lock(&info->bam_lock);
		/* Submit data descriptors */
		for (n = 0; n < cwperpage; n++) {
			err = msm_nand_submit_rw_data_desc(ops,
					&rw_params, info, n);
			if (err) {
				pr_err("Failed to submit data descs %d\n", err);
				mutex_unlock(&info->bam_lock);
				goto free_dma;
			}
		}
		/* Submit command descriptors */
		err = sps_transfer(info->sps.cmd_pipe.handle,
				&dma_buffer->xfer);
		if (err) {
			pr_err("Failed to submit commands %d\n", err);
			mutex_unlock(&info->bam_lock);
			goto free_dma;
		}
		wait_for_completion_io(&info->sps.cmd_pipe.completion);
		wait_for_completion_io(&info->sps.data_cons.completion);
		mutex_unlock(&info->bam_lock);

		for (n = 0; n < cwperpage; n++)
			pr_debug("write pg %d: flash_status[%d] = %x\n",
				rw_params.page, n,
				dma_buffer->data.flash_status[n]);

		/* Check for flash status errors */
		for (n = 0; n < cwperpage; n++) {
			flash_sts = dma_buffer->data.flash_status[n];
			if (flash_sts & (FS_OP_ERR | FS_MPU_ERR)) {
				pr_err("MPU/OP err (0x%x) set\n", flash_sts);
				err = -EIO;
				goto free_dma;
			}
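			/*
			 * On the last codeword also check device-level status:
			 * FS_DEVICE_WP reads as 1 when the device is writable,
			 * so a clear bit means the part is write-protected.
			 */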
			if (n == (cwperpage - 1)) {
				if (!(flash_sts & FS_DEVICE_WP) ||
						(flash_sts & FS_DEVICE_STS_ERR)) {
					pr_err("Dev sts err 0x%x\n", flash_sts);
					err = -EIO;
					goto free_dma;
				}
			}
		}
		pages_written++;
		rw_params.page++;
	}
free_dma:
	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
	if (ops->oobbuf)
		dma_unmap_page(chip->dev, rw_params.oob_dma_addr,
				ops->ooblen, DMA_TO_DEVICE);
	if (ops->datbuf)
		dma_unmap_page(chip->dev, rw_params.data_dma_addr,
				ops->len, DMA_TO_DEVICE);
validate_mtd_params_failed:
	if (ops->mode != MTD_OPS_RAW)
		ops->retlen = mtd->writesize * pages_written;
	else
		ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written;
	ops->oobretlen = ops->ooblen - rw_params.oob_len_data;
	if (err)
		pr_err("to %llx datalen %x ooblen %x failed with err %d\n",
			to, ops->len, ops->ooblen, err);
	pr_debug("ret %d, retlen %d oobretlen %d\n",
			err, ops->retlen, ops->oobretlen);
	pr_debug("================================================\n");
	return err;
}

/*
 * Function that gets called from upper layers such as MTD/YAFFS2 to write a
 * page with only main data.
 */
static int msm_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
			  size_t *retlen, const u_char *buf)
{
	int ret;
	struct mtd_oob_ops ops;

	ops.mode = MTD_OPS_AUTO_OOB;
	ops.len = len;
	ops.retlen = 0;
	ops.ooblen = 0;
	ops.datbuf = (uint8_t *)buf;
	ops.oobbuf = NULL;
	ret = msm_nand_write_oob(mtd, to, &ops);
	*retlen = ops.retlen;
	return ret;
}

/*
 * Structure that contains NANDc register data for commands required
 * for the erase operation.
 */
struct msm_nand_erase_reg_data {
	struct msm_nand_common_cfgs cfg;
	uint32_t exec;
	uint32_t flash_status;
	uint32_t clrfstatus;
	uint32_t clrrstatus;
};

/*
 * Function that gets called from upper layers such as MTD/YAFFS2 to erase a
 * block within the NAND device.
 */
static int msm_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	int i, err = 0;
	struct msm_nand_info *info = mtd->priv;
	struct msm_nand_chip *chip = &info->nand_chip;
	uint32_t page = 0;
	struct msm_nand_sps_cmd *cmd, *curr_cmd;
	struct msm_nand_erase_reg_data data;
	struct sps_iovec *iovec;
	uint32_t total_cnt = 9;
	/*
	 * The following 9 commands are required to erase a block -
	 * flash, addr0, addr1, cfg0, cfg1, exec, flash_status(read),
	 * flash_status(write), read_status.
	 */
	struct {
		struct sps_transfer xfer;
		struct sps_iovec cmd_iovec[total_cnt];
		struct msm_nand_sps_cmd cmd[total_cnt];
		uint32_t flash_status;
	} *dma_buffer;

	if (mtd->writesize == PAGE_SIZE_2K)
		page = instr->addr >> 11;
	if (mtd->writesize == PAGE_SIZE_4K)
		page = instr->addr >> 12;
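	/* Page index = byte address / page size (2K -> >> 11, 4K -> >> 12). */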
	if (instr->addr & (mtd->erasesize - 1)) {
		pr_err("unsupported erase address, 0x%llx\n", instr->addr);
		err = -EINVAL;
		goto out;
	}
	if (instr->len != mtd->erasesize) {
		pr_err("unsupported erase len, %lld\n", instr->len);
		err = -EINVAL;
		goto out;
	}
	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
					chip, sizeof(*dma_buffer))));
	cmd = dma_buffer->cmd;
	memset(&data, 0, sizeof(struct msm_nand_erase_reg_data));
	data.cfg.cmd = MSM_NAND_CMD_BLOCK_ERASE;
	data.cfg.addr0 = page;
	data.cfg.addr1 = 0;
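	/*
	 * Block erase is not a per-codeword operation, so the CW_PER_PAGE
	 * field is cleared (i.e. a single codeword) for this command.
	 */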
	data.cfg.cfg0 = chip->cfg0 & (~(7 << CW_PER_PAGE));
	data.cfg.cfg1 = chip->cfg1;
	data.exec = 1;
	dma_buffer->flash_status = 0xeeeeeeee;
	data.clrfstatus = MSM_NAND_RESET_FLASH_STS;
	data.clrrstatus = MSM_NAND_RESET_READ_STS;

	curr_cmd = cmd;
	msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
	cmd = curr_cmd;
	msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE, data.exec,
			SPS_IOVEC_FLAG_NWD);
	cmd++;
	msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), READ,
			msm_virt_to_dma(chip, &dma_buffer->flash_status), 0);
	cmd++;
	msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), WRITE,
			data.clrfstatus, 0);
	cmd++;
	msm_nand_prep_ce(cmd, MSM_NAND_READ_STATUS(info), WRITE,
			data.clrrstatus,
			SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
	cmd++;
	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
	dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
	dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
	dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
			&dma_buffer->cmd_iovec);
	iovec = dma_buffer->xfer.iovec;
	for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
		iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
		iovec->size = sizeof(struct sps_command_element);
		iovec->flags = dma_buffer->cmd[i].flags;
		iovec++;
	}
	mutex_lock(&info->bam_lock);
	err = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
	if (err) {
		pr_err("Failed to submit commands %d\n", err);
		mutex_unlock(&info->bam_lock);
		goto free_dma;
	}
	wait_for_completion_io(&info->sps.cmd_pipe.completion);
	mutex_unlock(&info->bam_lock);

	/* Check for flash status errors */
	if (dma_buffer->flash_status & (FS_OP_ERR |
			FS_MPU_ERR | FS_DEVICE_STS_ERR)) {
		pr_err("MPU/OP/DEV err (0x%x) set\n", dma_buffer->flash_status);
		err = -EIO;
	}
	if (!(dma_buffer->flash_status & FS_DEVICE_WP)) {
		pr_err("Device is write protected\n");
		err = -EIO;
	}
	if (err) {
		pr_err("Erase failed, 0x%llx\n", instr->addr);
		instr->fail_addr = instr->addr;
		instr->state = MTD_ERASE_FAILED;
	} else {
		instr->state = MTD_ERASE_DONE;
		instr->fail_addr = 0xffffffff;
		mtd_erase_callback(instr);
	}
free_dma:
	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
out:
	return err;
}

/*
 * Structure that contains NANDc register data for commands required
 * for checking if a block is bad.
 */
struct msm_nand_blk_isbad_data {
	struct msm_nand_common_cfgs cfg;
	uint32_t ecc_bch_cfg;
	uint32_t exec;
	uint32_t read_offset;
};

/*
 * Function that gets called from upper layers such as MTD/YAFFS2 to check if
 * a block is bad. This is done by reading the first page within the block and
 * checking whether the bad block byte location contains 0xFF or not. If it
 * doesn't contain 0xFF, the block is considered bad.
 */
static int msm_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct msm_nand_info *info = mtd->priv;
	struct msm_nand_chip *chip = &info->nand_chip;
	int i, ret = 0, bad_block = 0;
	uint8_t *buf;
	uint32_t page = 0, rdata, cwperpage;
	struct msm_nand_sps_cmd *cmd, *curr_cmd;
	struct msm_nand_blk_isbad_data data;
	struct sps_iovec *iovec;
	uint32_t total_cnt = 9;
	/*
	 * The following 9 commands are required to check a bad block -
	 * flash, addr0, addr1, cfg0, cfg1, ecc_cfg, read_loc_0,
	 * exec, flash_status(read).
	 */
	struct {
		struct sps_transfer xfer;
		struct sps_iovec cmd_iovec[total_cnt];
		struct msm_nand_sps_cmd cmd[total_cnt];
		uint32_t flash_status;
	} *dma_buffer;

	if (mtd->writesize == PAGE_SIZE_2K)
		page = ofs >> 11;
	if (mtd->writesize == PAGE_SIZE_4K)
		page = ofs >> 12;
	cwperpage = (mtd->writesize >> 9);

	if (ofs > mtd->size) {
		pr_err("Invalid offset 0x%llx\n", ofs);
		bad_block = -EINVAL;
		goto out;
	}
	if (ofs & (mtd->erasesize - 1)) {
		pr_err("unsupported block address, 0x%x\n", (uint32_t)ofs);
		bad_block = -EINVAL;
		goto out;
	}
	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
					chip, sizeof(*dma_buffer) + 4)));
	buf = (uint8_t *)dma_buffer + sizeof(*dma_buffer);

	cmd = dma_buffer->cmd;
	memset(&data, 0, sizeof(struct msm_nand_blk_isbad_data));
	data.cfg.cmd = MSM_NAND_CMD_PAGE_READ_ALL;
	data.cfg.cfg0 = chip->cfg0_raw & ~(7U << CW_PER_PAGE);
	data.cfg.cfg1 = chip->cfg1_raw;
	if (chip->cfg1 & (1 << WIDE_FLASH))
		data.cfg.addr0 = (page << 16) |
			((chip->cw_size * (cwperpage - 1)) >> 1);
	else
		data.cfg.addr0 = (page << 16) |
			(chip->cw_size * (cwperpage - 1));
	data.cfg.addr1 = (page >> 16) & 0xff;
	data.ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
	data.exec = 1;
	data.read_offset = (mtd->writesize - (chip->cw_size * (cwperpage - 1)));
	dma_buffer->flash_status = 0xeeeeeeee;

	curr_cmd = cmd;
	msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
	cmd = curr_cmd;
	msm_nand_prep_ce(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
			data.ecc_bch_cfg, 0);
	cmd++;
	rdata = (data.read_offset << 0) | (4 << 16) | (1 << 31);
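	/*
	 * READ_LOCATION_0, as packed here: the codeword offset in the low
	 * half, the byte count to copy out (4, the bad-block marker bytes)
	 * at bit 16, and bit 31 flagging this as the last read location.
	 */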
	msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE, rdata, 0);
	cmd++;
	msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
			data.exec, SPS_IOVEC_FLAG_NWD);
	cmd++;
	msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), READ,
			msm_virt_to_dma(chip, &dma_buffer->flash_status),
			SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_UNLOCK);
	cmd++;
	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
	dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
	dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
	dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
			&dma_buffer->cmd_iovec);
	iovec = dma_buffer->xfer.iovec;
	for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
		iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
		iovec->size = sizeof(struct sps_command_element);
		iovec->flags = dma_buffer->cmd[i].flags;
		iovec++;
	}
	mutex_lock(&info->bam_lock);
	/* Submit data descriptor */
	ret = sps_transfer_one(info->sps.data_prod.handle,
			msm_virt_to_dma(chip, buf),
			4, NULL, SPS_IOVEC_FLAG_INT);
	if (ret) {
		pr_err("Failed to submit data desc %d\n", ret);
		mutex_unlock(&info->bam_lock);
		goto free_dma;
	}
	/* Submit command descriptor */
	ret = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
	if (ret) {
		pr_err("Failed to submit commands %d\n", ret);
		mutex_unlock(&info->bam_lock);
		goto free_dma;
	}
	wait_for_completion_io(&info->sps.cmd_pipe.completion);
	wait_for_completion_io(&info->sps.data_prod.completion);
	mutex_unlock(&info->bam_lock);

	/* Check for flash status errors */
	if (dma_buffer->flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
		pr_err("MPU/OP err set: %x\n", dma_buffer->flash_status);
		bad_block = -EIO;
		goto free_dma;
	}
	/* Check the bad block marker byte(s) */
	if (chip->cfg1 & (1 << WIDE_FLASH)) {
		if (buf[0] != 0xFF || buf[1] != 0xFF)
			bad_block = 1;
	} else {
		if (buf[0] != 0xFF)
			bad_block = 1;
	}
free_dma:
	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 4);
out:
	return ret ? ret : bad_block;
}

/*
 * Function that gets called from upper layers such as MTD/YAFFS2 to mark a
 * block as bad. This is done by writing the first page within the block with
 * 0s, which also sets the bad block byte location to 0.
 */
static int msm_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_oob_ops ops;
	int ret;
	uint8_t *buf;
	size_t len;

	if (ofs > mtd->size) {
		pr_err("Invalid offset 0x%llx\n", ofs);
		ret = -EINVAL;
		goto out;
	}
	if (ofs & (mtd->erasesize - 1)) {
		pr_err("unsupported block address, 0x%x\n", (uint32_t)ofs);
		ret = -EINVAL;
		goto out;
	}
	len = mtd->writesize + mtd->oobsize;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		pr_err("unable to allocate memory for 0x%zx size\n", len);
		ret = -ENOMEM;
		goto out;
	}
	ops.mode = MTD_OPS_RAW;
	ops.len = len;
	ops.retlen = 0;
	ops.ooblen = 0;
	ops.datbuf = buf;
	ops.oobbuf = NULL;
	ret = msm_nand_write_oob(mtd, ofs, &ops);
	kfree(buf);
out:
	return ret;
}

/*
 * Function that scans for the attached NAND device. This fills out all
 * the uninitialized function pointers with the defaults. The flash ID is
 * read and the mtd/chip structures are filled with the appropriate values.
 */
int msm_nand_scan(struct mtd_info *mtd)
{
	struct msm_nand_info *info = mtd->priv;
	struct msm_nand_chip *chip = &info->nand_chip;
	struct flash_identification *supported_flash = &info->flash_dev;
	int flash_id = 0, err = 0;
	uint32_t i, mtd_writesize;
	uint8_t dev_found = 0, wide_bus;
	uint32_t manid, devid, devcfg;
	uint32_t bad_block_byte;
	struct nand_flash_dev *flashdev = NULL;
	struct nand_manufacturers *flashman = NULL;

	/* Probe the Flash device for ONFI compliance */
	if (!msm_nand_flash_onfi_probe(info)) {
		dev_found = 1;
	} else {
		err = msm_nand_flash_read_id(info, 0, &flash_id);
		if (err < 0) {
			pr_err("Failed to read Flash ID\n");
			err = -EINVAL;
			goto out;
		}
		manid = flash_id & 0xFF;
		devid = (flash_id >> 8) & 0xFF;
		devcfg = (flash_id >> 24) & 0xFF;

		for (i = 0; !flashman && nand_manuf_ids[i].id; ++i)
			if (nand_manuf_ids[i].id == manid)
				flashman = &nand_manuf_ids[i];
		for (i = 0; !flashdev && nand_flash_ids[i].id; ++i)
			if (nand_flash_ids[i].id == devid)
				flashdev = &nand_flash_ids[i];
		if (!flashdev || !flashman) {
			pr_err("unknown nand flashid=%x manuf=%x devid=%x\n",
				flash_id, manid, devid);
			err = -ENOENT;
			goto out;
		}
		dev_found = 1;
		if (!flashdev->pagesize) {
			supported_flash->widebus = devcfg & (1 << 6) ? 1 : 0;
			supported_flash->pagesize = 1024 << (devcfg & 0x3);
			supported_flash->blksize = (64 * 1024) <<
					((devcfg >> 4) & 0x3);
			supported_flash->oobsize = (8 << ((devcfg >> 2) & 1)) *
					(supported_flash->pagesize >> 9);
		} else {
			supported_flash->widebus = flashdev->options &
					NAND_BUSWIDTH_16 ? 1 : 0;
			supported_flash->pagesize = flashdev->pagesize;
			supported_flash->blksize = flashdev->erasesize;
			supported_flash->oobsize = flashdev->pagesize >> 5;
		}
		supported_flash->flash_id = flash_id;
		supported_flash->density = flashdev->chipsize << 20;
	}

	if (dev_found) {
		wide_bus = supported_flash->widebus;
		mtd->size = supported_flash->density;
		mtd->writesize = supported_flash->pagesize;
		mtd->oobsize = supported_flash->oobsize;
		mtd->erasesize = supported_flash->blksize;
		mtd_writesize = mtd->writesize;

		/* Check whether the NAND device supports 8-bit ECC */
		if (supported_flash->ecc_correctability >= 8)
			chip->bch_caps = MSM_NAND_CAP_8_BIT_BCH;
		else
			chip->bch_caps = MSM_NAND_CAP_4_BIT_BCH;

		pr_info("NAND Id: 0x%x Buswidth: %dBits Density: %lld MByte\n",
			supported_flash->flash_id, (wide_bus) ? 16 : 8,
			(mtd->size >> 20));
		pr_info("pagesize: %d Erasesize: %d oobsize: %d (in Bytes)\n",
			mtd->writesize, mtd->erasesize, mtd->oobsize);
		pr_info("BCH ECC: %d Bit\n",
			(chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH ? 8 : 4));
	}

	chip->cw_size = (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) ? 532 : 528;
	chip->cfg0 = (((mtd_writesize >> 9) - 1) << CW_PER_PAGE)
		| (516 << UD_SIZE_BYTES)
		| (0 << DISABLE_STATUS_AFTER_WRITE)
		| (5 << NUM_ADDR_CYCLES);
	bad_block_byte = (mtd_writesize - (chip->cw_size * (
			(mtd_writesize >> 9) - 1)) + 1);
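	/*
	 * cw_size is 516 payload bytes plus per-codeword parity and spare:
	 * 528 with 4-bit BCH, 532 with 8-bit. bad_block_byte is the 1-based
	 * offset of the factory bad-block marker within the last codeword's
	 * slice of the page.
	 */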
	chip->cfg1 = (7 << NAND_RECOVERY_CYCLES)
		| (0 << CS_ACTIVE_BSY)
		| (bad_block_byte << BAD_BLOCK_BYTE_NUM)
		| (0 << BAD_BLOCK_IN_SPARE_AREA)
		| (2 << WR_RD_BSY_GAP)
		| ((wide_bus ? 1 : 0) << WIDE_FLASH)
		| (1 << ENABLE_BCH_ECC);
	chip->cfg0_raw = (((mtd_writesize >> 9) - 1) << CW_PER_PAGE)
		| (5 << NUM_ADDR_CYCLES)
		| (0 << SPARE_SIZE_BYTES)
		| (chip->cw_size << UD_SIZE_BYTES);
	chip->cfg1_raw = (7 << NAND_RECOVERY_CYCLES)
		| (0 << CS_ACTIVE_BSY)
		| (17 << BAD_BLOCK_BYTE_NUM)
		| (1 << BAD_BLOCK_IN_SPARE_AREA)
		| (2 << WR_RD_BSY_GAP)
		| ((wide_bus ? 1 : 0) << WIDE_FLASH)
		| (1 << DEV0_CFG1_ECC_DISABLE);
	chip->ecc_bch_cfg = (0 << ECC_CFG_ECC_DISABLE)
		| (0 << ECC_SW_RESET)
		| (516 << ECC_NUM_DATA_BYTES)
		| (1 << ECC_FORCE_CLK_OPEN);

	if (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) {
		chip->cfg0 |= (wide_bus ? 0 << SPARE_SIZE_BYTES :
				2 << SPARE_SIZE_BYTES);
		chip->ecc_bch_cfg |= (1 << ECC_MODE)
			| ((wide_bus) ? (14 << ECC_PARITY_SIZE_BYTES) :
			(13 << ECC_PARITY_SIZE_BYTES));
	} else {
		chip->cfg0 |= (wide_bus ? 2 << SPARE_SIZE_BYTES :
				4 << SPARE_SIZE_BYTES);
		chip->ecc_bch_cfg |= (0 << ECC_MODE)
			| ((wide_bus) ? (8 << ECC_PARITY_SIZE_BYTES) :
			(7 << ECC_PARITY_SIZE_BYTES));
	}
	/*
	 * For 4-bit BCH ECC (the default), parity bytes = 7 (x8) or 8 (x16 I/O).
	 * For 8-bit BCH ECC, parity bytes = 13 (x8) or 14 (x16 I/O).
	 */
	chip->ecc_parity_bytes = (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) ?
			(wide_bus ? 14 : 13) : (wide_bus ? 8 : 7);
	chip->ecc_buf_cfg = 0x203; /* Bytes covered by ECC: 516 (0x203 = 515 = 516 - 1) */
	pr_info("CFG0: 0x%08x,           CFG1: 0x%08x\n"
		"            RAWCFG0: 0x%08x,        RAWCFG1: 0x%08x\n"
		"          ECCBUFCFG: 0x%08x,      ECCBCHCFG: 0x%08x\n"
		"     BAD BLOCK BYTE: 0x%08x\n", chip->cfg0, chip->cfg1,
		chip->cfg0_raw, chip->cfg1_raw, chip->ecc_buf_cfg,
		chip->ecc_bch_cfg, bad_block_byte);
	if (mtd->oobsize == 64) {
		mtd->oobavail = 16;
	} else if ((mtd->oobsize == 128) || (mtd->oobsize == 224)) {
		mtd->oobavail = 32;
	} else {
		pr_err("Unsupported NAND oobsize: 0x%x\n", mtd->oobsize);
		err = -ENODEV;
		goto out;
	}

	/* Fill in remaining MTD driver data */
	mtd->type = MTD_NANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;
	mtd->_erase = msm_nand_erase;
	mtd->_block_isbad = msm_nand_block_isbad;
	mtd->_block_markbad = msm_nand_block_markbad;
	mtd->_read = msm_nand_read;
	mtd->_write = msm_nand_write;
	mtd->_read_oob = msm_nand_read_oob;
	mtd->_write_oob = msm_nand_write_oob;
	mtd->owner = THIS_MODULE;
out:
	return err;
}

#define BAM_APPS_PIPE_LOCK_GRP0 0
#define BAM_APPS_PIPE_LOCK_GRP1 1
/*
 * This function allocates, configures, and connects an end point, registers
 * event notification for it, and allocates DMA memory for the descriptor
 * FIFO of the pipe.
 */
static int msm_nand_init_endpoint(struct msm_nand_info *info,
				  struct msm_nand_sps_endpt *end_point,
				  uint32_t pipe_index)
{
	int rc = 0;
	struct sps_pipe *pipe_handle;
	struct sps_connect *sps_config = &end_point->config;
	struct sps_register_event *sps_event = &end_point->event;

	pipe_handle = sps_alloc_endpoint();
	if (!pipe_handle) {
		pr_err("sps_alloc_endpoint() failed\n");
		rc = -ENOMEM;
		goto out;
	}
	rc = sps_get_config(pipe_handle, sps_config);
	if (rc) {
		pr_err("sps_get_config() failed %d\n", rc);
		goto free_endpoint;
	}
	if (pipe_index == SPS_DATA_PROD_PIPE_INDEX) {
		/* READ CASE: source - BAM; destination - system memory */
		sps_config->source = info->sps.bam_handle;
		sps_config->destination = SPS_DEV_HANDLE_MEM;
		sps_config->mode = SPS_MODE_SRC;
		sps_config->src_pipe_index = pipe_index;
	} else if (pipe_index == SPS_DATA_CONS_PIPE_INDEX ||
			pipe_index == SPS_CMD_CONS_PIPE_INDEX) {
		/* WRITE CASE: source - system memory; destination - BAM */
		sps_config->source = SPS_DEV_HANDLE_MEM;
		sps_config->destination = info->sps.bam_handle;
		sps_config->mode = SPS_MODE_DEST;
		sps_config->dest_pipe_index = pipe_index;
	}
	sps_config->options = SPS_O_AUTO_ENABLE | SPS_O_DESC_DONE;
	if (pipe_index == SPS_DATA_PROD_PIPE_INDEX ||
			pipe_index == SPS_DATA_CONS_PIPE_INDEX)
		sps_config->lock_group = BAM_APPS_PIPE_LOCK_GRP0;
	else if (pipe_index == SPS_CMD_CONS_PIPE_INDEX)
		sps_config->lock_group = BAM_APPS_PIPE_LOCK_GRP1;
	/*
	 * The descriptor FIFO is cyclic. If SPS_MAX_DESC_NUM descriptors are
	 * allowed to be submitted before we get any ack for any of them,
	 * the descriptor FIFO size should be: (SPS_MAX_DESC_NUM + 1) *
	 * sizeof(struct sps_iovec).
	 */
	sps_config->desc.size = (SPS_MAX_DESC_NUM + 1) *
			sizeof(struct sps_iovec);
	sps_config->desc.base = dmam_alloc_coherent(info->nand_chip.dev,
			sps_config->desc.size,
			&sps_config->desc.phys_base,
			GFP_KERNEL);
	if (!sps_config->desc.base) {
		pr_err("dmam_alloc_coherent() failed for size %x\n",
				sps_config->desc.size);
		rc = -ENOMEM;
		goto free_endpoint;
	}
	memset(sps_config->desc.base, 0x00, sps_config->desc.size);

	rc = sps_connect(pipe_handle, sps_config);
	if (rc) {
		pr_err("sps_connect() failed %d\n", rc);
		goto free_endpoint;
	}
	init_completion(&end_point->completion);
	sps_event->mode = SPS_TRIGGER_WAIT;
	sps_event->options = SPS_O_DESC_DONE;
	sps_event->xfer_done = &end_point->completion;
	sps_event->user = (void *)info;

	rc = sps_register_event(pipe_handle, sps_event);
	if (rc) {
		pr_err("sps_register_event() failed %d\n", rc);
		goto sps_disconnect;
	}
	end_point->handle = pipe_handle;
	pr_debug("pipe handle 0x%x for pipe %d\n", (uint32_t)pipe_handle,
			pipe_index);
	goto out;
sps_disconnect:
	sps_disconnect(pipe_handle);
free_endpoint:
	sps_free_endpoint(pipe_handle);
out:
	return rc;
}

/* This function disconnects and frees an end point. */
static void msm_nand_deinit_endpoint(struct msm_nand_info *info,
				     struct msm_nand_sps_endpt *end_point)
{
	sps_disconnect(end_point->handle);
	sps_free_endpoint(end_point->handle);
}

/*
 * This function registers the BAM device and initializes its end points for
 * the following pipes -
 * system consumer pipe for data (pipe#0),
 * system producer pipe for data (pipe#1),
 * system consumer pipe for commands (pipe#2).
 */
static int msm_nand_bam_init(struct msm_nand_info *nand_info)
{
	struct sps_bam_props bam = {0};
	int rc = 0;

	bam.phys_addr = nand_info->bam_phys;
	bam.virt_addr = nand_info->bam_base;
	bam.irq = nand_info->bam_irq;
	/*
	 * The NAND device is accessible from both the Apps and Modem
	 * processors, so the NANDc and BAM are shared between them. The BAM
	 * must be enabled and instantiated only once, during boot, by
	 * TrustZone before Modem/Apps is brought out of reset.
	 *
	 * This is indicated to the SPS driver on Apps by the flag
	 * SPS_BAM_MGR_DEVICE_REMOTE. The global initializations done by
	 * TrustZone are: execution environment, pipe assignment to
	 * Apps/Modem, pipe super groups, and the descriptor summing
	 * threshold.
	 *
	 * The NANDc BAM supports two execution environments, Modem and
	 * Apps, hence the flag SPS_BAM_MGR_MULTI_EE.
	 */
	bam.manage = SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE;
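	/*
	 * If another entity has already registered this BAM, sps_phy2h()
	 * returns the existing handle and registration can be skipped.
	 */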
	rc = sps_phy2h(bam.phys_addr, &nand_info->sps.bam_handle);
	if (!rc)
		goto init_sps_ep;
	rc = sps_register_bam_device(&bam, &nand_info->sps.bam_handle);
	if (rc) {
		pr_err("%s: sps_register_bam_device() failed with %d\n",
			__func__, rc);
		goto out;
	}
	pr_info("%s: BAM device registered: bam_handle 0x%x\n",
		__func__, nand_info->sps.bam_handle);
init_sps_ep:
	rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_prod,
			SPS_DATA_PROD_PIPE_INDEX);
	if (rc)
		goto out;
	rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_cons,
			SPS_DATA_CONS_PIPE_INDEX);
	if (rc)
		goto deinit_data_prod;
	rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.cmd_pipe,
			SPS_CMD_CONS_PIPE_INDEX);
	if (rc)
		goto deinit_data_cons;
	goto out;
deinit_data_cons:
	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons);
deinit_data_prod:
	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod);
out:
	return rc;
}

/*
 * This function disconnects and frees the end points for all the pipes.
 * Since the BAM is a shared resource, it is not deregistered; its handle
 * might still be in use, e.g. by the LCDC driver.
 */
static void msm_nand_bam_free(struct msm_nand_info *nand_info)
{
	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod);
	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons);
	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.cmd_pipe);
}

/* This function enables DMA support for the NANDc in BAM mode. */
static int msm_nand_enable_dma(struct msm_nand_info *info)
{
	struct msm_nand_sps_cmd *sps_cmd;
	struct msm_nand_chip *chip = &info->nand_chip;
	int ret;

	wait_event(chip->dma_wait_queue,
		(sps_cmd = msm_nand_get_dma_buffer(chip, sizeof(*sps_cmd))));
	msm_nand_prep_ce(sps_cmd, MSM_NAND_CTRL(info), WRITE,
			(1 << BAM_MODE_EN), SPS_IOVEC_FLAG_INT);
	ret = sps_transfer_one(info->sps.cmd_pipe.handle,
			msm_virt_to_dma(chip, &sps_cmd->ce),
			sizeof(struct sps_command_element), NULL,
			sps_cmd->flags);
	if (ret) {
		pr_err("Failed to submit command: %d\n", ret);
		goto out;
	}
	wait_for_completion_io(&info->sps.cmd_pipe.completion);
out:
	msm_nand_release_dma_buffer(chip, sps_cmd, sizeof(*sps_cmd));
	return ret;
}

#ifdef CONFIG_MSM_SMD
static int msm_nand_parse_smem_ptable(int *nr_parts)
{
	uint32_t i, j;
	uint32_t len = FLASH_PTABLE_HDR_LEN;
	struct flash_partition_entry *pentry;
	char *delimiter = ":";

	pr_info("Parsing partition table info from SMEM\n");
	/* Read only the header portion of ptable */
	ptable = *(struct flash_partition_table *)
			(smem_get_entry(SMEM_AARM_PARTITION_TABLE, &len));
	/* Verify ptable magic */
	if (ptable.magic1 != FLASH_PART_MAGIC1 ||
			ptable.magic2 != FLASH_PART_MAGIC2) {
		pr_err("Partition table magic verification failed\n");
		goto out;
	}
	/* Ensure that # of partitions is less than the max we have allocated */
	if (ptable.numparts > FLASH_PTABLE_MAX_PARTS_V4) {
		pr_err("Partition numbers exceed the max limit\n");
		goto out;
	}
	/* Find out length of partition data based on table version. */
	if (ptable.version <= FLASH_PTABLE_V3) {
		len = FLASH_PTABLE_HDR_LEN + FLASH_PTABLE_MAX_PARTS_V3 *
				sizeof(struct flash_partition_entry);
	} else if (ptable.version == FLASH_PTABLE_V4) {
		len = FLASH_PTABLE_HDR_LEN + FLASH_PTABLE_MAX_PARTS_V4 *
				sizeof(struct flash_partition_entry);
	} else {
		pr_err("Unknown ptable version (%d)", ptable.version);
		goto out;
	}
	*nr_parts = ptable.numparts;
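	/* Re-read the table with the full length now that the version is known. */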
	ptable = *(struct flash_partition_table *)
			(smem_get_entry(SMEM_AARM_PARTITION_TABLE, &len));
	for (i = 0; i < ptable.numparts; i++) {
		pentry = &ptable.part_entry[i];
		if (pentry->name[0] == '\0')
			continue;
		/* Convert name to lower case and discard the initial chars */
		mtd_part[i].name = pentry->name;
		for (j = 0; j < strlen(mtd_part[i].name); j++)
			*(mtd_part[i].name + j) =
				tolower(*(mtd_part[i].name + j));
		strsep(&(mtd_part[i].name), delimiter);
		mtd_part[i].offset = pentry->offset;
		mtd_part[i].mask_flags = pentry->attr;
		mtd_part[i].size = pentry->length;
		pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n",
			i, pentry->name, pentry->offset, pentry->length,
			pentry->attr);
	}
	pr_info("SMEM partition table found: ver: %d len: %d\n",
		ptable.version, ptable.numparts);
	return 0;
out:
	return -EINVAL;
}
#else
static int msm_nand_parse_smem_ptable(int *nr_parts)
{
	return -ENODEV;
}
#endif

/*
 * This function gets called when the device named msm-nand is added to the
 * device tree (.dts) file with all its resources, such as the physical
 * addresses for the NANDc and BAM, and the BAM IRQ.
 *
 * It also expects the NAND flash partition information to be passed in the
 * .dts file so that it can parse the partitions by calling the MTD function
 * mtd_device_parse_register().
 */
static int __devinit msm_nand_probe(struct platform_device *pdev)
{
	struct msm_nand_info *info;
	struct resource *res;
	int i, err, nr_parts;

	/*
	 * The partition information can also be passed from the kernel
	 * command line. Also, the MTD core layer supports adding the whole
	 * device as one MTD device when no partition information is
	 * available at all.
	 */
	info = devm_kzalloc(&pdev->dev, sizeof(struct msm_nand_info),
			GFP_KERNEL);
	if (!info) {
		pr_err("Unable to allocate memory for msm_nand_info\n");
		err = -ENOMEM;
		goto out;
	}
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_phys");
	if (!res || !res->start) {
		pr_err("NAND phys address range is not provided\n");
		err = -ENODEV;
		goto out;
	}
	info->nand_phys = res->start;
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bam_phys");
	if (!res || !res->start) {
		pr_err("BAM phys address range is not provided\n");
		err = -ENODEV;
		goto out;
	}
	info->bam_phys = res->start;
	info->bam_base = devm_ioremap(&pdev->dev, res->start,
			resource_size(res));
	if (!info->bam_base) {
		pr_err("BAM ioremap() failed for addr 0x%x size 0x%x\n",
			res->start, resource_size(res));
		err = -ENOMEM;
		goto out;
	}
	info->bam_irq = platform_get_irq_byname(pdev, "bam_irq");
	if (info->bam_irq < 0) {
		pr_err("BAM IRQ is not provided\n");
		err = -ENODEV;
		goto out;
	}
	info->mtd.name = dev_name(&pdev->dev);
	info->mtd.priv = info;
	info->mtd.owner = THIS_MODULE;
	info->nand_chip.dev = &pdev->dev;
	init_waitqueue_head(&info->nand_chip.dma_wait_queue);
	mutex_init(&info->bam_lock);

	info->nand_chip.dma_virt_addr =
		dmam_alloc_coherent(&pdev->dev, MSM_NAND_DMA_BUFFER_SIZE,
			&info->nand_chip.dma_phys_addr, GFP_KERNEL);
	if (!info->nand_chip.dma_virt_addr) {
		pr_err("No memory for DMA buffer size %x\n",
			MSM_NAND_DMA_BUFFER_SIZE);
		err = -ENOMEM;
		goto out;
	}
	err = msm_nand_bam_init(info);
	if (err) {
		pr_err("msm_nand_bam_init() failed %d\n", err);
		goto out;
	}
	err = msm_nand_enable_dma(info);
	if (err) {
		pr_err("Failed to enable DMA in NANDc\n");
		goto free_bam;
	}
	err = msm_nand_parse_smem_ptable(&nr_parts);
	if (err < 0) {
		pr_err("Failed to parse partition table in SMEM\n");
		goto free_bam;
	}
	if (msm_nand_scan(&info->mtd)) {
		pr_err("No nand device found\n");
		err = -ENXIO;
		goto free_bam;
	}
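	/*
	 * SMEM stores partition offsets and sizes in units of erase blocks;
	 * convert them to bytes before registering with the MTD core.
	 */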
	for (i = 0; i < nr_parts; i++) {
		mtd_part[i].offset *= info->mtd.erasesize;
		mtd_part[i].size *= info->mtd.erasesize;
	}
	err = mtd_device_parse_register(&info->mtd, NULL, NULL,
			&mtd_part[0], nr_parts);
	if (err < 0) {
		pr_err("Unable to register MTD partitions %d\n", err);
		goto free_bam;
	}
	dev_set_drvdata(&pdev->dev, info);

	pr_info("NANDc phys addr 0x%lx, BAM phys addr 0x%lx, BAM IRQ %d\n",
		info->nand_phys, info->bam_phys, info->bam_irq);
	pr_info("Allocated DMA buffer at virt_addr 0x%p, phys_addr 0x%x\n",
		info->nand_chip.dma_virt_addr, info->nand_chip.dma_phys_addr);
	goto out;
free_bam:
	msm_nand_bam_free(info);
out:
	return err;
}

/*
 * Remove function that gets called when the driver/device msm-nand
 * is removed.
 */
static int __devexit msm_nand_remove(struct platform_device *pdev)
{
	struct msm_nand_info *info = dev_get_drvdata(&pdev->dev);

	dev_set_drvdata(&pdev->dev, NULL);
	if (info) {
		mtd_device_unregister(&info->mtd);
		msm_nand_bam_free(info);
	}
	return 0;
}

#define DRIVER_NAME "msm_qpic_nand"
static const struct of_device_id msm_nand_match_table[] = {
	{ .compatible = "qcom,msm-nand", },
	{},
};

static struct platform_driver msm_nand_driver = {
	.probe		= msm_nand_probe,
	.remove		= __devexit_p(msm_nand_remove),
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = msm_nand_match_table,
	},
};

module_platform_driver(msm_nand_driver);

MODULE_ALIAS(DRIVER_NAME);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM QPIC NAND flash driver");