/* m3_nand.c — Amlogic M3 NAND controller driver */
  1. #include <linux/module.h>
  2. #include <linux/types.h>
  3. #include <linux/init.h>
  4. #include <linux/kernel.h>
  5. #include <linux/string.h>
  6. #include <linux/ioport.h>
  7. #include <linux/platform_device.h>
  8. #include <linux/delay.h>
  9. #include <linux/err.h>
  10. #include <linux/slab.h>
  11. #include <linux/io.h>
  12. #include <linux/bitops.h>
  13. #include <linux/reboot.h>
  14. #include <linux/mtd/mtd.h>
  15. #include <linux/mtd/nand.h>
  16. #include <linux/mtd/nand_ecc.h>
  17. #include <linux/mtd/partitions.h>
  18. #include <mach/nand.h>
  19. #include <mach/clock.h>
/* Printable names for NAND_PLANE_OPTIONS_MASK values (indexed by options >> 4
 * in m3_nand_options_confirm()). */
static char *aml_nand_plane_string[]={
	"NAND_SINGLE_PLANE_MODE",
	"NAND_TWO_PLANE_MODE",
};
/* Printable names for NAND_INTERLEAVING_OPTIONS_MASK values (indexed by
 * options >> 16 in m3_nand_options_confirm()). */
static char *aml_nand_internal_string[]={
	"NAND_NONE_INTERLEAVING_MODE",
	"NAND_INTERLEAVING_MODE",
};
  28. #define ECC_INFORMATION(name_a, bch_a, size_a, parity_a, user_a) { \
  29. .name=name_a, .bch_mode=bch_a, .bch_unit_size=size_a, .bch_bytes=parity_a, .user_byte_mode=user_a \
  30. }
  31. static struct aml_nand_bch_desc m3_bch_list[] = {
  32. [0]=ECC_INFORMATION("NAND_RAW_MODE", NAND_ECC_SOFT_MODE, 0, 0, 0),
  33. [1]=ECC_INFORMATION("NAND_SHORT_MODE" ,NAND_ECC_SHORT_MODE, NAND_ECC_UNIT_SHORT, NAND_BCH60_1K_ECC_SIZE, 2),
  34. [1]=ECC_INFORMATION("NAND_BCH8_MODE", NAND_ECC_BCH8_MODE, NAND_ECC_UNIT_SIZE, NAND_BCH8_ECC_SIZE, 2),
  35. [2]=ECC_INFORMATION("NAND_BCH8_1K_MODE" ,NAND_ECC_BCH8_1K_MODE, NAND_ECC_UNIT_1KSIZE, NAND_BCH8_1K_ECC_SIZE, 2),
  36. [3]=ECC_INFORMATION("NAND_BCH16_1K_MODE" ,NAND_ECC_BCH16_1K_MODE, NAND_ECC_UNIT_1KSIZE, NAND_BCH16_1K_ECC_SIZE, 2),
  37. [4]=ECC_INFORMATION("NAND_BCH24_1K_MODE" ,NAND_ECC_BCH24_1K_MODE, NAND_ECC_UNIT_1KSIZE, NAND_BCH24_1K_ECC_SIZE, 2),
  38. [5]=ECC_INFORMATION("NAND_BCH30_1K_MODE" ,NAND_ECC_BCH30_1K_MODE, NAND_ECC_UNIT_1KSIZE, NAND_BCH30_1K_ECC_SIZE, 2),
  39. [6]=ECC_INFORMATION("NAND_BCH40_1K_MODE" ,NAND_ECC_BCH40_1K_MODE, NAND_ECC_UNIT_1KSIZE, NAND_BCH40_1K_ECC_SIZE, 2),
  40. [7]=ECC_INFORMATION("NAND_BCH60_1K_MODE" ,NAND_ECC_BCH60_1K_MODE, NAND_ECC_UNIT_1KSIZE, NAND_BCH60_1K_ECC_SIZE, 2),
  41. };
  42. static struct aml_nand_device *to_nand_dev(struct platform_device *pdev)
  43. {
  44. return pdev->dev.platform_data;
  45. }
/*
 * Pinmux request tables: one PINMUX_REG(2) bit per NAND chip-enable (CE0..3)
 * and ready/busy (RB0/1) pad.  Each pin list is PINMUX_END_ITEM terminated
 * and wrapped in a pinmux_set_t that m3_nand_select_chip() hands to
 * pinmux_set().  The bit numbers match the disabled aml_set_reg32_mask()
 * calls kept as comments in m3_nand_select_chip().
 */
static pinmux_item_t nand_ce0_pins[] = {
	{
		.reg = PINMUX_REG(2),
		.setmask = 1<<25,
	},
	PINMUX_END_ITEM
};
static pinmux_item_t nand_ce1_pins[] = {
	{
		.reg = PINMUX_REG(2),
		.setmask = 1<<24,
	},
	PINMUX_END_ITEM
};
static pinmux_item_t nand_ce2_pins[] = {
	{
		.reg = PINMUX_REG(2),
		.setmask = 1<<23,
	},
	PINMUX_END_ITEM
};
static pinmux_item_t nand_ce3_pins[] = {
	{
		.reg = PINMUX_REG(2),
		.setmask = 1<<22,
	},
	PINMUX_END_ITEM
};
static pinmux_set_t nand_ce0 = {
	.chip_select = NULL,
	.pinmux = &nand_ce0_pins[0]
};
static pinmux_set_t nand_ce1 = {
	.chip_select = NULL,
	.pinmux = &nand_ce1_pins[0]
};
static pinmux_set_t nand_ce2 = {
	.chip_select = NULL,
	.pinmux = &nand_ce2_pins[0]
};
static pinmux_set_t nand_ce3 = {
	.chip_select = NULL,
	.pinmux = &nand_ce3_pins[0]
};
static pinmux_item_t nand_rb0_pins[] = {
	{
		.reg = PINMUX_REG(2),
		.setmask = 1<<17,
	},
	PINMUX_END_ITEM
};
static pinmux_item_t nand_rb1_pins[] ={
	{
		.reg = PINMUX_REG(2),
		.setmask = 1<<16,
	},
	PINMUX_END_ITEM
};
static pinmux_set_t nand_rb0 = {
	.chip_select = NULL,
	.pinmux = &nand_rb0_pins[0]
};
static pinmux_set_t nand_rb1 = {
	.chip_select = NULL,
	.pinmux = &nand_rb1_pins[0]
};
/*
 * Select NAND die @chipnr (0..3): latch its CE/RB codes for the command and
 * DMA helpers, then (re-)program the pinmux for every populated chip so the
 * needed CE0..CE3 / RB0..RB1 pads are routed to the NAND controller.
 *
 * A cleared bit in chip_enable[i]>>10 (bits 0..3) / rb_enable[i]>>10
 * (bits 0..1) selects the corresponding pad — see the retained
 * aml_set_reg32_mask() equivalents for the raw register bits.
 */
static void m3_nand_select_chip(struct aml_nand_chip *aml_chip, int chipnr)
{
	int i;
	switch (chipnr) {
	case 0:
	case 1:
	case 2:
	case 3:
		udelay(10);
		/* Remembered CE/RB codes are consumed by later NFC commands. */
		aml_chip->chip_selected = aml_chip->chip_enable[chipnr];
		aml_chip->rb_received = aml_chip->rb_enable[chipnr];
		for (i=0; i<aml_chip->chip_num; i++) {
			if (aml_chip->valid_chip[i]) {
				if (!((aml_chip->chip_enable[i] >> 10) & 1))
					//aml_set_reg32_mask(P_PERIPHS_PIN_MUX_2, (1 << 25));
					pinmux_set(&nand_ce0);
				if (!((aml_chip->chip_enable[i] >> 10) & 2))
					//aml_set_reg32_mask(P_PERIPHS_PIN_MUX_2, (1 << 24));
					pinmux_set(&nand_ce1);
				if (!((aml_chip->chip_enable[i] >> 10) & 4))
					//aml_set_reg32_mask(P_PERIPHS_PIN_MUX_2, (1 << 23));
					pinmux_set(&nand_ce2);
				if (!((aml_chip->chip_enable[i] >> 10) & 8))
					//aml_set_reg32_mask(P_PERIPHS_PIN_MUX_2, (1 << 22));
					pinmux_set(&nand_ce3);
				/* RB pads are only muxed when the chip actually uses
				 * a ready/busy line. */
				if (((aml_chip->ops_mode & AML_CHIP_NONE_RB) == 0) && (aml_chip->rb_enable[i])){
					if (!((aml_chip->rb_enable[i] >> 10) & 1))
						//aml_set_reg32_mask(P_PERIPHS_PIN_MUX_2, (1 << 17));
						pinmux_set(&nand_rb0);
					if (!((aml_chip->rb_enable[i] >> 10) & 2))
						//aml_set_reg32_mask(P_PERIPHS_PIN_MUX_2, (1 << 16));
						pinmux_set(&nand_rb1);
				}
			}
		}
		/* Flush an idle cycle on the newly selected CE. */
		NFC_SEND_CMD_IDLE(aml_chip->chip_selected, 0);
		break;
	default:
		/* Invalid die index is a driver bug; BUG() does not return,
		 * so the assignment below is effectively unreachable. */
		BUG();
		aml_chip->chip_selected = CE_NOT_SEL;
		break;
	}
	return;
}
/*
 * One-time controller bring-up with conservative default timings
 * (DEFAULT_T_REA / DEFAULT_T_RHOH).
 *
 * sys_time is the controller clock period in tenths of ns.  The loop
 * searches the smallest bus_cycle (in clocks) for which a sample point
 * between start_cycle (earliest valid data, from T_REA) and end_cycle
 * (latest valid data, from T_RHOH) exists in the 3..bus_cycle+1 window;
 * bus_timing is placed mid-window.  If no cycle count works the controller
 * is left unconfigured.
 *
 * NOTE(review): near-duplicate of m3_nand_adjust_timing(), which redoes the
 * same search with the per-chip T_REA/T_RHOH once the chip is identified.
 */
static void m3_nand_hw_init(struct aml_nand_chip *aml_chip)
{
	struct clk *sys_clk;
	int sys_clk_rate, sys_time, start_cycle, end_cycle, bus_cycle, bus_timing, Tcycle, T_REA = DEFAULT_T_REA, T_RHOH = DEFAULT_T_RHOH;
	sys_clk = clk_get_sys(NAND_SYS_CLK_NAME, NULL);
	sys_clk_rate = clk_get_rate(sys_clk);
	sys_time = (10000 / (sys_clk_rate / 1000000));
	start_cycle = (((NAND_CYCLE_DELAY + T_REA * 10) * 10) / sys_time);
	start_cycle = (start_cycle + 9) / 10;	/* round up */
	for (bus_cycle = 4; bus_cycle <= MAX_CYCLE_NUM; bus_cycle++) {
		Tcycle = bus_cycle * sys_time;
		end_cycle = (((NAND_CYCLE_DELAY + Tcycle / 2 + T_RHOH * 10) * 10) / sys_time);
		end_cycle = end_cycle / 10;	/* round down */
		if ((((start_cycle >= 3) && (start_cycle <= ( bus_cycle + 1)))
			|| ((end_cycle >= 3) && (end_cycle <= (bus_cycle + 1))))
			&& (start_cycle <= end_cycle)) {
			break;
		}
	}
	if (bus_cycle > MAX_CYCLE_NUM)
		return;	/* no workable timing found; leave NFC untouched */
	bus_timing = (start_cycle + end_cycle) / 2;
	NFC_SET_CFG(0);
	NFC_SET_TIMING_ASYC(bus_timing, (bus_cycle - 1));
	NFC_SEND_CMD(1<<31);
	printk("init bus_cycle=%d, bus_timing=%d, start_cycle=%d, end_cycle=%d,system=%d.%dns\n",
		bus_cycle, bus_timing, start_cycle, end_cycle, sys_time/10, sys_time%10);
	return;
}
/*
 * Re-run the bus timing search of m3_nand_hw_init() with the identified
 * chip's T_REA / T_RHOH (falling back to 20ns / 15ns when the ID table
 * left them zero).  Same algorithm and register programming as hw_init;
 * see that function for the derivation of start/end cycles.
 */
static void m3_nand_adjust_timing(struct aml_nand_chip *aml_chip)
{
	struct clk *sys_clk;
	int sys_clk_rate, sys_time, start_cycle, end_cycle, bus_cycle, bus_timing, Tcycle;
	/* Defaults when the chip table supplied no timing data. */
	if (!aml_chip->T_REA)
		aml_chip->T_REA = 20;
	if (!aml_chip->T_RHOH)
		aml_chip->T_RHOH = 15;
	sys_clk = clk_get_sys(NAND_SYS_CLK_NAME, NULL);
	sys_clk_rate = clk_get_rate(sys_clk);
	sys_time = (10000 / (sys_clk_rate / 1000000));	/* clock period, 0.1ns units */
	start_cycle = (((NAND_CYCLE_DELAY + aml_chip->T_REA * 10) * 10) / sys_time);
	start_cycle = (start_cycle + 9) / 10;	/* round up */
	for (bus_cycle = 4; bus_cycle <= MAX_CYCLE_NUM; bus_cycle++) {
		Tcycle = bus_cycle * sys_time;
		end_cycle = (((NAND_CYCLE_DELAY + Tcycle / 2 + aml_chip->T_RHOH * 10) * 10) / sys_time);
		end_cycle = end_cycle / 10;	/* round down */
		if ((((start_cycle >= 3) && (start_cycle <= ( bus_cycle + 1)))
			|| ((end_cycle >= 3) && (end_cycle <= (bus_cycle + 1))))
			&& (start_cycle <= end_cycle)) {
			break;
		}
	}
	if (bus_cycle > MAX_CYCLE_NUM)
		return;	/* keep previous timing if the search failed */
	bus_timing = (start_cycle + end_cycle) / 2;
	NFC_SET_CFG(0);
	NFC_SET_TIMING_ASYC(bus_timing, (bus_cycle - 1));
	NFC_SEND_CMD(1<<31);
	printk("bus_cycle=%d, bus_timing=%d, start_cycle=%d, end_cycle=%d,system=%d.%dns\n",
		bus_cycle, bus_timing, start_cycle, end_cycle, sys_time/10, sys_time%10);
}
  217. #ifdef CONFIG_HAS_EARLYSUSPEND
  218. static void m3_nand_early_suspend(struct early_suspend *nand_early_suspend)
  219. {
  220. printk("m3_nand_early suspend entered\n");
  221. return;
  222. }
  223. static void m3_nand_late_resume(struct early_suspend *nand_early_suspend)
  224. {
  225. printk("m3_nand_late resume entered\n");
  226. return;
  227. }
  228. #endif
  229. static int m3_nand_suspend(struct mtd_info *mtd)
  230. {
  231. struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
  232. struct aml_nand_platform *plat = aml_chip->platform;
  233. struct nand_chip *chip = &aml_chip->chip;
  234. spinlock_t *lock = &chip->controller->lock;
  235. if (!strncmp((char*)plat->name, NAND_BOOT_NAME, strlen((const char*)NAND_BOOT_NAME)))
  236. return 0;
  237. spin_lock(lock);
  238. if (!chip->controller->active)
  239. chip->controller->active = chip;
  240. chip->state = FL_PM_SUSPENDED;
  241. spin_unlock(lock);
  242. printk("m3 nand suspend entered\n");
  243. return 0;
  244. }
/*
 * MTD resume hook.  Skips the boot-partition device; otherwise resets the
 * flash, re-applies the ONFI timing mode if one was negotiated (falling
 * back to default T_REA/T_RHOH when the readback does not match), and
 * releases the controller so normal operation can continue.
 */
static void m3_nand_resume(struct mtd_info *mtd)
{
	struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
	struct aml_nand_platform *plat = aml_chip->platform;
	struct nand_chip *chip = &aml_chip->chip;
	u8 onfi_features[4];
	if (!strncmp((char*)plat->name, NAND_BOOT_NAME, strlen((const char*)NAND_BOOT_NAME)))
		return;
	chip->select_chip(mtd, 0);
	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
	if (aml_chip->onfi_mode) {
		/* Restore the negotiated ONFI timing mode and verify it. */
		aml_nand_set_onfi_features(aml_chip, (uint8_t *)(&aml_chip->onfi_mode), ONFI_TIMING_ADDR);
		aml_nand_get_onfi_features(aml_chip, onfi_features, ONFI_TIMING_ADDR);
		if (onfi_features[0] != aml_chip->onfi_mode) {
			/* Chip rejected the mode: fall back to safe timings. */
			aml_chip->T_REA = DEFAULT_T_REA;
			aml_chip->T_RHOH = DEFAULT_T_RHOH;
			printk("onfi timing mode set failed: %x\n", onfi_features[0]);
		}
	}
	chip->select_chip(mtd, -1);
	/* Hand the controller back and mark the chip ready. */
	spin_lock(&chip->controller->lock);
	chip->controller->active = NULL;
	chip->state = FL_READY;
	spin_unlock(&chip->controller->lock);
	printk("m3 nand resume entered\n");
	return;
}
/*
 * Reconcile the platform-requested options with what the detected chip can
 * actually support, then program the resulting ECC, interleaving and plane
 * configuration into chip/aml_chip and scale mtd geometry accordingly.
 *
 * Three independent option groups are resolved the same way: if the
 * platform selection exceeds what the chip/OOB allows, it is downgraded to
 * the strongest supported setting (with a warning).
 *
 * Returns 0 on success, -ENXIO when a software/none ECC mode is requested
 * that this hardware path cannot provide.
 */
static int m3_nand_options_confirm(struct aml_nand_chip *aml_chip)
{
	struct mtd_info *mtd = &aml_chip->mtd;
	struct nand_chip *chip = &aml_chip->chip;
	struct aml_nand_platform *plat = aml_chip->platform;
	struct aml_nand_bch_desc *ecc_supports = aml_chip->bch_desc;
	unsigned max_bch_mode = aml_chip->max_bch_mode;
	unsigned options_selected = 0, options_support = 0, ecc_bytes, options_define, valid_chip_num = 0;
	int error = 0, i, j;
	/* --- ECC mode --- */
	options_selected = (plat->platform_nand_data.chip.options & NAND_ECC_OPTIONS_MASK);
	options_define = (aml_chip->options & NAND_ECC_OPTIONS_MASK);
	/* Find the descriptor matching the platform's requested bch_mode. */
	for (i=0; i<max_bch_mode; i++) {
		if (ecc_supports[i].bch_mode == options_selected) {
			break;
		}
	}
	j = i;	/* remember the requested entry for the warning below */
	/* Strongest mode whose parity + user bytes fit the chip's OOB;
	 * index 0 (raw mode) is deliberately excluded. */
	for(i=max_bch_mode-1; i>0; i--)
	{
		ecc_bytes = aml_chip->oob_size / (aml_chip->page_size / ecc_supports[i].bch_unit_size);
		if(ecc_bytes >= ecc_supports[i].bch_bytes + ecc_supports[i].user_byte_mode)
		{
			options_support = ecc_supports[i].bch_mode;
			break;
		}
	}
	if (options_define != options_support) {
		options_define = options_support;
	}
	if (options_selected > options_define) {
		printk("oob size is not enough for selected bch mode: %s force bch to mode: %s\n", ecc_supports[j].name, ecc_supports[i].name);
		options_selected = options_define;
	}
	/* Program chip ecc geometry for the final mode. */
	switch (options_selected) {
	case NAND_ECC_BCH8_MODE:
		chip->ecc.size = NAND_ECC_UNIT_SIZE;
		chip->ecc.bytes = NAND_BCH8_ECC_SIZE;
		aml_chip->bch_mode = NAND_ECC_BCH8;
		aml_chip->user_byte_mode = 2;
		break;
	case NAND_ECC_BCH8_1K_MODE:
		chip->ecc.size = NAND_ECC_UNIT_1KSIZE;
		chip->ecc.bytes = NAND_BCH8_1K_ECC_SIZE;
		aml_chip->bch_mode = NAND_ECC_BCH8_1K;
		aml_chip->user_byte_mode = 2;
		break;
	case NAND_ECC_BCH16_1K_MODE:
		chip->ecc.size = NAND_ECC_UNIT_1KSIZE;
		chip->ecc.bytes = NAND_BCH16_1K_ECC_SIZE;
		aml_chip->bch_mode = NAND_ECC_BCH16_1K;
		aml_chip->user_byte_mode = 2;
		break;
	case NAND_ECC_BCH24_1K_MODE:
		chip->ecc.size = NAND_ECC_UNIT_1KSIZE;
		chip->ecc.bytes = NAND_BCH24_1K_ECC_SIZE;
		aml_chip->bch_mode = NAND_ECC_BCH24_1K;
		aml_chip->user_byte_mode = 2;
		break;
	case NAND_ECC_BCH30_1K_MODE:
		chip->ecc.size = NAND_ECC_UNIT_1KSIZE;
		chip->ecc.bytes = NAND_BCH30_1K_ECC_SIZE;
		aml_chip->bch_mode = NAND_ECC_BCH30_1K;
		aml_chip->user_byte_mode = 2;
		break;
	case NAND_ECC_BCH40_1K_MODE:
		chip->ecc.size = NAND_ECC_UNIT_1KSIZE;
		chip->ecc.bytes = NAND_BCH40_1K_ECC_SIZE;
		aml_chip->bch_mode = NAND_ECC_BCH40_1K;
		aml_chip->user_byte_mode = 2;
		break;
	case NAND_ECC_BCH60_1K_MODE:
		chip->ecc.size = NAND_ECC_UNIT_1KSIZE;
		chip->ecc.bytes = NAND_BCH60_1K_ECC_SIZE;
		aml_chip->bch_mode = NAND_ECC_BCH60_1K;
		aml_chip->user_byte_mode = 2;
		break;
	case NAND_ECC_SHORT_MODE:
		/* Short mode: 384-byte units, ECC steps derived from 512B. */
		chip->ecc.size = NAND_ECC_UNIT_SHORT;
		chip->ecc.bytes = NAND_BCH60_1K_ECC_SIZE;
		aml_chip->bch_mode = NAND_ECC_BCH_SHORT;
		aml_chip->user_byte_mode = 2;
		chip->ecc.steps = mtd->writesize / 512;
		break;
	default :
		if ((plat->platform_nand_data.chip.options & NAND_ECC_OPTIONS_MASK) != NAND_ECC_SOFT_MODE) {
			printk("soft ecc or none ecc just support in linux self nand base please selected it at platform options\n");
			error = -ENXIO;
		}
		break;
	}
	/* --- interleaving mode --- */
	options_selected = (plat->platform_nand_data.chip.options & NAND_INTERLEAVING_OPTIONS_MASK);
	options_define = (aml_chip->options & NAND_INTERLEAVING_OPTIONS_MASK);
	if (options_selected > options_define) {
		printk("internal mode error for selected internal mode: %s force internal mode to : %s\n", aml_nand_internal_string[options_selected >> 16], aml_nand_internal_string[options_define >> 16]);
		options_selected = options_define;
	}
	switch (options_selected) {
	case NAND_INTERLEAVING_MODE:
		/* Interleaving multiplies the effective geometry by the number
		 * of internal dies. */
		aml_chip->ops_mode |= AML_INTERLEAVING_MODE;
		mtd->erasesize *= aml_chip->internal_chipnr;
		mtd->writesize *= aml_chip->internal_chipnr;
		mtd->oobsize *= aml_chip->internal_chipnr;
		break;
	default:
		break;
	}
	/* --- plane mode --- */
	options_selected = (plat->platform_nand_data.chip.options & NAND_PLANE_OPTIONS_MASK);
	options_define = (aml_chip->options & NAND_PLANE_OPTIONS_MASK);
	if (options_selected > options_define) {
		printk("multi plane error for selected plane mode: %s force plane to : %s\n", aml_nand_plane_string[options_selected >> 4], aml_nand_plane_string[options_define >> 4]);
		options_selected = options_define;
	}
	for (i=0; i<aml_chip->chip_num; i++) {
		if (aml_chip->valid_chip[i]) {
			valid_chip_num++;
		}
	}
	if (aml_chip->ops_mode & AML_INTERLEAVING_MODE)
		valid_chip_num *= aml_chip->internal_chipnr;
	/* Two-plane is only honoured with at most two logical chips. */
	if(valid_chip_num > 2){
		printk("detect valid_chip_num:%d over 2, and aml_chip->internal_chipnr:%d, disable NAND_TWO_PLANE_MODE here\n", valid_chip_num, aml_chip->internal_chipnr);
	}
	else{
		switch (options_selected) {
		case NAND_TWO_PLANE_MODE:
			aml_chip->plane_num = 2;
			mtd->erasesize *= 2;
			mtd->writesize *= 2;
			mtd->oobsize *= 2;
			break;
		default:
			aml_chip->plane_num = 1;
			break;
		}
	}
	return error;
}
  410. static int aml_platform_dma_waiting(struct aml_nand_chip *aml_chip)
  411. {
  412. unsigned time_out_cnt = 0;
  413. NFC_SEND_CMD_IDLE(aml_chip->chip_selected, 0);
  414. NFC_SEND_CMD_IDLE(aml_chip->chip_selected, 0);
  415. do {
  416. if (NFC_CMDFIFO_SIZE() <= 0)
  417. break;
  418. }while (time_out_cnt++ <= AML_DMA_BUSY_TIMEOUT);
  419. if (time_out_cnt < AML_DMA_BUSY_TIMEOUT)
  420. return 0;
  421. return -EBUSY;
  422. }
/*
 * Start one MEM->NAND (program) DMA of @len bytes from @buf.
 *
 * Data is staged into the coherent bounce buffer (aml_nand_data_buf /
 * data_dma_addr); per-unit info goes through nand_info_dma_addr.
 * @bch_mode selects a raw transfer (NAND_ECC_NONE / 0) or hardware ECC;
 * NAND_ECC_BCH_SHORT is driven as BCH60/1K with an explicit unit size of
 * ecc.size/8 and the short flag set.
 * Returns 0 on success or -EBUSY from aml_platform_dma_waiting().
 */
static int m3_nand_dma_write(struct aml_nand_chip *aml_chip, unsigned char *buf, int len, unsigned bch_mode)
{
	int ret = 0;
	unsigned dma_unit_size = 0, count = 0;
	struct nand_chip *chip = &aml_chip->chip;
	/* Stage payload, then order CPU writes before starting the engine. */
	memcpy(aml_chip->aml_nand_data_buf, buf, len);
	smp_wmb();
	wmb();
	if (bch_mode == NAND_ECC_NONE)
		count = 1;
	else if (bch_mode == NAND_ECC_BCH_SHORT) {
		dma_unit_size = (chip->ecc.size >> 3);
		count = len/chip->ecc.size;
	}
	else
		count = len/chip->ecc.size;
#ifdef CONFIG_CLK81_DFS
	/* Block clk81 frequency scaling while the transfer is in flight. */
	down(&aml_chip->nand_sem);
#endif
	NFC_SEND_CMD_ADL(aml_chip->data_dma_addr);
	NFC_SEND_CMD_ADH(aml_chip->data_dma_addr);
	NFC_SEND_CMD_AIL(aml_chip->nand_info_dma_addr);
	NFC_SEND_CMD_AIH((aml_chip->nand_info_dma_addr));
	if(aml_chip->ran_mode){
		/* Randomizer seed is derived from the target page address. */
		NFC_SEND_CMD_SEED(aml_chip->page_addr);
	}
	if(!bch_mode)
		NFC_SEND_CMD_M2N_RAW(aml_chip->ran_mode, len);
	else
		NFC_SEND_CMD_M2N(aml_chip->ran_mode, ((bch_mode == NAND_ECC_BCH_SHORT)?NAND_ECC_BCH60_1K:bch_mode), ((bch_mode == NAND_ECC_BCH_SHORT)?1:0), dma_unit_size, count);
	ret = aml_platform_dma_waiting(aml_chip);
#ifdef CONFIG_CLK81_DFS
	up(&aml_chip->nand_sem);
#endif
	return ret;
}
/*
 * Start one NAND->MEM (read) DMA of @len bytes into @buf and wait for it
 * to complete.
 *
 * The controller writes one PER_INFO_BYTE info word per ECC unit into
 * user_info_buf; completion is detected by polling the DONE bit of the
 * LAST unit's info word (the buffer is zeroed beforehand so stale DONE
 * bits cannot satisfy the poll).  Data lands in the bounce buffer and is
 * copied out unless the caller passed the bounce buffer itself.
 * Returns 0 on success or -EBUSY from aml_platform_dma_waiting().
 */
static int m3_nand_dma_read(struct aml_nand_chip *aml_chip, unsigned char *buf, int len, unsigned bch_mode)
{
	volatile unsigned int * info_buf=NULL;
	struct nand_chip *chip = &aml_chip->chip;
	unsigned dma_unit_size = 0, count = 0, info_times_int_len;
	int ret = 0;
	info_times_int_len = PER_INFO_BYTE/sizeof(unsigned int);
	if (bch_mode == NAND_ECC_NONE)
		count = 1;
	else if (bch_mode == NAND_ECC_BCH_SHORT) {
		dma_unit_size = (chip->ecc.size >> 3);
		count = len/chip->ecc.size;
	}
	else
		count = len/chip->ecc.size;
	/* Poll target: info word of the last ECC unit. */
	info_buf = (volatile unsigned *)&(aml_chip->user_info_buf[(count-1)*info_times_int_len]);
	memset((unsigned char *)aml_chip->user_info_buf, 0, count*PER_INFO_BYTE);
	smp_wmb();
	wmb();
#ifdef CONFIG_CLK81_DFS
	/* Block clk81 frequency scaling while the transfer is in flight. */
	down(&aml_chip->nand_sem);
#endif
	NFC_SEND_CMD_ADL(aml_chip->data_dma_addr);
	NFC_SEND_CMD_ADH(aml_chip->data_dma_addr);
	NFC_SEND_CMD_AIL(aml_chip->nand_info_dma_addr);
	NFC_SEND_CMD_AIH((aml_chip->nand_info_dma_addr));
	if(aml_chip->ran_mode){
		/* Randomizer seed is derived from the page address. */
		NFC_SEND_CMD_SEED(aml_chip->page_addr);
	}
	if(bch_mode == NAND_ECC_NONE)
		NFC_SEND_CMD_N2M_RAW(aml_chip->ran_mode,len);
	else
		NFC_SEND_CMD_N2M(aml_chip->ran_mode, ((bch_mode == NAND_ECC_BCH_SHORT)?NAND_ECC_BCH60_1K:bch_mode), ((bch_mode == NAND_ECC_BCH_SHORT)?1:0), dma_unit_size, count);
	ret = aml_platform_dma_waiting(aml_chip);
#ifdef CONFIG_CLK81_DFS
	up(&aml_chip->nand_sem);
#endif
	if (ret)
		return ret;
	/* Busy-wait for the DONE flag in the last unit's info word.
	 * NOTE(review): no timeout here — a wedged transfer spins forever. */
	do{
		smp_rmb();
	}while(NAND_INFO_DONE(aml_read_reg32((unsigned) info_buf)) == 0);
	smp_rmb();
	if (buf != aml_chip->aml_nand_data_buf)
		memcpy(buf, aml_chip->aml_nand_data_buf, len);
	smp_wmb();
	wmb();
	return 0;
}
/*
 * Inspect the per-ECC-unit info words left by the last N2M transfer.
 *
 * Returns 0 when every unit was correctable (ecc_cnt_cur ends up holding
 * the correction count of the last unit), -EINVAL when @size is not a
 * multiple of ecc.size, and -EIO when any unit carries the uncorrectable
 * marker (ECC count field == 0x3f); in that case zero_cnt is latched so
 * the caller can tell an erased page from a genuinely bad one.
 */
static int m3_nand_hwecc_correct(struct aml_nand_chip *aml_chip, unsigned char *buf, unsigned size, unsigned char *oob_buf)
{
	struct nand_chip *chip = &aml_chip->chip;
	struct mtd_info *mtd = &aml_chip->mtd;
	unsigned ecc_step_num;
	unsigned info_times_int_len = PER_INFO_BYTE/sizeof(unsigned int);
	if (size % chip->ecc.size) {
		printk ("error parameter size for ecc correct %x\n", size);
		return -EINVAL;
	}
	aml_chip->ecc_cnt_cur = 0;
	for (ecc_step_num = 0; ecc_step_num < (size / chip->ecc.size); ecc_step_num++) {
		/* 0x3f in the ECC count field flags an uncorrectable unit. */
		if(NAND_ECC_CNT(aml_read_reg32((unsigned )(&aml_chip->user_info_buf[ecc_step_num*info_times_int_len]))) == 0x3f)
		{
			aml_chip->zero_cnt = NAND_ZERO_CNT(*(unsigned *)(&aml_chip->user_info_buf[ecc_step_num*info_times_int_len]));
			return -EIO;
		}
		else {
			/* Remember the (last unit's) corrected-bit count. */
			aml_chip->ecc_cnt_cur = NAND_ECC_CNT(*(unsigned *)(&aml_chip->user_info_buf[ecc_step_num*info_times_int_len]));
		}
	}
	return 0;
}
/*
 * Erase boot-area page @page in every one of the M3_BOOT_COPY_NUM
 * redundant boot copies, skipping blocks already marked bad.  Only chip 0
 * carries the boot area; out-of-range pages are ignored.
 */
static void m3_nand_boot_erase_cmd(struct mtd_info *mtd, int page)
{
	struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
	struct nand_chip *chip = mtd->priv;
	loff_t ofs;
	int i, page_addr;
	if (page >= M3_BOOT_PAGES_PER_COPY)
		return;
	if (aml_chip->valid_chip[0]) {
		for (i=0; i<M3_BOOT_COPY_NUM; i++) {
			page_addr = page + i*M3_BOOT_PAGES_PER_COPY;
			ofs = (page_addr << chip->page_shift);
			/* Leave bad blocks alone. */
			if (chip->block_bad(mtd, ofs, 0))
				continue;
			aml_chip->aml_nand_select_chip(aml_chip, 0);
			aml_chip->aml_nand_command(aml_chip, NAND_CMD_ERASE1, -1, page_addr, i);
			aml_chip->aml_nand_command(aml_chip, NAND_CMD_ERASE2, -1, -1, i);
			chip->waitfunc(mtd, chip);
		}
	}
	return ;
}
/*
 * Read one boot-area page with hardware ECC (always from chip 0).
 *
 * Page 0 of each boot copy holds the configuration header written by
 * m3_nand_boot_write_page(), so logical page n is read from physical page
 * n+1.  When a "new nand" SLC-emulation type (1..9) is active, logical
 * pages beyond 3 are remapped onto lower-page-only positions with the same
 * arithmetic used on the write path.  Out-of-range pages return zeroed
 * data.  Returns 0 on success, -EBUSY / -ENODEV on chip trouble; ECC
 * failures are accounted in mtd->ecc_stats but not returned as errors.
 */
static int m3_nand_boot_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int page)
{
	struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
	uint8_t *oob_buf = chip->oob_poi;
	unsigned nand_page_size = chip->ecc.steps * chip->ecc.size;
	unsigned pages_per_blk_shift = (chip->phys_erase_shift - chip->page_shift);
	int user_byte_num = (chip->ecc.steps * aml_chip->user_byte_mode);
	int error = 0, i = 0, stat = 0, bch_mode, read_page, read_page_tmp;
	int new_nand_type = 0;
#ifdef NEW_NAND_SUPPORT
	new_nand_type = aml_chip->new_nand_info.type;
#endif
	/* SLC-emulated parts halve the usable pages per copy. */
	if((new_nand_type < 10)&&(new_nand_type))
	{
		if (page >= (M3_BOOT_PAGES_PER_COPY/2 - 3)) {
			memset(buf, 0, (1 << chip->page_shift));
			goto exit;
		}
	}
	else{
		if (page >= (M3_BOOT_PAGES_PER_COPY - 1)) {
			memset(buf, 0, (1 << chip->page_shift));
			goto exit;
		}
	}
	/* Skip the per-copy configuration header in page 0. */
	read_page = page;
	read_page++;
	if((page > 3) && ((new_nand_type < 10)&&(new_nand_type))){
		/* Remap onto lower (SLC-safe) pages; must mirror the mapping
		 * in m3_nand_boot_write_page(). */
		read_page_tmp = page + 2;
		if(((read_page_tmp%4) == 2) ||((read_page_tmp%4) == 3)){
			read_page_tmp += (((read_page_tmp/4) -1) *4);
		}
		else if(((read_page_tmp%4) == 0) ||((read_page_tmp%4) == 1)){
			read_page_tmp += (((read_page_tmp/4) -1) *4 -2);
		}
		read_page = read_page_tmp;
	}
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, read_page);
	bch_mode = aml_chip->bch_mode;
	memset(buf, 0xff, (1 << chip->page_shift));
	if (aml_chip->valid_chip[i]) {
		if (!aml_chip->aml_nand_wait_devready(aml_chip, i)) {
			printk ("read couldn`t found selected chip: %d ready\n", i);
			error = -EBUSY;
			goto exit;
		}
		/* Without an RB line, re-issue READ0 to leave status mode. */
		if (aml_chip->ops_mode & AML_CHIP_NONE_RB)
			chip->cmd_ctrl(mtd, NAND_CMD_READ0 & 0xff, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
		error = aml_chip->aml_nand_dma_read(aml_chip, buf, nand_page_size, bch_mode);
		if (error)
			goto exit;
		aml_chip->aml_nand_get_user_byte(aml_chip, oob_buf, user_byte_num);
		stat = aml_chip->aml_nand_hwecc_correct(aml_chip, buf, nand_page_size, oob_buf);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
			printk("aml nand read data ecc failed at blk %d page:%d chip %d\n", (page >> pages_per_blk_shift), page, i);
		}
		else
			mtd->ecc_stats.corrected += stat;
	}
	else {
		error = -ENODEV;
		goto exit;
	}
exit:
	return error;
}
/*
 * Program one boot-area page with hardware ECC (chip 0 only).
 *
 * The first page of each boot copy (page_addr multiple of
 * M3_BOOT_PAGES_PER_COPY) is written in short ECC mode — as the boot ROM
 * expects — by temporarily switching ecc.size/nand_page_size; the original
 * ecc.size is restored on exit.  The OOB user bytes are filled with the
 * 0x55/0xaa boot signature before writing.
 */
static void m3_nand_boot_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf)
{
	struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
	uint8_t *oob_buf = chip->oob_poi;
	unsigned nand_page_size = chip->ecc.steps * chip->ecc.size;
	int user_byte_num = (chip->ecc.steps * aml_chip->user_byte_mode);
	int error = 0, i = 0, bch_mode, ecc_size;
	ecc_size = chip->ecc.size;	/* saved for restore at exit */
	if (((aml_chip->page_addr % M3_BOOT_PAGES_PER_COPY) == 0) && (aml_chip->bch_mode != NAND_ECC_BCH_SHORT)) {
		/* Copy header page: force short ECC layout. */
		nand_page_size = (mtd->writesize / 512) * NAND_ECC_UNIT_SHORT;
		bch_mode = NAND_ECC_BCH_SHORT;
		chip->ecc.size = NAND_ECC_UNIT_SHORT;
	}
	else
		bch_mode = aml_chip->bch_mode;
	/* Boot signature pattern in the user OOB bytes. */
	for (i=0; i<mtd->oobavail; i+=2) {
		oob_buf[i] = 0x55;
		oob_buf[i+1] = 0xaa;
	}
	i = 0;
	if (aml_chip->valid_chip[i]) {
		aml_chip->aml_nand_select_chip(aml_chip, i);
		aml_chip->aml_nand_set_user_byte(aml_chip, oob_buf, user_byte_num);
		error = aml_chip->aml_nand_dma_write(aml_chip, (unsigned char *)buf, nand_page_size, bch_mode);
		if (error)
			goto exit;
		aml_chip->aml_nand_command(aml_chip, NAND_CMD_PAGEPROG, -1, -1, i);
	}
	else {
		error = -ENODEV;
		goto exit;
	}
exit:
	/* Undo the temporary short-ECC override, if it was applied. */
	if (((aml_chip->page_addr % M3_BOOT_PAGES_PER_COPY) == 0) && (aml_chip->bch_mode != NAND_ECC_BCH_SHORT))
		chip->ecc.size = ecc_size;
	return;
}
/*
 * Write one logical boot page into every redundant boot copy.
 *
 * For each copy: if the target is the copy's first page, a configuration
 * header (N2M command word, pages-per-block, new-nand type) is written
 * first with the randomizer disabled, then the payload goes to the next
 * page.  With SLC emulation active (new-nand type 1..9), logical pages
 * beyond 3 are remapped to lower-page-only positions (the read path uses
 * the same arithmetic) and enslc mode brackets the whole operation.
 * Returns 0 on success or -EIO on a failed program.
 */
static int m3_nand_boot_write_page(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf, int page, int cached, int raw)
{
	struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
	int status, i, write_page, configure_data, pages_per_blk, write_page_tmp, ran_mode;
	int new_nand_type = 0;
	int en_slc = 0;
#ifdef NEW_NAND_SUPPORT
	new_nand_type = aml_chip->new_nand_info.type;
	en_slc = ((aml_chip->new_nand_info.type < 10)&&(aml_chip->new_nand_info.type))? 1:0;
#endif
	if(en_slc){
		if (page >= (M3_BOOT_PAGES_PER_COPY/2 - 3))
			return 0;
#ifdef NEW_NAND_SUPPORT
		if(page > 3)
			aml_chip->new_nand_info.slc_program_info.enter_enslc_mode(mtd);
#endif
	}
	else{
		if (page >= (M3_BOOT_PAGES_PER_COPY - 1))
			return 0;
	}
	pages_per_blk = (1 << (chip->phys_erase_shift - chip->page_shift));
	for (i=0; i<M3_BOOT_COPY_NUM; i++) {
		write_page = page + i*M3_BOOT_PAGES_PER_COPY;
		if ((write_page % M3_BOOT_PAGES_PER_COPY) == 0) {
			/* First page of this copy: write the configuration header
			 * the boot ROM parses, with the randomizer forced off. */
			if (aml_chip->bch_mode == NAND_ECC_BCH_SHORT)
				configure_data = NFC_CMD_N2M(aml_chip->ran_mode, NAND_ECC_BCH60_1K, 1, (chip->ecc.size >> 3), chip->ecc.steps);
			else
				configure_data = NFC_CMD_N2M(aml_chip->ran_mode, aml_chip->bch_mode, 0, (chip->ecc.size >> 3), chip->ecc.steps);
			memset(chip->buffers->databuf, 0xbb, mtd->writesize);
			memcpy(chip->buffers->databuf, (unsigned char *)(&configure_data), sizeof(int));
			memcpy(chip->buffers->databuf + sizeof(int), (unsigned char *)(&pages_per_blk), sizeof(int));
			/* New-nand type follows the two header words. */
			memcpy(chip->buffers->databuf + sizeof(int) + sizeof(int), (unsigned char *)(&new_nand_type), sizeof(int));
			chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, write_page);
			ran_mode = aml_chip->ran_mode;
			aml_chip->ran_mode = 0;
			chip->ecc.write_page(mtd, chip, chip->buffers->databuf);
			aml_chip->ran_mode = ran_mode;
			status = chip->waitfunc(mtd, chip);
			if ((status & NAND_STATUS_FAIL) && (chip->errstat))
				status = chip->errstat(mtd, chip, FL_WRITING, status, write_page);
			if (status & NAND_STATUS_FAIL)
				return -EIO;
		}
		write_page++;
		if((page > 3) && en_slc){
			/* Remap onto lower (SLC-safe) pages; the boot read path
			 * mirrors this mapping. */
			write_page_tmp = page + 2;
			if(((write_page_tmp%4) == 2) ||((write_page_tmp%4) == 3)){
				write_page_tmp += (((write_page_tmp/4) -1) *4);
			}
			else if(((write_page_tmp%4) == 0) ||((write_page_tmp%4) == 1)){
				write_page_tmp += (((write_page_tmp/4) -1) *4 -2);
			}
			write_page = write_page_tmp + i*M3_BOOT_PAGES_PER_COPY;
		}
		chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, write_page);
		if (unlikely(raw))
			chip->ecc.write_page_raw(mtd, chip, buf);
		else
			chip->ecc.write_page(mtd, chip, buf);
		if (!cached || !(chip->options & NAND_CACHEPRG)) {
			status = chip->waitfunc(mtd, chip);
			if ((status & NAND_STATUS_FAIL) && (chip->errstat))
				status = chip->errstat(mtd, chip, FL_WRITING, status, write_page);
			if (status & NAND_STATUS_FAIL){
#ifdef NEW_NAND_SUPPORT
				/* Drop out of enslc mode before reporting failure. */
				if(en_slc && (page > 3))
					aml_chip->new_nand_info.slc_program_info.exit_enslc_mode(mtd);
#endif
				return -EIO;
			}
		} else {
			status = chip->waitfunc(mtd, chip);
		}
	}
#ifdef NEW_NAND_SUPPORT
	if(en_slc && (page > 3))
		aml_chip->new_nand_info.slc_program_info.exit_enslc_mode(mtd);
#endif
	return 0;
}
  754. #ifdef CONFIG_CLK81_DFS
  755. static int nand_pre_change_fun(struct clk81_client* client)
  756. {
  757. struct aml_nand_chip *aml_chip = client->param;
  758. return 0;
  759. }
  760. static int nand_check_client_ready(struct clk81_client* client)
  761. {
  762. struct aml_nand_chip *aml_chip = client->param;
  763. if(aml_chip->lock_state){
  764. printk(KERN_DEBUG "nand already lock");
  765. return 0;
  766. }
  767. else{
  768. if(down_trylock(&aml_chip->nand_sem)){
  769. printk(KERN_DEBUG "nand lock not ready");
  770. return -1;
  771. }
  772. aml_chip->lock_state = 1;
  773. printk(KERN_DEBUG "nand lock ready");
  774. return 0;
  775. }
  776. }
  777. static int nand_post_change_fun(struct clk81_client* client)
  778. {
  779. struct aml_nand_chip *aml_chip = client->param;
  780. aml_chip->aml_nand_adjust_timing(aml_chip);
  781. up(&aml_chip->nand_sem);
  782. aml_chip->lock_state = 0;
  783. printk(KERN_DEBUG "nand unlock");
  784. return 0;
  785. }
  786. #endif
  787. struct nand_hw_control controller;
  788. static int aml_nand_probe(struct aml_nand_platform *plat, struct device *dev)
  789. {
  790. struct aml_nand_chip *aml_chip = NULL;
  791. struct nand_chip *chip = NULL;
  792. struct mtd_info *mtd = NULL;
  793. int err = 0, i;
  794. aml_chip = kzalloc(sizeof(*aml_chip), GFP_KERNEL);
  795. if (aml_chip == NULL) {
  796. printk("no memory for flash info\n");
  797. err = -ENOMEM;
  798. goto exit_error;
  799. }
  800. #ifdef CONFIG_CLK81_DFS
  801. init_MUTEX(&aml_chip->nand_sem);
  802. aml_chip->lock_state = 0;
  803. regist_clk81_client(plat->name, nand_pre_change_fun, nand_post_change_fun, nand_check_client_ready, aml_chip);
  804. #endif
  805. /* initialize mtd info data struct */
  806. dev->coherent_dma_mask = DMA_BIT_MASK(32);
  807. aml_chip->device = dev;
  808. aml_chip->platform = plat;
  809. aml_chip->bch_desc = m3_bch_list;
  810. aml_chip->max_bch_mode = sizeof(m3_bch_list) / sizeof(m3_bch_list[0]);
  811. plat->aml_chip = aml_chip;
  812. chip = &aml_chip->chip;
  813. chip->priv = aml_chip;//&aml_chip->mtd;
  814. aml_chip->ran_mode = plat->ran_mode;
  815. aml_chip->rbpin_detect = plat->rbpin_detect;
  816. chip->controller=&controller;
  817. printk("chip->controller=%p\n",chip->controller);
  818. mtd = &aml_chip->mtd;
  819. mtd->priv = chip;
  820. mtd->dev.parent= dev->parent;
  821. mtd->owner = THIS_MODULE;
  822. aml_chip->aml_nand_hw_init = m3_nand_hw_init;
  823. aml_chip->aml_nand_adjust_timing = m3_nand_adjust_timing;
  824. aml_chip->aml_nand_select_chip = m3_nand_select_chip;
  825. aml_chip->aml_nand_options_confirm = m3_nand_options_confirm;
  826. aml_chip->aml_nand_dma_read = m3_nand_dma_read;
  827. aml_chip->aml_nand_dma_write = m3_nand_dma_write;
  828. aml_chip->aml_nand_hwecc_correct = m3_nand_hwecc_correct;
  829. // aml_chip->nand_early_suspend.suspend = m3_nand_early_suspend;
  830. // aml_chip->nand_early_suspend.resume = m3_nand_late_resume;
  831. err = aml_nand_init(aml_chip);
  832. if (err)
  833. goto exit_error;
  834. if (!strncmp((char*)plat->name, NAND_BOOT_NAME, strlen((const char*)NAND_BOOT_NAME))) {
  835. chip->erase_cmd = m3_nand_boot_erase_cmd;
  836. chip->ecc.read_page = m3_nand_boot_read_page_hwecc;
  837. chip->ecc.write_page = m3_nand_boot_write_page_hwecc;
  838. chip->write_page = m3_nand_boot_write_page;
  839. if (chip->ecc.layout)
  840. chip->ecc.layout->oobfree[0].length = ((mtd->writesize / 512) * aml_chip->user_byte_mode);
  841. chip->ecc.layout->oobavail = 0;
  842. for (i = 0; chip->ecc.layout->oobfree[i].length && i < ARRAY_SIZE(chip->ecc.layout->oobfree); i++)
  843. chip->ecc.layout->oobavail += chip->ecc.layout->oobfree[i].length;
  844. mtd->oobavail = chip->ecc.layout->oobavail;
  845. mtd->ecclayout = chip->ecc.layout;
  846. }
  847. mtd->suspend = m3_nand_suspend;
  848. mtd->resume = m3_nand_resume;
  849. return 0;
  850. exit_error:
  851. if (aml_chip)
  852. kfree(aml_chip);
  853. mtd->name = NULL;
  854. return err;
  855. }
/* Recover the aml_nand_device from its embedded notifier_block. */
#define m3_nand_notifier_to_blk(l) container_of(l, struct aml_nand_device, nb)
/*
 * Reboot/panic notifier: before the system goes down, take each NAND
 * device out of SLC (enslc) mode and restore the default read-retry
 * register values so the next boot sees the chip in its power-on
 * state.  Only new-nand types 1..9 need this, and only when
 * NEW_NAND_SUPPORT is compiled in.  Always returns 0.
 */
static int m3_nand_reboot_notifier(struct notifier_block *nb, unsigned long priority, void * arg)
{
int error = 0;
struct aml_nand_device *aml_nand_dev = m3_nand_notifier_to_blk(nb);
struct aml_nand_platform *plat = NULL;
struct aml_nand_chip *aml_chip = NULL;
struct mtd_info *mtd = NULL;
int i;
/* starts at 1 -- presumably index 0 is the boot device, which is
 * deliberately skipped (same pattern as m3_nand_shutdown); confirm */
for (i=1; i<aml_nand_dev->dev_num; i++) {
plat = &aml_nand_dev->aml_nand_platform[i];
aml_chip = plat->aml_chip;
if (aml_chip) {
mtd = &aml_chip->mtd;
#ifdef NEW_NAND_SUPPORT
if (mtd) {
if((aml_chip->new_nand_info.type) && (aml_chip->new_nand_info.type < 10)){
aml_chip->new_nand_info.slc_program_info.exit_enslc_mode(mtd);
aml_chip->new_nand_info.read_rety_info.set_default_value(mtd);
}
}
#endif
}
}
return error;
}
  882. static int m3_nand_probe(struct platform_device *pdev)
  883. {
  884. struct aml_nand_device *aml_nand_dev = to_nand_dev(pdev);
  885. struct aml_nand_platform *plat = NULL;
  886. int err = 0, i;
  887. dev_dbg(&pdev->dev, "(%p)\n", pdev);
  888. if (!aml_nand_dev) {
  889. dev_err(&pdev->dev, "no platform specific information\n");
  890. err = -ENOMEM;
  891. goto exit_error;
  892. }
  893. platform_set_drvdata(pdev, aml_nand_dev);
  894. printk("%d\n",aml_nand_dev->dev_num);
  895. spin_lock_init(&controller.lock);
  896. init_waitqueue_head(&controller.wq);
  897. aml_nand_dev->nb.notifier_call = m3_nand_reboot_notifier;
  898. register_reboot_notifier(&aml_nand_dev->nb);
  899. atomic_notifier_chain_register(&panic_notifier_list, &aml_nand_dev->nb);
  900. for (i=0; i<aml_nand_dev->dev_num; i++) {
  901. plat = &aml_nand_dev->aml_nand_platform[i];
  902. if (!plat) {
  903. printk("error for not platform data\n");
  904. continue;
  905. }
  906. err = aml_nand_probe(plat, &pdev->dev);
  907. if (err) {
  908. printk("%s dev probe failed %d\n", plat->name, err);
  909. continue;
  910. }
  911. }
  912. exit_error:
  913. return err;
  914. }
  915. static int m3_nand_remove(struct platform_device *pdev)
  916. {
  917. struct aml_nand_device *aml_nand_dev = to_nand_dev(pdev);
  918. struct aml_nand_platform *plat = NULL;
  919. struct aml_nand_chip *aml_chip = NULL;
  920. struct mtd_info *mtd = NULL;
  921. int i;
  922. platform_set_drvdata(pdev, NULL);
  923. for (i=0; i<aml_nand_dev->dev_num; i++) {
  924. plat = &aml_nand_dev->aml_nand_platform[i];
  925. aml_chip = plat->aml_chip;
  926. if (aml_chip) {
  927. mtd = &aml_chip->mtd;
  928. if (mtd) {
  929. #ifdef NEW_NAND_SUPPORT
  930. if((aml_chip->new_nand_info.type) && (aml_chip->new_nand_info.type < 10) && (i == 1)){
  931. aml_chip->new_nand_info.slc_program_info.exit_enslc_mode(mtd);
  932. aml_chip->new_nand_info.read_rety_info.set_default_value(mtd);
  933. }
  934. #endif
  935. nand_release(mtd);
  936. kfree(mtd);
  937. }
  938. kfree(aml_chip);
  939. }
  940. }
  941. return 0;
  942. }
  943. static void m3_nand_shutdown(struct platform_device *pdev)
  944. {
  945. struct aml_nand_device *aml_nand_dev = to_nand_dev(pdev);
  946. struct aml_nand_platform *plat = NULL;
  947. struct aml_nand_chip *aml_chip = NULL;
  948. struct mtd_info *mtd = NULL;
  949. int i;
  950. for (i=1; i<aml_nand_dev->dev_num; i++) {
  951. plat = &aml_nand_dev->aml_nand_platform[i];
  952. aml_chip = plat->aml_chip;
  953. if (aml_chip) {
  954. mtd = &aml_chip->mtd;
  955. #ifdef NEW_NAND_SUPPORT
  956. if (mtd) {
  957. if((aml_chip->new_nand_info.type) && (aml_chip->new_nand_info.type < 10)){
  958. aml_chip->new_nand_info.slc_program_info.exit_enslc_mode(mtd);
  959. aml_chip->new_nand_info.read_rety_info.set_default_value(mtd);
  960. }
  961. }
  962. #endif
  963. }
  964. }
  965. return;
  966. }
  967. #define DRV_NAME "aml_nand"
  968. #define DRV_VERSION "1.1"
  969. #define DRV_AUTHOR "xiaojun_yoyo"
  970. #define DRV_DESC "Amlogic nand flash AVOS driver for M3"
  971. /* driver device registration */
/* Platform-driver descriptor: binds to the "aml_nand" platform device
 * and wires up probe/remove/shutdown defined above. */
static struct platform_driver m3_nand_driver = {
.probe = m3_nand_probe,
.remove = m3_nand_remove,
.shutdown = m3_nand_shutdown,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
};
/* Module entry: announce the driver and register it with the platform
 * bus; probe runs when the matching "aml_nand" device appears. */
static int __init m3_nand_init(void)
{
printk(KERN_INFO "%s, Version %s (c) 2010 Amlogic Inc.\n", DRV_DESC, DRV_VERSION);
return platform_driver_register(&m3_nand_driver);
}
/* Module exit: unregister from the platform bus (remove() runs for
 * each bound device). */
static void __exit m3_nand_exit(void)
{
platform_driver_unregister(&m3_nand_driver);
}
/* Module registration and metadata. */
module_init(m3_nand_init);
module_exit(m3_nand_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_ALIAS("platform:" DRV_NAME);