/*
 * linux/drivers/mtd/onenand/omap2.c
 *
 * OneNAND driver for OMAP2 / OMAP3
 *
 * Copyright © 2005-2006 Nokia Corporation
 *
 * Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
 * IRQ and DMA support written by Timo Teras
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>

#include <asm/mach/flash.h>
#include <plat/gpmc.h>
#include <plat/onenand.h>
#include <mach/gpio.h>

#include <plat/dma.h>

#include <plat/board.h>

#define DRIVER_NAME "omap2-onenand"

#define ONENAND_IO_SIZE		SZ_128K
#define ONENAND_BUFRAM_SIZE	(1024 * 5)

struct omap2_onenand {
	struct platform_device *pdev;
	int gpmc_cs;
	unsigned long phys_base;
	int gpio_irq;
	struct mtd_info mtd;
	struct mtd_partition *parts;
	struct onenand_chip onenand;
	struct completion irq_done;
	struct completion dma_done;
	int dma_channel;
	int freq;
	int (*setup)(void __iomem *base, int *freq_ptr);
	struct regulator *regulator;
};

static const char *part_probes[] = { "cmdlinepart", NULL,  };

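/*
 * The system-DMA completion callback and the GPIO interrupt handler only
 * signal completions; the actual waiting happens in omap2_onenand_wait()
 * and in the BufferRAM transfer helpers further below.
 */
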
static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
	struct omap2_onenand *c = data;

	complete(&c->dma_done);
}

static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
	struct omap2_onenand *c = dev_id;

	complete(&c->irq_done);

	return IRQ_HANDLED;
}

static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
	return readw(c->onenand.base + reg);
}

static inline void write_reg(struct omap2_onenand *c, unsigned short value,
			     int reg)
{
	writew(value, c->onenand.base + reg);
}

static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
	printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
	       msg, state, ctrl, intr);
}

static void wait_warn(char *msg, int state, unsigned int ctrl,
		      unsigned int intr)
{
	printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
	       "intr 0x%04x\n", msg, state, ctrl, intr);
}

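/*
 * Wait for the current OneNAND command to finish.  Reset and erase
 * prepare/verify states are first polled briefly with udelay(); other
 * non-read states then wait on the GPIO interrupt via the irq_done
 * completion, while reads turn the interrupt pin off and poll the
 * interrupt register with a 20 ms timeout.  Read completions also have
 * their ECC status checked and reported through mtd->ecc_stats.
 */
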
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	unsigned int intr = 0;
	unsigned int ctrl, ctrl_mask;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
	    state == FL_VERIFYING_ERASE) {
		int i = 21;
		unsigned int intr_flags = ONENAND_INT_MASTER;

		switch (state) {
		case FL_RESETING:
			intr_flags |= ONENAND_INT_RESET;
			break;
		case FL_PREPARING_ERASE:
			intr_flags |= ONENAND_INT_ERASE;
			break;
		case FL_VERIFYING_ERASE:
			i = 101;
			break;
		}

		while (--i) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if ((intr & intr_flags) == intr_flags)
			return 0;
		/* Continue in wait for interrupt branch */
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			if (cpu_is_omap34xx())
				/* Add a delay to let GPIO settle */
				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		INIT_COMPLETION(c->irq_done);
		if (c->gpio_irq) {
			result = gpio_get_value(c->gpio_irq);
			if (result == -1) {
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				wait_err("gpio error", state, ctrl, intr);
				return -EIO;
			}
		} else
			result = 0;
		if (result == 0) {
			int retry_cnt = 0;
retry:
			result = wait_for_completion_timeout(&c->irq_done,
						msecs_to_jiffies(20));
			if (result == 0) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO &&
				    !this->ongoing) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
					"Device is write protected!!!\n");
		return -EIO;
	}

	ctrl_mask = 0xFE9F;
	if (this->ongoing)
		ctrl_mask &= ~0x8000;

	if (ctrl & ctrl_mask)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}

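/*
 * OneNAND has two internal BufferRAM banks.  When BufferRAM 1 is the
 * currently selected one, its data and spare areas follow those of
 * BufferRAM 0 in the memory map, so accesses have to be shifted by one
 * page (DataRAM) or one OOB area (SpareRAM).
 */
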
static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
	struct onenand_chip *this = mtd->priv;

	if (ONENAND_CURRENT_BUFFERRAM(this)) {
		if (area == ONENAND_DATARAM)
			return this->writesize;
		if (area == ONENAND_SPARERAM)
			return mtd->oobsize;
	}

	return 0;
}

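/*
 * On OMAP3 the BufferRAM is transferred with the system DMA controller
 * when the request is large enough and word aligned; otherwise, or when
 * called from an interrupt/panic context, when the mapping fails or the
 * DMA times out, the code falls back to a plain memcpy from/to the
 * memory-mapped BufferRAM.
 */
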
#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)

static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	size_t xtra;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt() || oops_in_progress)
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	xtra = count & 3;
	if (xtra) {
		count -= xtra;
		memcpy(buf + count, this->base + bram_offset + count, xtra);
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		goto out_copy;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(buf, this->base + bram_offset, count);
	return 0;
}

static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt() || oops_in_progress)
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}

#else

int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
				 unsigned char *buffer, int offset,
				 size_t count);

int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
				  const unsigned char *buffer,
				  int offset, size_t count);

#endif

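/*
 * The OMAP2 BufferRAM helpers still carry a DMA code path, but the
 * leading "if (1 || ...)" test forces the memcpy path for now, until the
 * power-management implications of using DMA here have been revisited
 * (see the in-line comment below).
 */
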
#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)

static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy(buffer, (__force void *)(this->base + bram_offset),
		       count);
		return 0;
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count / 4, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	return 0;
}

static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy((__force void *)(this->base + bram_offset), buffer,
		       count);
		return 0;
	}

	dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
				 DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
				     count / 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	return 0;
}

#else

int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
				 unsigned char *buffer, int offset,
				 size_t count);

int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
				  const unsigned char *buffer,
				  int offset, size_t count);

#endif

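/*
 * omap2_onenand_rephase() lets the platform re-run the GPMC timing setup
 * callback for every OneNAND device bound to this driver (for example,
 * presumably, after a core frequency change), hence the forward
 * declaration of the platform_driver below.
 */
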
static struct platform_driver omap2_onenand_driver;

static int __adjust_timing(struct device *dev, void *data)
{
	int ret = 0;
	struct omap2_onenand *c;

	c = dev_get_drvdata(dev);

	BUG_ON(c->setup == NULL);

	/* DMA is not in use so this is all that is needed */
	/* Revisit for OMAP3! */
	ret = c->setup(c->onenand.base, &c->freq);

	return ret;
}

int omap2_onenand_rephase(void)
{
	return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
				      NULL, __adjust_timing);
}

static void omap2_onenand_shutdown(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	/* With certain content in the buffer RAM, the OMAP boot ROM code
	 * can recognize the flash chip incorrectly. Zero it out before
	 * soft reset.
	 */
	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}

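/*
 * Optional enable/disable hooks for the OneNAND core: when the board marks
 * the supply as able to sleep (pdata->regulator_can_sleep), the "vonenand"
 * regulator is switched on before accesses and off when the chip is idle.
 */
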
static int omap2_onenand_enable(struct mtd_info *mtd)
{
	int ret;
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);

	ret = regulator_enable(c->regulator);
	if (ret != 0)
		dev_err(&c->pdev->dev, "can't enable regulator\n");

	return ret;
}

static int omap2_onenand_disable(struct mtd_info *mtd)
{
	int ret;
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);

	ret = regulator_disable(c->regulator);
	if (ret != 0)
		dev_err(&c->pdev->dev, "can't disable regulator\n");

	return ret;
}

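/*
 * The probe below expects a struct omap_onenand_platform_data supplied by
 * the board file.  A rough, hypothetical sketch (the field names are the
 * ones consumed in this file; gpmc_onenand_init() is assumed to be the
 * usual OMAP2 board-side helper, and the CS/partition values are made up):
 *
 *	static struct omap_onenand_platform_data board_onenand_data = {
 *		.cs		= 0,
 *		.gpio_irq	= 65,
 *		.dma_channel	= -1,
 *		.parts		= board_onenand_partitions,
 *		.nr_parts	= ARRAY_SIZE(board_onenand_partitions),
 *	};
 *
 *	gpmc_onenand_init(&board_onenand_data);
 *
 * Setting dma_channel to -1 selects PIO and also disables the GPIO IRQ
 * wait, as seen at the start of omap2_onenand_probe().
 */
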
static int __devinit omap2_onenand_probe(struct platform_device *pdev)
{
	struct omap_onenand_platform_data *pdata;
	struct omap2_onenand *c;
	struct onenand_chip *this;
	int r;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->gpmc_cs = pdata->cs;
	c->gpio_irq = pdata->gpio_irq;
	c->dma_channel = pdata->dma_channel;
	if (c->dma_channel < 0) {
		/* if -1, don't use DMA */
		c->gpio_irq = 0;
	}

	r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
	if (r < 0) {
		dev_err(&pdev->dev, "Cannot request GPMC CS\n");
		goto err_kfree;
	}

	if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
			       pdev->dev.driver->name) == NULL) {
		dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
			"size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE);
		r = -EBUSY;
		goto err_free_cs;
	}
	c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
	if (c->onenand.base == NULL) {
		r = -ENOMEM;
		goto err_release_mem_region;
	}

	if (pdata->onenand_setup != NULL) {
		r = pdata->onenand_setup(c->onenand.base, &c->freq);
		if (r < 0) {
			dev_err(&pdev->dev, "Onenand platform setup failed: "
				"%d\n", r);
			goto err_iounmap;
		}
		c->setup = pdata->onenand_setup;
	}

	if (c->gpio_irq) {
		if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
			dev_err(&pdev->dev, "Failed to request GPIO%d for "
				"OneNAND\n", c->gpio_irq);
			goto err_iounmap;
		}
		gpio_direction_input(c->gpio_irq);

		if ((r = request_irq(gpio_to_irq(c->gpio_irq),
				     omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
				     pdev->dev.driver->name, c)) < 0)
			goto err_release_gpio;
	}

	if (c->dma_channel >= 0) {
		r = omap_request_dma(0, pdev->dev.driver->name,
				     omap2_onenand_dma_cb, (void *) c,
				     &c->dma_channel);
		if (r == 0) {
			omap_set_dma_write_mode(c->dma_channel,
						OMAP_DMA_WRITE_NON_POSTED);
			omap_set_dma_src_data_pack(c->dma_channel, 1);
			omap_set_dma_src_burst_mode(c->dma_channel,
						    OMAP_DMA_DATA_BURST_8);
			omap_set_dma_dest_data_pack(c->dma_channel, 1);
			omap_set_dma_dest_burst_mode(c->dma_channel,
						     OMAP_DMA_DATA_BURST_8);
		} else {
			dev_info(&pdev->dev,
				 "failed to allocate DMA for OneNAND, "
				 "using PIO instead\n");
			c->dma_channel = -1;
		}
	}

	dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
		 "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base,
		 c->onenand.base, c->freq);

	c->pdev = pdev;
	c->mtd.name = dev_name(&pdev->dev);
	c->mtd.priv = &c->onenand;
	c->mtd.owner = THIS_MODULE;

	c->mtd.dev.parent = &pdev->dev;

	this = &c->onenand;
	if (c->dma_channel >= 0) {
		this->wait = omap2_onenand_wait;
		if (cpu_is_omap34xx()) {
			this->read_bufferram = omap3_onenand_read_bufferram;
			this->write_bufferram = omap3_onenand_write_bufferram;
		} else {
			this->read_bufferram = omap2_onenand_read_bufferram;
			this->write_bufferram = omap2_onenand_write_bufferram;
		}
	}

	if (pdata->regulator_can_sleep) {
		c->regulator = regulator_get(&pdev->dev, "vonenand");
		if (IS_ERR(c->regulator)) {
			dev_err(&pdev->dev, "Failed to get regulator\n");
			goto err_release_dma;
		}
		c->onenand.enable = omap2_onenand_enable;
		c->onenand.disable = omap2_onenand_disable;
	}

	if (pdata->skip_initial_unlocking)
		this->options |= ONENAND_SKIP_INITIAL_UNLOCKING;

	if ((r = onenand_scan(&c->mtd, 1)) < 0)
		goto err_release_regulator;

	r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
	if (r > 0)
		r = mtd_device_register(&c->mtd, c->parts, r);
	else if (pdata->parts != NULL)
		r = mtd_device_register(&c->mtd, pdata->parts, pdata->nr_parts);
	else
		r = mtd_device_register(&c->mtd, NULL, 0);
	if (r)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_regulator:
	regulator_put(c->regulator);
err_release_dma:
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	if (c->gpio_irq)
		free_irq(gpio_to_irq(c->gpio_irq), c);
err_release_gpio:
	if (c->gpio_irq)
		gpio_free(c->gpio_irq);
err_iounmap:
	iounmap(c->onenand.base);
err_release_mem_region:
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
err_free_cs:
	gpmc_cs_free(c->gpmc_cs);
err_kfree:
	kfree(c->parts);
	kfree(c);

	return r;
}

static int __devexit omap2_onenand_remove(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	onenand_release(&c->mtd);
	regulator_put(c->regulator);
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	omap2_onenand_shutdown(pdev);
	platform_set_drvdata(pdev, NULL);
	if (c->gpio_irq) {
		free_irq(gpio_to_irq(c->gpio_irq), c);
		gpio_free(c->gpio_irq);
	}
	iounmap(c->onenand.base);
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
	gpmc_cs_free(c->gpmc_cs);
	kfree(c->parts);
	kfree(c);

	return 0;
}

static struct platform_driver omap2_onenand_driver = {
	.probe		= omap2_onenand_probe,
	.remove		= __devexit_p(omap2_onenand_remove),
	.shutdown	= omap2_onenand_shutdown,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init omap2_onenand_init(void)
{
	printk(KERN_INFO "OneNAND driver initializing\n");

	return platform_driver_register(&omap2_onenand_driver);
}

static void __exit omap2_onenand_exit(void)
{
	platform_driver_unregister(&omap2_onenand_driver);
}

module_init(omap2_onenand_init);
module_exit(omap2_onenand_exit);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");