/* vmu-flash.c
 * Driver for SEGA Dreamcast Visual Memory Unit
 *
 * Copyright (c) Adrian McMenamin 2002 - 2009
 * Copyright (c) Paul Mundt 2001
 *
 * Licensed under version 2 of the
 * GNU General Public Licence
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/maple.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>

struct vmu_cache {
        unsigned char *buffer;          /* Cache */
        unsigned int block;             /* Which block was cached */
        unsigned long jiffies_atc;      /* When was it cached? */
        int valid;
};

struct mdev_part {
        struct maple_device *mdev;
        int partition;
};

struct vmupart {
        u16 user_blocks;
        u16 root_block;
        u16 numblocks;
        char *name;
        struct vmu_cache *pcache;
};

struct memcard {
        u16 tempA;                      /* Scratch for GETMINFO results */
        u16 tempB;
        u32 partitions;                 /* Number of partitions on the card */
        u32 blocklen;                   /* Bytes per block */
        u32 writecnt;                   /* Phases per block write */
        u32 readcnt;                    /* Phases per block read */
        u32 removeable;
        int partition;
        int read;
        unsigned char *blockread;       /* Destination for a phased read */
        struct vmupart *parts;
        struct mtd_info *mtd;
};

struct vmu_block {
        unsigned int num;       /* block number */
        unsigned int ofs;       /* block offset */
};
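
/*
 * Worked example of the offset-to-block mapping done by ofs_to_block()
 * below: with the 512-byte blocks a standard VMU reports, a linear
 * offset of 1234 maps to num = 1234 / 512 = 2 and ofs = 1234 % 512 = 210,
 * i.e. byte 210 of the partition's third block.
 */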
static struct vmu_block *ofs_to_block(unsigned long src_ofs,
        struct mtd_info *mtd, int partition)
{
        struct vmu_block *vblock;
        struct maple_device *mdev;
        struct memcard *card;
        struct mdev_part *mpart;
        int num;

        mpart = mtd->priv;
        mdev = mpart->mdev;
        card = maple_get_drvdata(mdev);

        if (src_ofs >= card->parts[partition].numblocks * card->blocklen)
                goto failed;

        num = src_ofs / card->blocklen;
        if (num > card->parts[partition].numblocks)
                goto failed;

        vblock = kmalloc(sizeof(struct vmu_block), GFP_KERNEL);
        if (!vblock)
                goto failed;

        vblock->num = num;
        vblock->ofs = src_ofs % card->blocklen;
        return vblock;

failed:
        return NULL;
}

/* Maple bus callback function for reads */
static void vmu_blockread(struct mapleq *mq)
{
        struct maple_device *mdev;
        struct memcard *card;

        mdev = mq->dev;
        card = maple_get_drvdata(mdev);
        /* copy the read in data */
        if (unlikely(!card->blockread))
                return;

        memcpy(card->blockread, mq->recvbuf->buf + 12,
                card->blocklen/card->readcnt);
}
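
/*
 * A note on phased transfers: the Maple memory-card function reports
 * separate read and write phase counts, and each block moves across the
 * bus in blocklen/readcnt (or blocklen/writecnt) byte chunks.  A standard
 * VMU is believed to report readcnt = 1 and writecnt = 4, so a 512-byte
 * block is read in one transfer but written as four 128-byte phases.
 */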
/* Interface with maple bus to read blocks
 * caching the results so that other parts
 * of the driver can access block reads */
static int maple_vmu_read_block(unsigned int num, unsigned char *buf,
        struct mtd_info *mtd)
{
        struct memcard *card;
        struct mdev_part *mpart;
        struct maple_device *mdev;
        int partition, error = 0, x, wait;
        unsigned char *blockread = NULL;
        struct vmu_cache *pcache;
        __be32 sendbuf;

        mpart = mtd->priv;
        mdev = mpart->mdev;
        partition = mpart->partition;
        card = maple_get_drvdata(mdev);
        pcache = card->parts[partition].pcache;
        pcache->valid = 0;

        /* prepare the cache for this block */
        if (!pcache->buffer) {
                pcache->buffer = kmalloc(card->blocklen, GFP_KERNEL);
                if (!pcache->buffer) {
                        dev_err(&mdev->dev, "VMU at (%d, %d) - read fails due"
                                " to lack of memory\n", mdev->port,
                                mdev->unit);
                        error = -ENOMEM;
                        goto outB;
                }
        }

        /*
         * Reads may be phased - the hardware spec supports this, though
         * there may be no devices in the wild that implement it - but we
         * will handle it here
         */
        for (x = 0; x < card->readcnt; x++) {
                sendbuf = cpu_to_be32(partition << 24 | x << 16 | num);

                if (atomic_read(&mdev->busy) == 1) {
                        wait_event_interruptible_timeout(mdev->maple_wait,
                                atomic_read(&mdev->busy) == 0, HZ);
                        if (atomic_read(&mdev->busy) == 1) {
                                dev_notice(&mdev->dev, "VMU at (%d, %d)"
                                        " is busy\n", mdev->port, mdev->unit);
                                error = -EAGAIN;
                                goto outB;
                        }
                }

                atomic_set(&mdev->busy, 1);
                blockread = kmalloc(card->blocklen/card->readcnt, GFP_KERNEL);
                if (!blockread) {
                        error = -ENOMEM;
                        atomic_set(&mdev->busy, 0);
                        goto outB;
                }
                card->blockread = blockread;

                maple_getcond_callback(mdev, vmu_blockread, 0,
                        MAPLE_FUNC_MEMCARD);
                error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
                        MAPLE_COMMAND_BREAD, 2, &sendbuf);
                /* Very long timeouts seem to be needed when box is stressed */
                wait = wait_event_interruptible_timeout(mdev->maple_wait,
                        (atomic_read(&mdev->busy) == 0 ||
                        atomic_read(&mdev->busy) == 2), HZ * 3);

                /*
                 * MTD layer does not handle hotplugging well
                 * so have to return errors when VMU is unplugged
                 * in the middle of a read (busy == 2)
                 */
                if (error || atomic_read(&mdev->busy) == 2) {
                        if (atomic_read(&mdev->busy) == 2)
                                error = -ENXIO;
                        atomic_set(&mdev->busy, 0);
                        card->blockread = NULL;
                        goto outA;
                }
                if (wait == 0 || wait == -ERESTARTSYS) {
                        card->blockread = NULL;
                        atomic_set(&mdev->busy, 0);
                        error = -EIO;
                        list_del_init(&(mdev->mq->list));
                        kfree(mdev->mq->sendbuf);
                        mdev->mq->sendbuf = NULL;
                        if (wait == -ERESTARTSYS) {
                                dev_warn(&mdev->dev, "VMU read on (%d, %d)"
                                        " interrupted on block 0x%X\n",
                                        mdev->port, mdev->unit, num);
                        } else
                                dev_notice(&mdev->dev, "VMU read on (%d, %d)"
                                        " timed out on block 0x%X\n",
                                        mdev->port, mdev->unit, num);
                        goto outA;
                }

                memcpy(buf + (card->blocklen/card->readcnt) * x, blockread,
                        card->blocklen/card->readcnt);
                memcpy(pcache->buffer + (card->blocklen/card->readcnt) * x,
                        card->blockread, card->blocklen/card->readcnt);
                card->blockread = NULL;
                pcache->block = num;
                pcache->jiffies_atc = jiffies;
                pcache->valid = 1;
                kfree(blockread);
        }

        return error;

outA:
        kfree(blockread);
outB:
        return error;
}
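
/*
 * A note on the packet arithmetic in maple_vmu_write_block() below:
 * lengths passed to maple_add_packet() are counted in 32-bit words and
 * appear to include the function word, so a block write sends
 * phaselen / 4 + 2 words: the MEMCARD function word, one address word
 * encoding partition, phase and block number, and the phase payload -
 * e.g. 1 + 1 + 32 = 34 words for a 128-byte phase.
 */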
/* communicate with maple bus for phased writing */
static int maple_vmu_write_block(unsigned int num, const unsigned char *buf,
        struct mtd_info *mtd)
{
        struct memcard *card;
        struct mdev_part *mpart;
        struct maple_device *mdev;
        int partition, error, locking, x, phaselen, wait;
        __be32 *sendbuf;

        mpart = mtd->priv;
        mdev = mpart->mdev;
        partition = mpart->partition;
        card = maple_get_drvdata(mdev);

        phaselen = card->blocklen/card->writecnt;

        sendbuf = kmalloc(phaselen + 4, GFP_KERNEL);
        if (!sendbuf) {
                error = -ENOMEM;
                goto fail_nosendbuf;
        }
        for (x = 0; x < card->writecnt; x++) {
                sendbuf[0] = cpu_to_be32(partition << 24 | x << 16 | num);
                memcpy(&sendbuf[1], buf + phaselen * x, phaselen);
                /* wait until the device is not busy doing something else
                 * or 1 second - whichever is longer */
                if (atomic_read(&mdev->busy) == 1) {
                        wait_event_interruptible_timeout(mdev->maple_wait,
                                atomic_read(&mdev->busy) == 0, HZ);
                        if (atomic_read(&mdev->busy) == 1) {
                                error = -EBUSY;
                                dev_notice(&mdev->dev, "VMU write at (%d, %d)"
                                        " failed - device is busy\n",
                                        mdev->port, mdev->unit);
                                goto fail_nolock;
                        }
                }
                atomic_set(&mdev->busy, 1);
                locking = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
                        MAPLE_COMMAND_BWRITE, phaselen / 4 + 2, sendbuf);
                wait = wait_event_interruptible_timeout(mdev->maple_wait,
                        atomic_read(&mdev->busy) == 0, HZ/10);
                if (locking) {
                        error = -EIO;
                        atomic_set(&mdev->busy, 0);
                        goto fail_nolock;
                }
                if (atomic_read(&mdev->busy) == 2) {
                        atomic_set(&mdev->busy, 0);
                } else if (wait == 0 || wait == -ERESTARTSYS) {
                        error = -EIO;
                        dev_warn(&mdev->dev, "Write at (%d, %d) of block"
                                " 0x%X at phase %d failed: could not"
                                " communicate with VMU\n", mdev->port,
                                mdev->unit, num, x);
                        atomic_set(&mdev->busy, 0);
                        kfree(mdev->mq->sendbuf);
                        mdev->mq->sendbuf = NULL;
                        list_del_init(&(mdev->mq->list));
                        goto fail_nolock;
                }
        }
        kfree(sendbuf);

        return card->blocklen;

fail_nolock:
        kfree(sendbuf);
fail_nosendbuf:
        dev_err(&mdev->dev, "VMU (%d, %d): write failed\n", mdev->port,
                mdev->unit);
        return error;
}

/* mtd function to simulate reading byte by byte */
static unsigned char vmu_flash_read_char(unsigned long ofs, int *retval,
        struct mtd_info *mtd)
{
        struct vmu_block *vblock;
        struct memcard *card;
        struct mdev_part *mpart;
        struct maple_device *mdev;
        unsigned char *buf, ret;
        int partition, error;

        mpart = mtd->priv;
        mdev = mpart->mdev;
        partition = mpart->partition;
        card = maple_get_drvdata(mdev);

        *retval = 0;

        buf = kmalloc(card->blocklen, GFP_KERNEL);
        if (!buf) {
                *retval = 1;
                ret = -ENOMEM;
                goto finish;
        }

        vblock = ofs_to_block(ofs, mtd, partition);
        if (!vblock) {
                *retval = 3;
                ret = -ENOMEM;
                goto out_buf;
        }

        error = maple_vmu_read_block(vblock->num, buf, mtd);
        if (error) {
                ret = error;
                *retval = 2;
                goto out_vblock;
        }

        ret = buf[vblock->ofs];

out_vblock:
        kfree(vblock);
out_buf:
        kfree(buf);
finish:
        return ret;
}

/* mtd higher order function to read flash */
static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
        size_t *retlen, u_char *buf)
{
        struct maple_device *mdev;
        struct memcard *card;
        struct mdev_part *mpart;
        struct vmu_cache *pcache;
        struct vmu_block *vblock;
        int index = 0, retval, partition, leftover, numblocks;
        unsigned char cx;

        if (len < 1)
                return -EIO;

        mpart = mtd->priv;
        mdev = mpart->mdev;
        partition = mpart->partition;
        card = maple_get_drvdata(mdev);

        numblocks = card->parts[partition].numblocks;
        if (from + len > numblocks * card->blocklen)
                len = numblocks * card->blocklen - from;
        if (len == 0)
                return -EIO;

        /* Have we cached this bit already? */
        pcache = card->parts[partition].pcache;
        do {
                vblock = ofs_to_block(from + index, mtd, partition);
                if (!vblock)
                        return -ENOMEM;
                /* Have we cached this and is the cache valid and timely? */
                if (pcache->valid &&
                        time_before(jiffies, pcache->jiffies_atc + HZ) &&
                        (pcache->block == vblock->num)) {
                        /* we have cached it, so do necessary copying */
                        leftover = card->blocklen - vblock->ofs;
                        if (vblock->ofs + len - index < card->blocklen) {
                                /* only a bit of this block to copy */
                                memcpy(buf + index,
                                        pcache->buffer + vblock->ofs,
                                        len - index);
                                index = len;
                        } else {
                                /* otherwise copy remainder of whole block */
                                memcpy(buf + index, pcache->buffer +
                                        vblock->ofs, leftover);
                                index += leftover;
                        }
                } else {
                        /*
                         * Not cached so read one byte -
                         * but cache the rest of the block
                         */
                        cx = vmu_flash_read_char(from + index, &retval, mtd);
                        if (retval) {
                                *retlen = index;
                                kfree(vblock);
                                return cx;
                        }
                        memset(buf + index, cx, 1);
                        index++;
                }
                kfree(vblock);
        } while (len > index);
        *retlen = index;

        return 0;
}
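
/*
 * The write path below is a read-modify-write cycle: because the bus
 * only transfers whole blocks, each affected block is first read in via
 * maple_vmu_read_block(), patched with the caller's bytes from
 * vblock->ofs onwards, and then written back out in full.
 */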
static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
        size_t *retlen, const u_char *buf)
{
        struct maple_device *mdev;
        struct memcard *card;
        struct mdev_part *mpart;
        int index = 0, partition, error = 0, numblocks;
        struct vmu_cache *pcache;
        struct vmu_block *vblock;
        unsigned char *buffer;

        mpart = mtd->priv;
        mdev = mpart->mdev;
        partition = mpart->partition;
        card = maple_get_drvdata(mdev);

        /* simple sanity checks */
        if (len < 1) {
                error = -EIO;
                goto failed;
        }
        numblocks = card->parts[partition].numblocks;
        if (to + len > numblocks * card->blocklen)
                len = numblocks * card->blocklen - to;
        if (len == 0) {
                error = -EIO;
                goto failed;
        }

        vblock = ofs_to_block(to, mtd, partition);
        if (!vblock) {
                error = -ENOMEM;
                goto failed;
        }

        buffer = kmalloc(card->blocklen, GFP_KERNEL);
        if (!buffer) {
                error = -ENOMEM;
                goto fail_buffer;
        }

        do {
                /* Read in the block we are to write to */
                error = maple_vmu_read_block(vblock->num, buffer, mtd);
                if (error)
                        goto fail_io;

                do {
                        buffer[vblock->ofs] = buf[index];
                        vblock->ofs++;
                        index++;
                        if (index >= len)
                                break;
                } while (vblock->ofs < card->blocklen);

                /* write out new buffer */
                error = maple_vmu_write_block(vblock->num, buffer, mtd);
                /* invalidate the cache */
                pcache = card->parts[partition].pcache;
                pcache->valid = 0;

                if (error != card->blocklen)
                        goto fail_io;

                vblock->num++;
                vblock->ofs = 0;
        } while (len > index);

        kfree(buffer);
        *retlen = index;
        kfree(vblock);
        return 0;

fail_io:
        kfree(buffer);
fail_buffer:
        kfree(vblock);
failed:
        dev_err(&mdev->dev, "VMU write failing with error %d\n", error);
        return error;
}

static void vmu_flash_sync(struct mtd_info *mtd)
{
        /* Do nothing here */
}
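
/*
 * On the layout read back by vmu_queryblocks() below: the GETMINFO
 * reply is viewed as an array of 16-bit words, with the user-block
 * count taken from word 12 and the root-block number from word 6.  On
 * a standard VMU these are expected to be 200 and 255 respectively,
 * giving 256 addressable blocks (root_block + 1).
 */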
/* Maple bus callback function to recursively query hardware details */
static void vmu_queryblocks(struct mapleq *mq)
{
        struct maple_device *mdev;
        unsigned short *res;
        struct memcard *card;
        __be32 partnum;
        struct vmu_cache *pcache;
        struct mdev_part *mpart;
        struct mtd_info *mtd_cur;
        struct vmupart *part_cur;
        int error;

        mdev = mq->dev;
        card = maple_get_drvdata(mdev);
        res = (unsigned short *) (mq->recvbuf->buf);
        card->tempA = res[12];
        card->tempB = res[6];

        dev_info(&mdev->dev, "VMU device at partition %d has %d user "
                "blocks with a root block at %d\n", card->partition,
                card->tempA, card->tempB);

        part_cur = &card->parts[card->partition];
        part_cur->user_blocks = card->tempA;
        part_cur->root_block = card->tempB;
        part_cur->numblocks = card->tempB + 1;
        part_cur->name = kmalloc(12, GFP_KERNEL);
        if (!part_cur->name)
                goto fail_name;

        sprintf(part_cur->name, "vmu%d.%d.%d",
                mdev->port, mdev->unit, card->partition);

        mtd_cur = &card->mtd[card->partition];
        mtd_cur->name = part_cur->name;
        mtd_cur->type = 8;
        mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE;
        mtd_cur->size = part_cur->numblocks * card->blocklen;
        mtd_cur->erasesize = card->blocklen;
        mtd_cur->write = vmu_flash_write;
        mtd_cur->read = vmu_flash_read;
        mtd_cur->sync = vmu_flash_sync;
        mtd_cur->writesize = card->blocklen;

        mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL);
        if (!mpart)
                goto fail_mpart;

        mpart->mdev = mdev;
        mpart->partition = card->partition;
        mtd_cur->priv = mpart;
        mtd_cur->owner = THIS_MODULE;

        pcache = kzalloc(sizeof(struct vmu_cache), GFP_KERNEL);
        if (!pcache)
                goto fail_cache_create;
        part_cur->pcache = pcache;

        error = mtd_device_register(mtd_cur, NULL, 0);
        if (error)
                goto fail_mtd_register;

        maple_getcond_callback(mdev, NULL, 0,
                MAPLE_FUNC_MEMCARD);

        /*
         * Set up a recursive call to the (probably theoretical)
         * second or more partition
         */
        if (++card->partition < card->partitions) {
                partnum = cpu_to_be32(card->partition << 24);
                maple_getcond_callback(mdev, vmu_queryblocks, 0,
                        MAPLE_FUNC_MEMCARD);
                maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
                        MAPLE_COMMAND_GETMINFO, 2, &partnum);
        }
        return;

fail_mtd_register:
        dev_err(&mdev->dev, "Could not register maple device at (%d, %d),"
                " error is 0x%X\n", mdev->port, mdev->unit, error);
        for (error = 0; error <= card->partition; error++) {
                kfree(((card->parts)[error]).pcache);
                ((card->parts)[error]).pcache = NULL;
        }
fail_cache_create:
fail_mpart:
        for (error = 0; error <= card->partition; error++) {
                kfree(((card->mtd)[error]).priv);
                ((card->mtd)[error]).priv = NULL;
        }
        maple_getcond_callback(mdev, NULL, 0,
                MAPLE_FUNC_MEMCARD);
        kfree(part_cur->name);
fail_name:
        return;
}
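
/*
 * Worked example of the function_data decode performed in vmu_connect()
 * below, using values a standard VMU would plausibly report (these are
 * illustrative, not captured from a device): with basic_flash_data =
 * 0x000f4100, partitions = (0x00 + 1) = 1, blocklen = (0x0f + 1) << 5 =
 * 512 bytes, writecnt = 4 phases and readcnt = 1 phase - i.e. 128-byte
 * write chunks and whole-block reads.
 */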
/* Handles very basic info about the flash, queries for details */
static int __devinit vmu_connect(struct maple_device *mdev)
{
        unsigned long test_flash_data, basic_flash_data;
        int c, error;
        struct memcard *card;
        u32 partnum = 0;

        test_flash_data = be32_to_cpu(mdev->devinfo.function);
        /* Need to count how many bits are set - to find out which
         * function_data element has details of the memory card
         */
        c = hweight_long(test_flash_data);

        basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]);

        card = kmalloc(sizeof(struct memcard), GFP_KERNEL);
        if (!card) {
                error = -ENOMEM;
                goto fail_nomem;
        }

        card->partitions = (basic_flash_data >> 24 & 0xFF) + 1;
        card->blocklen = ((basic_flash_data >> 16 & 0xFF) + 1) << 5;
        card->writecnt = basic_flash_data >> 12 & 0xF;
        card->readcnt = basic_flash_data >> 8 & 0xF;
        card->removeable = basic_flash_data >> 7 & 1;

        card->partition = 0;

        /*
         * Not sure there are actually any multi-partition devices in the
         * real world, but the hardware supports them, so we will too
         */
        card->parts = kmalloc(sizeof(struct vmupart) * card->partitions,
                GFP_KERNEL);
        if (!card->parts) {
                error = -ENOMEM;
                goto fail_partitions;
        }

        card->mtd = kmalloc(sizeof(struct mtd_info) * card->partitions,
                GFP_KERNEL);
        if (!card->mtd) {
                error = -ENOMEM;
                goto fail_mtd_info;
        }

        maple_set_drvdata(mdev, card);

        /*
         * We want to trap meminfo not get cond
         * so set interval to zero, but rely on maple bus
         * driver to pass back the results of the meminfo
         */
        maple_getcond_callback(mdev, vmu_queryblocks, 0,
                MAPLE_FUNC_MEMCARD);

        /* Make sure we are clear to go */
        if (atomic_read(&mdev->busy) == 1) {
                wait_event_interruptible_timeout(mdev->maple_wait,
                        atomic_read(&mdev->busy) == 0, HZ);
                if (atomic_read(&mdev->busy) == 1) {
                        dev_notice(&mdev->dev, "VMU at (%d, %d) is busy\n",
                                mdev->port, mdev->unit);
                        error = -EAGAIN;
                        goto fail_device_busy;
                }
        }

        atomic_set(&mdev->busy, 1);

        /*
         * Set up the minfo call: vmu_queryblocks will handle
         * the information passed back
         */
        error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
                MAPLE_COMMAND_GETMINFO, 2, &partnum);
        if (error) {
                dev_err(&mdev->dev, "Could not lock VMU at (%d, %d)"
                        " error is 0x%X\n", mdev->port, mdev->unit, error);
                goto fail_device_busy;
        }

        return 0;

fail_device_busy:
        kfree(card->mtd);
fail_mtd_info:
        kfree(card->parts);
fail_partitions:
        kfree(card);
fail_nomem:
        return error;
}

static void __devexit vmu_disconnect(struct maple_device *mdev)
{
        struct memcard *card;
        struct mdev_part *mpart;
        int x;

        mdev->callback = NULL;
        card = maple_get_drvdata(mdev);
        for (x = 0; x < card->partitions; x++) {
                mpart = ((card->mtd)[x]).priv;
                mpart->mdev = NULL;
                mtd_device_unregister(&((card->mtd)[x]));
                kfree(((card->parts)[x]).name);
        }
        kfree(card->parts);
        kfree(card->mtd);
        kfree(card);
}

/* Callback to handle eccentricities of both mtd subsystem
 * and general flakiness of Dreamcast VMUs
 */
static int vmu_can_unload(struct maple_device *mdev)
{
        struct memcard *card;
        int x;
        struct mtd_info *mtd;

        card = maple_get_drvdata(mdev);
        for (x = 0; x < card->partitions; x++) {
                mtd = &((card->mtd)[x]);
                if (mtd->usecount > 0)
                        return 0;
        }
        return 1;
}

#define ERRSTR "VMU at (%d, %d) file error -"

static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
{
        enum maple_file_errors error = ((int *)recvbuf)[1];

        switch (error) {
        case MAPLE_FILEERR_INVALID_PARTITION:
                dev_notice(&mdev->dev, ERRSTR " invalid partition number\n",
                        mdev->port, mdev->unit);
                break;

        case MAPLE_FILEERR_PHASE_ERROR:
                dev_notice(&mdev->dev, ERRSTR " phase error\n",
                        mdev->port, mdev->unit);
                break;

        case MAPLE_FILEERR_INVALID_BLOCK:
                dev_notice(&mdev->dev, ERRSTR " invalid block number\n",
                        mdev->port, mdev->unit);
                break;

        case MAPLE_FILEERR_WRITE_ERROR:
                dev_notice(&mdev->dev, ERRSTR " write error\n",
                        mdev->port, mdev->unit);
                break;

        case MAPLE_FILEERR_INVALID_WRITE_LENGTH:
                dev_notice(&mdev->dev, ERRSTR " invalid write length\n",
                        mdev->port, mdev->unit);
                break;

        case MAPLE_FILEERR_BAD_CRC:
                dev_notice(&mdev->dev, ERRSTR " bad CRC\n",
                        mdev->port, mdev->unit);
                break;

        default:
                dev_notice(&mdev->dev, ERRSTR " 0x%X\n",
                        mdev->port, mdev->unit, error);
        }
}

static int __devinit probe_maple_vmu(struct device *dev)
{
        int error;
        struct maple_device *mdev = to_maple_dev(dev);
        struct maple_driver *mdrv = to_maple_driver(dev->driver);

        mdev->can_unload = vmu_can_unload;
        mdev->fileerr_handler = vmu_file_error;
        mdev->driver = mdrv;

        error = vmu_connect(mdev);
        if (error)
                return error;

        return 0;
}

static int __devexit remove_maple_vmu(struct device *dev)
{
        struct maple_device *mdev = to_maple_dev(dev);

        vmu_disconnect(mdev);
        return 0;
}

static struct maple_driver vmu_flash_driver = {
        .function = MAPLE_FUNC_MEMCARD,
        .drv = {
                .name = "Dreamcast_visual_memory",
                .probe = probe_maple_vmu,
                .remove = __devexit_p(remove_maple_vmu),
        },
};

static int __init vmu_flash_map_init(void)
{
        return maple_driver_register(&vmu_flash_driver);
}

static void __exit vmu_flash_map_exit(void)
{
        maple_driver_unregister(&vmu_flash_driver);
}

module_init(vmu_flash_map_init);
module_exit(vmu_flash_map_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adrian McMenamin");
MODULE_DESCRIPTION("Flash mapping for Sega Dreamcast visual memory");