rfd_ftl.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854
  1. /*
  2. * rfd_ftl.c -- resident flash disk (flash translation layer)
  3. *
  4. * Copyright © 2005 Sean Young <sean@mess.org>
  5. *
  6. * This type of flash translation layer (FTL) is used by the Embedded BIOS
  7. * by General Software. It is known as the Resident Flash Disk (RFD), see:
  8. *
  9. * http://www.gensw.com/pages/prod/bios/rfd.htm
  10. *
  11. * based on ftl.c
  12. */
  13. #include <linux/hdreg.h>
  14. #include <linux/init.h>
  15. #include <linux/mtd/blktrans.h>
  16. #include <linux/mtd/mtd.h>
  17. #include <linux/vmalloc.h>
  18. #include <linux/slab.h>
  19. #include <linux/jiffies.h>
  20. #include <asm/types.h>
/* Optional module override for the erase-unit size; 0 (the default) makes
 * rfd_ftl_add_mtd() fall back to the MTD's own mtd->erasesize. */
static int block_size = 0;
module_param(block_size, int, 0);
MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");

#define PREFIX "rfd_ftl: "

/* This major has been assigned by device@lanana.org */
#ifndef RFD_FTL_MAJOR
#define RFD_FTL_MAJOR 256
#endif

/* Maximum number of partitions in an FTL region */
#define PART_BITS 4

/* An erase unit should start with this value */
#define RFD_MAGIC 0x9193

/* the second value is 0xffff or 0xffc8; function unknown */
/* the third value is always 0xffff, ignored */

/* next is an array of mapping for each corresponding sector */
#define HEADER_MAP_OFFSET 3

/* Special values in the per-unit sector map (little-endian u16 entries): */
#define SECTOR_DELETED 0x0000	/* entry superseded by a newer write */
#define SECTOR_ZERO 0xfffe	/* stands in for logical sector 0 */
#define SECTOR_FREE 0xffff	/* slot has never been written */

#define SECTOR_SIZE 512

#define SECTORS_PER_TRACK 63
/* Per-erase-unit bookkeeping, one instance per unit in partition->blocks. */
struct block {
	enum {
		BLOCK_OK,	/* valid RFD header, available for use */
		BLOCK_ERASING,	/* erase submitted, completion pending */
		BLOCK_ERASED,	/* erase done, magic not yet rewritten */
		BLOCK_UNUSED,	/* no RFD magic found in the header */
		BLOCK_FAILED	/* erase or header write failed */
	} state;
	int free_sectors;	/* data sectors still SECTOR_FREE */
	int used_sectors;	/* data sectors holding live data */
	int erases;		/* lifetime erase count (wear statistic) */
	u_long offset;		/* byte offset of this unit within the MTD */
};
/* One translated disk built on top of a single MTD device. */
struct partition {
	struct mtd_blktrans_dev mbd;	/* must be first: cast from mbd */

	u_int block_size;		/* size of erase unit */
	u_int total_blocks;		/* number of erase units */
	u_int header_sectors_per_block;	/* header sectors in erase unit */
	u_int data_sectors_per_block;	/* data sectors in erase unit */
	u_int sector_count;		/* sectors in translated disk */
	u_int header_size;		/* bytes in header sector */
	int reserved_block;		/* block next up for reclaim */
	int current_block;		/* block to write to */
	u16 *header_cache;		/* cached header */

	int is_reclaiming;		/* prevents recursive reclaim */
	int cylinders;			/* geometry: 1 head, 63 sectors/track */
	int errors;			/* corruption seen; device goes read-only */
	u_long *sector_map;		/* logical sector -> flash address, -1 unmapped */

	struct block *blocks;		/* per-erase-unit state, total_blocks long */
};
  72. static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);
  73. static int build_block_map(struct partition *part, int block_no)
  74. {
  75. struct block *block = &part->blocks[block_no];
  76. int i;
  77. block->offset = part->block_size * block_no;
  78. if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
  79. block->state = BLOCK_UNUSED;
  80. return -ENOENT;
  81. }
  82. block->state = BLOCK_OK;
  83. for (i=0; i<part->data_sectors_per_block; i++) {
  84. u16 entry;
  85. entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);
  86. if (entry == SECTOR_DELETED)
  87. continue;
  88. if (entry == SECTOR_FREE) {
  89. block->free_sectors++;
  90. continue;
  91. }
  92. if (entry == SECTOR_ZERO)
  93. entry = 0;
  94. if (entry >= part->sector_count) {
  95. printk(KERN_WARNING PREFIX
  96. "'%s': unit #%d: entry %d corrupt, "
  97. "sector %d out of range\n",
  98. part->mbd.mtd->name, block_no, i, entry);
  99. continue;
  100. }
  101. if (part->sector_map[entry] != -1) {
  102. printk(KERN_WARNING PREFIX
  103. "'%s': more than one entry for sector %d\n",
  104. part->mbd.mtd->name, entry);
  105. part->errors = 1;
  106. continue;
  107. }
  108. part->sector_map[entry] = block->offset +
  109. (i + part->header_sectors_per_block) * SECTOR_SIZE;
  110. block->used_sectors++;
  111. }
  112. if (block->free_sectors == part->data_sectors_per_block)
  113. part->reserved_block = block_no;
  114. return 0;
  115. }
  116. static int scan_header(struct partition *part)
  117. {
  118. int sectors_per_block;
  119. int i, rc = -ENOMEM;
  120. int blocks_found;
  121. size_t retlen;
  122. sectors_per_block = part->block_size / SECTOR_SIZE;
  123. part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;
  124. if (part->total_blocks < 2)
  125. return -ENOENT;
  126. /* each erase block has three bytes header, followed by the map */
  127. part->header_sectors_per_block =
  128. ((HEADER_MAP_OFFSET + sectors_per_block) *
  129. sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;
  130. part->data_sectors_per_block = sectors_per_block -
  131. part->header_sectors_per_block;
  132. part->header_size = (HEADER_MAP_OFFSET +
  133. part->data_sectors_per_block) * sizeof(u16);
  134. part->cylinders = (part->data_sectors_per_block *
  135. (part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;
  136. part->sector_count = part->cylinders * SECTORS_PER_TRACK;
  137. part->current_block = -1;
  138. part->reserved_block = -1;
  139. part->is_reclaiming = 0;
  140. part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
  141. if (!part->header_cache)
  142. goto err;
  143. part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
  144. GFP_KERNEL);
  145. if (!part->blocks)
  146. goto err;
  147. part->sector_map = vmalloc(part->sector_count * sizeof(u_long));
  148. if (!part->sector_map) {
  149. printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
  150. "sector map", part->mbd.mtd->name);
  151. goto err;
  152. }
  153. for (i=0; i<part->sector_count; i++)
  154. part->sector_map[i] = -1;
  155. for (i=0, blocks_found=0; i<part->total_blocks; i++) {
  156. rc = part->mbd.mtd->read(part->mbd.mtd,
  157. i * part->block_size, part->header_size,
  158. &retlen, (u_char*)part->header_cache);
  159. if (!rc && retlen != part->header_size)
  160. rc = -EIO;
  161. if (rc)
  162. goto err;
  163. if (!build_block_map(part, i))
  164. blocks_found++;
  165. }
  166. if (blocks_found == 0) {
  167. printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
  168. part->mbd.mtd->name);
  169. rc = -ENOENT;
  170. goto err;
  171. }
  172. if (part->reserved_block == -1) {
  173. printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
  174. part->mbd.mtd->name);
  175. part->errors = 1;
  176. }
  177. return 0;
  178. err:
  179. vfree(part->sector_map);
  180. kfree(part->header_cache);
  181. kfree(part->blocks);
  182. return rc;
  183. }
  184. static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
  185. {
  186. struct partition *part = (struct partition*)dev;
  187. u_long addr;
  188. size_t retlen;
  189. int rc;
  190. if (sector >= part->sector_count)
  191. return -EIO;
  192. addr = part->sector_map[sector];
  193. if (addr != -1) {
  194. rc = part->mbd.mtd->read(part->mbd.mtd, addr, SECTOR_SIZE,
  195. &retlen, (u_char*)buf);
  196. if (!rc && retlen != SECTOR_SIZE)
  197. rc = -EIO;
  198. if (rc) {
  199. printk(KERN_WARNING PREFIX "error reading '%s' at "
  200. "0x%lx\n", part->mbd.mtd->name, addr);
  201. return rc;
  202. }
  203. } else
  204. memset(buf, 0, SECTOR_SIZE);
  205. return 0;
  206. }
/*
 * Completion handler for the asynchronous erase submitted by erase_block().
 * Validates the completed region, records failure, or rewrites the RFD
 * magic and resets the unit's counters. Owns and frees 'erase' on every
 * path except the unknown-offset bailout.
 */
static void erase_callback(struct erase_info *erase)
{
	struct partition *part;
	u16 magic;
	int i, rc;
	size_t retlen;

	part = (struct partition*)erase->priv;

	/* map the completed address back to an erase-unit index */
	i = (u32)erase->addr / part->block_size;
	if (i >= part->total_blocks || part->blocks[i].offset != erase->addr ||
	    erase->addr > UINT_MAX) {
		printk(KERN_ERR PREFIX "erase callback for unknown offset %llx "
				"on '%s'\n", (unsigned long long)erase->addr, part->mbd.mtd->name);
		/* NOTE: 'erase' is deliberately not freed here; the request
		 * did not match any unit we issued. */
		return;
	}

	if (erase->state != MTD_ERASE_DONE) {
		printk(KERN_WARNING PREFIX "erase failed at 0x%llx on '%s', "
				"state %d\n", (unsigned long long)erase->addr,
				part->mbd.mtd->name, erase->state);

		/* failed unit carries neither free nor live sectors */
		part->blocks[i].state = BLOCK_FAILED;
		part->blocks[i].free_sectors = 0;
		part->blocks[i].used_sectors = 0;

		kfree(erase);

		return;
	}

	magic = cpu_to_le16(RFD_MAGIC);

	/* mark ERASED first; promoted to OK only once the magic is on flash */
	part->blocks[i].state = BLOCK_ERASED;
	part->blocks[i].free_sectors = part->data_sectors_per_block;
	part->blocks[i].used_sectors = 0;
	part->blocks[i].erases++;

	rc = part->mbd.mtd->write(part->mbd.mtd,
		part->blocks[i].offset, sizeof(magic), &retlen,
		(u_char*)&magic);

	if (!rc && retlen != sizeof(magic))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "'%s': unable to write RFD "
				"header at 0x%lx\n",
				part->mbd.mtd->name,
				part->blocks[i].offset);
		part->blocks[i].state = BLOCK_FAILED;
	}
	else
		part->blocks[i].state = BLOCK_OK;

	kfree(erase);
}
  252. static int erase_block(struct partition *part, int block)
  253. {
  254. struct erase_info *erase;
  255. int rc = -ENOMEM;
  256. erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
  257. if (!erase)
  258. goto err;
  259. erase->mtd = part->mbd.mtd;
  260. erase->callback = erase_callback;
  261. erase->addr = part->blocks[block].offset;
  262. erase->len = part->block_size;
  263. erase->priv = (u_long)part;
  264. part->blocks[block].state = BLOCK_ERASING;
  265. part->blocks[block].free_sectors = 0;
  266. rc = part->mbd.mtd->erase(part->mbd.mtd, erase);
  267. if (rc) {
  268. printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
  269. "failed\n", (unsigned long long)erase->addr,
  270. (unsigned long long)erase->len, part->mbd.mtd->name);
  271. kfree(erase);
  272. }
  273. err:
  274. return rc;
  275. }
/*
 * Relocate every live sector of erase unit 'block_no' elsewhere (via
 * rfd_ftl_writesect()) so the unit can be erased. '*old_sector' names the
 * flash address being superseded by the caller's write; if it lives in this
 * unit it is dropped rather than copied, and '*old_sector' is set to -1 so
 * the caller does not mark it deleted again.
 */
static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
{
	void *sector_data;
	u16 *map;
	size_t retlen;
	int i, rc = -ENOMEM;

	/* flag reentry: the rfd_ftl_writesect() calls below must not
	 * trigger another reclaim (checked in find_writable_block()) */
	part->is_reclaiming = 1;

	sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
	if (!sector_data)
		goto err3;

	map = kmalloc(part->header_size, GFP_KERNEL);
	if (!map)
		goto err2;

	/* read a private copy of the unit header; header_cache may be
	 * rewritten by the relocation writes below */
	rc = part->mbd.mtd->read(part->mbd.mtd,
		part->blocks[block_no].offset, part->header_size,
		&retlen, (u_char*)map);

	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error reading '%s' at "
			"0x%lx\n", part->mbd.mtd->name,
			part->blocks[block_no].offset);
		goto err;
	}

	for (i=0; i<part->data_sectors_per_block; i++) {
		u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
		u_long addr;

		if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
			continue;

		if (entry == SECTOR_ZERO)
			entry = 0;

		/* already warned about and ignored in build_block_map() */
		if (entry >= part->sector_count)
			continue;

		addr = part->blocks[block_no].offset +
			(i + part->header_sectors_per_block) * SECTOR_SIZE;

		if (*old_sector == addr) {
			/* this sector is being overwritten anyway: skip the
			 * copy, and erase at once if it was the last one */
			*old_sector = -1;
			if (!part->blocks[block_no].used_sectors--) {
				rc = erase_block(part, block_no);
				break;
			}
			continue;
		}
		rc = part->mbd.mtd->read(part->mbd.mtd, addr,
			SECTOR_SIZE, &retlen, sector_data);

		if (!rc && retlen != SECTOR_SIZE)
			rc = -EIO;

		if (rc) {
			printk(KERN_ERR PREFIX "'%s': Unable to "
				"read sector for relocation\n",
				part->mbd.mtd->name);
			goto err;
		}

		/* rewrite through the normal path; lands in another unit */
		rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
				entry, sector_data);

		if (rc)
			goto err;
	}

err:
	kfree(map);
err2:
	kfree(sector_data);
err3:
	part->is_reclaiming = 0;

	return rc;
}
  343. static int reclaim_block(struct partition *part, u_long *old_sector)
  344. {
  345. int block, best_block, score, old_sector_block;
  346. int rc;
  347. /* we have a race if sync doesn't exist */
  348. if (part->mbd.mtd->sync)
  349. part->mbd.mtd->sync(part->mbd.mtd);
  350. score = 0x7fffffff; /* MAX_INT */
  351. best_block = -1;
  352. if (*old_sector != -1)
  353. old_sector_block = *old_sector / part->block_size;
  354. else
  355. old_sector_block = -1;
  356. for (block=0; block<part->total_blocks; block++) {
  357. int this_score;
  358. if (block == part->reserved_block)
  359. continue;
  360. /*
  361. * Postpone reclaiming if there is a free sector as
  362. * more removed sectors is more efficient (have to move
  363. * less).
  364. */
  365. if (part->blocks[block].free_sectors)
  366. return 0;
  367. this_score = part->blocks[block].used_sectors;
  368. if (block == old_sector_block)
  369. this_score--;
  370. else {
  371. /* no point in moving a full block */
  372. if (part->blocks[block].used_sectors ==
  373. part->data_sectors_per_block)
  374. continue;
  375. }
  376. this_score += part->blocks[block].erases;
  377. if (this_score < score) {
  378. best_block = block;
  379. score = this_score;
  380. }
  381. }
  382. if (best_block == -1)
  383. return -ENOSPC;
  384. part->current_block = -1;
  385. part->reserved_block = best_block;
  386. pr_debug("reclaim_block: reclaiming block #%d with %d used "
  387. "%d free sectors\n", best_block,
  388. part->blocks[best_block].used_sectors,
  389. part->blocks[best_block].free_sectors);
  390. if (part->blocks[best_block].used_sectors)
  391. rc = move_block_contents(part, best_block, old_sector);
  392. else
  393. rc = erase_block(part, best_block);
  394. return rc;
  395. }
  396. /*
  397. * IMPROVE: It would be best to choose the block with the most deleted sectors,
  398. * because if we fill that one up first it'll have the most chance of having
  399. * the least live sectors at reclaim.
  400. */
  401. static int find_free_block(struct partition *part)
  402. {
  403. int block, stop;
  404. block = part->current_block == -1 ?
  405. jiffies % part->total_blocks : part->current_block;
  406. stop = block;
  407. do {
  408. if (part->blocks[block].free_sectors &&
  409. block != part->reserved_block)
  410. return block;
  411. if (part->blocks[block].state == BLOCK_UNUSED)
  412. erase_block(part, block);
  413. if (++block >= part->total_blocks)
  414. block = 0;
  415. } while (block != stop);
  416. return -1;
  417. }
  418. static int find_writable_block(struct partition *part, u_long *old_sector)
  419. {
  420. int rc, block;
  421. size_t retlen;
  422. block = find_free_block(part);
  423. if (block == -1) {
  424. if (!part->is_reclaiming) {
  425. rc = reclaim_block(part, old_sector);
  426. if (rc)
  427. goto err;
  428. block = find_free_block(part);
  429. }
  430. if (block == -1) {
  431. rc = -ENOSPC;
  432. goto err;
  433. }
  434. }
  435. rc = part->mbd.mtd->read(part->mbd.mtd, part->blocks[block].offset,
  436. part->header_size, &retlen, (u_char*)part->header_cache);
  437. if (!rc && retlen != part->header_size)
  438. rc = -EIO;
  439. if (rc) {
  440. printk(KERN_ERR PREFIX "'%s': unable to read header at "
  441. "0x%lx\n", part->mbd.mtd->name,
  442. part->blocks[block].offset);
  443. goto err;
  444. }
  445. part->current_block = block;
  446. err:
  447. return rc;
  448. }
  449. static int mark_sector_deleted(struct partition *part, u_long old_addr)
  450. {
  451. int block, offset, rc;
  452. u_long addr;
  453. size_t retlen;
  454. u16 del = cpu_to_le16(SECTOR_DELETED);
  455. block = old_addr / part->block_size;
  456. offset = (old_addr % part->block_size) / SECTOR_SIZE -
  457. part->header_sectors_per_block;
  458. addr = part->blocks[block].offset +
  459. (HEADER_MAP_OFFSET + offset) * sizeof(u16);
  460. rc = part->mbd.mtd->write(part->mbd.mtd, addr,
  461. sizeof(del), &retlen, (u_char*)&del);
  462. if (!rc && retlen != sizeof(del))
  463. rc = -EIO;
  464. if (rc) {
  465. printk(KERN_ERR PREFIX "error writing '%s' at "
  466. "0x%lx\n", part->mbd.mtd->name, addr);
  467. if (rc)
  468. goto err;
  469. }
  470. if (block == part->current_block)
  471. part->header_cache[offset + HEADER_MAP_OFFSET] = del;
  472. part->blocks[block].used_sectors--;
  473. if (!part->blocks[block].used_sectors &&
  474. !part->blocks[block].free_sectors)
  475. rc = erase_block(part, block);
  476. err:
  477. return rc;
  478. }
  479. static int find_free_sector(const struct partition *part, const struct block *block)
  480. {
  481. int i, stop;
  482. i = stop = part->data_sectors_per_block - block->free_sectors;
  483. do {
  484. if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
  485. == SECTOR_FREE)
  486. return i;
  487. if (++i == part->data_sectors_per_block)
  488. i = 0;
  489. }
  490. while(i != stop);
  491. return -1;
  492. }
  493. static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
  494. {
  495. struct partition *part = (struct partition*)dev;
  496. struct block *block;
  497. u_long addr;
  498. int i;
  499. int rc;
  500. size_t retlen;
  501. u16 entry;
  502. if (part->current_block == -1 ||
  503. !part->blocks[part->current_block].free_sectors) {
  504. rc = find_writable_block(part, old_addr);
  505. if (rc)
  506. goto err;
  507. }
  508. block = &part->blocks[part->current_block];
  509. i = find_free_sector(part, block);
  510. if (i < 0) {
  511. rc = -ENOSPC;
  512. goto err;
  513. }
  514. addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
  515. block->offset;
  516. rc = part->mbd.mtd->write(part->mbd.mtd,
  517. addr, SECTOR_SIZE, &retlen, (u_char*)buf);
  518. if (!rc && retlen != SECTOR_SIZE)
  519. rc = -EIO;
  520. if (rc) {
  521. printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
  522. part->mbd.mtd->name, addr);
  523. if (rc)
  524. goto err;
  525. }
  526. part->sector_map[sector] = addr;
  527. entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);
  528. part->header_cache[i + HEADER_MAP_OFFSET] = entry;
  529. addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
  530. rc = part->mbd.mtd->write(part->mbd.mtd, addr,
  531. sizeof(entry), &retlen, (u_char*)&entry);
  532. if (!rc && retlen != sizeof(entry))
  533. rc = -EIO;
  534. if (rc) {
  535. printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
  536. part->mbd.mtd->name, addr);
  537. if (rc)
  538. goto err;
  539. }
  540. block->used_sectors++;
  541. block->free_sectors--;
  542. err:
  543. return rc;
  544. }
  545. static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
  546. {
  547. struct partition *part = (struct partition*)dev;
  548. u_long old_addr;
  549. int i;
  550. int rc = 0;
  551. pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);
  552. if (part->reserved_block == -1) {
  553. rc = -EACCES;
  554. goto err;
  555. }
  556. if (sector >= part->sector_count) {
  557. rc = -EIO;
  558. goto err;
  559. }
  560. old_addr = part->sector_map[sector];
  561. for (i=0; i<SECTOR_SIZE; i++) {
  562. if (!buf[i])
  563. continue;
  564. rc = do_writesect(dev, sector, buf, &old_addr);
  565. if (rc)
  566. goto err;
  567. break;
  568. }
  569. if (i == SECTOR_SIZE)
  570. part->sector_map[sector] = -1;
  571. if (old_addr != -1)
  572. rc = mark_sector_deleted(part, old_addr);
  573. err:
  574. return rc;
  575. }
/* Report the fixed disk geometry: one head, 63 sectors per track, and the
 * cylinder count computed in scan_header(). Always succeeds. */
static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
	struct partition *part = (struct partition*)dev;

	geo->heads = 1;
	geo->sectors = SECTORS_PER_TRACK;
	geo->cylinders = part->cylinders;

	return 0;
}
  584. static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
  585. {
  586. struct partition *part;
  587. if (mtd->type != MTD_NORFLASH || mtd->size > UINT_MAX)
  588. return;
  589. part = kzalloc(sizeof(struct partition), GFP_KERNEL);
  590. if (!part)
  591. return;
  592. part->mbd.mtd = mtd;
  593. if (block_size)
  594. part->block_size = block_size;
  595. else {
  596. if (!mtd->erasesize) {
  597. printk(KERN_WARNING PREFIX "please provide block_size");
  598. goto out;
  599. } else
  600. part->block_size = mtd->erasesize;
  601. }
  602. if (scan_header(part) == 0) {
  603. part->mbd.size = part->sector_count;
  604. part->mbd.tr = tr;
  605. part->mbd.devnum = -1;
  606. if (!(mtd->flags & MTD_WRITEABLE))
  607. part->mbd.readonly = 1;
  608. else if (part->errors) {
  609. printk(KERN_WARNING PREFIX "'%s': errors found, "
  610. "setting read-only\n", mtd->name);
  611. part->mbd.readonly = 1;
  612. }
  613. printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
  614. mtd->name, mtd->type, mtd->flags);
  615. if (!add_mtd_blktrans_dev((void*)part))
  616. return;
  617. }
  618. out:
  619. kfree(part);
  620. }
/* Tear down one translated device: log per-unit erase counts (wear data),
 * unregister the block device, then free the structures scan_header()
 * allocated. The containing 'part' is freed by the blktrans core. */
static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct partition *part = (struct partition*)dev;
	int i;

	for (i=0; i<part->total_blocks; i++) {
		pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
			part->mbd.mtd->name, i, part->blocks[i].erases);
	}

	del_mtd_blktrans_dev(dev);
	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);
}
/* Block-translation descriptor: 512-byte sectors, PART_BITS partition bits,
 * registered under the LANANA-assigned RFD_FTL_MAJOR. */
static struct mtd_blktrans_ops rfd_ftl_tr = {
	.name		= "rfd",
	.major		= RFD_FTL_MAJOR,
	.part_bits	= PART_BITS,
	.blksize	= SECTOR_SIZE,

	.readsect	= rfd_ftl_readsect,
	.writesect	= rfd_ftl_writesect,
	.getgeo		= rfd_ftl_getgeo,
	.add_mtd	= rfd_ftl_add_mtd,
	.remove_dev	= rfd_ftl_remove_dev,
	.owner		= THIS_MODULE,
};
/* Module entry: register the translation layer with the blktrans core. */
static int __init init_rfd_ftl(void)
{
	return register_mtd_blktrans(&rfd_ftl_tr);
}
/* Module exit: unregister the translation layer. */
static void __exit cleanup_rfd_ftl(void)
{
	deregister_mtd_blktrans(&rfd_ftl_tr);
}
module_init(init_rfd_ftl);
module_exit(cleanup_rfd_ftl);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <sean@mess.org>");
MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
		"used by General Software's Embedded BIOS");