card_block.c
/*
 * Block driver for media (i.e., flash cards)
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/proc_fs.h>
#include <linux/genhd.h>
#include <linux/kthread.h>
#include <linux/cardreader/card_block.h>
#include <linux/cardreader/cardreader.h>
#include <asm/system.h>
#include <asm/uaccess.h>

static int major;

#define CARD_SHIFT		4
#define CARD_QUEUE_EXIT		(1 << 0)
#define CARD_QUEUE_SUSPENDED	(1 << 1)
#define CARD_QUEUE_BOUNCESZ	(512 * 256)
#define CARD_NUM_MINORS		(256 >> CARD_SHIFT)
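/*
 * 256 minors split into slots of 16 (1 << CARD_SHIFT) gives
 * CARD_NUM_MINORS = 16 devices, so the dev_use bitmap below fits in a
 * single unsigned long. For example, devidx 2 maps to first_minor
 * 2 << 4 = 32.
 */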
//static unsigned long dev_use[CARD_NUM_MINORS / (8 * sizeof(unsigned long))];
static unsigned long dev_use[1];

#define CARD_INAND_START_MINOR	40
#define MAX_MTD_DEVICES		32

static int card_blk_issue_rq(struct card_queue *cq, struct request *req);
static int card_blk_probe(struct memory_card *card);
static int card_blk_prep_rq(struct card_queue *cq, struct request *req);
void card_queue_resume(struct card_queue *cq);
struct card_blk_data {
	spinlock_t lock;
	struct gendisk *disk;
	struct card_queue queue;
	unsigned int usage;
	unsigned int block_bits;
	unsigned int read_only;
};

static DEFINE_MUTEX(open_lock);

/* wait for device delete */
struct completion card_devdel_comp;

/* sdio irq flag */
unsigned char sdio_irq_handled;

struct card_queue_list {
	int cq_num;
	unsigned cq_flag;
	struct card_queue *cq;
	struct card_queue_list *cq_next;
};
void card_cleanup_queue(struct card_queue *cq)
{
	struct request_queue *q = cq->queue;
	unsigned long flags;

	card_queue_resume(cq);

	/* Then terminate our worker thread */
	kthread_stop(cq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(cq->bounce_sg);
	cq->bounce_sg = NULL;

	kfree(cq->sg);
	cq->sg = NULL;

	/* bounce_buf points at host->dma_buf, so it is not freed here */
	cq->bounce_buf = NULL;
	cq->card = NULL;
}
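/*
 * Reference counting: card_blk_alloc() creates the device with usage == 1;
 * every open takes another reference and every release drops one. The
 * final card_blk_put() (from card_blk_remove() or the last close) tears
 * down the disk and queue and signals card_devdel_comp so a pending
 * removal can finish.
 */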
static struct card_blk_data *card_blk_get(struct gendisk *disk)
{
	struct card_blk_data *card_data;

	mutex_lock(&open_lock);
	card_data = disk->private_data;
	if (card_data && card_data->usage == 0)
		card_data = NULL;
	if (card_data)
		card_data->usage++;
	mutex_unlock(&open_lock);

	return card_data;
}

static void card_blk_put(struct card_blk_data *card_data)
{
	mutex_lock(&open_lock);
	card_data->usage--;
	if (card_data->usage == 0) {
		put_disk(card_data->disk);
		//card_cleanup_queue(&card_data->queue);
		blk_cleanup_queue(card_data->queue.queue);
		card_data->disk->queue = NULL;
		kfree(card_data);
		complete(&card_devdel_comp);
	}
	mutex_unlock(&open_lock);
}
static int card_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct card_blk_data *card_data;
	int ret = -ENXIO;

	card_data = card_blk_get(bdev->bd_disk);
	if (card_data) {
		if (card_data->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		/*
		 * Return -EROFS when the FS/USB layer opens the card with
		 * O_RDWR but the card is write-protected:
		 * sd_mmc_info->write_protected_flag is set in sd_mmc_check_wp,
		 * card->state gets CARD_STATE_READONLY in sd_open, and
		 * card_data->read_only is set to 1 in card_blk_alloc.
		 */
		if ((mode & FMODE_WRITE) && card_data->read_only) {
			card_blk_put(bdev->bd_disk->private_data);
			ret = -EROFS;
		}
	}
	return ret;
}

static int card_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct card_blk_data *card_data = disk->private_data;

	card_blk_put(card_data);
	return 0;
}

static int card_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}

static struct block_device_operations card_ops = {
	.open = card_blk_open,
	.release = card_blk_release,
	.getgeo = card_blk_getgeo,
	.owner = THIS_MODULE,
};
static inline int card_claim_card(struct memory_card *card)
{
	if (card->card_status == CARD_REMOVED)
		return -ENODEV;
	return __card_claim_host(card->host, card);
}

static int card_prep_request(struct request_queue *q, struct request *req)
{
	struct card_queue *cq = q->queuedata;
	int ret = BLKPREP_KILL;

	if (!cq) {
		//printk(KERN_ERR "[card_prep_request] %s: killing request - no device/host\n", req->rq_disk->disk_name);
		return BLKPREP_KILL;
	}

	if (blk_special_request(req)) {
		/*
		 * Special commands already have the command
		 * blocks set up in req->special.
		 */
		BUG_ON(!req->special);
		ret = BLKPREP_OK;
	} else if (blk_fs_request(req) || blk_pc_request(req)) {
		/*
		 * Block I/O requests need translating according
		 * to the protocol.
		 */
		ret = cq->prep_fn(cq, req);
	} else {
		/*
		 * Everything else is invalid.
		 */
		blk_dump_rq_flags(req, "CARD bad request");
	}

	if (ret == BLKPREP_OK)
		req->cmd_flags |= REQ_DONTPREP;

	return ret;
}

static void card_request(struct request_queue *q)
{
	struct card_queue *cq = q->queuedata;
	struct request *req;

	if (!cq) {
		/* The queue is going away; fail everything still on it. */
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	if (!cq->req)
		wake_up_process(cq->thread);
}
void card_queue_suspend(struct card_queue *cq)
{
	struct request_queue *q = cq->queue;
	unsigned long flags;

	if (!(cq->flags & CARD_QUEUE_SUSPENDED)) {
		cq->flags |= CARD_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&cq->thread_sem);
	}
}

void card_queue_resume(struct card_queue *cq)
{
	struct request_queue *q = cq->queue;
	unsigned long flags;

	if (cq->flags & CARD_QUEUE_SUSPENDED) {
		cq->flags &= ~CARD_QUEUE_SUSPENDED;

		up(&cq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
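/*
 * The queue thread and card_queue_suspend()/card_queue_resume() hand off
 * ownership through thread_sem: the thread holds the semaphore while it is
 * servicing requests and releases it while idle, so down(&cq->thread_sem)
 * in card_queue_suspend() only returns once the thread is parked between
 * requests.
 */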
static int card_queue_thread(void *d)
{
	struct card_queue *cq = d;
	struct request_queue *q = cq->queue;
	//unsigned char rewait;

	/*
	 * We handle suspension ourselves (see card_queue_suspend());
	 * PF_MEMALLOC lets this thread dip into memory reserves so that
	 * reclaim, which may be waiting on this device, cannot stall it.
	 */
	current->flags |= PF_MEMALLOC;

	down(&cq->thread_sem);
	do {
		struct request *req = NULL;

		/* wait for sdio to handle irq & xfer data */
		//for (rewait = 3; (!sdio_irq_handled) && (rewait--);)
		//	schedule();

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		q = cq->queue;
		if (!blk_queue_plugged(q))
			req = blk_fetch_request(q);
		cq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&cq->thread_sem);
			schedule();
			down(&cq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		cq->issue_fn(cq, req);
		cond_resched();
	} while (1);
	up(&cq->thread_sem);

	return 0;
}
#define CONFIG_CARD_BLOCK_BOUNCE 1

#ifdef CONFIG_CARD_BLOCK_BOUNCE
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
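/*
 * Bounce-buffer sizing, worked through (assuming 4 KiB pages):
 * CARD_QUEUE_BOUNCESZ = 512 * 256 = 128 KiB, so one request moves at most
 * 128 KiB (256 sectors), bounce_sg holds 128 KiB / 4 KiB = 32 entries, and
 * the host sees the whole transfer as one contiguous segment via cq->sg.
 */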
static unsigned int card_queue_map_sg(struct card_queue *cq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!cq->bounce_buf)
		return blk_rq_map_sg(cq->queue, cq->req, cq->sg);

	BUG_ON(!cq->bounce_sg);

	sg_len = blk_rq_map_sg(cq->queue, cq->req, cq->bounce_sg);
	cq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(cq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(cq->sg, cq->bounce_buf, buflen);

	return 1;
}
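/*
 * After the collapse above, the host driver only ever sees one segment
 * (cq->sg) covering the whole transfer; card_queue_bounce_pre()/_post()
 * copy the data between that staging buffer and the request's real pages.
 */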
/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
static void card_queue_bounce_pre(struct card_queue *cq)
{
	unsigned long flags;

	if (!cq->bounce_buf)
		return;
	if (rq_data_dir(cq->req) != WRITE)
		return;

	local_irq_save(flags);
	sg_copy_to_buffer(cq->bounce_sg, cq->bounce_sg_len,
			  cq->bounce_buf, cq->sg[0].length);
	local_irq_restore(flags);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
static void card_queue_bounce_post(struct card_queue *cq)
{
	unsigned long flags;

	if (!cq->bounce_buf)
		return;
	if (rq_data_dir(cq->req) != READ)
		return;

	local_irq_save(flags);
	sg_copy_from_buffer(cq->bounce_sg, cq->bounce_sg_len,
			    cq->bounce_buf, cq->sg[0].length);
	local_irq_restore(flags);

	bio_flush_dcache_pages(cq->req->bio);
}
/*
 * Alloc bounce buf for read/write numbers of pages in one request
 */
static int card_init_bounce_buf(struct card_queue *cq,
				struct memory_card *card)
{
	int ret = 0;
	struct card_host *host = card->host;
	unsigned int bouncesz;

	bouncesz = CARD_QUEUE_BOUNCESZ;
	if (bouncesz > host->max_req_size)
		bouncesz = host->max_req_size;

	if (bouncesz >= PAGE_CACHE_SIZE) {
		//cq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		cq->bounce_buf = host->dma_buf;
		if (!cq->bounce_buf) {
			printk(KERN_WARNING "%s: unable to "
			       "allocate bounce buffer\n", card->name);
		}
	}

	if (cq->bounce_buf) {
		blk_queue_bounce_limit(cq->queue, BLK_BOUNCE_HIGH);
		blk_queue_max_hw_sectors(cq->queue, bouncesz / 512);
		blk_queue_physical_block_size(cq->queue, bouncesz);
		blk_queue_max_segments(cq->queue, bouncesz / PAGE_CACHE_SIZE);
		blk_queue_max_segment_size(cq->queue, bouncesz);

		cq->queue->queuedata = cq;
		cq->req = NULL;

		cq->sg = kmalloc(sizeof(struct scatterlist), GFP_KERNEL);
		if (!cq->sg) {
			ret = -ENOMEM;
			blk_cleanup_queue(cq->queue);
			return ret;
		}
		sg_init_table(cq->sg, 1);

		cq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
					bouncesz / PAGE_CACHE_SIZE, GFP_KERNEL);
		if (!cq->bounce_sg) {
			ret = -ENOMEM;
			kfree(cq->sg);
			cq->sg = NULL;
			blk_cleanup_queue(cq->queue);
			return ret;
		}
		sg_init_table(cq->bounce_sg, bouncesz / PAGE_CACHE_SIZE);
	}

	return 0;
}
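/*
 * Design note: the staging buffer is the host's preallocated DMA buffer
 * (host->dma_buf) rather than a per-queue kmalloc (the commented-out call
 * above), which is why card_cleanup_queue() clears cq->bounce_buf without
 * freeing it.
 */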
#else
static unsigned int card_queue_map_sg(struct card_queue *cq)
{
	return blk_rq_map_sg(cq->queue, cq->req, cq->sg);
}

static void card_queue_bounce_pre(struct card_queue *cq)
{
}

static void card_queue_bounce_post(struct card_queue *cq)
{
}

static int card_init_bounce_buf(struct card_queue *cq,
				struct memory_card *card)
{
	return 0;
}
#endif
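/*
 * card_init_queue() wires a memory_card to the block layer: it creates the
 * request queue, sizes it either around the bounce buffer or around the
 * host's raw DMA limits, switches the elevator to deadline, and starts the
 * per-card worker thread that actually issues requests.
 */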
int card_init_queue(struct card_queue *cq, struct memory_card *card,
		    spinlock_t *lock)
{
	struct card_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret = 0;

	if (host->parent->dma_mask && *host->parent->dma_mask)
		limit = *host->parent->dma_mask;

	cq->card = card;
	cq->queue = blk_init_queue(card_request, lock);
	if (!cq->queue)
		return -ENOMEM;

	blk_queue_prep_rq(cq->queue, card_prep_request);
	card_init_bounce_buf(cq, card);

	if (!cq->bounce_buf) {
		blk_queue_bounce_limit(cq->queue, limit);
		blk_queue_max_hw_sectors(cq->queue, host->max_sectors);
		//blk_queue_max_hw_phys_segments(cq->queue, host->max_phys_segs);
		blk_queue_max_segments(cq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(cq->queue, host->max_seg_size);

		cq->queue->queuedata = cq;
		cq->req = NULL;

		cq->sg = kmalloc(sizeof(struct scatterlist) *
				 host->max_phys_segs, GFP_KERNEL);
		if (!cq->sg) {
			ret = -ENOMEM;
			blk_cleanup_queue(cq->queue);
			return ret;
		}
	}

	/* change card io scheduler from cfq to deadline */
	cq->queue->queuedata = cq;
	elevator_exit(cq->queue->elevator);
	cq->queue->elevator = NULL;
	ret = elevator_init(cq->queue, "deadline");
	if (ret) {
		printk(KERN_ERR "[card_init_queue] elevator_init deadline fail\n");
		blk_cleanup_queue(cq->queue);
		return ret;
	}

	init_MUTEX(&cq->thread_sem);

	cq->thread = kthread_run(card_queue_thread, cq, "%s_queue", card->name);
	if (IS_ERR(cq->thread)) {
		ret = PTR_ERR(cq->thread);
		//goto free_bounce_sg;
	}

	return ret;
}
static struct card_blk_data *card_blk_alloc(struct memory_card *card)
{
	struct card_blk_data *card_data;
	int devidx, ret;

	devidx = find_first_zero_bit(dev_use, CARD_NUM_MINORS);
	if (card->card_type == CARD_INAND)
		devidx = CARD_INAND_START_MINOR >> CARD_SHIFT;
	if (devidx >= CARD_NUM_MINORS)
		return ERR_PTR(-ENOSPC);
	__set_bit(devidx, dev_use);

	card_data = kmalloc(sizeof(struct card_blk_data), GFP_KERNEL);
	if (!card_data) {
		ret = -ENOMEM;
		return ERR_PTR(ret);
	}
	memset(card_data, 0, sizeof(struct card_blk_data));

	if (card->state & CARD_STATE_READONLY)
		card_data->read_only = 1;

	card_data->block_bits = 9;	/* 512-byte logical blocks */

	card_data->disk = alloc_disk(1 << CARD_SHIFT);
	if (card_data->disk == NULL) {
		ret = -ENOMEM;
		kfree(card_data);
		return ERR_PTR(ret);
	}

	spin_lock_init(&card_data->lock);
	card_data->usage = 1;

	ret = card_init_queue(&card_data->queue, card, &card_data->lock);
	if (ret) {
		put_disk(card_data->disk);
		kfree(card_data);
		return ERR_PTR(ret);
	}

	card_data->queue.prep_fn = card_blk_prep_rq;
	card_data->queue.issue_fn = card_blk_issue_rq;
	card_data->queue.data = card_data;

	card_data->disk->major = major;
	card_data->disk->minors = 1 << CARD_SHIFT;
	card_data->disk->first_minor = devidx << CARD_SHIFT;
	card_data->disk->fops = &card_ops;
	card_data->disk->private_data = card_data;
	card_data->disk->queue = card_data->queue.queue;
	card_data->disk->driverfs_dev = &card->dev;
	sprintf(card_data->disk->disk_name, "cardblk%s", card->name);

	blk_queue_logical_block_size(card_data->queue.queue,
				     1 << card_data->block_bits);
	set_capacity(card_data->disk, card->capacity);

	return card_data;
}
static int card_blk_prep_rq(struct card_queue *cq, struct request *req)
{
	struct card_blk_data *card_data = cq->data;
	int stat = BLKPREP_OK;

	WARN_ON(!cq->queue->queuedata);

	/*
	 * If we have no device, we haven't finished initialising.
	 */
	if (!card_data || !cq->card || !cq->queue->queuedata) {
		printk(KERN_ERR "%s: killing request - no device/host\n",
		       req->rq_disk->disk_name);
		stat = BLKPREP_KILL;
	}

	return stat;
}
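/*
 * card_blk_issue_rq() runs in the queue thread. It claims the host, packs
 * the request into a card_blk_request (direction, LBA, block size/count,
 * sg list), hands it to card_wait_for_req(), and completes the transferred
 * bytes with __blk_end_request(), looping until the request is done. A
 * failed claim (card removed) or a transfer error fails the remaining
 * blocks with -EIO instead.
 */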
static int card_blk_issue_rq(struct card_queue *cq, struct request *req)
{
	struct card_blk_data *card_data = cq->data;
	struct memory_card *card = card_data->queue.card;
	struct card_blk_request brq;
	int ret;

	if (card_claim_card(card)) {
		spin_lock_irq(&card_data->lock);
		ret = 1;
		while (ret) {
			req->cmd_flags |= REQ_QUIET;
			ret = __blk_end_request(req, -EIO,
						1 << card_data->block_bits);
		}
		spin_unlock_irq(&card_data->lock);
		return 0;
	}

	do {
		brq.crq.cmd = rq_data_dir(req);
		brq.crq.buf = cq->bounce_buf;
		//brq.crq.buf = req->buffer;

		brq.card_data.lba = blk_rq_pos(req);
		brq.card_data.blk_size = 1 << card_data->block_bits;
		brq.card_data.blk_nums = blk_rq_sectors(req);

		brq.card_data.sg = cq->sg;
		brq.card_data.sg_len = card_queue_map_sg(cq);
		//brq.card_data.sg_len = blk_rq_map_sg(req->q, req, brq.card_data.sg);

		card->host->card_type = card->card_type;

		card_queue_bounce_pre(cq);
		card_wait_for_req(card->host, &brq);
		card_queue_bounce_post(cq);

		/*
		 * The request issue failed.
		 */
		if (brq.card_data.error) {
			card_release_host(card->host);

			spin_lock_irq(&card_data->lock);
			ret = 1;
			while (ret) {
				req->cmd_flags |= REQ_QUIET;
				ret = __blk_end_request(req, -EIO,
							1 << card_data->block_bits);
			}
			spin_unlock_irq(&card_data->lock);
			return 0;
		}

		/*
		 * A block was successfully transferred.
		 */
		spin_lock_irq(&card_data->lock);
		brq.card_data.bytes_xfered = brq.card_data.blk_size *
					     brq.card_data.blk_nums;
		ret = __blk_end_request(req, 0, brq.card_data.bytes_xfered);
		spin_unlock_irq(&card_data->lock);
	} while (ret);

	card_release_host(card->host);
	//printk("card request completed %d sector num: %d communication dir %d\n", brq.card_data.lba, brq.card_data.blk_nums, brq.crq.cmd);

	return 1;
}
static void card_blk_remove(struct memory_card *card)
{
	struct card_blk_data *card_data = card_get_drvdata(card);

	if (card_data) {
		int devidx;

		del_gendisk(card_data->disk);
		card_cleanup_queue(&card_data->queue);

		devidx = card_data->disk->first_minor >> CARD_SHIFT;
		__clear_bit(devidx, dev_use);

		card_blk_put(card_data);
	}
	card_set_drvdata(card, NULL);
}

#ifdef CONFIG_PM
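/*
 * Power management: suspend parks the queue thread via
 * card_queue_suspend(), flags the card/sdio polling tasks as stopped, and
 * calls the card's own card_suspend hook; resume re-runs the card's
 * card_resume hook, wakes those tasks, and restarts the queue.
 */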
static int card_blk_suspend(struct memory_card *card, pm_message_t state)
{
	struct card_blk_data *card_data = card_get_drvdata(card);
	struct card_host *host = card->host;

	if (card_data)
		card_queue_suspend(&card_data->queue);

	if (!host->sdio_task_state)
		host->sdio_task_state = 1;

	if (!host->card_task_state)
		host->card_task_state = 1;

	if (card->card_suspend)
		card->card_suspend(card);

	if (card->card_type == CARD_SDIO)
		return 0;

	card->unit_state = CARD_UNIT_RESUMED;
	return 0;
}

static int card_blk_resume(struct memory_card *card)
{
	struct card_blk_data *card_data = card_get_drvdata(card);
	struct card_host *host = card->host;

	if (card->card_resume)
		card->card_resume(card);

	if (host->card_task_state) {
		host->card_task_state = 0;
		if (host->card_task)
			wake_up_process(host->card_task);
	}

	if (host->sdio_task_state && (card->card_type == CARD_SDIO)) {
		host->sdio_task_state = 0;
		if (host->sdio_irq_thread)
			wake_up_process(host->sdio_irq_thread);
	}

	if (card_data) {
		//mmc_blk_set_blksize(md, card);
		card_queue_resume(&card_data->queue);
	}
	return 0;
}
#else
#define card_blk_suspend NULL
#define card_blk_resume NULL
#endif
#ifdef CONFIG_PROC_FS
/*====================================================================*/
/* Support for /proc/inand */

static struct proc_dir_entry *proc_card;
struct mtd_partition *card_table[MAX_MTD_DEVICES];

static inline int card_proc_info(char *buf, char *dev_name, int i)
{
	struct mtd_partition *this = card_table[i];

	if (!this)
		return 0;

	return sprintf(buf, "%s%d: %8.8llx %8.8x \"%s\"\n", dev_name,
		       i + 1, (unsigned long long)this->size,
		       CARD_QUEUE_BOUNCESZ, this->name);
}
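/*
 * Example /proc/inand output (hypothetical partition sizes). Note that the
 * "erasesize" column actually reports CARD_QUEUE_BOUNCESZ (0x20000, i.e.
 * 128 KiB), not a flash erase-block size:
 *
 *   dev: size erasesize name
 *   inand1: 00800000 00020000 "boot"
 *   inand2: 10000000 00020000 "system"
 */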
static int card_read_proc(char *page, char **start, off_t off, int count,
			  int *eof, void *data_unused)
{
	int len, l, i;
	off_t begin = 0;

	len = sprintf(page, "dev: size erasesize name\n");
	for (i = 0; i < MAX_MTD_DEVICES; i++) {
		l = card_proc_info(page + len, "inand", i);
		len += l;
		if (len + begin > off + count)
			goto done;
		if (len + begin < off) {
			begin += len;
			len = 0;
		}
	}
	*eof = 1;
done:
	if (off >= len + begin)
		return 0;
	*start = page + (off - begin);
	return ((count < begin + len - off) ? count : begin + len - off);
}
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_INAND_LP
#define INAND_LAST_PART_MAJOR 202
/**
 * add_last_partition : add the card's last partition as a full device;
 * refer to the board-****.c inand_partition_info[] last partition
 * @card: inand_card_lp
 * @offset: start of the last partition, in 512-byte sectors
 * @size: last partition capacity, in 512-byte sectors
 */
int add_last_partition(struct memory_card *card, uint64_t offset, uint64_t size)
{
	struct card_blk_data *card_data;
	int ret;

	card_data = kmalloc(sizeof(struct card_blk_data), GFP_KERNEL);
	if (!card_data) {
		ret = -ENOMEM;
		return ret;
	}
	memset(card_data, 0, sizeof(struct card_blk_data));

	if (card->state & CARD_STATE_READONLY)
		card_data->read_only = 1;

	card_data->block_bits = 9;

	card_data->disk = alloc_disk(1 << CARD_SHIFT);
	if (card_data->disk == NULL) {
		ret = -ENOMEM;
		kfree(card_data);
		return ret;
	}

	spin_lock_init(&card_data->lock);
	card_data->usage = 1;

	ret = card_init_queue(&card_data->queue, card, &card_data->lock);
	if (ret) {
		put_disk(card_data->disk);
		kfree(card_data);
		return ret;
	}

	card->part_offset = offset;

	card_data->queue.prep_fn = card_blk_prep_rq;
	card_data->queue.issue_fn = card_blk_issue_rq;
	card_data->queue.data = card_data;

	card_data->disk->major = INAND_LAST_PART_MAJOR;
	card_data->disk->minors = 1 << CARD_SHIFT;
	card_data->disk->first_minor = 0;
	card_data->disk->fops = &card_ops;
	card_data->disk->private_data = card_data;
	card_data->disk->queue = card_data->queue.queue;
	card_data->disk->driverfs_dev = &card->dev;
	sprintf(card_data->disk->disk_name, "cardblk%s", card->name);

	blk_queue_logical_block_size(card_data->queue.queue,
				     1 << card_data->block_bits);
	set_capacity(card_data->disk, size);
	card_set_drvdata(card, card_data);
	add_disk(card_data->disk);
	return 0;
}
int card_init_inand_lp(struct memory_card *card)
{
	struct aml_card_info *pinfo = card->card_plat_info;
	struct mtd_partition *part = pinfo->partitions;
	int i, err = 0, nr_part = pinfo->nr_partitions;
	uint64_t offset = 0, size, cur_offset = 0;

	for (i = 0; i < nr_part; i++) {
		if (part[i].size == MTDPART_SIZ_FULL) {
			/*
			 * Add the last partition as a full device (works
			 * around fdisk error 22) and register a new card
			 * in the BSP.
			 */
			if (part[i].offset == MTDPART_OFS_APPEND)
				size = card->capacity - cur_offset;
			else
				size = card->capacity - part[i].offset;
			printk("[%s] (sectors) capacity %d, offset %lld, size %lld\n",
			       card->name, card->capacity, cur_offset, size);
			err = add_last_partition(card, cur_offset, size);
		} else {
			offset = part[i].offset >> 9;
			size = part[i].size >> 9;
			cur_offset = offset + size;
		}
	}
	return err;
}
void card_remove_inand_lp(struct card_host *host)
{
	struct card_blk_data *card_data = card_get_drvdata(host->card);

	del_gendisk(card_data->disk);
	put_disk(card_data->disk);
	card_remove_card(host->card);
	host->card = NULL;
}
#else
int card_init_inand_lp(struct memory_card *card)
{
	return 0;
}

void card_remove_inand_lp(struct card_host *host)
{
}
#endif
/**
 * add_card_partition : add card partitions; refer to
 * board-****.c inand_partition_info[]
 * @card: the owning card
 * @disk: the disk to add partitions to
 * @part: partition table
 * @nr_part: number of partitions
 */
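/*
 * A board file would typically supply the table like this (hypothetical
 * names and sizes; the real layout lives in board-****.c as
 * inand_partition_info[]). Offsets and sizes are byte values and are
 * converted to 512-byte sectors (>> 9) below:
 *
 *   static struct mtd_partition inand_partition_info[] = {
 *       { .name = "boot",   .offset = 0,                  .size = 8 * SZ_1M },
 *       { .name = "system", .offset = MTDPART_OFS_APPEND, .size = 256 * SZ_1M },
 *       { .name = "data",   .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
 *   };
 */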
int add_card_partition(struct memory_card *card, struct gendisk *disk,
		       struct mtd_partition *part, unsigned int nr_part)
{
	unsigned int i;
	struct hd_struct *ret = NULL;
	uint64_t cur_offset = 0;
	uint64_t offset, size;

	if (!part)
		return 0;

	for (i = 0; i < nr_part; i++) {
		offset = part[i].offset >> 9;
		size = part[i].size >> 9;
		if (part[i].offset == MTDPART_OFS_APPEND)
			offset = cur_offset;
		if (part[i].size == MTDPART_SIZ_FULL) {
			size = disk->part0.nr_sects - offset;
#ifdef CONFIG_INAND_LP
			/* the full-device tail is handled by add_last_partition() */
			printk("[%s%d] %20s offset 0x%012llx, len 0x%012llx %s\n",
			       disk->disk_name, 1 + i, part[i].name,
			       offset << 9, size << 9,
			       IS_ERR(ret) ? "add fail" : "");
			break;
#endif
		}
		ret = add_partition(disk, 1 + i, offset, size, 0, NULL);	/* change by leo */
		printk("[%s%d] %20s offset 0x%012llx, len 0x%012llx %s\n",
		       disk->disk_name, 1 + i, part[i].name,
		       offset << 9, size << 9,
		       IS_ERR(ret) ? "add fail" : "");
		cur_offset = offset + size;

		card_table[i] = &part[i];
		card_table[i]->offset = offset << 9;
		card_table[i]->size = size << 9;
	}

#ifdef CONFIG_PROC_FS
	if (!proc_card && (proc_card = create_proc_entry("inand", 0, NULL)))
		proc_card->read_proc = card_read_proc;
#endif /* CONFIG_PROC_FS */
	return 0;
}
static int card_blk_probe(struct memory_card *card)
{
	struct card_blk_data *card_data;
	struct aml_card_info *pinfo = card->card_plat_info;

	card_data = card_blk_alloc(card);
	if (IS_ERR(card_data))
		return PTR_ERR(card_data);

	card_set_drvdata(card, card_data);
	add_disk(card_data->disk);
	add_card_partition(card, card_data->disk, pinfo->partitions,
			   pinfo->nr_partitions);

	return 0;
}

static struct card_driver card_driver = {
	.drv = {
		.name = "cardblk",
	},
	.probe = card_blk_probe,
	.remove = card_blk_remove,
	.suspend = card_blk_suspend,
	.resume = card_blk_resume,
};
static int __init card_blk_init(void)
{
	int res;

	res = register_blkdev(major, "memorycard");
	if (res < 0) {
		printk(KERN_WARNING
		       "Unable to get major %d for Memory Card media: %d\n",
		       major, res);
		return res;
	}
	if (major == 0)
		major = res;

	printk(KERN_INFO "Memory Card media major: %d\n", major);
	return card_register_driver(&card_driver);
}

static void __exit card_blk_exit(void)
{
	card_unregister_driver(&card_driver);
	unregister_blkdev(major, "memorycard");
}

module_init(card_blk_init);
module_exit(card_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Memory Card block device driver");
module_param(major, int, 0444);
MODULE_PARM_DESC(major, "specify the major device number for the Memory Card block driver");