xen-blkfront.c

/*
 * blkfront.c
 *
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>

enum blkif_state {
	BLKIF_STATE_DISCONNECTED,
	BLKIF_STATE_CONNECTED,
	BLKIF_STATE_SUSPENDED,
};
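/*
 * Per-request bookkeeping kept alongside the ring: a private copy of the
 * request handed to the backend, the originating blk-layer request, and
 * the page frames granted for its segments, so a request can be completed
 * or reissued after a suspend/resume.
 */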
struct blk_shadow {
	struct blkif_request req;
	struct request *request;
	unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};

static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;

#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
	spinlock_t io_lock;
	struct mutex mutex;
	struct xenbus_device *xbdev;
	struct gendisk *gd;
	int vdevice;
	blkif_vdev_t handle;
	enum blkif_state connected;
	int ring_ref;
	struct blkif_front_ring ring;
	struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int evtchn, irq;
	struct request_queue *rq;
	struct work_struct work;
	struct gnttab_free_callback callback;
	struct blk_shadow shadow[BLK_RING_SIZE];
	unsigned long shadow_free;
	unsigned int feature_flush;
	unsigned int flush_op;
	unsigned int feature_discard:1;
	unsigned int feature_secdiscard:1;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	int is_ready;
};

static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);

#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
	(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
#define GRANT_INVALID_REF	0

#define PARTS_PER_DISK		16
#define PARTS_PER_EXT_DISK	256

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define EXT_SHIFT 28
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
#define EMULATED_HD_DISK_MINOR_OFFSET (0)
#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
#define EMULATED_SD_DISK_MINOR_OFFSET (0)
#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)

#define DEV_NAME	"xvd"	/* name in /dev */
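/*
 * The shadow entries double as a free list: an unused entry stores the
 * index of the next free entry in req.u.rw.id, and info->shadow_free
 * points at the head of the list.
 */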
static int get_id_from_freelist(struct blkfront_info *info)
{
	unsigned long free = info->shadow_free;
	BUG_ON(free >= BLK_RING_SIZE);
	info->shadow_free = info->shadow[free].req.u.rw.id;
	info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
	return free;
}

static void add_id_to_freelist(struct blkfront_info *info,
			       unsigned long id)
{
	info->shadow[id].req.u.rw.id = info->shadow_free;
	info->shadow[id].request = NULL;
	info->shadow_free = id;
}
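/*
 * Minor numbers for all xvd devices are tracked in a single, dynamically
 * grown bitmap, so that the partition ranges reserved for different disks
 * never overlap.
 */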
static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;
	int rc;

	if (end > nr_minors) {
		unsigned long *bitmap, *old;

		bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
				 GFP_KERNEL);
		if (bitmap == NULL)
			return -ENOMEM;

		spin_lock(&minor_lock);
		if (end > nr_minors) {
			old = minors;
			memcpy(bitmap, minors,
			       BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
			minors = bitmap;
			nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
		} else
			old = bitmap;
		spin_unlock(&minor_lock);
		kfree(old);
	}

	spin_lock(&minor_lock);
	if (find_next_bit(minors, end, minor) >= end) {
		bitmap_set(minors, minor, nr);
		rc = 0;
	} else
		rc = -EBUSY;
	spin_unlock(&minor_lock);

	return rc;
}

static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;

	BUG_ON(end > nr_minors);
	spin_lock(&minor_lock);
	bitmap_clear(minors, minor, nr);
	spin_unlock(&minor_lock);
}
static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;
	schedule_work(&info->work);
}

static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	/* We don't have real geometry info, but let's at least return
	   values consistent with the size of the device */
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	hg->heads = 0xff;
	hg->sectors = 0x3f;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}
static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned command, unsigned long argument)
{
	struct blkfront_info *info = bdev->bd_disk->private_data;
	int i;

	dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
		command, (long)argument);

	switch (command) {
	case CDROMMULTISESSION:
		dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY: {
		struct gendisk *gd = info->gd;
		if (gd->flags & GENHD_FL_CD)
			return 0;
		return -EINVAL;
	}

	default:
		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
		  command);*/
		return -EINVAL; /* same return as native Linux */
	}

	return 0;
}
/*
 * Generate a Xen blkfront IO request from a blk layer request.  Reads
 * and writes are handled as expected.
 *
 * @req: a request struct
 */
static int blkif_queue_request(struct request *req)
{
	struct blkfront_info *info = req->rq_disk->private_data;
	unsigned long buffer_mfn;
	struct blkif_request *ring_req;
	unsigned long id;
	unsigned int fsect, lsect;
	int i, ref;
	grant_ref_t gref_head;
	struct scatterlist *sg;

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	if (gnttab_alloc_grant_references(
		BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
		gnttab_request_free_callback(
			&info->callback,
			blkif_restart_queue_callback,
			info,
			BLKIF_MAX_SEGMENTS_PER_REQUEST);
		return 1;
	}

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = get_id_from_freelist(info);
	info->shadow[id].request = req;

	ring_req->u.rw.id = id;
	ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
	ring_req->u.rw.handle = info->handle;

	ring_req->operation = rq_data_dir(req) ?
		BLKIF_OP_WRITE : BLKIF_OP_READ;
	if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
		/*
		 * Ideally we can do an unordered flush-to-disk.  In case the
		 * backend only supports barriers, use that.  A barrier request
		 * is a superset of FUA, so we can implement it the same
		 * way.  (It's also a FLUSH+FUA, since it is
		 * guaranteed ordered WRT previous writes.)
		 */
		ring_req->operation = info->flush_op;
	}
	if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
		/* id, sector_number and handle are set above. */
		ring_req->operation = BLKIF_OP_DISCARD;
		ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
		if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
			ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
		else
			ring_req->u.discard.flag = 0;
	} else {
		ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req,
							   info->sg);
		BUG_ON(ring_req->u.rw.nr_segments >
		       BLKIF_MAX_SEGMENTS_PER_REQUEST);

		for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) {
			buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
			fsect = sg->offset >> 9;
			lsect = fsect + (sg->length >> 9) - 1;
			/* install a grant reference. */
			ref = gnttab_claim_grant_reference(&gref_head);
			BUG_ON(ref == -ENOSPC);

			gnttab_grant_foreign_access_ref(
					ref,
					info->xbdev->otherend_id,
					buffer_mfn,
					rq_data_dir(req));

			info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
			ring_req->u.rw.seg[i] =
					(struct blkif_request_segment) {
						.gref       = ref,
						.first_sect = fsect,
						.last_sect  = lsect };
		}
	}

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

	gnttab_free_grant_references(gref_head);

	return 0;
}
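/*
 * Push newly produced requests to the backend, and send an event channel
 * notification only if the backend actually needs one.
 */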
static inline void flush_requests(struct blkfront_info *info)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

	if (notify)
		notify_remote_via_irq(info->irq);
}
/*
 * do_blkif_request
 *  Pull requests off the block layer queue and issue them to the backend,
 *  stopping the queue when the ring is full.
 */
static void do_blkif_request(struct request_queue *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;

	pr_debug("Entered do_blkif_request\n");

	queued = 0;

	while ((req = blk_peek_request(rq)) != NULL) {
		info = req->rq_disk->private_data;

		if (RING_FULL(&info->ring))
			goto wait;

		blk_start_request(req);

		if ((req->cmd_type != REQ_TYPE_FS) ||
		    ((req->cmd_flags & (REQ_FLUSH | REQ_FUA)) &&
		    !info->flush_op)) {
			__blk_end_request_all(req, -EIO);
			continue;
		}

		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
			 "(%u/%u) buffer:%p [%s]\n",
			 req, req->cmd, (unsigned long)blk_rq_pos(req),
			 blk_rq_cur_sectors(req), blk_rq_sectors(req),
			 req->buffer, rq_data_dir(req) ? "write" : "read");

		if (blkif_queue_request(req)) {
			blk_requeue_request(rq, req);
wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	if (queued != 0)
		flush_requests(info);
}
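/*
 * Set up the request queue limits so that any merged request still fits
 * in a single ring slot: at most BLKIF_MAX_SEGMENTS_PER_REQUEST segments,
 * each no larger than a page and never crossing a page boundary.
 */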
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
{
	struct request_queue *rq;
	struct blkfront_info *info = gd->private_data;

	rq = blk_init_queue(do_blkif_request, &info->io_lock);
	if (rq == NULL)
		return -1;

	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);

	if (info->feature_discard) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
		blk_queue_max_discard_sectors(rq, get_capacity(gd));
		rq->limits.discard_granularity = info->discard_granularity;
		rq->limits.discard_alignment = info->discard_alignment;
		if (info->feature_secdiscard)
			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
	}

	/* Hard sector size and max sectors impersonate the equiv. hardware. */
	blk_queue_logical_block_size(rq, sector_size);
	blk_queue_max_hw_sectors(rq, 512);

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
	blk_queue_max_segment_size(rq, PAGE_SIZE);

	/* Ensure a merged request will fit in a single I/O ring slot. */
	blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);

	/* Make sure buffer addresses are sector-aligned. */
	blk_queue_dma_alignment(rq, 511);

	/* Make sure we don't use bounce buffers. */
	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);

	gd->queue = rq;

	return 0;
}
static void xlvbd_flush(struct blkfront_info *info)
{
	blk_queue_flush(info->rq, info->feature_flush);
	printk(KERN_INFO "blkfront: %s: %s: %s\n",
	       info->gd->disk_name,
	       info->flush_op == BLKIF_OP_WRITE_BARRIER ?
		"barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ?
		"flush diskcache" : "barrier or flush"),
	       info->feature_flush ? "enabled" : "disabled");
}
static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
{
	int major;

	major = BLKIF_MAJOR(vdevice);
	*minor = BLKIF_MINOR(vdevice);

	switch (major) {
	case XEN_IDE0_MAJOR:
		*offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
		*minor = ((*minor / 64) * PARTS_PER_DISK) +
			 EMULATED_HD_DISK_MINOR_OFFSET;
		break;
	case XEN_IDE1_MAJOR:
		*offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
		*minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
			 EMULATED_HD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK0_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK1_MAJOR:
	case XEN_SCSI_DISK2_MAJOR:
	case XEN_SCSI_DISK3_MAJOR:
	case XEN_SCSI_DISK4_MAJOR:
	case XEN_SCSI_DISK5_MAJOR:
	case XEN_SCSI_DISK6_MAJOR:
	case XEN_SCSI_DISK7_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) +
			  ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
			  EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor +
			 ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
			 EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK8_MAJOR:
	case XEN_SCSI_DISK9_MAJOR:
	case XEN_SCSI_DISK10_MAJOR:
	case XEN_SCSI_DISK11_MAJOR:
	case XEN_SCSI_DISK12_MAJOR:
	case XEN_SCSI_DISK13_MAJOR:
	case XEN_SCSI_DISK14_MAJOR:
	case XEN_SCSI_DISK15_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) +
			  ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
			  EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor +
			 ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
			 EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XENVBD_MAJOR:
		*offset = *minor / PARTS_PER_DISK;
		break;
	default:
		printk(KERN_WARNING "blkfront: your disk configuration is "
		       "incorrect, please use an xvd device instead\n");
		return -ENODEV;
	}
	return 0;
}
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
			       struct blkfront_info *info,
			       u16 vdisk_info, u16 sector_size)
{
	struct gendisk *gd;
	int nr_minors = 1;
	int err;
	unsigned int offset;
	int minor;
	int nr_parts;

	BUG_ON(info->gd != NULL);
	BUG_ON(info->rq != NULL);

	if ((info->vdevice>>EXT_SHIFT) > 1) {
		/* this is above the extended range; something is wrong */
		printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
		return -ENODEV;
	}

	if (!VDEV_IS_EXTENDED(info->vdevice)) {
		err = xen_translate_vdev(info->vdevice, &minor, &offset);
		if (err)
			return err;
		nr_parts = PARTS_PER_DISK;
	} else {
		minor = BLKIF_MINOR_EXT(info->vdevice);
		nr_parts = PARTS_PER_EXT_DISK;
		offset = minor / nr_parts;
		if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
			printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
				"emulated IDE disks,\n\t choose an xvd device name "
				"from xvde on\n", info->vdevice);
	}

	if ((minor % nr_parts) == 0)
		nr_minors = nr_parts;

	err = xlbd_reserve_minors(minor, nr_minors);
	if (err)
		goto out;
	err = -ENODEV;

	gd = alloc_disk(nr_minors);
	if (gd == NULL)
		goto release;

	if (nr_minors > 1) {
		if (offset < 26)
			sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
		else
			sprintf(gd->disk_name, "%s%c%c", DEV_NAME,
				'a' + ((offset / 26)-1), 'a' + (offset % 26));
	} else {
		if (offset < 26)
			sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
				'a' + offset,
				minor & (nr_parts - 1));
		else
			sprintf(gd->disk_name, "%s%c%c%d", DEV_NAME,
				'a' + ((offset / 26) - 1),
				'a' + (offset % 26),
				minor & (nr_parts - 1));
	}

	gd->major = XENVBD_MAJOR;
	gd->first_minor = minor;
	gd->fops = &xlvbd_block_fops;
	gd->private_data = info;
	gd->driverfs_dev = &(info->xbdev->dev);
	set_capacity(gd, capacity);

	if (xlvbd_init_blk_queue(gd, sector_size)) {
		del_gendisk(gd);
		goto release;
	}

	info->rq = gd->queue;
	info->gd = gd;

	xlvbd_flush(info);

	if (vdisk_info & VDISK_READONLY)
		set_disk_ro(gd, 1);

	if (vdisk_info & VDISK_REMOVABLE)
		gd->flags |= GENHD_FL_REMOVABLE;

	if (vdisk_info & VDISK_CDROM)
		gd->flags |= GENHD_FL_CD;

	return 0;

 release:
	xlbd_release_minors(minor, nr_minors);
 out:
	return err;
}
static void xlvbd_release_gendisk(struct blkfront_info *info)
{
	unsigned int minor, nr_minors;
	unsigned long flags;

	if (info->rq == NULL)
		return;

	spin_lock_irqsave(&info->io_lock, flags);

	/* No more blkif_request(). */
	blk_stop_queue(info->rq);

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irqrestore(&info->io_lock, flags);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_work_sync(&info->work);

	del_gendisk(info->gd);

	minor = info->gd->first_minor;
	nr_minors = info->gd->minors;
	xlbd_release_minors(minor, nr_minors);

	blk_cleanup_queue(info->rq);
	info->rq = NULL;

	put_disk(info->gd);
	info->gd = NULL;
}
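/*
 * Restart the request queue once ring slots are available again.  Called
 * with info->io_lock held.
 */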
static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring)) {
		/* Re-enable calldowns. */
		blk_start_queue(info->rq);
		/* Kick things off immediately. */
		do_blkif_request(info->rq);
	}
}

static void blkif_restart_queue(struct work_struct *work)
{
	struct blkfront_info *info = container_of(work, struct blkfront_info, work);

	spin_lock_irq(&info->io_lock);
	if (info->connected == BLKIF_STATE_CONNECTED)
		kick_pending_request_queues(info);
	spin_unlock_irq(&info->io_lock);
}
static void blkif_free(struct blkfront_info *info, int suspend)
{
	/* Prevent new requests being issued until we fix things up. */
	spin_lock_irq(&info->io_lock);
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	/* No more blkif_request(). */
	if (info->rq)
		blk_stop_queue(info->rq);
	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irq(&info->io_lock);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_work_sync(&info->work);

	/* Free resources associated with old device channel. */
	if (info->ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->ring_ref, 0,
					  (unsigned long)info->ring.sring);
		info->ring_ref = GRANT_INVALID_REF;
		info->ring.sring = NULL;
	}
	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->evtchn = info->irq = 0;
}
static void blkif_completion(struct blk_shadow *s)
{
	int i;
	/* Must not be called for BLKIF_OP_DISCARD requests: there,
	 * u.discard.flag occupies the same location as u.rw.nr_segments,
	 * and no segments were granted. */
	for (i = 0; i < s->req.u.rw.nr_segments; i++)
		gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL);
}
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
	struct request *req;
	struct blkif_response *bret;
	RING_IDX i, rp;
	unsigned long flags;
	struct blkfront_info *info = (struct blkfront_info *)dev_id;
	int error;

	spin_lock_irqsave(&info->io_lock, flags);

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
		spin_unlock_irqrestore(&info->io_lock, flags);
		return IRQ_HANDLED;
	}

 again:
	rp = info->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = info->ring.rsp_cons; i != rp; i++) {
		unsigned long id;

		bret = RING_GET_RESPONSE(&info->ring, i);
		id   = bret->id;
		req  = info->shadow[id].request;

		if (bret->operation != BLKIF_OP_DISCARD)
			blkif_completion(&info->shadow[id]);

		add_id_to_freelist(info, id);

		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
		switch (bret->operation) {
		case BLKIF_OP_DISCARD:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				struct request_queue *rq = info->rq;
				printk(KERN_WARNING "blkfront: %s: discard op failed\n",
				       info->gd->disk_name);
				error = -EOPNOTSUPP;
				info->feature_discard = 0;
				info->feature_secdiscard = 0;
				queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
				queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
			}
			__blk_end_request_all(req, error);
			break;
		case BLKIF_OP_FLUSH_DISKCACHE:
		case BLKIF_OP_WRITE_BARRIER:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				printk(KERN_WARNING "blkfront: %s: write %s op failed\n",
				       info->flush_op == BLKIF_OP_WRITE_BARRIER ?
				       "barrier" : "flush disk cache",
				       info->gd->disk_name);
				error = -EOPNOTSUPP;
			}
			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
				     info->shadow[id].req.u.rw.nr_segments == 0)) {
				printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
				       info->flush_op == BLKIF_OP_WRITE_BARRIER ?
				       "barrier" : "flush disk cache",
				       info->gd->disk_name);
				error = -EOPNOTSUPP;
			}
			if (unlikely(error)) {
				if (error == -EOPNOTSUPP)
					error = 0;
				info->feature_flush = 0;
				info->flush_op = 0;
				xlvbd_flush(info);
			}
			/* fall through */
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
			if (unlikely(bret->status != BLKIF_RSP_OKAY))
				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
					"request: %x\n", bret->status);

			__blk_end_request_all(req, error);
			break;
		default:
			BUG();
		}
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		info->ring.sring->rsp_event = i + 1;

	kick_pending_request_queues(info);

	spin_unlock_irqrestore(&info->io_lock, flags);

	return IRQ_HANDLED;
}
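/*
 * Allocate the shared ring page, grant the backend access to it, and bind
 * an event channel to blkif_interrupt.
 */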
static int setup_blkring(struct xenbus_device *dev,
			 struct blkfront_info *info)
{
	struct blkif_sring *sring;
	int err;

	info->ring_ref = GRANT_INVALID_REF;

	sring = (struct blkif_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);

	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
	if (err < 0) {
		free_page((unsigned long)sring);
		info->ring.sring = NULL;
		goto fail;
	}
	info->ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn,
					blkif_interrupt,
					IRQF_SAMPLE_RANDOM, "blkif", info);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err,
				 "bind_evtchn_to_irqhandler failed");
		goto fail;
	}
	info->irq = err;

	return 0;
fail:
	blkif_free(info, 0);
	return err;
}
/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	const char *message = NULL;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_blkring(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_blkring;
	}

	err = xenbus_printf(xbt, dev->nodename,
			    "ring-ref", "%u", info->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (err) {
		message = "writing protocol";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_blkring;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(dev, err, "%s", message);
 destroy_blkring:
	blkif_free(info, 0);
 out:
	return err;
}
/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */
static int blkfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err, vdevice, i;
	struct blkfront_info *info;

	/* FIXME: Use dynamic device id if this is not set. */
	err = xenbus_scanf(XBT_NIL, dev->nodename,
			   "virtual-device", "%i", &vdevice);
	if (err != 1) {
		/* go looking in the extended area instead */
		err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
				   "%i", &vdevice);
		if (err != 1) {
			xenbus_dev_fatal(dev, err, "reading virtual-device");
			return err;
		}
	}

	if (xen_hvm_domain()) {
		char *type;
		int len;
		/* no unplug has been done: do not hook devices != xen vbds */
		if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) {
			int major;

			if (!VDEV_IS_EXTENDED(vdevice))
				major = BLKIF_MAJOR(vdevice);
			else
				major = XENVBD_MAJOR;

			if (major != XENVBD_MAJOR) {
				printk(KERN_INFO
					"%s: HVM does not support vbd %d as xen block device\n",
					__func__, vdevice);
				return -ENODEV;
			}
		}
		/* do not create a PV cdrom device if we are an HVM guest */
		type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
		if (IS_ERR(type))
			return -ENODEV;
		if (strncmp(type, "cdrom", 5) == 0) {
			kfree(type);
			return -ENODEV;
		}
		kfree(type);
	}
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}

	mutex_init(&info->mutex);
	spin_lock_init(&info->io_lock);
	info->xbdev = dev;
	info->vdevice = vdevice;
	info->connected = BLKIF_STATE_DISCONNECTED;
	INIT_WORK(&info->work, blkif_restart_queue);

	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.u.rw.id = i+1;
	info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;

	/* Front end dir is a number, which is used as the id. */
	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
	dev_set_drvdata(&dev->dev, info);

	err = talk_to_blkback(dev, info);
	if (err) {
		kfree(info);
		dev_set_drvdata(&dev->dev, NULL);
		return err;
	}

	return 0;
}
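/*
 * Replay requests that were in flight when the connection was torn down:
 * copy the old shadow state, rebuild the free list, then requeue each
 * pending entry on the fresh ring, rewriting its grant references before
 * the new entries are pushed.
 */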
static int blkif_recover(struct blkfront_info *info)
{
	int i;
	struct blkif_request *req;
	struct blk_shadow *copy;
	int j;

	/* Stage 1: Make a safe copy of the shadow state. */
	copy = kmalloc(sizeof(info->shadow),
		       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
	if (!copy)
		return -ENOMEM;
	memcpy(copy, info->shadow, sizeof(info->shadow));

	/* Stage 2: Set up free list. */
	memset(&info->shadow, 0, sizeof(info->shadow));
	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.u.rw.id = i+1;
	info->shadow_free = info->ring.req_prod_pvt;
	info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;

	/* Stage 3: Find pending requests and requeue them. */
	for (i = 0; i < BLK_RING_SIZE; i++) {
		/* Not in use? */
		if (!copy[i].request)
			continue;

		/* Grab a request slot and copy shadow state into it. */
		req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
		*req = copy[i].req;

		/* We get a new request id, and must reset the shadow state. */
		req->u.rw.id = get_id_from_freelist(info);
		memcpy(&info->shadow[req->u.rw.id], &copy[i], sizeof(copy[i]));

		if (req->operation != BLKIF_OP_DISCARD) {
			/* Rewrite any grant references invalidated by susp/resume. */
			for (j = 0; j < req->u.rw.nr_segments; j++)
				gnttab_grant_foreign_access_ref(
					req->u.rw.seg[j].gref,
					info->xbdev->otherend_id,
					pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]),
					rq_data_dir(info->shadow[req->u.rw.id].request));
		}
		info->shadow[req->u.rw.id].req = *req;

		info->ring.req_prod_pvt++;
	}

	kfree(copy);

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	spin_lock_irq(&info->io_lock);

	/* Now safe for us to use the shared ring */
	info->connected = BLKIF_STATE_CONNECTED;

	/* Send off requeued requests */
	flush_requests(info);

	/* Kick any other new requests queued since we resumed */
	kick_pending_request_queues(info);

	spin_unlock_irq(&info->io_lock);

	return 0;
}
/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int blkfront_resume(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
	int err;

	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);

	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

	err = talk_to_blkback(dev, info);
	if (info->connected == BLKIF_STATE_SUSPENDED && !err)
		err = blkif_recover(info);

	return err;
}
static void
blkfront_closing(struct blkfront_info *info)
{
	struct xenbus_device *xbdev = info->xbdev;
	struct block_device *bdev = NULL;

	mutex_lock(&info->mutex);

	if (xbdev->state == XenbusStateClosing) {
		mutex_unlock(&info->mutex);
		return;
	}

	if (info->gd)
		bdev = bdget_disk(info->gd, 0);

	mutex_unlock(&info->mutex);

	if (!bdev) {
		xenbus_frontend_closed(xbdev);
		return;
	}

	mutex_lock(&bdev->bd_mutex);

	if (bdev->bd_openers) {
		xenbus_dev_error(xbdev, -EBUSY,
				 "Device in use; refusing to close");
		xenbus_switch_state(xbdev, XenbusStateClosing);
	} else {
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(xbdev);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
}
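/*
 * Read the backend's discard-related xenstore keys ("type",
 * "discard-granularity", "discard-alignment", "discard-secure") and cache
 * them in the blkfront_info feature fields.
 */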
static void blkfront_setup_discard(struct blkfront_info *info)
{
	int err;
	char *type;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	unsigned int discard_secure;

	type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL);
	if (IS_ERR(type))
		return;

	info->feature_secdiscard = 0;
	if (strncmp(type, "phy", 3) == 0) {
		err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			"discard-granularity", "%u", &discard_granularity,
			"discard-alignment", "%u", &discard_alignment,
			NULL);
		if (!err) {
			info->feature_discard = 1;
			info->discard_granularity = discard_granularity;
			info->discard_alignment = discard_alignment;
		}
		err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			"discard-secure", "%d", &discard_secure,
			NULL);
		if (!err)
			info->feature_secdiscard = discard_secure;

	} else if (strncmp(type, "file", 4) == 0)
		info->feature_discard = 1;

	kfree(type);
}
/*
 * Invoked when the backend is finally 'ready' (and has produced the
 * details about the physical device - #sectors, size, etc).
 */
static void blkfront_connect(struct blkfront_info *info)
{
	unsigned long long sectors;
	unsigned long sector_size;
	unsigned int binfo;
	int err;
	int barrier, flush, discard;

	switch (info->connected) {
	case BLKIF_STATE_CONNECTED:
		/*
		 * Potentially, the back-end may be signalling
		 * a capacity change; update the capacity.
		 */
		err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
				   "sectors", "%Lu", &sectors);
		if (XENBUS_EXIST_ERR(err))
			return;
		printk(KERN_INFO "Setting capacity to %Lu\n",
		       sectors);
		set_capacity(info->gd, sectors);
		revalidate_disk(info->gd);

		/* fall through */
	case BLKIF_STATE_SUSPENDED:
		return;

	default:
		break;
	}

	dev_dbg(&info->xbdev->dev, "%s:%s.\n",
		__func__, info->xbdev->otherend);

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "sectors", "%llu", &sectors,
			    "info", "%u", &binfo,
			    "sector-size", "%lu", &sector_size,
			    NULL);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err,
				 "reading backend fields at %s",
				 info->xbdev->otherend);
		return;
	}

	info->feature_flush = 0;
	info->flush_op = 0;

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-barrier", "%d", &barrier,
			    NULL);

	/*
	 * If there's no "feature-barrier" defined, then it means
	 * we're dealing with a very old backend which writes
	 * synchronously; nothing to do.
	 *
	 * If there are barriers, then we use flush.
	 */
	if (!err && barrier) {
		info->feature_flush = REQ_FLUSH | REQ_FUA;
		info->flush_op = BLKIF_OP_WRITE_BARRIER;
	}
	/*
	 * And if there is "feature-flush-cache" use that above
	 * barriers.
	 */
	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-flush-cache", "%d", &flush,
			    NULL);

	if (!err && flush) {
		info->feature_flush = REQ_FLUSH;
		info->flush_op = BLKIF_OP_FLUSH_DISKCACHE;
	}

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-discard", "%d", &discard,
			    NULL);

	if (!err && discard)
		blkfront_setup_discard(info);

	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
				 info->xbdev->otherend);
		return;
	}

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Kick pending requests. */
	spin_lock_irq(&info->io_lock);
	info->connected = BLKIF_STATE_CONNECTED;
	kick_pending_request_queues(info);
	spin_unlock_irq(&info->io_lock);

	add_disk(info->gd);

	info->is_ready = 1;
}
/**
 * Callback received when the backend's state changes.
 */
static void blkback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
		break;

	case XenbusStateConnected:
		blkfront_connect(info);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		/* Missed the backend's Closing state -- fallthrough */
	case XenbusStateClosing:
		if (info)
			blkfront_closing(info);
		break;
	}
}
static int blkfront_remove(struct xenbus_device *xbdev)
{
	struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
	struct block_device *bdev = NULL;
	struct gendisk *disk;

	dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);

	blkif_free(info, 0);

	mutex_lock(&info->mutex);

	disk = info->gd;
	if (disk)
		bdev = bdget_disk(disk, 0);

	info->xbdev = NULL;
	mutex_unlock(&info->mutex);

	if (!bdev) {
		kfree(info);
		return 0;
	}

	/*
	 * The xbdev was removed before we reached the Closed
	 * state. See if it's safe to remove the disk. If the bdev
	 * isn't closed yet, we let release take care of it.
	 */

	mutex_lock(&bdev->bd_mutex);
	info = disk->private_data;

	dev_warn(disk_to_dev(disk),
		 "%s was hot-unplugged, %d stale handles\n",
		 xbdev->nodename, bdev->bd_openers);

	if (info && !bdev->bd_openers) {
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);

	return 0;
}
static int blkfront_is_ready(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	return info->is_ready && info->xbdev;
}
static int blkif_open(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	struct blkfront_info *info;
	int err = 0;

	mutex_lock(&blkfront_mutex);

	info = disk->private_data;
	if (!info) {
		/* xbdev gone */
		err = -ERESTARTSYS;
		goto out;
	}

	mutex_lock(&info->mutex);

	if (!info->gd)
		/* xbdev is closed */
		err = -ERESTARTSYS;

	mutex_unlock(&info->mutex);

out:
	mutex_unlock(&blkfront_mutex);
	return err;
}

static int blkif_release(struct gendisk *disk, fmode_t mode)
{
	struct blkfront_info *info = disk->private_data;
	struct block_device *bdev;
	struct xenbus_device *xbdev;

	mutex_lock(&blkfront_mutex);

	bdev = bdget_disk(disk, 0);

	if (bdev->bd_openers)
		goto out;

	/*
	 * Check if we have been instructed to close. We will have
	 * deferred this request, because the bdev was still open.
	 */

	mutex_lock(&info->mutex);
	xbdev = info->xbdev;

	if (xbdev && xbdev->state == XenbusStateClosing) {
		/* pending switch to state closed */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(info->xbdev);
	}

	mutex_unlock(&info->mutex);

	if (!xbdev) {
		/* sudden device removal */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

out:
	bdput(bdev);
	mutex_unlock(&blkfront_mutex);
	return 0;
}
static const struct block_device_operations xlvbd_block_fops =
{
	.owner = THIS_MODULE,
	.open = blkif_open,
	.release = blkif_release,
	.getgeo = blkif_getgeo,
	.ioctl = blkif_ioctl,
};


static const struct xenbus_device_id blkfront_ids[] = {
	{ "vbd" },
	{ "" }
};

static DEFINE_XENBUS_DRIVER(blkfront, ,
	.probe = blkfront_probe,
	.remove = blkfront_remove,
	.resume = blkfront_resume,
	.otherend_changed = blkback_changed,
	.is_ready = blkfront_is_ready,
);
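/*
 * Module init: claim the xvd block major, then register the xenbus
 * frontend driver.  Bail out early when not running on Xen, or when an
 * HVM guest has not unplugged its emulated disks.
 */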
static int __init xlblk_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	if (xen_hvm_domain() && !xen_platform_pci_unplug)
		return -ENODEV;

	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
		printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
		       XENVBD_MAJOR, DEV_NAME);
		return -ENODEV;
	}

	ret = xenbus_register_frontend(&blkfront_driver);
	if (ret) {
		unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
		return ret;
	}

	return 0;
}
module_init(xlblk_init);


static void __exit xlblk_exit(void)
{
	xenbus_unregister_driver(&blkfront_driver);
}
module_exit(xlblk_exit);

MODULE_DESCRIPTION("Xen virtual block device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
MODULE_ALIAS("xen:vbd");
MODULE_ALIAS("xenblk");