/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_BLOCK

enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */
	BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
			  * IO completion of other buffers in the page
			  */

	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer error printks are to be quiet */
	BH_Sync_Flush,

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};

#define MAX_BUF_PER_PAGE	(PAGE_CACHE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	struct page *b_page;		/* the page this bh is mapped to */

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct address_space *b_assoc_map;	/* mapping this buffer is
						   associated with */
	atomic_t b_count;		/* users using this buffer_head */
};
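
/*
 * Example (sketch): once a buffer is mapped and read, its payload lives
 * inside b_page; b_data points at the block's bytes and b_size gives
 * their length, so a typical caller might copy a block out with
 * ("dst" being a hypothetical buffer of at least b_size bytes):
 *
 *	memcpy(dst, bh->b_data, bh->b_size);
 */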

/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 */
#define BUFFER_FNS(bit, name)						\
static inline void set_buffer_##name(struct buffer_head *bh)		\
{									\
	set_bit(BH_##bit, &(bh)->b_state);				\
}									\
static inline void clear_buffer_##name(struct buffer_head *bh)		\
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static inline int buffer_##name(const struct buffer_head *bh)		\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}
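
/*
 * Example (sketch): BUFFER_FNS(Dirty, dirty) expands to roughly:
 *
 *	static inline void set_buffer_dirty(struct buffer_head *bh)
 *	{
 *		set_bit(BH_Dirty, &(bh)->b_state);
 *	}
 *	static inline void clear_buffer_dirty(struct buffer_head *bh)
 *	{
 *		clear_bit(BH_Dirty, &(bh)->b_state);
 *	}
 *	static inline int buffer_dirty(const struct buffer_head *bh)
 *	{
 *		return test_bit(BH_Dirty, &(bh)->b_state);
 *	}
 */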

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static inline int test_set_buffer_##name(struct buffer_head *bh)	\
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static inline int test_clear_buffer_##name(struct buffer_head *bh)	\
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}

/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Sync_Flush, sync_flush)

#define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)
#define touch_buffer(bh)	mark_page_accessed(bh->b_page)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)page_private(page));	\
	})
#define page_has_buffers(page)	PagePrivate(page)
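
/*
 * Example (sketch): b_this_page links a page's buffers into a circular
 * list, so visiting each one is the usual do/while walk:
 *
 *	struct buffer_head *bh, *head;
 *
 *	if (page_has_buffers(page)) {
 *		bh = head = page_buffers(page);
 *		do {
 *			... examine bh ...
 *			bh = bh->b_this_page;
 *		} while (bh != head);
 *	}
 */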

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_dirty_sync(struct buffer_head *bh);
void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry);
void create_empty_buffers(struct page *, unsigned long,
			unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
void mark_buffer_dirty_inode_sync(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
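
/*
 * Example (sketch): a filesystem that dirties metadata buffers on behalf
 * of an inode typically pairs the two calls above, roughly:
 *
 *	At modification time:
 *		mark_buffer_dirty_inode(bh, inode);
 *	Later, e.g. from its ->fsync implementation:
 *		err = sync_mapping_buffers(inode->i_mapping);
 */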
void unmap_underlying_metadata(struct block_device *bdev, sector_t block);

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
			unsigned size);
struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
				  unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
				sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
void ll_rw_block(int, int, struct buffer_head *bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, int rw);
void write_dirty_buffer(struct buffer_head *bh, int rw);
int submit_bh(int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int bh_submit_read(struct buffer_head *bh);

extern int buffer_heads_over_limit;

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidatepage(struct page *page, unsigned long offset);
int block_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc);
int block_write_full_page_endio(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc, bh_end_io_t *handler);
int block_read_full_page(struct page *, get_block_t *);
int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
				unsigned long from);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
		get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
int cont_write_begin(struct file *, struct address_space *, loff_t,
			unsigned, unsigned, struct page **, void **,
			get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
				get_block_t get_block);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
				get_block_t get_block);

/* Convert errno to return value from ->page_mkwrite() call */
static inline int block_page_mkwrite_return(int err)
{
	if (err == 0)
		return VM_FAULT_LOCKED;
	if (err == -EFAULT)
		return VM_FAULT_NOPAGE;
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err == -EAGAIN)
		return VM_FAULT_RETRY;
	/* -ENOSPC, -EDQUOT, -EIO ... */
	return VM_FAULT_SIGBUS;
}
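
/*
 * Example (sketch): a ->page_mkwrite handler can combine
 * __block_page_mkwrite() with the helper above ("myfs_get_block" being
 * a hypothetical get_block_t implementation):
 *
 *	static int myfs_page_mkwrite(struct vm_area_struct *vma,
 *				     struct vm_fault *vmf)
 *	{
 *		int err = __block_page_mkwrite(vma, vmf, myfs_get_block);
 *
 *		return block_page_mkwrite_return(err);
 *	}
 */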

sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
				struct page **, void **, get_block_t *);
int nobh_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_writepage(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc);

void buffer_init(void);

/*
 * inline definitions
 */

static inline void attach_page_buffers(struct page *page,
		struct buffer_head *head)
{
	page_cache_get(page);
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)head);
}

static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&bh->b_count);
}
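
/*
 * Example (sketch): get_bh()/put_bh() bracket any window in which a bh
 * must stay alive, e.g. while asynchronous I/O is in flight; this is
 * roughly what sync_dirty_buffer() does for a locked, dirty buffer:
 *
 *	get_bh(bh);
 *	bh->b_end_io = end_buffer_write_sync;
 *	submit_bh(WRITE, bh);
 *	wait_on_buffer(bh);
 *
 * (end_buffer_write_sync() drops the reference with put_bh().)
 */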

static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}
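
/*
 * Example (sketch): the common metadata-read pattern built on sb_bread();
 * a NULL return means the block was unreadable.  "blk" stands in for a
 * block number of interest:
 *
 *	struct buffer_head *bh;
 *
 *	bh = sb_bread(sb, blk);
 *	if (!bh)
 *		return -EIO;
 *	... interpret bh->b_data ...
 *	brelse(bh);
 */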

static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}
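
/*
 * Example (sketch): map_bh() is the usual way a get_block_t
 * implementation reports a mapping; "myfs_find_block" is a hypothetical
 * lookup helper returning the physical block number, or 0 if unmapped:
 *
 *	static int myfs_get_block(struct inode *inode, sector_t iblock,
 *				  struct buffer_head *bh_result, int create)
 *	{
 *		sector_t phys = myfs_find_block(inode, iblock, create);
 *
 *		if (!phys)
 *			return -EIO;
 *		map_bh(bh_result, inode->i_sb, phys);
 *		return 0;
 *	}
 */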

static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}
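
/*
 * Example (sketch): the classic read-if-needed sequence under the buffer
 * lock, roughly what bh_submit_read() wraps:
 *
 *	lock_buffer(bh);
 *	if (buffer_uptodate(bh)) {
 *		unlock_buffer(bh);
 *	} else {
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_read_sync;
 *		submit_bh(READ, bh);
 *		wait_on_buffer(bh);
 *	}
 *
 * (end_buffer_read_sync() unlocks the buffer and drops the reference.)
 */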

static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
						   sector_t block,
						   unsigned size)
{
	return __getblk_gfp(bdev, block, size, 0);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
					   sector_t block,
					   unsigned size)
{
	return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}

/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  The page cache is allocated from movable area so that it can be migrated.
 *  It returns NULL if the block was unreadable.
 */
static inline struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}

extern int __set_page_dirty_buffers(struct page *page);

#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline int try_to_free_buffers(struct page *page) { return 1; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */