/* include/linux/dax.h — DAX (direct access to persistent memory) interfaces. */
#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
#include <asm/pgtable.h>

/* Opaque here; full definition lives elsewhere — only pointers are used. */
struct iomap_ops;

/* We use lowest available exceptional entry bit for locking */
#define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)

/* iomap-based DAX read/write path for @iocb over @iter. */
ssize_t iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
		struct iomap_ops *ops);
/* Legacy DAX I/O path driven by a get_block_t callback. */
ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *,
		get_block_t, dio_iodone_t, int flags);
/* Zero @len bytes starting at @from; presumably for partial-block ops — see callers. */
int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
/* Zero the partial block at @from (truncate helper). */
int dax_truncate_page(struct inode *, loff_t from, get_block_t);
/* iomap-based DAX page-fault handler. */
int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
		struct iomap_ops *ops);
/* get_block_t-based DAX page-fault handler. */
int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
/* Remove the radix-tree entry for @index from @mapping. */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
/* Wake waiter(s) on the entry at @index; @wake_all wakes every waiter. */
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, bool wake_all);
#ifdef CONFIG_FS_DAX
/* Read one sector from @bdev via DAX; returns a page or ERR_PTR on failure. */
struct page *read_dax_sector(struct block_device *bdev, sector_t n);
/* Drop the lock bit on the mapping entry at @index. */
void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index);
/* Zero @length bytes at @offset within @sector of @bdev. */
int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
		unsigned int offset, unsigned int length);
#else /* !CONFIG_FS_DAX */
  28. static inline struct page *read_dax_sector(struct block_device *bdev,
  29. sector_t n)
  30. {
  31. return ERR_PTR(-ENXIO);
  32. }
  33. /* Shouldn't ever be called when dax is disabled. */
  34. static inline void dax_unlock_mapping_entry(struct address_space *mapping,
  35. pgoff_t index)
  36. {
  37. BUG();
  38. }
  39. static inline int __dax_zero_page_range(struct block_device *bdev,
  40. sector_t sector, unsigned int offset, unsigned int length)
  41. {
  42. return -ENXIO;
  43. }
  44. #endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
/* Handle a PMD-sized DAX fault at @addr; real implementation with THP. */
int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
		unsigned int flags, get_block_t);
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
  49. static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
  50. pmd_t *pmd, unsigned int flags, get_block_t gb)
  51. {
  52. return VM_FAULT_FALLBACK;
  53. }
  54. #endif
/* Make a read-only DAX PFN mapping writable after a write fault. */
int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
/* mkwrite is handled by the ordinary fault path. */
#define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb)
  57. static inline bool dax_mapping(struct address_space *mapping)
  58. {
  59. return mapping->host && IS_DAX(mapping->host);
  60. }
/* Opaque; defined elsewhere — only a pointer is passed below. */
struct writeback_control;
/* Flush/write back dirty DAX entries of @mapping per @wbc. */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc);
#endif /* _LINUX_DAX_H */