/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * William Irwin, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 */

#include <linux/module.h>
#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched.h>	/* remove ASAP */
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>

#include <asm/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

struct hugetlbfs_config {
        uid_t   uid;
        gid_t   gid;
        umode_t mode;
        long    nr_blocks;
        long    nr_inodes;
        struct hstate *hstate;
};

struct hugetlbfs_inode_info {
        struct shared_policy policy;
        struct inode vfs_inode;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
        return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

static struct backing_dev_info hugetlbfs_backing_dev_info = {
        .name           = "hugetlbfs",
        .ra_pages       = 0,    /* No readahead */
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

int sysctl_hugetlb_shm_group;

enum {
        Opt_size, Opt_nr_inodes,
        Opt_mode, Opt_uid, Opt_gid,
        Opt_pagesize,
        Opt_err,
};

static const match_table_t tokens = {
        {Opt_size,      "size=%s"},
        {Opt_nr_inodes, "nr_inodes=%s"},
        {Opt_mode,      "mode=%o"},
        {Opt_uid,       "uid=%u"},
        {Opt_gid,       "gid=%u"},
        {Opt_pagesize,  "pagesize=%s"},
        {Opt_err,       NULL},
};
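
/*
 * Example mount using the options defined in the token table above
 * (illustrative; "pagesize" must name a huge page size the system has
 * actually configured):
 *
 *      mount -t hugetlbfs -o size=256M,nr_inodes=16,pagesize=2M none /mnt/huge
 *
 * "size" accepts a byte count (with K/M/G suffix) or a percentage of
 * the huge page pool.
 */

/*
 * Drop the reference we hold on each page in the pagevec and reset it
 * so the caller can reuse it for the next lookup.
 */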
static void huge_pagevec_release(struct pagevec *pvec)
{
        int i;

        for (i = 0; i < pagevec_count(pvec); ++i)
                put_page(pvec->pages[i]);

        pagevec_reinit(pvec);
}
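
/*
 * mmap() a hugetlbfs file: mark the VMA as a huge page mapping, reserve
 * enough huge pages to back the mapped range, and grow i_size if a
 * writable mapping extends past the current end of file.
 */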
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        loff_t len, vma_len;
        int ret;
        struct hstate *h = hstate_file(file);

        /*
         * vma address alignment (but not the pgoff alignment) has
         * already been checked by prepare_hugepage_range.  If you add
         * any error returns here, do so after setting VM_HUGETLB, so
         * is_vm_hugetlb_page tests below unmap_region go the right
         * way when do_mmap_pgoff unwinds (may be important on powerpc
         * and ia64).
         */
        vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
        vma->vm_ops = &hugetlb_vm_ops;

        if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
                return -EINVAL;

        vma_len = (loff_t)(vma->vm_end - vma->vm_start);

        mutex_lock(&inode->i_mutex);
        file_accessed(file);

        ret = -ENOMEM;
        len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

        if (hugetlb_reserve_pages(inode,
                                vma->vm_pgoff >> huge_page_order(h),
                                len >> huge_page_shift(h), vma,
                                vma->vm_flags))
                goto out;

        ret = 0;
        hugetlb_prefault_arch_hook(vma->vm_mm);

        if (vma->vm_flags & VM_WRITE && inode->i_size < len)
                inode->i_size = len;
out:
        mutex_unlock(&inode->i_mutex);

        return ret;
}

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;
        struct hstate *h = hstate_file(file);

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

        if (len > mm->cached_hole_size)
                start_addr = mm->free_area_cache;
        else {
                start_addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

full_search:
        addr = ALIGN(start_addr, huge_page_size(h));

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }

                if (!vma || addr + len <= vma->vm_start) {
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = ALIGN(vma->vm_end, huge_page_size(h));
        }
}
#endif
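
/*
 * Copy up to @size bytes from a huge page to user space.  The compound
 * page is mapped one PAGE_CACHE_SIZE subpage at a time with kmap() so
 * that the copy also works when the huge page lives in highmem.
 * Returns the number of bytes copied, or -EFAULT if nothing could be
 * copied at all.
 */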
static int
hugetlbfs_read_actor(struct page *page, unsigned long offset,
                        char __user *buf, unsigned long count,
                        unsigned long size)
{
        char *kaddr;
        unsigned long left, copied = 0;
        int i, chunksize;

        if (size > count)
                size = count;

        /* Find which 4k chunk and offset within that chunk */
        i = offset >> PAGE_CACHE_SHIFT;
        offset = offset & ~PAGE_CACHE_MASK;

        while (size) {
                chunksize = PAGE_CACHE_SIZE;
                if (offset)
                        chunksize -= offset;
                if (chunksize > size)
                        chunksize = size;
                kaddr = kmap(&page[i]);
                left = __copy_to_user(buf, kaddr + offset, chunksize);
                kunmap(&page[i]);
                if (left) {
                        copied += (chunksize - left);
                        break;
                }
                offset = 0;
                size -= chunksize;
                buf += chunksize;
                copied += chunksize;
                i++;
        }
        return copied ? copied : -EFAULT;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to do_generic_mapping_read(), we can't use that
 * since it has PAGE_CACHE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
                        size_t len, loff_t *ppos)
{
        struct hstate *h = hstate_file(filp);
        struct address_space *mapping = filp->f_mapping;
        struct inode *inode = mapping->host;
        unsigned long index = *ppos >> huge_page_shift(h);
        unsigned long offset = *ppos & ~huge_page_mask(h);
        unsigned long end_index;
        loff_t isize;
        ssize_t retval = 0;

        /* validate length */
        if (len == 0)
                goto out;

        for (;;) {
                struct page *page;
                unsigned long nr, ret;
                int ra;

                /* nr is the maximum number of bytes to copy from this page */
                nr = huge_page_size(h);
                isize = i_size_read(inode);
                if (!isize)
                        goto out;
                end_index = (isize - 1) >> huge_page_shift(h);
                if (index >= end_index) {
                        if (index > end_index)
                                goto out;
                        nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
                        if (nr <= offset)
                                goto out;
                }
                nr = nr - offset;

                /* Find the page */
                page = find_lock_page(mapping, index);
                if (unlikely(page == NULL)) {
                        /*
                         * We have a HOLE, zero out the user-buffer for the
                         * length of the hole or request.
                         */
                        ret = len < nr ? len : nr;
                        if (clear_user(buf, ret))
                                ra = -EFAULT;
                        else
                                ra = 0;
                } else {
                        unlock_page(page);

                        /*
                         * We have the page, copy it to user space buffer.
                         */
                        ra = hugetlbfs_read_actor(page, offset, buf, len, nr);
                        ret = ra;
                        page_cache_release(page);
                }
                if (ra < 0) {
                        if (retval == 0)
                                retval = ra;
                        goto out;
                }

                offset += ret;
                retval += ret;
                len -= ret;
                index += offset >> huge_page_shift(h);
                offset &= ~huge_page_mask(h);

                /* short read or no more work */
                if ((ret != nr) || (len == 0))
                        break;
        }
out:
        *ppos = ((loff_t)index << huge_page_shift(h)) + offset;
        return retval;
}
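
/*
 * hugetlbfs does not support write(2); files are populated through
 * mmap().  write_begin rejects the generic write path up front, so
 * reaching write_end would be a kernel bug.
 */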
static int hugetlbfs_write_begin(struct file *file,
                        struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        BUG();
        return -EINVAL;
}
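
/*
 * Remove one huge page from the page cache: cancel any dirty state,
 * clear the uptodate flag and drop the page cache reference.
 */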
static void truncate_huge_page(struct page *page)
{
        cancel_dirty_page(page, /* No IO accounting for huge pages? */0);
        ClearPageUptodate(page);
        delete_from_page_cache(page);
}
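
/*
 * Drop every page cached at or beyond @lstart.  The pagevec lookup is
 * restarted from @start until a full pass finds no pages, so entries
 * racing in behind the walk are caught as well; the now-unused
 * reservation is then returned via hugetlb_unreserve_pages().
 */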
static void truncate_hugepages(struct inode *inode, loff_t lstart)
{
        struct hstate *h = hstate_inode(inode);
        struct address_space *mapping = &inode->i_data;
        const pgoff_t start = lstart >> huge_page_shift(h);
        struct pagevec pvec;
        pgoff_t next;
        int i, freed = 0;

        pagevec_init(&pvec, 0);
        next = start;
        while (1) {
                if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                        if (next == start)
                                break;
                        next = start;
                        continue;
                }

                for (i = 0; i < pagevec_count(&pvec); ++i) {
                        struct page *page = pvec.pages[i];

                        lock_page(page);
                        if (page->index > next)
                                next = page->index;
                        ++next;
                        truncate_huge_page(page);
                        unlock_page(page);
                        freed++;
                }
                huge_pagevec_release(&pvec);
        }
        BUG_ON(!lstart && mapping->nrpages);
        hugetlb_unreserve_pages(inode, start, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
        truncate_hugepages(inode, 0);
        end_writeback(inode);
}

static inline void
hugetlb_vmtruncate_list(struct prio_tree_root *root, pgoff_t pgoff)
{
        struct vm_area_struct *vma;
        struct prio_tree_iter iter;

        vma_prio_tree_foreach(vma, &iter, root, pgoff, ULONG_MAX) {
                unsigned long v_offset;

                /*
                 * Can the expression below overflow on 32-bit arches?
                 * No, because the prio_tree returns us only those vmas
                 * which overlap the truncated area starting at pgoff,
                 * and no vma on a 32-bit arch can span beyond 4GB.
                 */
                if (vma->vm_pgoff < pgoff)
                        v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT;
                else
                        v_offset = 0;

                __unmap_hugepage_range(vma,
                                vma->vm_start + v_offset, vma->vm_end, NULL);
        }
}
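
/*
 * Truncate the file to @offset, which the caller must have aligned to a
 * huge page boundary: update i_size, unmap the truncated range from
 * every VMA found in the i_mmap tree, then release the cached pages.
 */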
static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
        pgoff_t pgoff;
        struct address_space *mapping = inode->i_mapping;
        struct hstate *h = hstate_inode(inode);

        BUG_ON(offset & ~huge_page_mask(h));
        pgoff = offset >> PAGE_SHIFT;

        i_size_write(inode, offset);
        mutex_lock(&mapping->i_mmap_mutex);
        if (!prio_tree_empty(&mapping->i_mmap))
                hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
        mutex_unlock(&mapping->i_mmap_mutex);
        truncate_hugepages(inode, offset);
        return 0;
}

static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        struct hstate *h = hstate_inode(inode);
        int error;
        unsigned int ia_valid = attr->ia_valid;

        BUG_ON(!inode);

        error = inode_change_ok(inode, attr);
        if (error)
                return error;

        if (ia_valid & ATTR_SIZE) {
                if (attr->ia_size & ~huge_page_mask(h))
                        return -EINVAL;
                error = hugetlb_vmtruncate(inode, attr->ia_size);
                if (error)
                        return error;
        }

        setattr_copy(inode, attr);
        mark_inode_dirty(inode);
        return 0;
}
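
/*
 * Build the root directory inode for a new mount, applying the uid,
 * gid and mode given (or defaulted) in the mount options.
 */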
static struct inode *hugetlbfs_get_root(struct super_block *sb,
                                        struct hugetlbfs_config *config)
{
        struct inode *inode;

        inode = new_inode(sb);
        if (inode) {
                struct hugetlbfs_inode_info *info;
                inode->i_ino = get_next_ino();
                inode->i_mode = S_IFDIR | config->mode;
                inode->i_uid = config->uid;
                inode->i_gid = config->gid;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                info = HUGETLBFS_I(inode);
                mpol_shared_policy_init(&info->policy, NULL);
                inode->i_op = &hugetlbfs_dir_inode_operations;
                inode->i_fop = &simple_dir_operations;
                /* directory inodes start off with i_nlink == 2 (for "." entry) */
                inc_nlink(inode);
                lockdep_annotate_inode_mutex_key(inode);
        }
        return inode;
}
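
/*
 * Common inode allocator for regular files, directories, symlinks and
 * special nodes created inside a hugetlbfs mount.
 */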
static struct inode *hugetlbfs_get_inode(struct super_block *sb,
                                        struct inode *dir,
                                        umode_t mode, dev_t dev)
{
        struct inode *inode;

        inode = new_inode(sb);
        if (inode) {
                struct hugetlbfs_inode_info *info;
                inode->i_ino = get_next_ino();
                inode_init_owner(inode, dir, mode);
                inode->i_mapping->a_ops = &hugetlbfs_aops;
                inode->i_mapping->backing_dev_info = &hugetlbfs_backing_dev_info;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                INIT_LIST_HEAD(&inode->i_mapping->private_list);
                info = HUGETLBFS_I(inode);
                /*
                 * The policy is initialized here even if we are creating a
                 * private inode because initialization simply creates an
                 * empty rb tree and calls spin_lock_init(), later when we
                 * call mpol_free_shared_policy() it will just return because
                 * the rb tree will still be empty.
                 */
                mpol_shared_policy_init(&info->policy, NULL);
                switch (mode & S_IFMT) {
                default:
                        init_special_inode(inode, mode, dev);
                        break;
                case S_IFREG:
                        inode->i_op = &hugetlbfs_inode_operations;
                        inode->i_fop = &hugetlbfs_file_operations;
                        break;
                case S_IFDIR:
                        inode->i_op = &hugetlbfs_dir_inode_operations;
                        inode->i_fop = &simple_dir_operations;

                        /* directory inodes start off with i_nlink == 2 (for "." entry) */
                        inc_nlink(inode);
                        break;
                case S_IFLNK:
                        inode->i_op = &page_symlink_inode_operations;
                        break;
                }
                lockdep_annotate_inode_mutex_key(inode);
        }
        return inode;
}

/*
 * File creation.  Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct inode *dir,
                        struct dentry *dentry, umode_t mode, dev_t dev)
{
        struct inode *inode;
        int error = -ENOSPC;

        inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
        if (inode) {
                dir->i_ctime = dir->i_mtime = CURRENT_TIME;
                d_instantiate(dentry, inode);
                dget(dentry);   /* Extra count - pin the dentry in core */
                error = 0;
        }
        return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
        int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
        if (!retval)
                inc_nlink(dir);
        return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd)
{
        return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
                        struct dentry *dentry, const char *symname)
{
        struct inode *inode;
        int error = -ENOSPC;

        inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
        if (inode) {
                int l = strlen(symname)+1;
                error = page_symlink(inode, symname, l);
                if (!error) {
                        d_instantiate(dentry, inode);
                        dget(dentry);
                } else
                        iput(inode);
        }
        dir->i_ctime = dir->i_mtime = CURRENT_TIME;

        return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
        struct page *head = compound_head(page);

        SetPageDirty(head);
        return 0;
}
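
/*
 * Move a huge page to a new location for memory migration: transfer
 * the page cache mapping first, then copy contents and flags.
 */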
static int hugetlbfs_migrate_page(struct address_space *mapping,
                                struct page *newpage, struct page *page,
                                enum migrate_mode mode)
{
        int rc;

        rc = migrate_huge_page_move_mapping(mapping, newpage, page);
        if (rc)
                return rc;
        migrate_page_copy(newpage, page);

        return 0;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
        struct hstate *h = hstate_inode(dentry->d_inode);

        buf->f_type = HUGETLBFS_MAGIC;
        buf->f_bsize = huge_page_size(h);
        if (sbinfo) {
                spin_lock(&sbinfo->stat_lock);
                /* If no limits set, just report 0 for max/free/used
                 * blocks, like simple_statfs() */
                if (sbinfo->spool) {
                        long free_pages;

                        spin_lock(&sbinfo->spool->lock);
                        buf->f_blocks = sbinfo->spool->max_hpages;
                        free_pages = sbinfo->spool->max_hpages
                                - sbinfo->spool->used_hpages;
                        buf->f_bavail = buf->f_bfree = free_pages;
                        spin_unlock(&sbinfo->spool->lock);
                        buf->f_files = sbinfo->max_inodes;
                        buf->f_ffree = sbinfo->free_inodes;
                }
                spin_unlock(&sbinfo->stat_lock);
        }
        buf->f_namelen = NAME_MAX;
        return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
        struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

        if (sbi) {
                sb->s_fs_info = NULL;

                if (sbi->spool)
                        hugepage_put_subpool(sbi->spool);

                kfree(sbi);
        }
}
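
/*
 * Inode accounting against the nr_inodes mount limit.  A negative
 * free_inodes count means no limit was configured, so both helpers
 * become no-ops; hugetlbfs_dec_free_inodes() returns 0 once the limit
 * is exhausted.
 */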
static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
        if (sbinfo->free_inodes >= 0) {
                spin_lock(&sbinfo->stat_lock);
                if (unlikely(!sbinfo->free_inodes)) {
                        spin_unlock(&sbinfo->stat_lock);
                        return 0;
                }
                sbinfo->free_inodes--;
                spin_unlock(&sbinfo->stat_lock);
        }

        return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
        if (sbinfo->free_inodes >= 0) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
}

static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
        struct hugetlbfs_inode_info *p;

        if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
                return NULL;
        p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
        if (unlikely(!p)) {
                hugetlbfs_inc_free_inodes(sbinfo);
                return NULL;
        }
        return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
        hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
        mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
        call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

static const struct address_space_operations hugetlbfs_aops = {
        .write_begin    = hugetlbfs_write_begin,
        .write_end      = hugetlbfs_write_end,
        .set_page_dirty = hugetlbfs_set_page_dirty,
        .migratepage    = hugetlbfs_migrate_page,
};

static void init_once(void *foo)
{
        struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

        inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
        .read                   = hugetlbfs_read,
        .mmap                   = hugetlbfs_file_mmap,
        .fsync                  = noop_fsync,
        .get_unmapped_area      = hugetlb_get_unmapped_area,
        .llseek                 = default_llseek,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
        .create         = hugetlbfs_create,
        .lookup         = simple_lookup,
        .link           = simple_link,
        .unlink         = simple_unlink,
        .symlink        = hugetlbfs_symlink,
        .mkdir          = hugetlbfs_mkdir,
        .rmdir          = simple_rmdir,
        .mknod          = hugetlbfs_mknod,
        .rename         = simple_rename,
        .setattr        = hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
        .setattr        = hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
        .alloc_inode    = hugetlbfs_alloc_inode,
        .destroy_inode  = hugetlbfs_destroy_inode,
        .evict_inode    = hugetlbfs_evict_inode,
        .statfs         = hugetlbfs_statfs,
        .put_super      = hugetlbfs_put_super,
        .show_options   = generic_show_options,
};
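
/*
 * Parse the comma-separated mount option string into @pconfig.  "size"
 * is remembered as a byte count or a percentage and only converted to
 * huge-page-sized blocks at the end, once the hstate selected by
 * "pagesize" (or the default) is known.
 */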
static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
        char *p, *rest;
        substring_t args[MAX_OPT_ARGS];
        int option;
        unsigned long long size = 0;
        enum { NO_SIZE, SIZE_STD, SIZE_PERCENT } setsize = NO_SIZE;

        if (!options)
                return 0;

        while ((p = strsep(&options, ",")) != NULL) {
                int token;
                if (!*p)
                        continue;

                token = match_token(p, tokens, args);
                switch (token) {
                case Opt_uid:
                        if (match_int(&args[0], &option))
                                goto bad_val;
                        pconfig->uid = option;
                        break;

                case Opt_gid:
                        if (match_int(&args[0], &option))
                                goto bad_val;
                        pconfig->gid = option;
                        break;

                case Opt_mode:
                        if (match_octal(&args[0], &option))
                                goto bad_val;
                        pconfig->mode = option & 01777U;
                        break;

                case Opt_size: {
                        /* memparse() will accept a K/M/G without a digit */
                        if (!isdigit(*args[0].from))
                                goto bad_val;
                        size = memparse(args[0].from, &rest);
                        setsize = SIZE_STD;
                        if (*rest == '%')
                                setsize = SIZE_PERCENT;
                        break;
                }

                case Opt_nr_inodes:
                        /* memparse() will accept a K/M/G without a digit */
                        if (!isdigit(*args[0].from))
                                goto bad_val;
                        pconfig->nr_inodes = memparse(args[0].from, &rest);
                        break;

                case Opt_pagesize: {
                        unsigned long ps;
                        ps = memparse(args[0].from, &rest);
                        pconfig->hstate = size_to_hstate(ps);
                        if (!pconfig->hstate) {
                                printk(KERN_ERR
                                        "hugetlbfs: Unsupported page size %lu MB\n",
                                        ps >> 20);
                                return -EINVAL;
                        }
                        break;
                }

                default:
                        printk(KERN_ERR "hugetlbfs: Bad mount option: \"%s\"\n",
                                 p);
                        return -EINVAL;
                }
        }

        /* Do size after hstate is set up */
        if (setsize > NO_SIZE) {
                struct hstate *h = pconfig->hstate;
                if (setsize == SIZE_PERCENT) {
                        size <<= huge_page_shift(h);
                        size *= h->max_huge_pages;
                        do_div(size, 100);
                }
                pconfig->nr_blocks = (size >> huge_page_shift(h));
        }

        return 0;

bad_val:
        printk(KERN_ERR "hugetlbfs: Bad value '%s' for mount option '%s'\n",
               args[0].from, p);
        return -EINVAL;
}
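
/*
 * Fill in a new superblock: parse the mount options, set up the
 * per-superblock info (including the subpool that enforces the "size"
 * limit, when one was given) and create the root inode.
 */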
static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
        int ret;
        struct hugetlbfs_config config;
        struct hugetlbfs_sb_info *sbinfo;

        save_mount_options(sb, data);

        config.nr_blocks = -1;          /* No limit on size by default */
        config.nr_inodes = -1;          /* No limit on number of inodes by default */
        config.uid = current_fsuid();
        config.gid = current_fsgid();
        config.mode = 0755;
        config.hstate = &default_hstate;
        ret = hugetlbfs_parse_options(data, &config);
        if (ret)
                return ret;

        sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
        if (!sbinfo)
                return -ENOMEM;
        sb->s_fs_info = sbinfo;
        sbinfo->hstate = config.hstate;
        spin_lock_init(&sbinfo->stat_lock);
        sbinfo->max_inodes = config.nr_inodes;
        sbinfo->free_inodes = config.nr_inodes;
        sbinfo->spool = NULL;
        if (config.nr_blocks != -1) {
                sbinfo->spool = hugepage_new_subpool(config.nr_blocks);
                if (!sbinfo->spool)
                        goto out_free;
        }
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_blocksize = huge_page_size(config.hstate);
        sb->s_blocksize_bits = huge_page_shift(config.hstate);
        sb->s_magic = HUGETLBFS_MAGIC;
        sb->s_op = &hugetlbfs_ops;
        sb->s_time_gran = 1;
        sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
        if (!sb->s_root)
                goto out_free;
        return 0;

out_free:
        kfree(sbinfo->spool);
        kfree(sbinfo);
        return -ENOMEM;
}

static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
{
        return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
        .name           = "hugetlbfs",
        .mount          = hugetlbfs_mount,
        .kill_sb        = kill_litter_super,
};
MODULE_ALIAS_FS("hugetlbfs");

static struct vfsmount *hugetlbfs_vfsmount;
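
/*
 * SHM_HUGETLB callers need either CAP_IPC_LOCK or membership in the
 * group named by the vm.hugetlb_shm_group sysctl.
 */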
static int can_do_hugetlb_shm(void)
{
        return capable(CAP_IPC_LOCK) || in_group_p(sysctl_hugetlb_shm_group);
}
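
/*
 * Back-end for SHM_HUGETLB and MAP_HUGETLB: create an unlinked file on
 * the kernel-internal hugetlbfs mount and reserve huge pages for it.
 */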
/*
 * Note that size should be aligned to the proper hugepage size by the
 * caller, otherwise hugetlb_reserve_pages reserves one fewer huge page
 * than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
                                vm_flags_t acctflag, struct user_struct **user,
                                int creat_flags)
{
        int error = -ENOMEM;
        struct file *file;
        struct inode *inode;
        struct path path;
        struct dentry *root;
        struct qstr quick_string;

        *user = NULL;
        if (!hugetlbfs_vfsmount)
                return ERR_PTR(-ENOENT);

        if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
                *user = current_user();
                if (user_shm_lock(size, *user)) {
                        task_lock(current);
                        printk_once(KERN_WARNING
                                "%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
                                current->comm, current->pid);
                        task_unlock(current);
                } else {
                        *user = NULL;
                        return ERR_PTR(-EPERM);
                }
        }

        root = hugetlbfs_vfsmount->mnt_root;
        quick_string.name = name;
        quick_string.len = strlen(quick_string.name);
        quick_string.hash = 0;
        path.dentry = d_alloc(root, &quick_string);
        if (!path.dentry)
                goto out_shm_unlock;

        path.mnt = mntget(hugetlbfs_vfsmount);
        error = -ENOSPC;
        inode = hugetlbfs_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0);
        if (!inode)
                goto out_dentry;

        error = -ENOMEM;
        if (hugetlb_reserve_pages(inode, 0,
                        size >> huge_page_shift(hstate_inode(inode)), NULL,
                        acctflag))
                goto out_inode;

        d_instantiate(path.dentry, inode);
        inode->i_size = size;
        clear_nlink(inode);

        error = -ENFILE;
        file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
                        &hugetlbfs_file_operations);
        if (!file)
                goto out_dentry; /* inode is already attached */

        return file;

out_inode:
        iput(inode);
out_dentry:
        path_put(&path);
out_shm_unlock:
        if (*user) {
                user_shm_unlock(size, *user);
                *user = NULL;
        }
        return ERR_PTR(error);
}
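
/*
 * Module init: set up the backing_dev_info and inode cache, register
 * the filesystem and create the internal mount used by
 * hugetlb_file_setup().
 */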
static int __init init_hugetlbfs_fs(void)
{
        int error;
        struct vfsmount *vfsmount;

        error = bdi_init(&hugetlbfs_backing_dev_info);
        if (error)
                return error;

        error = -ENOMEM;
        hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
                                        sizeof(struct hugetlbfs_inode_info),
                                        0, 0, init_once);
        if (hugetlbfs_inode_cachep == NULL)
                goto out2;

        error = register_filesystem(&hugetlbfs_fs_type);
        if (error)
                goto out;

        vfsmount = kern_mount(&hugetlbfs_fs_type);

        if (!IS_ERR(vfsmount)) {
                hugetlbfs_vfsmount = vfsmount;
                return 0;
        }

        error = PTR_ERR(vfsmount);

out:
        kmem_cache_destroy(hugetlbfs_inode_cachep);
out2:
        bdi_destroy(&hugetlbfs_backing_dev_info);
        return error;
}

static void __exit exit_hugetlbfs_fs(void)
{
        /*
         * Make sure all delayed rcu free inodes are flushed before we
         * destroy cache.
         */
        rcu_barrier();
        kmem_cache_destroy(hugetlbfs_inode_cachep);
        kern_unmount(hugetlbfs_vfsmount);
        unregister_filesystem(&hugetlbfs_fs_type);
        bdi_destroy(&hugetlbfs_backing_dev_info);
}

module_init(init_hugetlbfs_fs)
module_exit(exit_hugetlbfs_fs)

MODULE_LICENSE("GPL");