tmpfs-idr.patch

SPDX-License-Identifier: GPL-2.0
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 134c686c8676..632d447caf64 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -51,10 +51,13 @@ struct shmem_quota_limits {
 };
 
 struct shmem_sb_info {
+	struct mutex idr_lock;
+	bool idr_nouse;
+	struct idr idr;		    /* manages inode-number */
 	unsigned long max_blocks;   /* How many blocks are allowed */
 	struct percpu_counter used_blocks;  /* How many are allocated */
-	unsigned long max_inodes;   /* How many inodes are allowed */
-	unsigned long free_ispace;  /* How much ispace left for allocation */
+	int max_inodes;		    /* How many inodes are allowed */
+	unsigned long free_ispace;  /* How many are left for allocation */
 	raw_spinlock_t stat_lock;   /* Serialize shmem_sb_info changes */
 	umode_t mode;		    /* Mount mode for root directory */
 	unsigned char huge;	    /* Whether to try for hugepages */
diff --git a/mm/shmem.c b/mm/shmem.c
index db7dd45c9181..554f1c80882e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -111,7 +111,7 @@ struct shmem_falloc {
 
 struct shmem_options {
 	unsigned long long blocks;
-	unsigned long long inodes;
+	int inodes;
 	struct mempolicy *mpol;
 	kuid_t uid;
 	kgid_t gid;
@@ -136,12 +136,14 @@ static unsigned long shmem_default_max_blocks(void)
 	return totalram_pages() / 2;
 }
 
-static unsigned long shmem_default_max_inodes(void)
+static int shmem_default_max_inodes(void)
 {
 	unsigned long nr_pages = totalram_pages();
+	unsigned long ul;
 
-	return min3(nr_pages - totalhigh_pages(), nr_pages / 2,
-		    ULONG_MAX / BOGO_INODE_SIZE);
+	ul = INT_MAX;
+	ul = min3(ul, nr_pages - totalhigh_pages(), nr_pages / 2);
+	return ul;
 }
 #endif
 
@@ -1284,6 +1286,11 @@ static void shmem_evict_inode(struct inode *inode)
 	}
 
 	simple_xattrs_free(&info->xattrs, sbinfo->max_inodes ? &freed : NULL);
+	if (!sbinfo->idr_nouse && inode->i_ino) {
+		mutex_lock(&sbinfo->idr_lock);
+		idr_remove(&sbinfo->idr, inode->i_ino);
+		mutex_unlock(&sbinfo->idr_lock);
+	}
 	shmem_free_inode(inode->i_sb, freed);
 	WARN_ON(inode->i_blocks);
 	clear_inode(inode);
@@ -2525,6 +2532,25 @@ static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
 		break;
 	}
 
+	if (!sbinfo->idr_nouse) {
+		/* inum 0 and 1 are unused */
+		mutex_lock(&sbinfo->idr_lock);
+		ino = idr_alloc(&sbinfo->idr, inode, 2, INT_MAX,
+				GFP_NOFS);
+		if (ino > 0) {
+			inode->i_ino = ino;
+			mutex_unlock(&sbinfo->idr_lock);
+			__insert_inode_hash(inode, inode->i_ino);
+		} else {
+			inode->i_ino = 0;
+			mutex_unlock(&sbinfo->idr_lock);
+			iput(inode);
+			/* shmem_free_inode() will be called */
+			inode = NULL;
+		}
+	} else
+		inode->i_ino = ino;
+
 	lockdep_annotate_inode_mutex_key(inode);
 	return inode;
 }
@@ -3773,8 +3799,7 @@ static struct dentry *shmem_get_parent(struct dentry *child)
 static int shmem_match(struct inode *ino, void *vfh)
 {
 	__u32 *fh = vfh;
-	__u64 inum = fh[2];
-	inum = (inum << 32) | fh[1];
+	__u64 inum = fh[1];
 
 	return ino->i_ino == inum && fh[0] == ino->i_generation;
 }
@@ -3794,14 +3819,11 @@ static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
 	struct dentry *dentry = NULL;
 	u64 inum;
 
-	if (fh_len < 3)
+	if (fh_len < 2)
 		return NULL;
 
-	inum = fid->raw[2];
-	inum = (inum << 32) | fid->raw[1];
-
-	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
-			 shmem_match, fid->raw);
+	inum = fid->raw[1];
+	inode = ilookup5(sb, inum, shmem_match, fid->raw);
 	if (inode) {
 		dentry = shmem_find_alias(inode);
 		iput(inode);
@@ -3813,30 +3835,15 @@ static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
 				struct inode *parent)
 {
-	if (*len < 3) {
-		*len = 3;
+	if (*len < 2) {
+		*len = 2;
 		return FILEID_INVALID;
 	}
 
-	if (inode_unhashed(inode)) {
-		/* Unfortunately insert_inode_hash is not idempotent,
-		 * so as we hash inodes here rather than at creation
-		 * time, we need a lock to ensure we only try
-		 * to do it once
-		 */
-		static DEFINE_SPINLOCK(lock);
-		spin_lock(&lock);
-		if (inode_unhashed(inode))
-			__insert_inode_hash(inode,
-					    inode->i_ino + inode->i_generation);
-		spin_unlock(&lock);
-	}
-
 	fh[0] = inode->i_generation;
 	fh[1] = inode->i_ino;
-	fh[2] = ((__u64)inode->i_ino) >> 32;
 
-	*len = 3;
+	*len = 2;
 	return 1;
 }
 
@@ -3935,7 +3942,7 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
 		break;
 	case Opt_nr_inodes:
 		ctx->inodes = memparse(param->string, &rest);
-		if (*rest || ctx->inodes > ULONG_MAX / BOGO_INODE_SIZE)
+		if (*rest || ctx->inodes < 2 || ctx->inodes > INT_MAX)
 			goto bad_value;
 		ctx->seen |= SHMEM_SEEN_INODES;
 		break;
@@ -4221,7 +4228,7 @@ static int shmem_show_options(struct seq_file *seq, struct dentry *root)
 	if (sbinfo->max_blocks != shmem_default_max_blocks())
 		seq_printf(seq, ",size=%luk", K(sbinfo->max_blocks));
 	if (sbinfo->max_inodes != shmem_default_max_inodes())
-		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
+		seq_printf(seq, ",nr_inodes=%d", sbinfo->max_inodes);
 	if (sbinfo->mode != (0777 | S_ISVTX))
 		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
 	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
@@ -4275,6 +4282,8 @@ static void shmem_put_super(struct super_block *sb)
 #ifdef CONFIG_TMPFS_QUOTA
 	shmem_disable_quotas(sb);
 #endif
+	if (!sbinfo->idr_nouse)
+		idr_destroy(&sbinfo->idr);
 	free_percpu(sbinfo->ino_batch);
 	percpu_counter_destroy(&sbinfo->used_blocks);
 	mpol_put(sbinfo->mpol);
@@ -4319,9 +4328,11 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
 #else
 	sb->s_flags |= SB_NOUSER;
 #endif
+	mutex_init(&sbinfo->idr_lock);
+	idr_init(&sbinfo->idr);
 	sbinfo->max_blocks = ctx->blocks;
 	sbinfo->max_inodes = ctx->inodes;
-	sbinfo->free_ispace = sbinfo->max_inodes * BOGO_INODE_SIZE;
+	sbinfo->free_ispace = (unsigned long)sbinfo->max_inodes * BOGO_INODE_SIZE;
 	if (sb->s_flags & SB_KERNMOUNT) {
 		sbinfo->ino_batch = alloc_percpu(ino_t);
 		if (!sbinfo->ino_batch)
@@ -4464,6 +4475,15 @@ static int shmem_error_remove_page(struct address_space *mapping,
 	return 0;
 }
 
+static __init void shmem_no_idr(struct super_block *sb)
+{
+	struct shmem_sb_info *sbinfo;
+
+	sbinfo = SHMEM_SB(sb);
+	sbinfo->idr_nouse = true;
+	idr_destroy(&sbinfo->idr);
+}
+
 const struct address_space_operations shmem_aops = {
 	.writepage = shmem_writepage,
 	.dirty_folio = noop_dirty_folio,
@@ -4637,6 +4657,7 @@ void __init shmem_init(void)
 		pr_err("Could not kern_mount tmpfs\n");
 		goto out1;
 	}
+	shmem_no_idr(shm_mnt->mnt_sb);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)