/* kernel/ucount.c — per-user-namespace resource ("ucount") accounting */
  1. /*
  2. * This program is free software; you can redistribute it and/or
  3. * modify it under the terms of the GNU General Public License as
  4. * published by the Free Software Foundation, version 2 of the
  5. * License.
  6. */
#include <linux/cred.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/user_namespace.h>
#define UCOUNTS_HASHTABLE_BITS 10

/* Global hash table of struct ucounts, keyed by (namespace, uid). */
static struct hlist_head ucounts_hashtable[(1 << UCOUNTS_HASHTABLE_BITS)];
/* Protects ucounts_hashtable and every ucounts->count. */
static DEFINE_SPINLOCK(ucounts_lock);

/*
 * Hash the uid together with the owning namespace pointer so the same
 * uid in different namespaces lands in (usually) different buckets.
 */
#define ucounts_hashfn(ns, uid)						\
	hash_long((unsigned long)__kuid_val(uid) + (unsigned long)(ns), \
		  UCOUNTS_HASHTABLE_BITS)
#define ucounts_hashentry(ns, uid)	\
	(ucounts_hashtable + ucounts_hashfn(ns, uid))
  21. #ifdef CONFIG_SYSCTL
  22. static struct ctl_table_set *
  23. set_lookup(struct ctl_table_root *root)
  24. {
  25. return &current_user_ns()->set;
  26. }
  27. static int set_is_seen(struct ctl_table_set *set)
  28. {
  29. return &current_user_ns()->set == set;
  30. }
/*
 * Compute the effective permissions the current task gets on a
 * /proc/sys/user entry.  The chosen permission class is replicated into
 * the user, group and other bits so the check passes regardless of the
 * file's apparent ownership.
 */
static int set_permissions(struct ctl_table_header *head,
			   struct ctl_table *table)
{
	struct user_namespace *user_ns =
		container_of(head->set, struct user_namespace, set);
	int mode;

	/* Allow users with CAP_SYS_RESOURCE unrestrained access */
	if (ns_capable(user_ns, CAP_SYS_RESOURCE))
		mode = (table->mode & S_IRWXU) >> 6;
	else
		/* Allow all others at most read-only access */
		mode = table->mode & S_IROTH;
	return (mode << 6) | (mode << 3) | mode;
}
/* sysctl root for the per-user-namespace "user" directory. */
static struct ctl_table_root set_root = {
	.lookup = set_lookup,
	.permissions = set_permissions,
};
/* Bounds handed to proc_dointvec_minmax: limits live in [0, INT_MAX]. */
static int zero = 0;
static int int_max = INT_MAX;

/*
 * One read-write (0644) integer limit entry.  .data is left NULL here
 * and pointed at the per-namespace ucount_max slot by
 * setup_userns_sysctls().
 */
#define UCOUNT_ENTRY(name)				\
	{						\
		.procname = name,			\
		.maxlen = sizeof(int),			\
		.mode = 0644,				\
		.proc_handler = proc_dointvec_minmax,	\
		.extra1 = &zero,			\
		.extra2 = &int_max,			\
	}
/*
 * Template for the per-namespace /proc/sys/user table.
 * setup_userns_sysctls() indexes this array with the same i it uses for
 * ucount_max[i], so entry order must track the ucount_type values.
 */
static struct ctl_table user_table[] = {
	UCOUNT_ENTRY("max_user_namespaces"),
	UCOUNT_ENTRY("max_pid_namespaces"),
	UCOUNT_ENTRY("max_uts_namespaces"),
	UCOUNT_ENTRY("max_ipc_namespaces"),
	UCOUNT_ENTRY("max_net_namespaces"),
	UCOUNT_ENTRY("max_mnt_namespaces"),
	UCOUNT_ENTRY("max_cgroup_namespaces"),
#ifdef CONFIG_INOTIFY_USER
	UCOUNT_ENTRY("max_inotify_instances"),
	UCOUNT_ENTRY("max_inotify_watches"),
#endif
	{ }	/* sentinel */
};
  74. #endif /* CONFIG_SYSCTL */
/*
 * Register the /proc/sys/user/ table for @ns.
 * Returns true on success.  On failure the sysctl set is retired and the
 * table copy freed before returning false.  Without CONFIG_SYSCTL this is
 * a no-op that always succeeds.
 */
bool setup_userns_sysctls(struct user_namespace *ns)
{
#ifdef CONFIG_SYSCTL
	struct ctl_table *tbl;

	setup_sysctl_set(&ns->set, &set_root, set_is_seen);
	/* Copy the template so each namespace gets private .data slots. */
	tbl = kmemdup(user_table, sizeof(user_table), GFP_KERNEL);
	if (tbl) {
		int i;
		/* Point every entry at this namespace's own limit. */
		for (i = 0; i < UCOUNT_COUNTS; i++) {
			tbl[i].data = &ns->ucount_max[i];
		}
		ns->sysctls = __register_sysctl_table(&ns->set, "user", tbl);
	}
	/*
	 * NOTE(review): when kmemdup fails this reads ns->sysctls without
	 * having written it — relies on the namespace being zero-initialized
	 * by its allocator; confirm against the callers.
	 */
	if (!ns->sysctls) {
		kfree(tbl);	/* kfree(NULL) is a no-op */
		retire_sysctl_set(&ns->set);
		return false;
	}
#endif
	return true;
}
/*
 * Undo setup_userns_sysctls().  The table pointer must be saved before
 * unregistering, because unregister_sysctl_table() drops our only handle
 * to the kmemdup'd copy.
 */
void retire_userns_sysctls(struct user_namespace *ns)
{
#ifdef CONFIG_SYSCTL
	struct ctl_table *tbl;

	tbl = ns->sysctls->ctl_table_arg;
	unregister_sysctl_table(ns->sysctls);
	retire_sysctl_set(&ns->set);
	kfree(tbl);
#endif
}
  106. static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid, struct hlist_head *hashent)
  107. {
  108. struct ucounts *ucounts;
  109. hlist_for_each_entry(ucounts, hashent, node) {
  110. if (uid_eq(ucounts->uid, uid) && (ucounts->ns == ns))
  111. return ucounts;
  112. }
  113. return NULL;
  114. }
/*
 * Look up the ucounts entry for (ns, uid), creating it on demand, and
 * take a reference.  Uses the classic unlock-allocate-relock pattern:
 * after reacquiring the lock the hash is rechecked, since another task
 * may have inserted the entry while we slept in kzalloc().
 * Returns NULL on allocation failure or if the refcount would overflow.
 */
static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
{
	struct hlist_head *hashent = ucounts_hashentry(ns, uid);
	struct ucounts *ucounts, *new;

	spin_lock_irq(&ucounts_lock);
	ucounts = find_ucounts(ns, uid, hashent);
	if (!ucounts) {
		/* Must drop the lock: GFP_KERNEL may sleep. */
		spin_unlock_irq(&ucounts_lock);
		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return NULL;

		new->ns = ns;
		new->uid = uid;
		new->count = 0;

		spin_lock_irq(&ucounts_lock);
		/* Recheck: we may have raced with another inserter. */
		ucounts = find_ucounts(ns, uid, hashent);
		if (ucounts) {
			kfree(new);	/* lost the race; use the winner */
		} else {
			hlist_add_head(&new->node, hashent);
			ucounts = new;
		}
	}
	/* Refuse rather than wrap the reference count. */
	if (ucounts->count == INT_MAX)
		ucounts = NULL;
	else
		ucounts->count += 1;
	spin_unlock_irq(&ucounts_lock);
	return ucounts;
}
  145. static void put_ucounts(struct ucounts *ucounts)
  146. {
  147. unsigned long flags;
  148. spin_lock_irqsave(&ucounts_lock, flags);
  149. ucounts->count -= 1;
  150. if (!ucounts->count)
  151. hlist_del_init(&ucounts->node);
  152. else
  153. ucounts = NULL;
  154. spin_unlock_irqrestore(&ucounts_lock, flags);
  155. kfree(ucounts);
  156. }
  157. static inline bool atomic_inc_below(atomic_t *v, int u)
  158. {
  159. int c, old;
  160. c = atomic_read(v);
  161. for (;;) {
  162. if (unlikely(c >= u))
  163. return false;
  164. old = atomic_cmpxchg(v, c, c+1);
  165. if (likely(old == c))
  166. return true;
  167. c = old;
  168. }
  169. }
/*
 * Charge one @type resource to @uid in @ns and at every level reached
 * through the ns->ucounts chain (presumably the ancestor namespaces —
 * see struct user_namespace).  Each level must stay below its
 * ucount_max[type] limit; on refusal all increments already taken are
 * rolled back.
 *
 * Returns the referenced leaf ucounts on success, NULL on failure.
 * Pair with dec_ucount().
 */
struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid,
			   enum ucount_type type)
{
	struct ucounts *ucounts, *iter, *bad;
	struct user_namespace *tns;

	ucounts = get_ucounts(ns, uid);
	for (iter = ucounts; iter; iter = tns->ucounts) {
		int max;
		tns = iter->ns;
		/* The limit may change concurrently via sysctl. */
		max = READ_ONCE(tns->ucount_max[type]);
		if (!atomic_inc_below(&iter->ucount[type], max))
			goto fail;
	}
	return ucounts;
fail:
	/* Undo the increments taken before the level that refused. */
	bad = iter;
	for (iter = ucounts; iter != bad; iter = iter->ns->ucounts)
		atomic_dec(&iter->ucount[type]);

	put_ucounts(ucounts);
	return NULL;
}
  191. void dec_ucount(struct ucounts *ucounts, enum ucount_type type)
  192. {
  193. struct ucounts *iter;
  194. for (iter = ucounts; iter; iter = iter->ns->ucounts) {
  195. int dec = atomic_dec_if_positive(&iter->ucount[type]);
  196. WARN_ON_ONCE(dec < 0);
  197. }
  198. put_ucounts(ucounts);
  199. }
  200. static __init int user_namespace_sysctl_init(void)
  201. {
  202. #ifdef CONFIG_SYSCTL
  203. static struct ctl_table_header *user_header;
  204. static struct ctl_table empty[1];
  205. /*
  206. * It is necessary to register the user directory in the
  207. * default set so that registrations in the child sets work
  208. * properly.
  209. */
  210. user_header = register_sysctl("user", empty);
  211. kmemleak_ignore(user_header);
  212. BUG_ON(!user_header);
  213. BUG_ON(!setup_userns_sysctls(&init_user_ns));
  214. #endif
  215. return 0;
  216. }
  217. subsys_initcall(user_namespace_sysctl_init);