  1. /*
  2. * Pid namespaces
  3. *
  4. * Authors:
  5. * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
  6. * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
  7. * Many thanks to Oleg Nesterov for comments and help
  8. *
  9. */
  10. #include <linux/pid.h>
  11. #include <linux/pid_namespace.h>
  12. #include <linux/syscalls.h>
  13. #include <linux/err.h>
  14. #include <linux/acct.h>
  15. #include <linux/slab.h>
  16. #include <linux/proc_fs.h>
/* One pid per bit: number of pids tracked by a single pidmap page. */
#define BITS_PER_PAGE (PAGE_SIZE*8)
/*
 * One kmem cache for struct pid sized to hold a given number of upids.
 * Caches live on pid_caches_lh (protected by pid_caches_mutex) and are
 * reused by every namespace needing the same nr_ids; entries are never
 * removed once added.
 */
struct pid_cache {
	int nr_ids;			/* upids carried by pids from this cache */
	char name[16];			/* slab name, "pid_<nr_ids>" */
	struct kmem_cache *cachep;	/* the cache itself */
	struct list_head list;		/* link on pid_caches_lh */
};

static LIST_HEAD(pid_caches_lh);
static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;	/* slab for struct pid_namespace */
/*
 * creates the kmem cache to allocate pids from.
 * @nr_ids: the number of numerical ids this pid will have to carry
 *
 * Returns an existing cache if one for @nr_ids is already on
 * pid_caches_lh; otherwise allocates and registers a new one.
 * Returns NULL on allocation failure.  Entries are only ever added
 * (under pid_caches_mutex), never removed, so the pointer returned
 * after unlocking stays valid.
 */
static struct kmem_cache *create_pid_cachep(int nr_ids)
{
	struct pid_cache *pcache;
	struct kmem_cache *cachep;

	mutex_lock(&pid_caches_mutex);
	/* Reuse an already-created cache for this nr_ids, if any. */
	list_for_each_entry(pcache, &pid_caches_lh, list)
		if (pcache->nr_ids == nr_ids)
			goto out;

	pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
	if (pcache == NULL)
		goto err_alloc;

	snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
	/* struct pid already embeds one struct upid; add nr_ids - 1 more. */
	cachep = kmem_cache_create(pcache->name,
			sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (cachep == NULL)
		goto err_cachep;

	pcache->nr_ids = nr_ids;
	pcache->cachep = cachep;
	list_add(&pcache->list, &pid_caches_lh);
out:
	mutex_unlock(&pid_caches_mutex);
	return pcache->cachep;

err_cachep:
	kfree(pcache);
err_alloc:
	mutex_unlock(&pid_caches_mutex);
	return NULL;
}
/*
 * Allocate and initialize a new pid namespace nested one level below
 * @parent_pid_ns.  Takes a reference on the parent.  Returns the new
 * namespace, or an ERR_PTR on failure; error labels fall through so
 * each completed step is undone in reverse order.
 */
static struct pid_namespace *create_pid_namespace(struct pid_namespace *parent_pid_ns)
{
	struct pid_namespace *ns;
	unsigned int level = parent_pid_ns->level + 1;
	int i, err = -ENOMEM;

	ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
	if (ns == NULL)
		goto out;

	/* First pidmap page; the remaining entries stay lazily unallocated. */
	ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!ns->pidmap[0].page)
		goto out_free;

	/* A pid at level N carries N+1 numerical ids, one per ancestor ns. */
	ns->pid_cachep = create_pid_cachep(level + 1);
	if (ns->pid_cachep == NULL)
		goto out_free_map;

	kref_init(&ns->kref);
	ns->level = level;
	ns->parent = get_pid_ns(parent_pid_ns);

	/* Pid 0 is never handed out: mark it busy in the first map page. */
	set_bit(0, ns->pidmap[0].page);
	atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);

	for (i = 1; i < PIDMAP_ENTRIES; i++)
		atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);

	err = pid_ns_prepare_proc(ns);
	if (err)
		goto out_put_parent_pid_ns;

	return ns;

out_put_parent_pid_ns:
	put_pid_ns(parent_pid_ns);
out_free_map:
	kfree(ns->pidmap[0].page);
out_free:
	kmem_cache_free(pid_ns_cachep, ns);
out:
	return ERR_PTR(err);
}
  94. static void destroy_pid_namespace(struct pid_namespace *ns)
  95. {
  96. int i;
  97. for (i = 0; i < PIDMAP_ENTRIES; i++)
  98. kfree(ns->pidmap[i].page);
  99. kmem_cache_free(pid_ns_cachep, ns);
  100. }
  101. struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns)
  102. {
  103. if (!(flags & CLONE_NEWPID))
  104. return get_pid_ns(old_ns);
  105. if (flags & (CLONE_THREAD|CLONE_PARENT))
  106. return ERR_PTR(-EINVAL);
  107. return create_pid_namespace(old_ns);
  108. }
  109. void free_pid_ns(struct kref *kref)
  110. {
  111. struct pid_namespace *ns, *parent;
  112. ns = container_of(kref, struct pid_namespace, kref);
  113. parent = ns->parent;
  114. destroy_pid_namespace(ns);
  115. if (parent != NULL)
  116. put_pid_ns(parent);
  117. }
/*
 * Kill and reap every remaining task in @pid_ns.  Invoked when the
 * namespace's init is going away, so nothing may outlive it.
 */
void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
	int nr;
	int rc;
	struct task_struct *task;

	/*
	 * The last thread in the cgroup-init thread group is terminating.
	 * Find remaining pids in the namespace, signal and wait for them
	 * to exit.
	 *
	 * Note: this signals each thread in the namespace - even those that
	 * belong to the same thread group.  To avoid this, we would have
	 * to walk the entire tasklist looking for processes in this
	 * namespace, but that could be unnecessarily expensive if the
	 * pid namespace has just a few processes.  Or we need to
	 * maintain a tasklist for each pid namespace.
	 */
	read_lock(&tasklist_lock);
	/* presumably next_pidmap() yields allocated pids after the given
	 * one, so starting at 1 skips the ns init — TODO confirm. */
	nr = next_pidmap(pid_ns, 1);
	while (nr > 0) {
		rcu_read_lock();
		/*
		 * Any nested-container's init processes won't ignore the
		 * SEND_SIG_NOINFO signal, see send_signal()->si_fromuser().
		 */
		task = pid_task(find_vpid(nr), PIDTYPE_PID);
		if (task)
			send_sig_info(SIGKILL, SEND_SIG_NOINFO, task);
		rcu_read_unlock();
		nr = next_pidmap(pid_ns, nr);
	}
	read_unlock(&tasklist_lock);

	/* Reap children; clear the pending-signal flag each round so the
	 * wait is not short-circuited, looping until none remain. */
	do {
		clear_thread_flag(TIF_SIGPENDING);
		rc = sys_wait4(-1, NULL, __WALL, NULL);
	} while (rc != -ECHILD);

	acct_exit_ns(pid_ns);
	return;
}
/*
 * Boot-time setup: create the slab cache backing struct pid_namespace
 * allocations.  SLAB_PANIC makes a failure here fatal at boot.
 */
static __init int pid_namespaces_init(void)
{
	pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);
	return 0;
}
__initcall(pid_namespaces_init);