shm.c

/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

struct shm_file_data {
        int id;
        struct ipc_namespace *ns;
        struct file *file;
        const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp) \
        ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
        ns->shm_ctlmax = SHMMAX;
        ns->shm_ctlall = SHMALL;
        ns->shm_ctlmni = SHMMNI;
        ns->shm_rmid_forced = 0;
        ns->shm_tot = 0;
        ipc_init_ids(&shm_ids(ns));
}
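
/*
 * The per-namespace limits initialised above are exposed as the
 * kernel.shmmax, kernel.shmall and kernel.shmmni sysctls, and
 * shm_rmid_forced as kernel.shm_rmid_forced.
 */
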
/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
        struct shmid_kernel *shp;
        shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        if (shp->shm_nattch) {
                shp->shm_perm.mode |= SHM_DEST;
                /* Do not find it any more */
                shp->shm_perm.key = IPC_PRIVATE;
                shm_unlock(shp);
        } else
                shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
        free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
        idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

static int __init ipc_ns_init(void)
{
        shm_init_ns(&init_ipc_ns);
        return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
        ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
                                " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
#else
                                " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
#endif
                                IPC_SHM_IDS, sysvipc_shm_proc_show);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return (struct shmid_kernel *)ipcp;

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
        rcu_read_lock();
        spin_lock(&ipcp->shm_perm.lock);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
                                                int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return (struct shmid_kernel *)ipcp;

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
        ipc_rmid(&shm_ids(ns), &s->shm_perm);
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;

        shp = shm_lock(sfd->ns, sfd->id);
        BUG_ON(IS_ERR(shp));
        shp->shm_atim = get_seconds();
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_nattch++;
        shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
        shm_rmid(ns, shp);
        shm_unlock(shp);
        if (!is_file_hugepages(shp->shm_file))
                shmem_lock(shp->shm_file, 0, shp->mlock_user);
        else if (shp->mlock_user)
                user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
                                                shp->mlock_user);
        fput(shp->shm_file);
        security_shm_free(shp);
        ipc_rcu_putref(shp);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        return (shp->shm_nattch == 0) &&
               (ns->shm_rmid_forced ||
                (shp->shm_perm.mode & SHM_DEST));
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;
        struct ipc_namespace *ns = sfd->ns;

        down_write(&shm_ids(ns).rw_mutex);
        /* remove from the list of attaches of the shm segment */
        shp = shm_lock(ns, sfd->id);
        BUG_ON(IS_ERR(shp));
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_dtim = get_seconds();
        shp->shm_nattch--;
        if (shm_may_destroy(ns, shp))
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rw_mutex);
}

/* Called with ns->shm_ids(ns).rw_mutex locked */
static int shm_try_destroy_current(int id, void *p, void *data)
{
        struct ipc_namespace *ns = data;
        struct kern_ipc_perm *ipcp = p;
        struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        if (shp->shm_creator != current)
                return 0;

        /*
         * Mark it as orphaned to destroy the segment when
         * kernel.shm_rmid_forced is changed.
         * It is a noop if the following shm_may_destroy() returns true.
         */
        shp->shm_creator = NULL;

        /*
         * Don't even try to destroy it. If shm_rmid_forced=0 and IPC_RMID
         * is not set, it shouldn't be deleted here.
         */
        if (!ns->shm_rmid_forced)
                return 0;

        if (shm_may_destroy(ns, shp)) {
                shm_lock_by_ptr(shp);
                shm_destroy(ns, shp);
        }
        return 0;
}

/* Called with ns->shm_ids(ns).rw_mutex locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
        struct ipc_namespace *ns = data;
        struct kern_ipc_perm *ipcp = p;
        struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        /*
         * We want to destroy segments without users and with already
         * exit'ed originating process.
         *
         * As shp->* are changed under rw_mutex, it's safe to skip shp locking.
         */
        if (shp->shm_creator != NULL)
                return 0;

        if (shm_may_destroy(ns, shp)) {
                shm_lock_by_ptr(shp);
                shm_destroy(ns, shp);
        }
        return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
        down_write(&shm_ids(ns).rw_mutex);
        if (shm_ids(ns).in_use)
                idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
        up_write(&shm_ids(ns).rw_mutex);
}
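
/*
 * exit_shm - called from the task-exit path.  It always clears the creator
 * pointer of segments this task created, so they can later be reaped as
 * orphans, and with kernel.shm_rmid_forced enabled it destroys any of them
 * that are no longer attached.
 */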
void exit_shm(struct task_struct *task)
{
        struct ipc_namespace *ns = task->nsproxy->ipc_ns;

        if (shm_ids(ns).in_use == 0)
                return;

        /* Destroy all already created segments, but not mapped yet */
        down_write(&shm_ids(ns).rw_mutex);
        if (shm_ids(ns).in_use)
                idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
        up_write(&shm_ids(ns).rw_mutex);
}

static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);

        return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        int err = 0;

        if (sfd->vm_ops->set_policy)
                err = sfd->vm_ops->set_policy(vma, new);
        return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
                                        unsigned long addr)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct mempolicy *pol = NULL;

        if (sfd->vm_ops->get_policy)
                pol = sfd->vm_ops->get_policy(vma, addr);
        else if (vma->vm_policy)
                pol = vma->vm_policy;

        return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct shm_file_data *sfd = shm_file_data(file);
        int ret;

        ret = sfd->file->f_op->mmap(sfd->file, vma);
        if (ret != 0)
                return ret;
        sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
        BUG_ON(!sfd->vm_ops->fault);
#endif
        vma->vm_ops = &shm_vm_ops;
        shm_open(vma);

        return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
        struct shm_file_data *sfd = shm_file_data(file);

        put_ipc_ns(sfd->ns);
        shm_file_data(file) = NULL;
        kfree(sfd);
        return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct shm_file_data *sfd = shm_file_data(file);

        if (!sfd->file->f_op->fsync)
                return -EINVAL;
        return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static unsigned long shm_get_unmapped_area(struct file *file,
        unsigned long addr, unsigned long len, unsigned long pgoff,
        unsigned long flags)
{
        struct shm_file_data *sfd = shm_file_data(file);

        return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
                                                  pgoff, flags);
}

static const struct file_operations shm_file_operations = {
        .mmap = shm_mmap,
        .fsync = shm_fsync,
        .release = shm_release,
#ifndef CONFIG_MMU
        .get_unmapped_area = shm_get_unmapped_area,
#endif
        .llseek = noop_llseek,
};

static const struct file_operations shm_file_operations_huge = {
        .mmap = shm_mmap,
        .fsync = shm_fsync,
        .release = shm_release,
        .get_unmapped_area = shm_get_unmapped_area,
        .llseek = noop_llseek,
};

int is_file_shm_hugepages(struct file *file)
{
        return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
        .open = shm_open,       /* callback for a new vm-area open */
        .close = shm_close,     /* callback for when the vm-area is released */
        .fault = shm_fault,
#if defined(CONFIG_NUMA)
        .set_policy = shm_set_policy,
        .get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
        key_t key = params->key;
        int shmflg = params->flg;
        size_t size = params->u.size;
        int error;
        struct shmid_kernel *shp;
        size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct file *file;
        char name[13];
        int id;
        vm_flags_t acctflag = 0;

        if (size < SHMMIN || size > ns->shm_ctlmax)
                return -EINVAL;

        if (ns->shm_tot + numpages > ns->shm_ctlall)
                return -ENOSPC;

        shp = ipc_rcu_alloc(sizeof(*shp));
        if (!shp)
                return -ENOMEM;

        shp->shm_perm.key = key;
        shp->shm_perm.mode = (shmflg & S_IRWXUGO);
        shp->mlock_user = NULL;

        shp->shm_perm.security = NULL;
        error = security_shm_alloc(shp);
        if (error) {
                ipc_rcu_putref(shp);
                return error;
        }

        sprintf(name, "SYSV%08x", key);
        if (shmflg & SHM_HUGETLB) {
                size_t hugesize = ALIGN(size, huge_page_size(&default_hstate));

                /* hugetlb_file_setup applies strict accounting */
                if (shmflg & SHM_NORESERVE)
                        acctflag = VM_NORESERVE;
                file = hugetlb_file_setup(name, hugesize, acctflag,
                                          &shp->mlock_user, HUGETLB_SHMFS_INODE);
        } else {
                /*
                 * Do not allow accounting to be skipped under
                 * OVERCOMMIT_NEVER, even if it's asked for.
                 */
                if ((shmflg & SHM_NORESERVE) &&
                                sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        acctflag = VM_NORESERVE;
                file = shmem_file_setup(name, size, acctflag);
        }
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto no_file;

        shp->shm_cprid = task_tgid_vnr(current);
        shp->shm_lprid = 0;
        shp->shm_atim = shp->shm_dtim = 0;
        shp->shm_ctim = get_seconds();
        shp->shm_segsz = size;
        shp->shm_nattch = 0;
        shp->shm_file = file;
        shp->shm_creator = current;

        id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
        if (id < 0) {
                error = id;
                goto no_id;
        }

        /*
         * shmid gets reported as "inode#" in /proc/pid/maps.
         * proc-ps tools use this. Changing this will break them.
         */
        file->f_dentry->d_inode->i_ino = shp->shm_perm.id;

        ns->shm_tot += numpages;
        error = shp->shm_perm.id;
        shm_unlock(shp);
        return error;

no_id:
        if (is_file_hugepages(file) && shp->mlock_user)
                user_shm_unlock(size, shp->mlock_user);
        fput(file);
no_file:
        security_shm_free(shp);
        ipc_rcu_putref(shp);
        return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
                                  struct ipc_params *params)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        if (shp->shm_segsz < params->u.size)
                return -EINVAL;

        return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
        struct ipc_namespace *ns;
        struct ipc_ops shm_ops;
        struct ipc_params shm_params;

        ns = current->nsproxy->ipc_ns;

        shm_ops.getnew = newseg;
        shm_ops.associate = shm_security;
        shm_ops.more_checks = shm_more_checks;

        shm_params.key = key;
        shm_params.flg = shmflg;
        shm_params.u.size = size;

        return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
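
/*
 * Illustrative user-space usage (a sketch, not taken from this file): the
 * syscall above is reached through the shmget(2) wrapper, e.g.
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	if (id < 0)
 *		perror("shmget");
 *
 * newseg() rounds the 4096-byte request up to whole pages and charges it
 * against the kernel.shmall limit (ns->shm_ctlall).
 */
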
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
        {
                struct shmid_ds out;

                memset(&out, 0, sizeof(out));
                ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
                out.shm_segsz = in->shm_segsz;
                out.shm_atime = in->shm_atime;
                out.shm_dtime = in->shm_dtime;
                out.shm_ctime = in->shm_ctime;
                out.shm_cpid = in->shm_cpid;
                out.shm_lpid = in->shm_lpid;
                out.shm_nattch = in->shm_nattch;

                return copy_to_user(buf, &out, sizeof(out));
        }
        default:
                return -EINVAL;
        }
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
        switch (version) {
        case IPC_64:
                if (copy_from_user(out, buf, sizeof(*out)))
                        return -EFAULT;
                return 0;
        case IPC_OLD:
        {
                struct shmid_ds tbuf_old;

                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->shm_perm.uid = tbuf_old.shm_perm.uid;
                out->shm_perm.gid = tbuf_old.shm_perm.gid;
                out->shm_perm.mode = tbuf_old.shm_perm.mode;

                return 0;
        }
        default:
                return -EINVAL;
        }
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
        {
                struct shminfo out;

                if (in->shmmax > INT_MAX)
                        out.shmmax = INT_MAX;
                else
                        out.shmmax = (int)in->shmmax;

                out.shmmin = in->shmmin;
                out.shmmni = in->shmmni;
                out.shmseg = in->shmseg;
                out.shmall = in->shmall;

                return copy_to_user(buf, &out, sizeof(out));
        }
        default:
                return -EINVAL;
        }
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
        unsigned long *rss_add, unsigned long *swp_add)
{
        struct inode *inode;

        inode = shp->shm_file->f_path.dentry->d_inode;

        if (is_file_hugepages(shp->shm_file)) {
                struct address_space *mapping = inode->i_mapping;
                struct hstate *h = hstate_file(shp->shm_file);
                *rss_add += pages_per_huge_page(h) * mapping->nrpages;
        } else {
#ifdef CONFIG_SHMEM
                struct shmem_inode_info *info = SHMEM_I(inode);
                spin_lock(&info->lock);
                *rss_add += inode->i_mapping->nrpages;
                *swp_add += info->swapped;
                spin_unlock(&info->lock);
#else
                *rss_add += inode->i_mapping->nrpages;
#endif
        }
}

/*
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
                unsigned long *swp)
{
        int next_id;
        int total, in_use;

        *rss = 0;
        *swp = 0;

        in_use = shm_ids(ns).in_use;

        for (total = 0, next_id = 0; total < in_use; next_id++) {
                struct kern_ipc_perm *ipc;
                struct shmid_kernel *shp;

                ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
                if (ipc == NULL)
                        continue;
                shp = container_of(ipc, struct shmid_kernel, shm_perm);

                shm_add_rss_swap(shp, rss, swp);

                total++;
        }
}

/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
                       struct shmid_ds __user *buf, int version)
{
        struct kern_ipc_perm *ipcp;
        struct shmid64_ds shmid64;
        struct shmid_kernel *shp;
        int err;

        if (cmd == IPC_SET) {
                if (copy_shmid_from_user(&shmid64, buf, version))
                        return -EFAULT;
        }

        ipcp = ipcctl_pre_down(ns, &shm_ids(ns), shmid, cmd,
                               &shmid64.shm_perm, 0);
        if (IS_ERR(ipcp))
                return PTR_ERR(ipcp);

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        err = security_shm_shmctl(shp, cmd);
        if (err)
                goto out_unlock;
        switch (cmd) {
        case IPC_RMID:
                do_shm_rmid(ns, ipcp);
                goto out_up;
        case IPC_SET:
                ipc_update_perm(&shmid64.shm_perm, ipcp);
                shp->shm_ctim = get_seconds();
                break;
        default:
                err = -EINVAL;
        }
out_unlock:
        shm_unlock(shp);
out_up:
        up_write(&shm_ids(ns).rw_mutex);
        return err;
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
        struct shmid_kernel *shp;
        int err, version;
        struct ipc_namespace *ns;

        if (cmd < 0 || shmid < 0) {
                err = -EINVAL;
                goto out;
        }

        version = ipc_parse_version(&cmd);
        ns = current->nsproxy->ipc_ns;

        switch (cmd) { /* replace with proc interface ? */
        case IPC_INFO:
        {
                struct shminfo64 shminfo;

                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;

                memset(&shminfo, 0, sizeof(shminfo));
                shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
                shminfo.shmmax = ns->shm_ctlmax;
                shminfo.shmall = ns->shm_ctlall;

                shminfo.shmmin = SHMMIN;
                if (copy_shminfo_to_user(buf, &shminfo, version))
                        return -EFAULT;

                down_read(&shm_ids(ns).rw_mutex);
                err = ipc_get_maxid(&shm_ids(ns));
                up_read(&shm_ids(ns).rw_mutex);

                if (err < 0)
                        err = 0;
                goto out;
        }
        case SHM_INFO:
        {
                struct shm_info shm_info;

                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;

                memset(&shm_info, 0, sizeof(shm_info));
                down_read(&shm_ids(ns).rw_mutex);
                shm_info.used_ids = shm_ids(ns).in_use;
                shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
                shm_info.shm_tot = ns->shm_tot;
                shm_info.swap_attempts = 0;
                shm_info.swap_successes = 0;
                err = ipc_get_maxid(&shm_ids(ns));
                up_read(&shm_ids(ns).rw_mutex);
                if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
                        err = -EFAULT;
                        goto out;
                }

                err = err < 0 ? 0 : err;
                goto out;
        }
        case SHM_STAT:
        case IPC_STAT:
        {
                struct shmid64_ds tbuf;
                int result;

                if (cmd == SHM_STAT) {
                        shp = shm_lock(ns, shmid);
                        if (IS_ERR(shp)) {
                                err = PTR_ERR(shp);
                                goto out;
                        }
                        result = shp->shm_perm.id;
                } else {
                        shp = shm_lock_check(ns, shmid);
                        if (IS_ERR(shp)) {
                                err = PTR_ERR(shp);
                                goto out;
                        }
                        result = 0;
                }
                err = -EACCES;
                if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
                        goto out_unlock;
                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;
                memset(&tbuf, 0, sizeof(tbuf));
                kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
                tbuf.shm_segsz = shp->shm_segsz;
                tbuf.shm_atime = shp->shm_atim;
                tbuf.shm_dtime = shp->shm_dtim;
                tbuf.shm_ctime = shp->shm_ctim;
                tbuf.shm_cpid = shp->shm_cprid;
                tbuf.shm_lpid = shp->shm_lprid;
                tbuf.shm_nattch = shp->shm_nattch;
                shm_unlock(shp);
                if (copy_shmid_to_user(buf, &tbuf, version))
                        err = -EFAULT;
                else
                        err = result;
                goto out;
        }
        case SHM_LOCK:
        case SHM_UNLOCK:
        {
                struct file *shm_file;

                shp = shm_lock_check(ns, shmid);
                if (IS_ERR(shp)) {
                        err = PTR_ERR(shp);
                        goto out;
                }

                audit_ipc_obj(&(shp->shm_perm));

                if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
                        uid_t euid = current_euid();
                        err = -EPERM;
                        if (euid != shp->shm_perm.uid &&
                            euid != shp->shm_perm.cuid)
                                goto out_unlock;
                        if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
                                goto out_unlock;
                }

                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;

                shm_file = shp->shm_file;
                if (is_file_hugepages(shm_file))
                        goto out_unlock;

                if (cmd == SHM_LOCK) {
                        struct user_struct *user = current_user();
                        err = shmem_lock(shm_file, 1, user);
                        if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
                                shp->shm_perm.mode |= SHM_LOCKED;
                                shp->mlock_user = user;
                        }
                        goto out_unlock;
                }

                /* SHM_UNLOCK */
                if (!(shp->shm_perm.mode & SHM_LOCKED))
                        goto out_unlock;
                shmem_lock(shm_file, 0, shp->mlock_user);
                shp->shm_perm.mode &= ~SHM_LOCKED;
                shp->mlock_user = NULL;
                get_file(shm_file);
                shm_unlock(shp);
                shmem_unlock_mapping(shm_file->f_mapping);
                fput(shm_file);
                goto out;
        }
        case IPC_RMID:
        case IPC_SET:
                err = shmctl_down(ns, shmid, cmd, buf, version);
                return err;
        default:
                return -EINVAL;
        }

out_unlock:
        shm_unlock(shp);
out:
        return err;
}
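
/*
 * Illustrative user-space usage (a sketch, not taken from this file):
 *
 *	struct shmid_ds ds;
 *	if (shmctl(id, IPC_STAT, &ds) == 0)
 *		printf("nattch=%lu\n", (unsigned long)ds.shm_nattch);
 *	shmctl(id, IPC_RMID, NULL);
 *
 * IPC_STAT is served by the SHM_STAT/IPC_STAT branch above, while IPC_RMID
 * goes through shmctl_down() and either destroys the segment immediately or
 * marks it SHM_DEST until the last attach goes away.
 */
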
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
              unsigned long shmlba)
{
        struct shmid_kernel *shp;
        unsigned long addr;
        unsigned long size;
        struct file *file;
        int err;
        unsigned long flags;
        unsigned long prot;
        int acc_mode;
        unsigned long user_addr;
        struct ipc_namespace *ns;
        struct shm_file_data *sfd;
        struct path path;
        fmode_t f_mode;

        err = -EINVAL;
        if (shmid < 0)
                goto out;
        else if ((addr = (ulong)shmaddr)) {
                if (addr & (shmlba - 1)) {
                        /*
                         * Round down to the nearest multiple of shmlba.
                         * For sane do_mmap_pgoff() parameters, avoid
                         * round downs that trigger nil-page and MAP_FIXED.
                         */
                        if ((shmflg & SHM_RND) && addr >= shmlba)
                                addr &= ~(shmlba - 1);
                        else
#ifndef __ARCH_FORCE_SHMLBA
                                if (addr & ~PAGE_MASK)
#endif
                                        goto out;
                }

                flags = MAP_SHARED | MAP_FIXED;
        } else {
                if ((shmflg & SHM_REMAP))
                        goto out;

                flags = MAP_SHARED;
        }

        if (shmflg & SHM_RDONLY) {
                prot = PROT_READ;
                acc_mode = S_IRUGO;
                f_mode = FMODE_READ;
        } else {
                prot = PROT_READ | PROT_WRITE;
                acc_mode = S_IRUGO | S_IWUGO;
                f_mode = FMODE_READ | FMODE_WRITE;
        }
        if (shmflg & SHM_EXEC) {
                prot |= PROT_EXEC;
                acc_mode |= S_IXUGO;
        }

        /*
         * We cannot rely on the fs check since SYSV IPC does have an
         * additional creator id...
         */
        ns = current->nsproxy->ipc_ns;
        shp = shm_lock_check(ns, shmid);
        if (IS_ERR(shp)) {
                err = PTR_ERR(shp);
                goto out;
        }

        err = -EACCES;
        if (ipcperms(ns, &shp->shm_perm, acc_mode))
                goto out_unlock;

        err = security_shm_shmat(shp, shmaddr, shmflg);
        if (err)
                goto out_unlock;

        path = shp->shm_file->f_path;
        path_get(&path);
        shp->shm_nattch++;
        size = i_size_read(path.dentry->d_inode);
        shm_unlock(shp);

        err = -ENOMEM;
        sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
        if (!sfd)
                goto out_put_dentry;

        file = alloc_file(&path, f_mode,
                          is_file_hugepages(shp->shm_file) ?
                                &shm_file_operations_huge :
                                &shm_file_operations);
        if (!file)
                goto out_free;

        file->private_data = sfd;
        file->f_mapping = shp->shm_file->f_mapping;
        sfd->id = shp->shm_perm.id;
        sfd->ns = get_ipc_ns(ns);
        sfd->file = shp->shm_file;
        sfd->vm_ops = NULL;

        down_write(&current->mm->mmap_sem);
        if (addr && !(shmflg & SHM_REMAP)) {
                err = -EINVAL;
                if (find_vma_intersection(current->mm, addr, addr + size))
                        goto invalid;
                /*
                 * If shm segment goes below stack, make sure there is some
                 * space left for the stack to grow (at least 4 pages).
                 */
                if (addr < current->mm->start_stack &&
                    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
                        goto invalid;
        }

        user_addr = do_mmap(file, addr, size, prot, flags, 0);
        *raddr = user_addr;
        err = 0;
        if (IS_ERR_VALUE(user_addr))
                err = (long)user_addr;
invalid:
        up_write(&current->mm->mmap_sem);

        fput(file);

out_nattch:
        down_write(&shm_ids(ns).rw_mutex);
        shp = shm_lock(ns, shmid);
        BUG_ON(IS_ERR(shp));
        shp->shm_nattch--;
        if (shm_may_destroy(ns, shp))
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rw_mutex);

out:
        return err;

out_unlock:
        shm_unlock(shp);
        goto out;

out_free:
        kfree(sfd);
out_put_dentry:
        path_put(&path);
        goto out_nattch;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
        unsigned long ret;
        long err;

        err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
        if (err)
                return err;
        force_successful_syscall_return();
        return (long)ret;
}
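
/*
 * Illustrative user-space usage (a sketch, not taken from this file):
 *
 *	void *p = shmat(id, NULL, 0);
 *	if (p == (void *)-1)
 *		perror("shmat");
 *	shmdt(p);
 *
 * A NULL address lets do_shmat() fall through to a plain MAP_SHARED mapping
 * at a kernel-chosen address; a non-NULL address must be SHMLBA-aligned
 * unless SHM_RND is also passed.  Detach is handled by sys_shmdt() below.
 */
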
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr = (unsigned long)shmaddr;
        int retval = -EINVAL;
#ifdef CONFIG_MMU
        loff_t size = 0;
        struct vm_area_struct *next;
#endif

        if (addr & ~PAGE_MASK)
                return retval;

        down_write(&mm->mmap_sem);

        /*
         * This function tries to be smart and unmap shm segments that
         * were modified by partial mlock or munmap calls:
         * - It first determines the size of the shm segment that should be
         *   unmapped: It searches for a vma that is backed by shm and that
         *   started at address shmaddr. It records its size and then unmaps
         *   it.
         * - Then it unmaps all shm vmas that started at shmaddr and that
         *   are within the initially determined size.
         * Errors from do_munmap are ignored: the function only fails if
         * it's called with invalid parameters or if it's called to unmap
         * a part of a vma. Both calls in this function are for full vmas,
         * the parameters are directly copied from the vma itself and always
         * valid - therefore do_munmap cannot fail. (famous last words?)
         */
        /*
         * If it had been mremap()'d, the starting address would not
         * match the usual checks anyway. So assume all vma's are
         * above the starting address given.
         */
        vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
        while (vma) {
                next = vma->vm_next;

                /*
                 * Check if the starting address would match, i.e. it's
                 * a fragment created by mprotect() and/or munmap(), or it
                 * otherwise starts at this address with no hassles.
                 */
                if ((vma->vm_ops == &shm_vm_ops) &&
                        (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

                        size = vma->vm_file->f_path.dentry->d_inode->i_size;
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                        /*
                         * We discovered the size of the shm segment, so
                         * break out of here and fall through to the next
                         * loop that uses the size information to stop
                         * searching for matching vma's.
                         */
                        retval = 0;
                        vma = next;
                        break;
                }
                vma = next;
        }

        /*
         * We need look no further than the maximum address a fragment
         * could possibly have landed at. Also cast things to loff_t to
         * prevent overflows and make comparisons vs. equal-width types.
         */
        size = PAGE_ALIGN(size);
        while (vma && (loff_t)(vma->vm_end - addr) <= size) {
                next = vma->vm_next;

                /* finding a matching vma now does not alter retval */
                if ((vma->vm_ops == &shm_vm_ops) &&
                        (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                vma = next;
        }

#else /* CONFIG_MMU */
        /* under NOMMU conditions, the exact address to be destroyed must be
           given */
        retval = -EINVAL;
        if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
                do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                retval = 0;
        }

#endif

        up_write(&mm->mmap_sem);
        return retval;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
        struct shmid_kernel *shp = it;
        unsigned long rss = 0, swp = 0;

        shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

        return seq_printf(s,
                          "%10d %10d %4o " SIZE_SPEC " %5u %5u "
                          "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
                          SIZE_SPEC " " SIZE_SPEC "\n",
                          shp->shm_perm.key,
                          shp->shm_perm.id,
                          shp->shm_perm.mode,
                          shp->shm_segsz,
                          shp->shm_cprid,
                          shp->shm_lprid,
                          shp->shm_nattch,
                          shp->shm_perm.uid,
                          shp->shm_perm.gid,
                          shp->shm_perm.cuid,
                          shp->shm_perm.cgid,
                          shp->shm_atim,
                          shp->shm_dtim,
                          shp->shm_ctim,
                          rss * PAGE_SIZE,
                          swp * PAGE_SIZE);
}
#endif