/*
 * linux/ipc/msg.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/capability.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <asm/current.h>
#include <asm/uaccess.h>
#include "util.h"
/*
 * one msg_receiver structure for each sleeping receiver:
 */
struct msg_receiver {
	struct list_head r_list;
	struct task_struct *r_tsk;
	int r_mode;
	long r_msgtype;
	long r_maxsize;
	struct msg_msg *volatile r_msg;
};

/* one msg_sender for each sleeping sender */
struct msg_sender {
	struct list_head list;
	struct task_struct *tsk;
};

#define SEARCH_ANY		1
#define SEARCH_EQUAL		2
#define SEARCH_NOTEQUAL		3
#define SEARCH_LESSEQUAL	4

#define msg_ids(ns)	((ns)->ids[IPC_MSG_IDS])

#define msg_unlock(msq)	ipc_unlock(&(msq)->q_perm)

static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
static int newque(struct ipc_namespace *, struct ipc_params *);
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
#endif

/*
 * Scale msgmni with the available lowmem size: the memory dedicated to msg
 * queues should occupy at most 1/MSG_MEM_SCALE of lowmem.
 * Also take into account the number of nsproxies created so far.
 * This should be done staying within the (MSGMNI , IPCMNI/nr_ipc_ns) range.
 */
void recompute_msgmni(struct ipc_namespace *ns)
{
	struct sysinfo i;
	unsigned long allowed;
	int nb_ns;

	si_meminfo(&i);
	allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
		/ MSGMNB;
	nb_ns = atomic_read(&nr_ipc_ns);
	allowed /= nb_ns;

	if (allowed < MSGMNI) {
		ns->msg_ctlmni = MSGMNI;
		return;
	}

	if (allowed > IPCMNI / nb_ns) {
		ns->msg_ctlmni = IPCMNI / nb_ns;
		return;
	}

	ns->msg_ctlmni = allowed;
}
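
/*
 * Worked example for the scaling above (illustrative numbers only; the
 * actual MSG_MEM_SCALE and MSGMNB values depend on the configuration):
 * with 1 GiB of lowmem, MSG_MEM_SCALE == 32 and MSGMNB == 16384,
 * allowed = (1 GiB / 32) / 16384 = 2048 queue ids, which is then divided
 * by the number of ipc namespaces and clamped to [MSGMNI, IPCMNI/nr_ipc_ns].
 */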
void msg_init_ns(struct ipc_namespace *ns)
{
	ns->msg_ctlmax = MSGMAX;
	ns->msg_ctlmnb = MSGMNB;

	recompute_msgmni(ns);

	atomic_set(&ns->msg_bytes, 0);
	atomic_set(&ns->msg_hdrs, 0);
	ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
}

#ifdef CONFIG_IPC_NS
void msg_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &msg_ids(ns), freeque);
	idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
}
#endif

void __init msg_init(void)
{
	msg_init_ns(&init_ipc_ns);

	printk(KERN_INFO "msgmni has been set to %d\n",
		init_ipc_ns.msg_ctlmni);

	ipc_init_proc_interface("sysvipc/msg",
		" key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n",
		IPC_MSG_IDS, sysvipc_msg_proc_show);
}

/*
 * msg_lock_(check_) routines are called in the paths where the rw_mutex
 * is not held.
 */
static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct msg_queue *)ipcp;

	return container_of(ipcp, struct msg_queue, q_perm);
}

static inline struct msg_queue *msg_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct msg_queue *)ipcp;

	return container_of(ipcp, struct msg_queue, q_perm);
}

static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
{
	ipc_rmid(&msg_ids(ns), &s->q_perm);
}

/**
 * newque - Create a new msg queue
 * @ns: namespace
 * @params: ptr to the structure that contains the key and msgflg
 *
 * Called with msg_ids.rw_mutex held (writer)
 */
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
	struct msg_queue *msq;
	int id, retval;
	key_t key = params->key;
	int msgflg = params->flg;

	msq = ipc_rcu_alloc(sizeof(*msq));
	if (!msq)
		return -ENOMEM;

	msq->q_perm.mode = msgflg & S_IRWXUGO;
	msq->q_perm.key = key;

	msq->q_perm.security = NULL;
	retval = security_msg_queue_alloc(msq);
	if (retval) {
		ipc_rcu_putref(msq);
		return retval;
	}

	/*
	 * ipc_addid() locks msq
	 */
	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
	if (id < 0) {
		security_msg_queue_free(msq);
		ipc_rcu_putref(msq);
		return id;
	}

	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = get_seconds();
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = ns->msg_ctlmnb;
	msq->q_lspid = msq->q_lrpid = 0;
	INIT_LIST_HEAD(&msq->q_messages);
	INIT_LIST_HEAD(&msq->q_receivers);
	INIT_LIST_HEAD(&msq->q_senders);

	msg_unlock(msq);

	return msq->q_perm.id;
}
static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
	mss->tsk = current;
	current->state = TASK_INTERRUPTIBLE;
	list_add_tail(&mss->list, &msq->q_senders);
}

static inline void ss_del(struct msg_sender *mss)
{
	if (mss->list.next != NULL)
		list_del(&mss->list);
}

static void ss_wakeup(struct list_head *h, int kill)
{
	struct list_head *tmp;

	tmp = h->next;
	while (tmp != h) {
		struct msg_sender *mss;

		mss = list_entry(tmp, struct msg_sender, list);
		tmp = tmp->next;
		if (kill)
			mss->list.next = NULL;
		wake_up_process(mss->tsk);
	}
}
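
/*
 * expunge_all(): wake up every receiver sleeping on this queue and hand it
 * the error code 'res'.  r_msg is written twice around wake_up_process():
 * the receiver in do_msgrcv() spins until r_msg becomes non-NULL, and the
 * final value is only stored after the wakeup (with an smp_mb() in
 * between); see the "Lockless receive" comments in do_msgrcv() below.
 */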
static void expunge_all(struct msg_queue *msq, int res)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		msr->r_msg = NULL;
		wake_up_process(msr->r_tsk);
		smp_mb();
		msr->r_msg = ERR_PTR(res);
	}
}

/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from message queue ID IDR, and cleans up all the
 * messages associated with this queue.
 *
 * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.rw_mutex remains locked on exit.
 */
static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct list_head *tmp;
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	expunge_all(msq, -EIDRM);
	ss_wakeup(&msq->q_senders, 1);
	msg_rmid(ns, msq);
	msg_unlock(msq);

	tmp = msq->q_messages.next;
	while (tmp != &msq->q_messages) {
		struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list);

		tmp = tmp->next;
		atomic_dec(&ns->msg_hdrs);
		free_msg(msg);
	}
	atomic_sub(msq->q_cbytes, &ns->msg_bytes);
	security_msg_queue_free(msq);
	ipc_rcu_putref(msq);
}
/*
 * Called with msg_ids.rw_mutex and ipcp locked.
 */
static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
{
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	return security_msg_queue_associate(msq, msgflg);
}

SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops msg_ops;
	struct ipc_params msg_params;

	ns = current->nsproxy->ipc_ns;

	msg_ops.getnew = newque;
	msg_ops.associate = msg_security;
	msg_ops.more_checks = NULL;

	msg_params.key = key;
	msg_params.flg = msgflg;

	return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}
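
/*
 * Userspace view of the syscall above (an illustrative sketch, not part of
 * this file): a process typically creates or looks up a queue with
 *
 *	int id = msgget(ftok("/some/path", 'q'), IPC_CREAT | 0666);
 *
 * and ipcget() resolves that to newque() for a new key, or to the
 * msg_security()/ipcperms() checks for an existing one.
 */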
static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct msqid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime = in->msg_stime;
		out.msg_rtime = in->msg_rtime;
		out.msg_ctime = in->msg_ctime;

		if (in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes = USHRT_MAX;
		else
			out.msg_cbytes = in->msg_cbytes;
		out.msg_lcbytes = in->msg_cbytes;

		if (in->msg_qnum > USHRT_MAX)
			out.msg_qnum = USHRT_MAX;
		else
			out.msg_qnum = in->msg_qnum;

		if (in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes = USHRT_MAX;
		else
			out.msg_qbytes = in->msg_qbytes;
		out.msg_lqbytes = in->msg_qbytes;

		out.msg_lspid = in->msg_lspid;
		out.msg_lrpid = in->msg_lrpid;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct msqid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->msg_perm.uid = tbuf_old.msg_perm.uid;
		out->msg_perm.gid = tbuf_old.msg_perm.gid;
		out->msg_perm.mode = tbuf_old.msg_perm.mode;

		if (tbuf_old.msg_qbytes == 0)
			out->msg_qbytes = tbuf_old.msg_lqbytes;
		else
			out->msg_qbytes = tbuf_old.msg_qbytes;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
/*
 * This function handles some msgctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
		       struct msqid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct msqid64_ds uninitialized_var(msqid64);
	struct msg_queue *msq;
	int err;

	if (cmd == IPC_SET) {
		if (copy_msqid_from_user(&msqid64, buf, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down(ns, &msg_ids(ns), msqid, cmd,
			       &msqid64.msg_perm, msqid64.msg_qbytes);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	msq = container_of(ipcp, struct msg_queue, q_perm);

	err = security_msg_queue_msgctl(msq, cmd);
	if (err)
		goto out_unlock;

	switch (cmd) {
	case IPC_RMID:
		freeque(ns, ipcp);
		goto out_up;
	case IPC_SET:
		if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
		    !capable(CAP_SYS_RESOURCE)) {
			err = -EPERM;
			goto out_unlock;
		}

		msq->q_qbytes = msqid64.msg_qbytes;

		ipc_update_perm(&msqid64.msg_perm, ipcp);
		msq->q_ctime = get_seconds();
		/* sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq, -EAGAIN);
		/* sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders, 0);
		break;
	default:
		err = -EINVAL;
	}

out_unlock:
	msg_unlock(msq);
out_up:
	up_write(&msg_ids(ns).rw_mutex);
	return err;
}
SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
{
	struct msg_queue *msq;
	int err, version;
	struct ipc_namespace *ns;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	{
		struct msginfo msginfo;
		int max_id;

		if (!buf)
			return -EFAULT;
		/*
		 * We must not return kernel stack data.
		 * due to padding, it's not enough
		 * to set all member fields.
		 */
		err = security_msg_queue_msgctl(NULL, cmd);
		if (err)
			return err;

		memset(&msginfo, 0, sizeof(msginfo));
		msginfo.msgmni = ns->msg_ctlmni;
		msginfo.msgmax = ns->msg_ctlmax;
		msginfo.msgmnb = ns->msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		down_read(&msg_ids(ns).rw_mutex);
		if (cmd == MSG_INFO) {
			msginfo.msgpool = msg_ids(ns).in_use;
			msginfo.msgmap = atomic_read(&ns->msg_hdrs);
			msginfo.msgtql = atomic_read(&ns->msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = ipc_get_maxid(&msg_ids(ns));
		up_read(&msg_ids(ns).rw_mutex);
		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case MSG_STAT:	/* msqid is an index rather than a msg queue id */
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;

		if (!buf)
			return -EFAULT;

		if (cmd == MSG_STAT) {
			msq = msg_lock(ns, msqid);
			if (IS_ERR(msq))
				return PTR_ERR(msq);
			success_return = msq->q_perm.id;
		} else {
			msq = msg_lock_check(ns, msqid);
			if (IS_ERR(msq))
				return PTR_ERR(msq);
			success_return = 0;
		}
		err = -EACCES;
		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
			goto out_unlock;

		err = security_msg_queue_msgctl(msq, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime = msq->q_stime;
		tbuf.msg_rtime = msq->q_rtime;
		tbuf.msg_ctime = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid = msq->q_lspid;
		tbuf.msg_lrpid = msq->q_lrpid;
		msg_unlock(msq);
		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}
	case IPC_SET:
	case IPC_RMID:
		err = msgctl_down(ns, msqid, cmd, buf, version);
		return err;
	default:
		return -EINVAL;
	}

out_unlock:
	msg_unlock(msq);
	return err;
}
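
/*
 * Illustrative userspace counterpart of the paths above (a sketch, not part
 * of this file): query a queue with IPC_STAT and raise its byte limit with
 * IPC_SET, which lands in msgctl_down():
 *
 *	struct msqid_ds ds;
 *	msgctl(id, IPC_STAT, &ds);
 *	ds.msg_qbytes = 65536;
 *	msgctl(id, IPC_SET, &ds);
 *
 * Raising msg_qbytes above msg_ctlmnb requires CAP_SYS_RESOURCE, as checked
 * in msgctl_down().
 */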
static int testmsg(struct msg_msg *msg, long type, int mode)
{
	switch(mode)
	{
		case SEARCH_ANY:
			return 1;
		case SEARCH_LESSEQUAL:
			if (msg->m_type <= type)
				return 1;
			break;
		case SEARCH_EQUAL:
			if (msg->m_type == type)
				return 1;
			break;
		case SEARCH_NOTEQUAL:
			if (msg->m_type != type)
				return 1;
			break;
	}
	return 0;
}
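
/*
 * pipelined_send(): try to hand 'msg' directly to a receiver that is already
 * sleeping on the queue instead of enqueueing it.  Returns 1 if a receiver
 * took the message (wake-one semantics), 0 if the caller must add it to
 * q_messages.  A receiver whose buffer is too small for the message gets
 * -E2BIG and is dropped from the receiver list.
 */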
static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
					       msr->r_msgtype, msr->r_mode)) {

			list_del(&msr->r_list);
			if (msr->r_maxsize < msg->m_ts) {
				msr->r_msg = NULL;
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = ERR_PTR(-E2BIG);
			} else {
				msr->r_msg = NULL;
				msq->q_lrpid = task_pid_vnr(msr->r_tsk);
				msq->q_rtime = get_seconds();
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = msg;

				return 1;
			}
		}
	}
	return 0;
}
long do_msgsnd(int msqid, long mtype, void __user *mtext,
		size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int err;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(mtext, msgsz);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_free;
	}

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(ns, &msq->q_perm, S_IWUGO))
			goto out_unlock_free;

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock_free;

		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
				1 + msq->q_qnum <= msq->q_qbytes) {
			break;
		}

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock_free;
		}
		ss_add(msq, &s);
		ipc_rcu_getref(msq);
		msg_unlock(msq);
		schedule();

		ipc_lock_by_ptr(&msq->q_perm);
		ipc_rcu_putref(msq);
		if (msq->q_perm.deleted) {
			err = -EIDRM;
			goto out_unlock_free;
		}
		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock_free;
		}
	}

	msq->q_lspid = task_tgid_vnr(current);
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz, &ns->msg_bytes);
		atomic_inc(&ns->msg_hdrs);
	}

	err = 0;
	msg = NULL;

out_unlock_free:
	msg_unlock(msq);
out_free:
	if (msg != NULL)
		free_msg(msg);
	return err;
}
SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
		int, msgflg)
{
	long mtype;

	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;
	return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}
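
/*
 * The wrapper above splits the userspace struct msgbuf { long mtype;
 * char mtext[]; } into its two halves: mtype is fetched with get_user()
 * and the payload pointer is handed to do_msgsnd().  An illustrative
 * caller (a sketch, not part of this file):
 *
 *	struct { long mtype; char mtext[64]; } m = { 2, "hello" };
 *	msgsnd(id, &m, sizeof(m.mtext), 0);
 *
 * msgsz counts only the mtext bytes, never the leading mtype.
 */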
static inline int convert_mode(long *msgtyp, int msgflg)
{
	/*
	 * find message of correct type.
	 * msgtyp = 0 => get first.
	 * msgtyp > 0 => get first message of matching type.
	 * msgtyp < 0 => get the message with the lowest type that is
	 *               <= abs(msgtyp).
	 */
	if (*msgtyp == 0)
		return SEARCH_ANY;
	if (*msgtyp < 0) {
		*msgtyp = -*msgtyp;
		return SEARCH_LESSEQUAL;
	}
	if (msgflg & MSG_EXCEPT)
		return SEARCH_NOTEQUAL;
	return SEARCH_EQUAL;
}
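
/*
 * Example of the mapping above: a caller passing msgtyp == -5 ends up with
 * msgtyp == 5 and SEARCH_LESSEQUAL, so do_msgrcv() returns the queued
 * message with the lowest type that is <= 5; msgtyp == 5 with MSG_EXCEPT
 * means "first message whose type is not 5" (SEARCH_NOTEQUAL).
 */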
long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
		size_t msgsz, long msgtyp, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int mode;
	struct ipc_namespace *ns;

	if (msqid < 0 || (long) msgsz < 0)
		return -EINVAL;
	mode = convert_mode(&msgtyp, msgflg);
	ns = current->nsproxy->ipc_ns;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq))
		return PTR_ERR(msq);

	for (;;) {
		struct msg_receiver msr_d;
		struct list_head *tmp;

		msg = ERR_PTR(-EACCES);
		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
			goto out_unlock;

		msg = ERR_PTR(-EAGAIN);
		tmp = msq->q_messages.next;
		while (tmp != &msq->q_messages) {
			struct msg_msg *walk_msg;

			walk_msg = list_entry(tmp, struct msg_msg, m_list);
			if (testmsg(walk_msg, msgtyp, mode) &&
			    !security_msg_queue_msgrcv(msq, walk_msg, current,
						       msgtyp, mode)) {

				msg = walk_msg;
				if (mode == SEARCH_LESSEQUAL &&
						walk_msg->m_type != 1) {
					msg = walk_msg;
					msgtyp = walk_msg->m_type - 1;
				} else {
					msg = walk_msg;
					break;
				}
			}
			tmp = tmp->next;
		}
		if (!IS_ERR(msg)) {
			/*
			 * Found a suitable message.
			 * Unlink it from the queue.
			 */
			if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock;
			}
			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = task_tgid_vnr(current);
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts, &ns->msg_bytes);
			atomic_dec(&ns->msg_hdrs);
			ss_wakeup(&msq->q_senders, 0);
			msg_unlock(msq);
			break;
		}
		/* No message waiting. Wait for a message */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock;
		}
		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = msgsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		current->state = TASK_INTERRUPTIBLE;
		msg_unlock(msq);

		schedule();

		/* Lockless receive, part 1:
		 * Disable preemption. We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existence of msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed.
		 * rcu_read_lock() prevents preemption between reading r_msg
		 * and the spin_lock() inside ipc_lock_by_ptr().
		 */
		rcu_read_lock();

		/* Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(), see
		 * ipc/mqueue.c for the details.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		while (msg == NULL) {
			cpu_relax();
			msg = (struct msg_msg *)msr_d.r_msg;
		}

		/* Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if (msg != ERR_PTR(-EAGAIN)) {
			rcu_read_unlock();
			break;
		}

		/* Lockless receive, part 3:
		 * Acquire the queue spinlock.
		 */
		ipc_lock_by_ptr(&msq->q_perm);
		rcu_read_unlock();

		/* Lockless receive, part 4:
		 * Repeat test after acquiring the spinlock.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
			msg_unlock(msq);
			break;
		}
	}
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
	*pmtype = msg->m_type;
	if (store_msg(mtext, msg, msgsz))
		msgsz = -EFAULT;

	free_msg(msg);

	return msgsz;
}
SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
		long, msgtyp, int, msgflg)
{
	long err, mtype;

	err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg);
	if (err < 0)
		goto out;

	if (put_user(mtype, &msgp->mtype))
		err = -EFAULT;
out:
	return err;
}
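
/*
 * Illustrative receive call matching the sketch after msgsnd() above
 * (again, not part of this file):
 *
 *	struct { long mtype; char mtext[64]; } m;
 *	ssize_t n = msgrcv(id, &m, sizeof(m.mtext), -5, 0);
 *
 * With msgtyp == -5 this picks the lowest-typed queued message of type <= 5;
 * on success n is the number of mtext bytes stored, and m.mtype holds the
 * message type written back by the wrapper above via put_user().
 */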
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
	struct msg_queue *msq = it;

	return seq_printf(s,
			"%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
			msq->q_perm.key,
			msq->q_perm.id,
			msq->q_perm.mode,
			msq->q_cbytes,
			msq->q_qnum,
			msq->q_lspid,
			msq->q_lrpid,
			msq->q_perm.uid,
			msq->q_perm.gid,
			msq->q_perm.cuid,
			msq->q_perm.cgid,
			msq->q_stime,
			msq->q_rtime,
			msq->q_ctime);
}
#endif