/*
 * Copyright (c) 2015, Sony Mobile Communications Inc.
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/qrtr.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */

#include <net/sock.h>

#include "qrtr.h"

#define QRTR_PROTO_VER 1

/* auto-bind range */
#define QRTR_MIN_EPH_SOCKET 0x4000
#define QRTR_MAX_EPH_SOCKET 0x7fff

enum qrtr_pkt_type {
	QRTR_TYPE_DATA = 1,
	QRTR_TYPE_HELLO = 2,
	QRTR_TYPE_BYE = 3,
	QRTR_TYPE_NEW_SERVER = 4,
	QRTR_TYPE_DEL_SERVER = 5,
	QRTR_TYPE_DEL_CLIENT = 6,
	QRTR_TYPE_RESUME_TX = 7,
	QRTR_TYPE_EXIT = 8,
	QRTR_TYPE_PING = 9,
};

/**
 * struct qrtr_hdr - (I|R)PCrouter packet header
 * @version: protocol version
 * @type: packet type; one of QRTR_TYPE_*
 * @src_node_id: source node
 * @src_port_id: source port
 * @confirm_rx: boolean; whether a resume-tx packet should be sent in reply
 * @size: length of packet, excluding this header
 * @dst_node_id: destination node
 * @dst_port_id: destination port
 */
struct qrtr_hdr {
	__le32 version;
	__le32 type;
	__le32 src_node_id;
	__le32 src_port_id;
	__le32 confirm_rx;
	__le32 size;
	__le32 dst_node_id;
	__le32 dst_port_id;
} __packed;

#define QRTR_HDR_SIZE sizeof(struct qrtr_hdr)
#define QRTR_NODE_BCAST ((unsigned int)-1)
#define QRTR_PORT_CTRL ((unsigned int)-2)

struct qrtr_sock {
	/* WARNING: sk must be the first member */
	struct sock sk;
	struct sockaddr_qrtr us;
	struct sockaddr_qrtr peer;
};

static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
{
	BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0);
	return container_of(sk, struct qrtr_sock, sk);
}

static unsigned int qrtr_local_nid = -1;

/* for node ids */
static RADIX_TREE(qrtr_nodes, GFP_KERNEL);
/* broadcast list */
static LIST_HEAD(qrtr_all_nodes);
/* lock for qrtr_nodes, qrtr_all_nodes and node reference */
static DEFINE_MUTEX(qrtr_node_lock);

/* local port allocation management */
static DEFINE_IDR(qrtr_ports);
static DEFINE_MUTEX(qrtr_port_lock);

/**
 * struct qrtr_node - endpoint node
 * @ep_lock: lock for endpoint management and callbacks
 * @ep: endpoint
 * @ref: reference count for node
 * @nid: node id
 * @rx_queue: receive queue
 * @work: scheduled work struct for recv work
 * @item: list item for broadcast list
 */
struct qrtr_node {
	struct mutex ep_lock;
	struct qrtr_endpoint *ep;
	struct kref ref;
	unsigned int nid;

	struct sk_buff_head rx_queue;
	struct work_struct work;
	struct list_head item;
};

/* Release node resources and free the node.
 *
 * Do not call directly, use qrtr_node_release. To be used with
 * kref_put_mutex. As such, the node mutex is expected to be locked on call.
 */
static void __qrtr_node_release(struct kref *kref)
{
	struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);

	if (node->nid != QRTR_EP_NID_AUTO)
		radix_tree_delete(&qrtr_nodes, node->nid);

	list_del(&node->item);
	mutex_unlock(&qrtr_node_lock);

	skb_queue_purge(&node->rx_queue);
	kfree(node);
}

/* Increment reference to node. */
static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node)
{
	if (node)
		kref_get(&node->ref);
	return node;
}

/* Decrement reference to node and release as necessary. */
static void qrtr_node_release(struct qrtr_node *node)
{
	if (!node)
		return;
	kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock);
}

/* Pass an outgoing packet socket buffer to the endpoint driver. */
static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb)
{
	int rc = -ENODEV;

	mutex_lock(&node->ep_lock);
	if (node->ep)
		rc = node->ep->xmit(node->ep, skb);
	else
		kfree_skb(skb);
	mutex_unlock(&node->ep_lock);

	return rc;
}

/* Lookup node by id.
 *
 * callers must release with qrtr_node_release()
 */
static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
{
	struct qrtr_node *node;

	mutex_lock(&qrtr_node_lock);
	node = radix_tree_lookup(&qrtr_nodes, nid);
	node = qrtr_node_acquire(node);
	mutex_unlock(&qrtr_node_lock);

	return node;
}

/* Assign node id to node.
 *
 * This is mostly useful for automatic node id assignment, based on
 * the source id in the incoming packet.
 */
static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
{
	if (node->nid != QRTR_EP_NID_AUTO || nid == QRTR_EP_NID_AUTO)
		return;

	mutex_lock(&qrtr_node_lock);
	radix_tree_insert(&qrtr_nodes, nid, node);
	node->nid = nid;
	mutex_unlock(&qrtr_node_lock);
}

/**
 * qrtr_endpoint_post() - post incoming data
 * @ep: endpoint handle
 * @data: data pointer
 * @len: size of data in bytes
 *
 * Return: 0 on success; negative error code on failure
 */
int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
{
	struct qrtr_node *node = ep->node;
	const struct qrtr_hdr *phdr = data;
	struct sk_buff *skb;
	unsigned int psize;
	unsigned int size;
	unsigned int type;
	unsigned int ver;
	unsigned int dst;

	if (len < QRTR_HDR_SIZE || len & 3)
		return -EINVAL;

	ver = le32_to_cpu(phdr->version);
	size = le32_to_cpu(phdr->size);
	type = le32_to_cpu(phdr->type);
	dst = le32_to_cpu(phdr->dst_port_id);

	psize = (size + 3) & ~3;

	if (ver != QRTR_PROTO_VER)
		return -EINVAL;

	if (len != psize + QRTR_HDR_SIZE)
		return -EINVAL;

	if (dst != QRTR_PORT_CTRL && type != QRTR_TYPE_DATA)
		return -EINVAL;

	skb = netdev_alloc_skb(NULL, len);
	if (!skb)
		return -ENOMEM;

	skb_reset_transport_header(skb);
	memcpy(skb_put(skb, len), data, len);

	skb_queue_tail(&node->rx_queue, skb);
	schedule_work(&node->work);

	return 0;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_post);

/* Allocate and construct a resume-tx packet. */
static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
					    u32 dst_node, u32 port)
{
	const int pkt_len = 20;
	struct qrtr_hdr *hdr;
	struct sk_buff *skb;
	__le32 *buf;

	skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL);
	if (!skb)
		return NULL;
	skb_reset_transport_header(skb);

	hdr = (struct qrtr_hdr *)skb_put(skb, QRTR_HDR_SIZE);
	hdr->version = cpu_to_le32(QRTR_PROTO_VER);
	hdr->type = cpu_to_le32(QRTR_TYPE_RESUME_TX);
	hdr->src_node_id = cpu_to_le32(src_node);
	hdr->src_port_id = cpu_to_le32(QRTR_PORT_CTRL);
	hdr->confirm_rx = cpu_to_le32(0);
	hdr->size = cpu_to_le32(pkt_len);
	hdr->dst_node_id = cpu_to_le32(dst_node);
	hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);

	buf = (__le32 *)skb_put(skb, pkt_len);
	memset(buf, 0, pkt_len);
	buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX);
	buf[1] = cpu_to_le32(src_node);
	buf[2] = cpu_to_le32(port);

	return skb;
}

static struct qrtr_sock *qrtr_port_lookup(int port);
static void qrtr_port_put(struct qrtr_sock *ipc);

/* Handle and route a received packet.
 *
 * This will auto-reply with resume-tx packet as necessary.
 */
static void qrtr_node_rx_work(struct work_struct *work)
{
	struct qrtr_node *node = container_of(work, struct qrtr_node, work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&node->rx_queue)) != NULL) {
		const struct qrtr_hdr *phdr;
		u32 dst_node, dst_port;
		struct qrtr_sock *ipc;
		u32 src_node;
		int confirm;

		phdr = (const struct qrtr_hdr *)skb_transport_header(skb);
		src_node = le32_to_cpu(phdr->src_node_id);
		dst_node = le32_to_cpu(phdr->dst_node_id);
		dst_port = le32_to_cpu(phdr->dst_port_id);
		confirm = !!phdr->confirm_rx;

		qrtr_node_assign(node, src_node);

		ipc = qrtr_port_lookup(dst_port);
		if (!ipc) {
			kfree_skb(skb);
		} else {
			if (sock_queue_rcv_skb(&ipc->sk, skb))
				kfree_skb(skb);

			qrtr_port_put(ipc);
		}

		if (confirm) {
			skb = qrtr_alloc_resume_tx(dst_node, node->nid, dst_port);
			if (!skb)
				break;
			if (qrtr_node_enqueue(node, skb))
				break;
		}
	}
}

/**
 * qrtr_endpoint_register() - register a new endpoint
 * @ep: endpoint to register
 * @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment
 * Return: 0 on success; negative error code on failure
 *
 * The specified endpoint must have the xmit function pointer set on call.
 */
int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
{
	struct qrtr_node *node;

	if (!ep || !ep->xmit)
		return -EINVAL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	INIT_WORK(&node->work, qrtr_node_rx_work);
	kref_init(&node->ref);
	mutex_init(&node->ep_lock);
	skb_queue_head_init(&node->rx_queue);
	node->nid = QRTR_EP_NID_AUTO;
	node->ep = ep;

	qrtr_node_assign(node, nid);

	mutex_lock(&qrtr_node_lock);
	list_add(&node->item, &qrtr_all_nodes);
	mutex_unlock(&qrtr_node_lock);
	ep->node = node;

	return 0;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_register);

/**
 * qrtr_endpoint_unregister - unregister endpoint
 * @ep: endpoint to unregister
 */
void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
{
	struct qrtr_node *node = ep->node;

	mutex_lock(&node->ep_lock);
	node->ep = NULL;
	mutex_unlock(&node->ep_lock);

	qrtr_node_release(node);
	ep->node = NULL;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_unregister);
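
/* Usage sketch for transport drivers, illustrative only: it relies solely on
 * qrtr_endpoint_register(), qrtr_endpoint_post() and
 * qrtr_endpoint_unregister() as defined above, plus struct qrtr_endpoint and
 * QRTR_EP_NID_AUTO from "qrtr.h". Names such as my_xmit, my_ep, my_buf and
 * my_len are hypothetical placeholders, not symbols provided by this file.
 *
 *	static int my_xmit(struct qrtr_endpoint *ep, struct sk_buff *skb)
 *	{
 *		int rc = my_hand_packet_to_transport(skb->data, skb->len);
 *
 *		consume_skb(skb);	the xmit callback owns the skb
 *		return rc;
 *	}
 *
 *	static struct qrtr_endpoint my_ep = { .xmit = my_xmit };
 *
 *	On probe, register and let the node id be learned from incoming
 *	packets:
 *		qrtr_endpoint_register(&my_ep, QRTR_EP_NID_AUTO);
 *
 *	On receive, hand over each complete packet, i.e. header plus payload
 *	padded to a multiple of four bytes, as validated by
 *	qrtr_endpoint_post():
 *		qrtr_endpoint_post(&my_ep, my_buf, my_len);
 *
 *	On remove:
 *		qrtr_endpoint_unregister(&my_ep);
 */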

/* Lookup socket by port.
 *
 * Callers must release with qrtr_port_put()
 */
static struct qrtr_sock *qrtr_port_lookup(int port)
{
	struct qrtr_sock *ipc;

	if (port == QRTR_PORT_CTRL)
		port = 0;

	mutex_lock(&qrtr_port_lock);
	ipc = idr_find(&qrtr_ports, port);
	if (ipc)
		sock_hold(&ipc->sk);
	mutex_unlock(&qrtr_port_lock);

	return ipc;
}

/* Release acquired socket. */
static void qrtr_port_put(struct qrtr_sock *ipc)
{
	sock_put(&ipc->sk);
}

/* Remove port assignment. */
static void qrtr_port_remove(struct qrtr_sock *ipc)
{
	int port = ipc->us.sq_port;

	if (port == QRTR_PORT_CTRL)
		port = 0;

	__sock_put(&ipc->sk);

	mutex_lock(&qrtr_port_lock);
	idr_remove(&qrtr_ports, port);
	mutex_unlock(&qrtr_port_lock);
}

/* Assign port number to socket.
 *
 * Specify port in the integer pointed to by port, and it will be adjusted
 * on return as necessary.
 *
 * Port may be:
 *   0: Assign ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET]
 *   <QRTR_MIN_EPH_SOCKET: Specified; requires CAP_NET_ADMIN
 *   >QRTR_MIN_EPH_SOCKET: Specified; available to all
 */
static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
{
	int rc;

	mutex_lock(&qrtr_port_lock);
	if (!*port) {
		rc = idr_alloc(&qrtr_ports, ipc,
			       QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
			       GFP_ATOMIC);
		if (rc >= 0)
			*port = rc;
	} else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
		rc = -EACCES;
	} else if (*port == QRTR_PORT_CTRL) {
		rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
	} else {
		rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC);
		if (rc >= 0)
			*port = rc;
	}
	mutex_unlock(&qrtr_port_lock);

	if (rc == -ENOSPC)
		return -EADDRINUSE;
	else if (rc < 0)
		return rc;

	sock_hold(&ipc->sk);

	return 0;
}

/* Bind socket to address.
 *
 * Socket should be locked upon call.
 */
static int __qrtr_bind(struct socket *sock,
		       const struct sockaddr_qrtr *addr, int zapped)
{
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int port;
	int rc;

	/* rebinding ok */
	if (!zapped && addr->sq_port == ipc->us.sq_port)
		return 0;

	port = addr->sq_port;
	rc = qrtr_port_assign(ipc, &port);
	if (rc)
		return rc;

	/* unbind previous, if any */
	if (!zapped)
		qrtr_port_remove(ipc);
	ipc->us.sq_port = port;

	sock_reset_flag(sk, SOCK_ZAPPED);

	return 0;
}

/* Auto bind to an ephemeral port. */
static int qrtr_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct sockaddr_qrtr addr;

	if (!sock_flag(sk, SOCK_ZAPPED))
		return 0;

	addr.sq_family = AF_QIPCRTR;
	addr.sq_node = qrtr_local_nid;
	addr.sq_port = 0;

	return __qrtr_bind(sock, &addr, 1);
}

/* Bind socket to specified sockaddr. */
static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int rc;

	if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
		return -EINVAL;

	if (addr->sq_node != ipc->us.sq_node)
		return -EINVAL;

	lock_sock(sk);
	rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED));
	release_sock(sk);

	return rc;
}

/* Queue packet to local peer socket. */
static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb)
{
	const struct qrtr_hdr *phdr;
	struct qrtr_sock *ipc;

	phdr = (const struct qrtr_hdr *)skb_transport_header(skb);

	ipc = qrtr_port_lookup(le32_to_cpu(phdr->dst_port_id));
	if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
		kfree_skb(skb);
		return -ENODEV;
	}

	if (sock_queue_rcv_skb(&ipc->sk, skb)) {
		qrtr_port_put(ipc);
		kfree_skb(skb);
		return -ENOSPC;
	}

	qrtr_port_put(ipc);

	return 0;
}

/* Queue packet for broadcast. */
static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb)
{
	struct sk_buff *skbn;

	mutex_lock(&qrtr_node_lock);
	list_for_each_entry(node, &qrtr_all_nodes, item) {
		skbn = skb_clone(skb, GFP_KERNEL);
		if (!skbn)
			break;
		skb_set_owner_w(skbn, skb->sk);
		qrtr_node_enqueue(node, skbn);
	}
	mutex_unlock(&qrtr_node_lock);

	qrtr_local_enqueue(node, skb);

	return 0;
}

static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct qrtr_node *node;
	struct qrtr_hdr *hdr;
	struct sk_buff *skb;
	size_t plen;
	int rc;

	if (msg->msg_flags & ~(MSG_DONTWAIT))
		return -EINVAL;

	if (len > 65535)
		return -EMSGSIZE;

	lock_sock(sk);

	if (addr) {
		if (msg->msg_namelen < sizeof(*addr)) {
			release_sock(sk);
			return -EINVAL;
		}

		if (addr->sq_family != AF_QIPCRTR) {
			release_sock(sk);
			return -EINVAL;
		}

		rc = qrtr_autobind(sock);
		if (rc) {
			release_sock(sk);
			return rc;
		}
	} else if (sk->sk_state == TCP_ESTABLISHED) {
		addr = &ipc->peer;
	} else {
		release_sock(sk);
		return -ENOTCONN;
	}

	node = NULL;
	if (addr->sq_node == QRTR_NODE_BCAST) {
		enqueue_fn = qrtr_bcast_enqueue;
	} else if (addr->sq_node == ipc->us.sq_node) {
		enqueue_fn = qrtr_local_enqueue;
	} else {
		enqueue_fn = qrtr_node_enqueue;
		node = qrtr_node_lookup(addr->sq_node);
		if (!node) {
			release_sock(sk);
			return -ECONNRESET;
		}
	}

	plen = (len + 3) & ~3;
	skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_SIZE,
				  msg->msg_flags & MSG_DONTWAIT, &rc);
	if (!skb)
		goto out_node;

	skb_reset_transport_header(skb);
	skb_put(skb, len + QRTR_HDR_SIZE);

	hdr = (struct qrtr_hdr *)skb_transport_header(skb);
	hdr->version = cpu_to_le32(QRTR_PROTO_VER);
	hdr->src_node_id = cpu_to_le32(ipc->us.sq_node);
	hdr->src_port_id = cpu_to_le32(ipc->us.sq_port);
	hdr->confirm_rx = cpu_to_le32(0);
	hdr->size = cpu_to_le32(len);
	hdr->dst_node_id = cpu_to_le32(addr->sq_node);
	hdr->dst_port_id = cpu_to_le32(addr->sq_port);

	rc = skb_copy_datagram_from_iter(skb, QRTR_HDR_SIZE,
					 &msg->msg_iter, len);
	if (rc) {
		kfree_skb(skb);
		goto out_node;
	}

	if (plen != len) {
		skb_pad(skb, plen - len);
		skb_put(skb, plen - len);
	}

	if (ipc->us.sq_port == QRTR_PORT_CTRL) {
		if (len < 4) {
			rc = -EINVAL;
			kfree_skb(skb);
			goto out_node;
		}

		/* control messages already require the type as 'command' */
		skb_copy_bits(skb, QRTR_HDR_SIZE, &hdr->type, 4);
	} else {
		hdr->type = cpu_to_le32(QRTR_TYPE_DATA);
	}

	rc = enqueue_fn(node, skb);
	if (rc >= 0)
		rc = len;

out_node:
	qrtr_node_release(node);
	release_sock(sk);

	return rc;
}

static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
			size_t size, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	const struct qrtr_hdr *phdr;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, rc;

	lock_sock(sk);

	if (sock_flag(sk, SOCK_ZAPPED)) {
		release_sock(sk);
		return -EADDRNOTAVAIL;
	}

	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
				flags & MSG_DONTWAIT, &rc);
	if (!skb) {
		release_sock(sk);
		return rc;
	}

	phdr = (const struct qrtr_hdr *)skb_transport_header(skb);
	copied = le32_to_cpu(phdr->size);
	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	rc = skb_copy_datagram_msg(skb, QRTR_HDR_SIZE, msg, copied);
	if (rc < 0)
		goto out;
	rc = copied;

	if (addr) {
		addr->sq_family = AF_QIPCRTR;
		addr->sq_node = le32_to_cpu(phdr->src_node_id);
		addr->sq_port = le32_to_cpu(phdr->src_port_id);
		msg->msg_namelen = sizeof(*addr);
	}

out:
	skb_free_datagram(sk, skb);
	release_sock(sk);

	return rc;
}

static int qrtr_connect(struct socket *sock, struct sockaddr *saddr,
			int len, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int rc;

	if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
		return -EINVAL;

	lock_sock(sk);

	sk->sk_state = TCP_CLOSE;
	sock->state = SS_UNCONNECTED;

	rc = qrtr_autobind(sock);
	if (rc) {
		release_sock(sk);
		return rc;
	}

	ipc->peer = *addr;
	sock->state = SS_CONNECTED;
	sk->sk_state = TCP_ESTABLISHED;

	release_sock(sk);

	return 0;
}

static int qrtr_getname(struct socket *sock, struct sockaddr *saddr,
			int *len, int peer)
{
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sockaddr_qrtr qaddr;
	struct sock *sk = sock->sk;

	lock_sock(sk);
	if (peer) {
		if (sk->sk_state != TCP_ESTABLISHED) {
			release_sock(sk);
			return -ENOTCONN;
		}

		qaddr = ipc->peer;
	} else {
		qaddr = ipc->us;
	}
	release_sock(sk);

	*len = sizeof(qaddr);
	qaddr.sq_family = AF_QIPCRTR;

	memcpy(saddr, &qaddr, sizeof(qaddr));

	return 0;
}

static int qrtr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct sockaddr_qrtr *sq;
	struct sk_buff *skb;
	struct ifreq ifr;
	long len = 0;
	int rc = 0;

	lock_sock(sk);

	switch (cmd) {
	case TIOCOUTQ:
		len = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (len < 0)
			len = 0;
		rc = put_user(len, (int __user *)argp);
		break;
	case TIOCINQ:
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			len = skb->len - QRTR_HDR_SIZE;
		rc = put_user(len, (int __user *)argp);
		break;
	case SIOCGIFADDR:
		if (copy_from_user(&ifr, argp, sizeof(ifr))) {
			rc = -EFAULT;
			break;
		}

		sq = (struct sockaddr_qrtr *)&ifr.ifr_addr;
		*sq = ipc->us;
		if (copy_to_user(argp, &ifr, sizeof(ifr))) {
			rc = -EFAULT;
			break;
		}
		break;
	case SIOCGSTAMP:
		rc = sock_get_timestamp(sk, argp);
		break;
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
		rc = -EINVAL;
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}

	release_sock(sk);

	return rc;
}

static int qrtr_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct qrtr_sock *ipc;

	if (!sk)
		return 0;

	lock_sock(sk);

	ipc = qrtr_sk(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);

	sock_set_flag(sk, SOCK_DEAD);
	sock->sk = NULL;

	if (!sock_flag(sk, SOCK_ZAPPED))
		qrtr_port_remove(ipc);

	skb_queue_purge(&sk->sk_receive_queue);

	release_sock(sk);
	sock_put(sk);

	return 0;
}

static const struct proto_ops qrtr_proto_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_QIPCRTR,
	.bind		= qrtr_bind,
	.connect	= qrtr_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.listen		= sock_no_listen,
	.sendmsg	= qrtr_sendmsg,
	.recvmsg	= qrtr_recvmsg,
	.getname	= qrtr_getname,
	.ioctl		= qrtr_ioctl,
	.poll		= datagram_poll,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.release	= qrtr_release,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

static struct proto qrtr_proto = {
	.name		= "QIPCRTR",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct qrtr_sock),
};

static int qrtr_create(struct net *net, struct socket *sock,
		       int protocol, int kern)
{
	struct qrtr_sock *ipc;
	struct sock *sk;

	if (sock->type != SOCK_DGRAM)
		return -EPROTOTYPE;

	sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_set_flag(sk, SOCK_ZAPPED);

	sock_init_data(sock, sk);
	sock->ops = &qrtr_proto_ops;

	ipc = qrtr_sk(sk);
	ipc->us.sq_family = AF_QIPCRTR;
	ipc->us.sq_node = qrtr_local_nid;
	ipc->us.sq_port = 0;

	return 0;
}

static const struct nla_policy qrtr_policy[IFA_MAX + 1] = {
	[IFA_LOCAL] = { .type = NLA_U32 },
};

static int qrtr_addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct nlattr *tb[IFA_MAX + 1];
	struct ifaddrmsg *ifm;
	int rc;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	ASSERT_RTNL();

	rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, qrtr_policy);
	if (rc < 0)
		return rc;

	ifm = nlmsg_data(nlh);
	if (!tb[IFA_LOCAL])
		return -EINVAL;

	qrtr_local_nid = nla_get_u32(tb[IFA_LOCAL]);

	return 0;
}

static const struct net_proto_family qrtr_family = {
	.owner	= THIS_MODULE,
	.family	= AF_QIPCRTR,
	.create	= qrtr_create,
};

static int __init qrtr_proto_init(void)
{
	int rc;

	rc = proto_register(&qrtr_proto, 1);
	if (rc)
		return rc;

	rc = sock_register(&qrtr_family);
	if (rc) {
		proto_unregister(&qrtr_proto);
		return rc;
	}

	rtnl_register(PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, NULL);

	return 0;
}
module_init(qrtr_proto_init);

static void __exit qrtr_proto_fini(void)
{
	rtnl_unregister(PF_QIPCRTR, RTM_NEWADDR);
	sock_unregister(qrtr_family.family);
	proto_unregister(&qrtr_proto);
}
module_exit(qrtr_proto_fini);

MODULE_DESCRIPTION("Qualcomm IPC-router driver");
MODULE_LICENSE("GPL v2");
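
/* Userspace usage sketch, illustrative only and not part of this module:
 * AF_QIPCRTR sockets created through qrtr_create() above are plain datagram
 * sockets, and qrtr_sendmsg() auto-binds an unbound socket to an ephemeral
 * port on its first send. It assumes AF_QIPCRTR and struct sockaddr_qrtr are
 * visible to the userspace toolchain via <linux/qrtr.h>; the node and port
 * numbers are placeholders.
 *
 *	#include <linux/qrtr.h>
 *	#include <sys/socket.h>
 *
 *	int fd = socket(AF_QIPCRTR, SOCK_DGRAM, 0);
 *	struct sockaddr_qrtr remote = {
 *		.sq_family = AF_QIPCRTR,
 *		.sq_node = 1,		placeholder remote node id
 *		.sq_port = 0x4000,	placeholder remote port
 *	};
 *
 *	sendto(fd, req, req_len, 0,
 *	       (struct sockaddr *)&remote, sizeof(remote));
 *	recvfrom(fd, resp, sizeof(resp), 0, NULL, NULL);
 */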