qrtr.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111
  1. /*
  2. * Copyright (c) 2015, Sony Mobile Communications Inc.
  3. * Copyright (c) 2013, The Linux Foundation. All rights reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License version 2 and
  7. * only version 2 as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. */
  14. #include <linux/module.h>
  15. #include <linux/netlink.h>
  16. #include <linux/qrtr.h>
  17. #include <linux/termios.h> /* For TIOCINQ/OUTQ */
  18. #include <net/sock.h>
  19. #include "qrtr.h"
/* Protocol version carried in every qrtr_hdr; only version 1 is accepted. */
#define QRTR_PROTO_VER 1

/* auto-bind range */
#define QRTR_MIN_EPH_SOCKET 0x4000
#define QRTR_MAX_EPH_SOCKET 0x7fff

/* Packet types carried in the qrtr_hdr 'type' field. The numeric values
 * are part of the wire protocol and must not change.
 */
enum qrtr_pkt_type {
	QRTR_TYPE_DATA = 1,
	QRTR_TYPE_HELLO = 2,
	QRTR_TYPE_BYE = 3,
	QRTR_TYPE_NEW_SERVER = 4,
	QRTR_TYPE_DEL_SERVER = 5,
	QRTR_TYPE_DEL_CLIENT = 6,
	QRTR_TYPE_RESUME_TX = 7,
	QRTR_TYPE_EXIT = 8,
	QRTR_TYPE_PING = 9,
};
/**
 * struct qrtr_hdr - (I|R)PCrouter packet header
 * @version: protocol version
 * @type: packet type; one of QRTR_TYPE_*
 * @src_node_id: source node
 * @src_port_id: source port
 * @confirm_rx: boolean; whether a resume-tx packet should be sent in reply
 * @size: length of packet, excluding this header
 * @dst_node_id: destination node
 * @dst_port_id: destination port
 *
 * All fields are little-endian on the wire; the struct is __packed so it
 * can be overlaid directly on received buffers.
 */
struct qrtr_hdr {
	__le32 version;
	__le32 type;
	__le32 src_node_id;
	__le32 src_port_id;
	__le32 confirm_rx;
	__le32 size;
	__le32 dst_node_id;
	__le32 dst_port_id;
} __packed;

#define QRTR_HDR_SIZE sizeof(struct qrtr_hdr)

/* Reserved addresses: all-nodes broadcast and the control service port. */
#define QRTR_NODE_BCAST ((unsigned int)-1)
#define QRTR_PORT_CTRL ((unsigned int)-2)
/* Per-socket state: the embedded struct sock plus our local and
 * (when connected) peer qrtr addresses.
 */
struct qrtr_sock {
	/* WARNING: sk must be the first member */
	struct sock sk;
	struct sockaddr_qrtr us;	/* bound local address */
	struct sockaddr_qrtr peer;	/* connect()ed peer; valid in TCP_ESTABLISHED */
};

/* Downcast a struct sock to its containing qrtr_sock; relies on sk
 * being the first member (checked at build time).
 */
static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
{
	BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0);
	return container_of(sk, struct qrtr_sock, sk);
}
/* This node's id; -1 until assigned via the RTM_NEWADDR netlink op. */
static unsigned int qrtr_local_nid = -1;

/* for node ids */
static RADIX_TREE(qrtr_nodes, GFP_KERNEL);
/* broadcast list */
static LIST_HEAD(qrtr_all_nodes);
/* lock for qrtr_nodes, qrtr_all_nodes and node reference */
static DEFINE_MUTEX(qrtr_node_lock);

/* local port allocation management */
static DEFINE_IDR(qrtr_ports);
static DEFINE_MUTEX(qrtr_port_lock);

/**
 * struct qrtr_node - endpoint node
 * @ep_lock: lock for endpoint management and callbacks
 * @ep: endpoint
 * @ref: reference count for node
 * @nid: node id
 * @rx_queue: receive queue
 * @work: scheduled work struct for recv work
 * @item: list item for broadcast list
 */
struct qrtr_node {
	struct mutex ep_lock;
	struct qrtr_endpoint *ep;
	struct kref ref;
	unsigned int nid;
	struct sk_buff_head rx_queue;
	struct work_struct work;
	struct list_head item;
};

static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb);
static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb);
/* Release node resources and free the node.
 *
 * Do not call directly, use qrtr_node_release. To be used with
 * kref_put_mutex. As such, the node mutex is expected to be locked on call
 * — this function is responsible for dropping qrtr_node_lock once the
 * node has been unlinked from the radix tree and broadcast list.
 */
static void __qrtr_node_release(struct kref *kref)
{
	struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);

	/* Auto-id nodes were never inserted into the radix tree */
	if (node->nid != QRTR_EP_NID_AUTO)
		radix_tree_delete(&qrtr_nodes, node->nid);
	list_del(&node->item);
	mutex_unlock(&qrtr_node_lock);

	/* No further rx work can find the node; flush and drop queued skbs */
	cancel_work_sync(&node->work);
	skb_queue_purge(&node->rx_queue);
	kfree(node);
}
  117. /* Increment reference to node. */
  118. static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node)
  119. {
  120. if (node)
  121. kref_get(&node->ref);
  122. return node;
  123. }
/* Decrement reference to node and release as necessary.
 *
 * NULL-safe. If this was the last reference, __qrtr_node_release runs
 * with qrtr_node_lock held (taken by kref_put_mutex) and frees the node.
 */
static void qrtr_node_release(struct qrtr_node *node)
{
	if (!node)
		return;
	kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock);
}
  131. /* Pass an outgoing packet socket buffer to the endpoint driver. */
  132. static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb)
  133. {
  134. int rc = -ENODEV;
  135. mutex_lock(&node->ep_lock);
  136. if (node->ep)
  137. rc = node->ep->xmit(node->ep, skb);
  138. else
  139. kfree_skb(skb);
  140. mutex_unlock(&node->ep_lock);
  141. return rc;
  142. }
  143. /* Lookup node by id.
  144. *
  145. * callers must release with qrtr_node_release()
  146. */
  147. static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
  148. {
  149. struct qrtr_node *node;
  150. mutex_lock(&qrtr_node_lock);
  151. node = radix_tree_lookup(&qrtr_nodes, nid);
  152. node = qrtr_node_acquire(node);
  153. mutex_unlock(&qrtr_node_lock);
  154. return node;
  155. }
  156. /* Assign node id to node.
  157. *
  158. * This is mostly useful for automatic node id assignment, based on
  159. * the source id in the incoming packet.
  160. */
  161. static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
  162. {
  163. if (node->nid != QRTR_EP_NID_AUTO || nid == QRTR_EP_NID_AUTO)
  164. return;
  165. mutex_lock(&qrtr_node_lock);
  166. radix_tree_insert(&qrtr_nodes, nid, node);
  167. node->nid = nid;
  168. mutex_unlock(&qrtr_node_lock);
  169. }
  170. /**
  171. * qrtr_endpoint_post() - post incoming data
  172. * @ep: endpoint handle
  173. * @data: data pointer
  174. * @len: size of data in bytes
  175. *
  176. * Return: 0 on success; negative error code on failure
  177. */
  178. int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
  179. {
  180. struct qrtr_node *node = ep->node;
  181. const struct qrtr_hdr *phdr = data;
  182. struct sk_buff *skb;
  183. unsigned int psize;
  184. unsigned int size;
  185. unsigned int type;
  186. unsigned int ver;
  187. unsigned int dst;
  188. if (len < QRTR_HDR_SIZE || len & 3)
  189. return -EINVAL;
  190. ver = le32_to_cpu(phdr->version);
  191. size = le32_to_cpu(phdr->size);
  192. type = le32_to_cpu(phdr->type);
  193. dst = le32_to_cpu(phdr->dst_port_id);
  194. psize = (size + 3) & ~3;
  195. if (ver != QRTR_PROTO_VER)
  196. return -EINVAL;
  197. if (len != psize + QRTR_HDR_SIZE)
  198. return -EINVAL;
  199. if (dst != QRTR_PORT_CTRL && type != QRTR_TYPE_DATA)
  200. return -EINVAL;
  201. skb = __netdev_alloc_skb(NULL, len, GFP_ATOMIC | __GFP_NOWARN);
  202. if (!skb)
  203. return -ENOMEM;
  204. skb_reset_transport_header(skb);
  205. skb_put_data(skb, data, len);
  206. skb_queue_tail(&node->rx_queue, skb);
  207. schedule_work(&node->work);
  208. return 0;
  209. }
  210. EXPORT_SYMBOL_GPL(qrtr_endpoint_post);
  211. static struct sk_buff *qrtr_alloc_ctrl_packet(u32 type, size_t pkt_len,
  212. u32 src_node, u32 dst_node)
  213. {
  214. struct qrtr_hdr *hdr;
  215. struct sk_buff *skb;
  216. skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL);
  217. if (!skb)
  218. return NULL;
  219. skb_reset_transport_header(skb);
  220. hdr = skb_put(skb, QRTR_HDR_SIZE);
  221. hdr->version = cpu_to_le32(QRTR_PROTO_VER);
  222. hdr->type = cpu_to_le32(type);
  223. hdr->src_node_id = cpu_to_le32(src_node);
  224. hdr->src_port_id = cpu_to_le32(QRTR_PORT_CTRL);
  225. hdr->confirm_rx = cpu_to_le32(0);
  226. hdr->size = cpu_to_le32(pkt_len);
  227. hdr->dst_node_id = cpu_to_le32(dst_node);
  228. hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
  229. return skb;
  230. }
  231. /* Allocate and construct a resume-tx packet. */
  232. static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
  233. u32 dst_node, u32 port)
  234. {
  235. const int pkt_len = 20;
  236. struct sk_buff *skb;
  237. __le32 *buf;
  238. skb = qrtr_alloc_ctrl_packet(QRTR_TYPE_RESUME_TX, pkt_len,
  239. src_node, dst_node);
  240. if (!skb)
  241. return NULL;
  242. buf = skb_put_zero(skb, pkt_len);
  243. buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX);
  244. buf[1] = cpu_to_le32(src_node);
  245. buf[2] = cpu_to_le32(port);
  246. return skb;
  247. }
  248. /* Allocate and construct a BYE message to signal remote termination */
  249. static struct sk_buff *qrtr_alloc_local_bye(u32 src_node)
  250. {
  251. const int pkt_len = 20;
  252. struct sk_buff *skb;
  253. __le32 *buf;
  254. skb = qrtr_alloc_ctrl_packet(QRTR_TYPE_BYE, pkt_len,
  255. src_node, qrtr_local_nid);
  256. if (!skb)
  257. return NULL;
  258. buf = skb_put_zero(skb, pkt_len);
  259. buf[0] = cpu_to_le32(QRTR_TYPE_BYE);
  260. return skb;
  261. }
  262. static struct sk_buff *qrtr_alloc_del_client(struct sockaddr_qrtr *sq)
  263. {
  264. const int pkt_len = 20;
  265. struct sk_buff *skb;
  266. __le32 *buf;
  267. skb = qrtr_alloc_ctrl_packet(QRTR_TYPE_DEL_CLIENT, pkt_len,
  268. sq->sq_node, QRTR_NODE_BCAST);
  269. if (!skb)
  270. return NULL;
  271. buf = skb_put_zero(skb, pkt_len);
  272. buf[0] = cpu_to_le32(QRTR_TYPE_DEL_CLIENT);
  273. buf[1] = cpu_to_le32(sq->sq_node);
  274. buf[2] = cpu_to_le32(sq->sq_port);
  275. return skb;
  276. }
  277. static struct qrtr_sock *qrtr_port_lookup(int port);
  278. static void qrtr_port_put(struct qrtr_sock *ipc);
/* Handle and route a received packet.
 *
 * This will auto-reply with resume-tx packet as necessary.
 * Runs from the node's work item; drains rx_queue, learns the node id
 * from the first packet's source (qrtr_node_assign), delivers each skb
 * to the socket bound on the destination port, and answers confirm_rx
 * requests with a resume-tx control packet.
 */
static void qrtr_node_rx_work(struct work_struct *work)
{
	struct qrtr_node *node = container_of(work, struct qrtr_node, work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&node->rx_queue)) != NULL) {
		const struct qrtr_hdr *phdr;
		u32 dst_node, dst_port;
		struct qrtr_sock *ipc;
		u32 src_node;
		int confirm;

		phdr = (const struct qrtr_hdr *)skb_transport_header(skb);
		src_node = le32_to_cpu(phdr->src_node_id);
		dst_node = le32_to_cpu(phdr->dst_node_id);
		dst_port = le32_to_cpu(phdr->dst_port_id);
		/* non-zero le32 is non-zero in any byte order */
		confirm = !!phdr->confirm_rx;

		qrtr_node_assign(node, src_node);

		ipc = qrtr_port_lookup(dst_port);
		if (!ipc) {
			/* no local socket on that port; drop */
			kfree_skb(skb);
		} else {
			if (sock_queue_rcv_skb(&ipc->sk, skb))
				kfree_skb(skb);

			qrtr_port_put(ipc);
		}

		if (confirm) {
			/* sender asked for a resume-tx acknowledgement;
			 * stop processing on alloc or xmit failure
			 */
			skb = qrtr_alloc_resume_tx(dst_node, node->nid, dst_port);
			if (!skb)
				break;
			if (qrtr_node_enqueue(node, skb))
				break;
		}
	}
}
/**
 * qrtr_endpoint_register() - register a new endpoint
 * @ep: endpoint to register
 * @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment
 * Return: 0 on success; negative error code on failure
 *
 * The specified endpoint must have the xmit function pointer set on call.
 */
int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
{
	struct qrtr_node *node;

	if (!ep || !ep->xmit)
		return -EINVAL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	INIT_WORK(&node->work, qrtr_node_rx_work);
	kref_init(&node->ref);
	mutex_init(&node->ep_lock);
	skb_queue_head_init(&node->rx_queue);
	node->nid = QRTR_EP_NID_AUTO;
	node->ep = ep;

	/* no-op for QRTR_EP_NID_AUTO; otherwise inserts into qrtr_nodes */
	qrtr_node_assign(node, nid);

	mutex_lock(&qrtr_node_lock);
	list_add(&node->item, &qrtr_all_nodes);
	mutex_unlock(&qrtr_node_lock);
	ep->node = node;

	return 0;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_register);
/**
 * qrtr_endpoint_unregister - unregister endpoint
 * @ep: endpoint to unregister
 *
 * Detaches the endpoint from its node (further xmit attempts fail with
 * -ENODEV), notifies the local controller with a BYE message, and drops
 * the registration's node reference.
 */
void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
{
	struct qrtr_node *node = ep->node;
	struct sk_buff *skb;

	mutex_lock(&node->ep_lock);
	node->ep = NULL;
	mutex_unlock(&node->ep_lock);

	/* Notify the local controller about the event */
	skb = qrtr_alloc_local_bye(node->nid);
	if (skb)
		qrtr_local_enqueue(NULL, skb);

	qrtr_node_release(node);
	ep->node = NULL;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_unregister);
/* Lookup socket by port.
 *
 * Takes a reference (sock_hold) on the returned socket; callers must
 * release with qrtr_port_put(). The control port is stored under idr
 * id 0, so remap it before the lookup.
 */
static struct qrtr_sock *qrtr_port_lookup(int port)
{
	struct qrtr_sock *ipc;

	if (port == QRTR_PORT_CTRL)
		port = 0;

	mutex_lock(&qrtr_port_lock);
	ipc = idr_find(&qrtr_ports, port);
	if (ipc)
		sock_hold(&ipc->sk);
	mutex_unlock(&qrtr_port_lock);

	return ipc;
}
/* Release a socket reference acquired via qrtr_port_lookup(). */
static void qrtr_port_put(struct qrtr_sock *ipc)
{
	sock_put(&ipc->sk);
}
/* Remove port assignment.
 *
 * Broadcasts a DEL_CLIENT notification for the departing address, drops
 * the reference taken by qrtr_port_assign() (__sock_put), and removes
 * the idr entry so the port can be reused.
 */
static void qrtr_port_remove(struct qrtr_sock *ipc)
{
	struct sk_buff *skb;
	int port = ipc->us.sq_port;

	skb = qrtr_alloc_del_client(&ipc->us);
	if (skb) {
		skb_set_owner_w(skb, &ipc->sk);
		qrtr_bcast_enqueue(NULL, skb);
	}

	/* control port lives under idr id 0 */
	if (port == QRTR_PORT_CTRL)
		port = 0;

	__sock_put(&ipc->sk);

	mutex_lock(&qrtr_port_lock);
	idr_remove(&qrtr_ports, port);
	mutex_unlock(&qrtr_port_lock);
}
/* Assign port number to socket.
 *
 * Specify port in the integer pointed to by port, and it will be adjusted
 * on return as necesssary.
 *
 * Port may be:
 * 0: Assign ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET]
 * <QRTR_MIN_EPH_SOCKET: Specified; requires CAP_NET_ADMIN
 * >QRTR_MIN_EPH_SOCKET: Specified; available to all
 *
 * On success a socket reference is taken (dropped by qrtr_port_remove).
 * An idr -ENOSPC (port taken) is reported to callers as -EADDRINUSE.
 */
static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
{
	int rc;

	mutex_lock(&qrtr_port_lock);
	if (!*port) {
		/* ephemeral: let the idr pick from the auto-bind range */
		rc = idr_alloc(&qrtr_ports, ipc,
			       QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
			       GFP_ATOMIC);
		if (rc >= 0)
			*port = rc;
	} else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
		rc = -EACCES;
	} else if (*port == QRTR_PORT_CTRL) {
		/* control port is stored under idr id 0 */
		rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
	} else {
		rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC);
		if (rc >= 0)
			*port = rc;
	}
	mutex_unlock(&qrtr_port_lock);

	if (rc == -ENOSPC)
		return -EADDRINUSE;
	else if (rc < 0)
		return rc;

	sock_hold(&ipc->sk);

	return 0;
}
/* Reset all non-control ports.
 *
 * Called when a new controller binds QRTR_PORT_CTRL: flag ENETRESET on
 * every other bound socket and wake any sleepers so they notice.
 */
static void qrtr_reset_ports(void)
{
	struct qrtr_sock *ipc;
	int id;

	mutex_lock(&qrtr_port_lock);
	idr_for_each_entry(&qrtr_ports, ipc, id) {
		/* Don't reset control port */
		if (id == 0)
			continue;

		/* hold the socket across the wakeup */
		sock_hold(&ipc->sk);
		ipc->sk.sk_err = ENETRESET;
		wake_up_interruptible(sk_sleep(&ipc->sk));
		sock_put(&ipc->sk);
	}
	mutex_unlock(&qrtr_port_lock);
}
/* Bind socket to address.
 *
 * Socket should be locked upon call. @zapped indicates the socket is
 * currently unbound (SOCK_ZAPPED); rebinding to the same port is a
 * no-op, rebinding to a new port releases the old assignment.
 */
static int __qrtr_bind(struct socket *sock,
		       const struct sockaddr_qrtr *addr, int zapped)
{
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int port;
	int rc;

	/* rebinding ok */
	if (!zapped && addr->sq_port == ipc->us.sq_port)
		return 0;

	port = addr->sq_port;
	rc = qrtr_port_assign(ipc, &port);
	if (rc)
		return rc;

	/* unbind previous, if any */
	if (!zapped)
		qrtr_port_remove(ipc);
	ipc->us.sq_port = port;

	sock_reset_flag(sk, SOCK_ZAPPED);

	/* Notify all open ports about the new controller */
	if (port == QRTR_PORT_CTRL)
		qrtr_reset_ports();

	return 0;
}
  485. /* Auto bind to an ephemeral port. */
  486. static int qrtr_autobind(struct socket *sock)
  487. {
  488. struct sock *sk = sock->sk;
  489. struct sockaddr_qrtr addr;
  490. if (!sock_flag(sk, SOCK_ZAPPED))
  491. return 0;
  492. addr.sq_family = AF_QIPCRTR;
  493. addr.sq_node = qrtr_local_nid;
  494. addr.sq_port = 0;
  495. return __qrtr_bind(sock, &addr, 1);
  496. }
  497. /* Bind socket to specified sockaddr. */
  498. static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len)
  499. {
  500. DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
  501. struct qrtr_sock *ipc = qrtr_sk(sock->sk);
  502. struct sock *sk = sock->sk;
  503. int rc;
  504. if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
  505. return -EINVAL;
  506. if (addr->sq_node != ipc->us.sq_node)
  507. return -EINVAL;
  508. lock_sock(sk);
  509. rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED));
  510. release_sock(sk);
  511. return rc;
  512. }
  513. /* Queue packet to local peer socket. */
  514. static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb)
  515. {
  516. const struct qrtr_hdr *phdr;
  517. struct qrtr_sock *ipc;
  518. phdr = (const struct qrtr_hdr *)skb_transport_header(skb);
  519. ipc = qrtr_port_lookup(le32_to_cpu(phdr->dst_port_id));
  520. if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
  521. kfree_skb(skb);
  522. return -ENODEV;
  523. }
  524. if (sock_queue_rcv_skb(&ipc->sk, skb)) {
  525. qrtr_port_put(ipc);
  526. kfree_skb(skb);
  527. return -ENOSPC;
  528. }
  529. qrtr_port_put(ipc);
  530. return 0;
  531. }
/* Queue packet for broadcast.
 *
 * Clones @skb to every registered endpoint node, then delivers the
 * original locally. A clone failure stops fanning out to remaining
 * nodes but local delivery still happens. Always returns 0.
 */
static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb)
{
	struct sk_buff *skbn;

	mutex_lock(&qrtr_node_lock);
	list_for_each_entry(node, &qrtr_all_nodes, item) {
		skbn = skb_clone(skb, GFP_KERNEL);
		if (!skbn)
			break;
		/* charge the clone to the sending socket */
		skb_set_owner_w(skbn, skb->sk);
		qrtr_node_enqueue(node, skbn);
	}
	mutex_unlock(&qrtr_node_lock);

	qrtr_local_enqueue(NULL, skb);

	return 0;
}
/* Send a datagram.
 *
 * Destination resolution: an explicit msg_name is validated (and the
 * socket autobound if needed); otherwise the connected peer is used.
 * The enqueue path is then picked by node id: broadcast, local
 * loopback, or a looked-up remote endpoint. Payloads are padded to a
 * 4-byte boundary on the wire; returns the unpadded length on success.
 */
static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct qrtr_node *node;
	struct qrtr_hdr *hdr;
	struct sk_buff *skb;
	size_t plen;
	int rc;

	if (msg->msg_flags & ~(MSG_DONTWAIT))
		return -EINVAL;

	/* hdr->size is 32-bit on the wire; cap a single datagram */
	if (len > 65535)
		return -EMSGSIZE;

	lock_sock(sk);

	if (addr) {
		if (msg->msg_namelen < sizeof(*addr)) {
			release_sock(sk);
			return -EINVAL;
		}

		if (addr->sq_family != AF_QIPCRTR) {
			release_sock(sk);
			return -EINVAL;
		}

		rc = qrtr_autobind(sock);
		if (rc) {
			release_sock(sk);
			return rc;
		}
	} else if (sk->sk_state == TCP_ESTABLISHED) {
		addr = &ipc->peer;
	} else {
		release_sock(sk);
		return -ENOTCONN;
	}

	node = NULL;
	if (addr->sq_node == QRTR_NODE_BCAST) {
		/* only the control port may broadcast from a real node id */
		if (addr->sq_port != QRTR_PORT_CTRL &&
		    qrtr_local_nid != QRTR_NODE_BCAST) {
			release_sock(sk);
			return -ENOTCONN;
		}
		enqueue_fn = qrtr_bcast_enqueue;
	} else if (addr->sq_node == ipc->us.sq_node) {
		enqueue_fn = qrtr_local_enqueue;
	} else {
		/* takes a node reference; released at out_node */
		node = qrtr_node_lookup(addr->sq_node);
		if (!node) {
			release_sock(sk);
			return -ECONNRESET;
		}
		enqueue_fn = qrtr_node_enqueue;
	}

	/* pad payload to a 4-byte boundary */
	plen = (len + 3) & ~3;
	skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_SIZE,
				  msg->msg_flags & MSG_DONTWAIT, &rc);
	if (!skb)
		goto out_node;

	skb_reset_transport_header(skb);
	skb_put(skb, len + QRTR_HDR_SIZE);

	hdr = (struct qrtr_hdr *)skb_transport_header(skb);
	hdr->version = cpu_to_le32(QRTR_PROTO_VER);
	hdr->src_node_id = cpu_to_le32(ipc->us.sq_node);
	hdr->src_port_id = cpu_to_le32(ipc->us.sq_port);
	hdr->confirm_rx = cpu_to_le32(0);
	hdr->size = cpu_to_le32(len);
	hdr->dst_node_id = cpu_to_le32(addr->sq_node);
	hdr->dst_port_id = cpu_to_le32(addr->sq_port);

	rc = skb_copy_datagram_from_iter(skb, QRTR_HDR_SIZE,
					 &msg->msg_iter, len);
	if (rc) {
		kfree_skb(skb);
		goto out_node;
	}

	if (plen != len) {
		/* skb_pad frees the skb itself on failure */
		rc = skb_pad(skb, plen - len);
		if (rc)
			goto out_node;
		skb_put(skb, plen - len);
	}

	if (ipc->us.sq_port == QRTR_PORT_CTRL) {
		if (len < 4) {
			rc = -EINVAL;
			kfree_skb(skb);
			goto out_node;
		}

		/* control messages already require the type as 'command' */
		skb_copy_bits(skb, QRTR_HDR_SIZE, &hdr->type, 4);
	} else {
		hdr->type = cpu_to_le32(QRTR_TYPE_DATA);
	}

	rc = enqueue_fn(node, skb);
	if (rc >= 0)
		rc = len;

out_node:
	qrtr_node_release(node);
	release_sock(sk);

	return rc;
}
/* Receive a datagram.
 *
 * Requires a bound socket (-EADDRNOTAVAIL otherwise). Copies up to
 * @size bytes of the payload (MSG_TRUNC flagged on short reads) and,
 * when msg_name is supplied, fills in the sender's qrtr address.
 */
static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
			size_t size, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	const struct qrtr_hdr *phdr;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, rc;

	lock_sock(sk);

	if (sock_flag(sk, SOCK_ZAPPED)) {
		release_sock(sk);
		return -EADDRNOTAVAIL;
	}

	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
				flags & MSG_DONTWAIT, &rc);
	if (!skb) {
		release_sock(sk);
		return rc;
	}

	phdr = (const struct qrtr_hdr *)skb_transport_header(skb);
	/* payload length comes from the router header, not skb->len */
	copied = le32_to_cpu(phdr->size);
	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	rc = skb_copy_datagram_msg(skb, QRTR_HDR_SIZE, msg, copied);
	if (rc < 0)
		goto out;
	rc = copied;

	if (addr) {
		/* There is an anonymous 2-byte hole after sq_family,
		 * make sure to clear it.
		 */
		memset(addr, 0, sizeof(*addr));
		addr->sq_family = AF_QIPCRTR;
		addr->sq_node = le32_to_cpu(phdr->src_node_id);
		addr->sq_port = le32_to_cpu(phdr->src_port_id);
		msg->msg_namelen = sizeof(*addr);
	}

out:
	skb_free_datagram(sk, skb);
	release_sock(sk);

	return rc;
}
/* "Connect" the datagram socket: record @saddr as the default peer.
 *
 * Autobinds if needed; on success the socket reports TCP_ESTABLISHED
 * and sendmsg() without msg_name targets the stored peer.
 */
static int qrtr_connect(struct socket *sock, struct sockaddr *saddr,
			int len, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int rc;

	if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
		return -EINVAL;

	lock_sock(sk);

	/* drop any previous connection state before rebinding */
	sk->sk_state = TCP_CLOSE;
	sock->state = SS_UNCONNECTED;

	rc = qrtr_autobind(sock);
	if (rc) {
		release_sock(sk);
		return rc;
	}

	ipc->peer = *addr;
	sock->state = SS_CONNECTED;
	sk->sk_state = TCP_ESTABLISHED;

	release_sock(sk);

	return 0;
}
  715. static int qrtr_getname(struct socket *sock, struct sockaddr *saddr,
  716. int *len, int peer)
  717. {
  718. struct qrtr_sock *ipc = qrtr_sk(sock->sk);
  719. struct sockaddr_qrtr qaddr;
  720. struct sock *sk = sock->sk;
  721. lock_sock(sk);
  722. if (peer) {
  723. if (sk->sk_state != TCP_ESTABLISHED) {
  724. release_sock(sk);
  725. return -ENOTCONN;
  726. }
  727. qaddr = ipc->peer;
  728. } else {
  729. qaddr = ipc->us;
  730. }
  731. release_sock(sk);
  732. *len = sizeof(qaddr);
  733. qaddr.sq_family = AF_QIPCRTR;
  734. memcpy(saddr, &qaddr, sizeof(qaddr));
  735. return 0;
  736. }
/* Socket ioctl handler: queue-level queries (TIOCOUTQ/TIOCINQ), local
 * address via SIOCGIFADDR, packet timestamp via SIOCGSTAMP. Routing/
 * interface-address ioctls are explicitly rejected with -EINVAL.
 */
static int qrtr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct sockaddr_qrtr *sq;
	struct sk_buff *skb;
	struct ifreq ifr;
	long len = 0;
	int rc = 0;

	lock_sock(sk);

	switch (cmd) {
	case TIOCOUTQ:
		/* free space left in the send buffer, clamped at 0 */
		len = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (len < 0)
			len = 0;
		rc = put_user(len, (int __user *)argp);
		break;
	case TIOCINQ:
		/* payload bytes of the next queued datagram, if any */
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			len = skb->len - QRTR_HDR_SIZE;
		rc = put_user(len, (int __user *)argp);
		break;
	case SIOCGIFADDR:
		if (copy_from_user(&ifr, argp, sizeof(ifr))) {
			rc = -EFAULT;
			break;
		}

		sq = (struct sockaddr_qrtr *)&ifr.ifr_addr;
		*sq = ipc->us;
		if (copy_to_user(argp, &ifr, sizeof(ifr))) {
			rc = -EFAULT;
			break;
		}
		break;
	case SIOCGSTAMP:
		rc = sock_get_timestamp(sk, argp);
		break;
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
		rc = -EINVAL;
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}

	release_sock(sk);

	return rc;
}
  794. static int qrtr_release(struct socket *sock)
  795. {
  796. struct sock *sk = sock->sk;
  797. struct qrtr_sock *ipc;
  798. if (!sk)
  799. return 0;
  800. lock_sock(sk);
  801. ipc = qrtr_sk(sk);
  802. sk->sk_shutdown = SHUTDOWN_MASK;
  803. if (!sock_flag(sk, SOCK_DEAD))
  804. sk->sk_state_change(sk);
  805. sock_set_flag(sk, SOCK_DEAD);
  806. sock->sk = NULL;
  807. if (!sock_flag(sk, SOCK_ZAPPED))
  808. qrtr_port_remove(ipc);
  809. skb_queue_purge(&sk->sk_receive_queue);
  810. release_sock(sk);
  811. sock_put(sk);
  812. return 0;
  813. }
/* proto_ops for AF_QIPCRTR datagram sockets; connection-oriented ops
 * are stubbed with the sock_no_* helpers.
 */
static const struct proto_ops qrtr_proto_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_QIPCRTR,
	.bind		= qrtr_bind,
	.connect	= qrtr_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.listen		= sock_no_listen,
	.sendmsg	= qrtr_sendmsg,
	.recvmsg	= qrtr_recvmsg,
	.getname	= qrtr_getname,
	.ioctl		= qrtr_ioctl,
	.poll		= datagram_poll,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.release	= qrtr_release,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

static struct proto qrtr_proto = {
	.name		= "QIPCRTR",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct qrtr_sock),
};
/* Create an AF_QIPCRTR socket. Only SOCK_DGRAM is supported; the new
 * socket starts unbound (SOCK_ZAPPED) with the local node's address.
 */
static int qrtr_create(struct net *net, struct socket *sock,
		       int protocol, int kern)
{
	struct qrtr_sock *ipc;
	struct sock *sk;

	if (sock->type != SOCK_DGRAM)
		return -EPROTOTYPE;

	sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern);
	if (!sk)
		return -ENOMEM;

	/* unbound until bind()/autobind assigns a port */
	sock_set_flag(sk, SOCK_ZAPPED);

	sock_init_data(sock, sk);
	sock->ops = &qrtr_proto_ops;

	ipc = qrtr_sk(sk);
	ipc->us.sq_family = AF_QIPCRTR;
	ipc->us.sq_node = qrtr_local_nid;
	ipc->us.sq_port = 0;

	return 0;
}
/* Netlink attribute policy for RTM_NEWADDR on PF_QIPCRTR. */
static const struct nla_policy qrtr_policy[IFA_MAX + 1] = {
	[IFA_LOCAL] = { .type = NLA_U32 },
};

/* RTM_NEWADDR handler: set this machine's qrtr node id from IFA_LOCAL.
 * Requires both CAP_NET_ADMIN and CAP_SYS_ADMIN on the sending socket.
 */
static int qrtr_addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFA_MAX + 1];
	struct ifaddrmsg *ifm;
	int rc;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	ASSERT_RTNL();

	rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, qrtr_policy, extack);
	if (rc < 0)
		return rc;

	ifm = nlmsg_data(nlh);
	if (!tb[IFA_LOCAL])
		return -EINVAL;

	qrtr_local_nid = nla_get_u32(tb[IFA_LOCAL]);

	return 0;
}
static const struct net_proto_family qrtr_family = {
	.owner	= THIS_MODULE,
	.family	= AF_QIPCRTR,
	.create	= qrtr_create,
};

/* Register the protocol, the socket family and the RTM_NEWADDR rtnetlink
 * handler; unwinds the proto registration if sock_register fails.
 */
static int __init qrtr_proto_init(void)
{
	int rc;

	rc = proto_register(&qrtr_proto, 1);
	if (rc)
		return rc;

	rc = sock_register(&qrtr_family);
	if (rc) {
		proto_unregister(&qrtr_proto);
		return rc;
	}

	rtnl_register(PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, 0);

	return 0;
}
postcore_initcall(qrtr_proto_init);
/* Tear down in reverse registration order: rtnetlink handler, socket
 * family, then the protocol itself.
 */
static void __exit qrtr_proto_fini(void)
{
	rtnl_unregister(PF_QIPCRTR, RTM_NEWADDR);
	sock_unregister(qrtr_family.family);
	proto_unregister(&qrtr_proto);
}
module_exit(qrtr_proto_fini);

MODULE_DESCRIPTION("Qualcomm IPC-router driver");
MODULE_LICENSE("GPL v2");