/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>

#include <net/sock.h>

#include "vhost.h"
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

enum {
	VHOST_NET_VQ_RX = 0,
	VHOST_NET_VQ_TX = 1,
	VHOST_NET_VQ_MAX = 2,
};
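
/* TX socket polling is disabled by default.  handle_tx() starts it only
 * while the socket send buffer is full and stops it again once the buffer
 * has drained, so the states below track whether the TX entry of
 * vhost_net->poll is currently registered with the socket's wait queue. */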
enum vhost_net_poll_state {
	VHOST_NET_POLL_DISABLED = 0,
	VHOST_NET_POLL_STARTED = 1,
	VHOST_NET_POLL_STOPPED = 2,
};

struct vhost_net {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Tells us whether we are polling a socket for TX.
	 * We only do this when socket buffer fills up.
	 * Protected by tx vq lock. */
	enum vhost_net_poll_state tx_poll_state;
};
/* Pop first len bytes from iovec. Return number of segments used. */
static int move_iovec_hdr(struct iovec *from, struct iovec *to,
			  size_t len, int iov_count)
{
	int seg = 0;
	size_t size;

	while (len && seg < iov_count) {
		size = min(from->iov_len, len);
		to->iov_base = from->iov_base;
		to->iov_len = size;
		from->iov_len -= size;
		from->iov_base += size;
		len -= size;
		++from;
		++to;
		++seg;
	}
	return seg;
}

/* Copy iovec entries for len bytes from iovec. */
static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
			   size_t len, int iovcount)
{
	int seg = 0;
	size_t size;

	while (len && seg < iovcount) {
		size = min(from->iov_len, len);
		to->iov_base = from->iov_base;
		to->iov_len = size;
		len -= size;
		++from;
		++to;
		++seg;
	}
}
/* Caller must have TX VQ lock */
static void tx_poll_stop(struct vhost_net *net)
{
	if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED))
		return;
	vhost_poll_stop(net->poll + VHOST_NET_VQ_TX);
	net->tx_poll_state = VHOST_NET_POLL_STOPPED;
}

/* Caller must have TX VQ lock */
static void tx_poll_start(struct vhost_net *net, struct socket *sock)
{
	if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
		return;
	vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
	net->tx_poll_state = VHOST_NET_POLL_STARTED;
}
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
	unsigned out, in, s;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err, wmem;
	size_t hdr_size;
	struct socket *sock;

	/* TODO: check that we are running from vhost_worker? */
	sock = rcu_dereference_check(vq->private_data, 1);
	if (!sock)
		return;

	wmem = atomic_read(&sock->sk->sk_wmem_alloc);
	if (wmem >= sock->sk->sk_sndbuf) {
		mutex_lock(&vq->mutex);
		tx_poll_start(net, sock);
		mutex_unlock(&vq->mutex);
		return;
	}

	mutex_lock(&vq->mutex);
	vhost_disable_notify(&net->dev, vq);

	if (wmem < sock->sk->sk_sndbuf / 2)
		tx_poll_stop(net);
	hdr_size = vq->vhost_hlen;

	for (;;) {
		head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			wmem = atomic_read(&sock->sk->sk_wmem_alloc);
			if (wmem >= sock->sk->sk_sndbuf * 3 / 4) {
				tx_poll_start(net, sock);
				set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
				break;
			}
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, in %d\n", out, in);
			break;
		}
		/* Skip header. TODO: support TSO. */
		s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out);
		msg.msg_iovlen = out;
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected header len for TX: "
			       "%zd expected %zd\n",
			       iov_length(vq->hdr, s), hdr_size);
			break;
		}
		/* TODO: Check specific error and bomb out unless ENOBUFS? */
		err = sock->ops->sendmsg(NULL, sock, &msg, len);
		if (unlikely(err < 0)) {
			vhost_discard_vq_desc(vq, 1);
			tx_poll_start(net, sock);
			break;
		}
		if (err != len)
			pr_debug("Truncated TX packet: "
				 " len %d != %zd\n", err, len);
		vhost_add_used_and_signal(&net->dev, vq, head, 0);
		total_len += len;
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
}
static int peek_head_len(struct sock *sk)
{
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head))
		len = head->len;
	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}
/* This is a multi-buffer version of vhost_get_vq_desc(), that works if
 *	vq has read descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- log offset
 * @quota	- headcount quota, 1 for big buffer
 *	returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{
	unsigned int out, in;
	int seg = 0;
	int headcount = 0;
	unsigned d;
	int r, nlogs = 0;

	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
			r = -ENOBUFS;
			goto err;
		}
		d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		if (d == vq->num) {
			r = 0;
			goto err;
		}
		if (unlikely(out || in <= 0)) {
			vq_err(vq, "unexpected descriptor format for RX: "
			       "out %d, in %d\n", out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {
			nlogs += *log_num;
			log += *log_num;
		}
		heads[headcount].id = d;
		heads[headcount].len = iov_length(vq->iov + seg, in);
		datalen -= heads[headcount].len;
		++headcount;
		seg += in;
	}
	heads[headcount - 1].len += datalen;
	*iovcount = seg;
	if (unlikely(log))
		*log_num = nlogs;
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}
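
/* With VIRTIO_NET_F_MRG_RXBUF a single packet may span several buffer
 * heads; handle_rx() writes the head count returned by get_rx_bufs() into
 * the num_buffers field of the virtio_net_hdr_mrg_rxbuf so the guest knows
 * how many descriptors the packet occupies.  Without mergeable buffers the
 * quota is 1, i.e. one (possibly big) buffer per packet. */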
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
	unsigned uninitialized_var(in), log;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};
	struct virtio_net_hdr_mrg_rxbuf hdr = {
		.hdr.flags = 0,
		.hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};
	size_t total_len = 0;
	int err, headcount, mergeable;
	size_t vhost_hlen, sock_hlen;
	size_t vhost_len, sock_len;
	/* TODO: check that we are running from vhost_worker? */
	struct socket *sock = rcu_dereference_check(vq->private_data, 1);

	if (!sock)
		return;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(&net->dev, vq);
	vhost_hlen = vq->vhost_hlen;
	sock_hlen = vq->sock_hlen;

	vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;
	mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);

	while ((sock_len = peek_head_len(sock->sk))) {
		sock_len += sock_hlen;
		vhost_len = sock_len + vhost_hlen;
		headcount = get_rx_bufs(vq, vq->heads, vhost_len,
					&in, vq_log, &log,
					likely(mergeable) ? UIO_MAXIOV : 1);
		/* On error, stop handling until the next kick. */
		if (unlikely(headcount < 0))
			break;
		/* OK, now we need to know about added descriptors. */
		if (!headcount) {
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			/* Nothing new?  Wait for eventfd to tell us
			 * they refilled. */
			break;
		}
		/* We don't need to be notified again. */
		if (unlikely(vhost_hlen))
			/* Skip header. TODO: support TSO. */
			move_iovec_hdr(vq->iov, vq->hdr, vhost_hlen, in);
		else
			/* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
			 * needed because recvmsg can modify msg_iov. */
			copy_iovec_hdr(vq->iov, vq->hdr, sock_hlen, in);
		msg.msg_iovlen = in;
		err = sock->ops->recvmsg(NULL, sock, &msg,
					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
		/* Userspace might have consumed the packet meanwhile:
		 * it's not supposed to do this usually, but might be hard
		 * to prevent. Discard data we got (if any) and keep going. */
		if (unlikely(err != sock_len)) {
			pr_debug("Discarded rx packet: "
				 " len %d, expected %zd\n", err, sock_len);
			vhost_discard_vq_desc(vq, headcount);
			continue;
		}
		if (unlikely(vhost_hlen) &&
		    memcpy_toiovecend(vq->hdr, (unsigned char *)&hdr, 0,
				      vhost_hlen)) {
			vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
			       vq->iov->iov_base);
			break;
		}
		/* TODO: Should check and handle checksum. */
		if (likely(mergeable) &&
		    memcpy_toiovecend(vq->hdr, (unsigned char *)&headcount,
				      offsetof(typeof(hdr), num_buffers),
				      sizeof hdr.num_buffers)) {
			vq_err(vq, "Failed num_buffers write");
			vhost_discard_vq_desc(vq, headcount);
			break;
		}
		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
					    headcount);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, vhost_len);
		total_len += vhost_len;
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
}
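
/* Each direction has two entry points into the same handler: the *_kick
 * functions run when the guest kicks the virtqueue, the *_net functions
 * run when the backend socket becomes readable (POLLIN) or writable
 * (POLLOUT).  Both are executed as vhost work items. */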
static void handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_TX].work);

	handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_RX].work);

	handle_rx(net);
}
static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
	struct vhost_dev *dev;
	int r;

	if (!n)
		return -ENOMEM;

	dev = &n->dev;
	n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
	r = vhost_dev_init(dev, n->vqs, VHOST_NET_VQ_MAX);
	if (r < 0) {
		kfree(n);
		return r;
	}

	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
	n->tx_poll_state = VHOST_NET_POLL_DISABLED;

	f->private_data = n;

	return 0;
}
static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	if (!vq->private_data)
		return;
	if (vq == n->vqs + VHOST_NET_VQ_TX) {
		tx_poll_stop(n);
		n->tx_poll_state = VHOST_NET_POLL_DISABLED;
	} else
		vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
}

static void vhost_net_enable_vq(struct vhost_net *n,
				struct vhost_virtqueue *vq)
{
	struct socket *sock;

	sock = rcu_dereference_protected(vq->private_data,
					 lockdep_is_held(&vq->mutex));
	if (!sock)
		return;
	if (vq == n->vqs + VHOST_NET_VQ_TX) {
		n->tx_poll_state = VHOST_NET_POLL_STOPPED;
		tx_poll_start(n, sock);
	} else
		vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
}
static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;

	mutex_lock(&vq->mutex);
	sock = rcu_dereference_protected(vq->private_data,
					 lockdep_is_held(&vq->mutex));
	vhost_net_disable_vq(n, vq);
	rcu_assign_pointer(vq->private_data, NULL);
	mutex_unlock(&vq->mutex);
	return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_TX);
	*rx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_RX);
}

static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
	vhost_poll_flush(n->poll + index);
	vhost_poll_flush(&n->dev.vqs[index].poll);
}

static void vhost_net_flush(struct vhost_net *n)
{
	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
}
static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;

	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_cleanup(&n->dev);
	if (tx_sock)
		fput(tx_sock->file);
	if (rx_sock)
		fput(rx_sock->file);
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n);
	return 0;
}
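
/* The backend passed in via VHOST_NET_SET_BACKEND may be a raw AF_PACKET
 * socket, a tun/tap file descriptor or a macvtap file descriptor; the
 * helpers below validate the fd and return the underlying struct socket.
 * An fd of -1 disables the backend. */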
static struct socket *get_raw_socket(int fd)
{
	struct {
		struct sockaddr_ll sa;
		char buf[MAX_ADDR_LEN];
	} uaddr;
	int uaddr_len = sizeof uaddr, r;
	struct socket *sock = sockfd_lookup(fd, &r);

	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	/* Parameter checking */
	if (sock->sk->sk_type != SOCK_RAW) {
		r = -ESOCKTNOSUPPORT;
		goto err;
	}

	r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
			       &uaddr_len, 0);
	if (r)
		goto err;

	if (uaddr.sa.sll_family != AF_PACKET) {
		r = -EPFNOSUPPORT;
		goto err;
	}
	return sock;
err:
	fput(sock->file);
	return ERR_PTR(r);
}
static struct socket *get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (!IS_ERR(sock))
		return sock;
	sock = macvtap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}

static struct socket *get_socket(int fd)
{
	struct socket *sock;

	/* special case to disable backend */
	if (fd == -1)
		return NULL;
	sock = get_raw_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	sock = get_tap_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	return ERR_PTR(-ENOTSOCK);
}
static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
	struct socket *sock, *oldsock;
	struct vhost_virtqueue *vq;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_NET_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = n->vqs + index;
	mutex_lock(&vq->mutex);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	sock = get_socket(fd);
	if (IS_ERR(sock)) {
		r = PTR_ERR(sock);
		goto err_vq;
	}

	/* start polling new socket */
	oldsock = rcu_dereference_protected(vq->private_data,
					    lockdep_is_held(&vq->mutex));
	if (sock != oldsock) {
		vhost_net_disable_vq(n, vq);
		rcu_assign_pointer(vq->private_data, sock);
		vhost_net_enable_vq(n, vq);
	}

	mutex_unlock(&vq->mutex);

	if (oldsock) {
		vhost_net_flush_vq(n, index);
		fput(oldsock->file);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}
static long vhost_net_reset_owner(struct vhost_net *n)
{
	struct socket *tx_sock = NULL;
	struct socket *rx_sock = NULL;
	long err;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	err = vhost_dev_reset_owner(&n->dev);
done:
	mutex_unlock(&n->dev.mutex);
	if (tx_sock)
		fput(tx_sock->file);
	if (rx_sock)
		fput(rx_sock->file);
	return err;
}
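
/* The virtio-net header is supplied either by vhost itself (vhost_hlen) or
 * by the backend socket, e.g. a tap device configured to prepend vnet
 * headers (sock_hlen).  Which of the two applies depends on whether the
 * guest acked VHOST_NET_F_VIRTIO_NET_HDR; the header size itself depends
 * on VIRTIO_NET_F_MRG_RXBUF. */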
static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
	size_t vhost_hlen, sock_hlen, hdr_len;
	int i;

	hdr_len = (features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ?
			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
			sizeof(struct virtio_net_hdr);
	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
		/* vhost provides vnet_hdr */
		vhost_hlen = hdr_len;
		sock_hlen = 0;
	} else {
		/* socket provides vnet_hdr */
		vhost_hlen = 0;
		sock_hlen = hdr_len;
	}
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	n->dev.acked_features = features;
	smp_wmb();
	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].mutex);
		n->vqs[i].vhost_hlen = vhost_hlen;
		n->vqs[i].sock_hlen = sock_hlen;
		mutex_unlock(&n->vqs[i].mutex);
	}
	vhost_net_flush(n);
	mutex_unlock(&n->dev.mutex);
	return 0;
}
static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
			    unsigned long arg)
{
	struct vhost_net *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	struct vhost_vring_file backend;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_NET_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_net_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_FEATURES)
			return -EOPNOTSUPP;
		return vhost_net_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_net_reset_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, arg);
		vhost_net_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}
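
/* Rough sketch of how userspace is expected to drive this device; the
 * generic per-device and per-vring ioctls are forwarded to
 * vhost_dev_ioctl() in vhost.c rather than handled here:
 *
 *	fd = open("/dev/vhost-net", O_RDWR);
 *	ioctl(fd, VHOST_SET_OWNER, NULL);
 *	ioctl(fd, VHOST_GET_FEATURES, &features);
 *	ioctl(fd, VHOST_SET_FEATURES, &features);   // a subset of the above
 *	ioctl(fd, VHOST_SET_MEM_TABLE, mem);        // guest memory layout
 *	... VHOST_SET_VRING_NUM/ADDR/BASE/KICK/CALL for the RX (index 0)
 *	    and TX (index 1) virtqueues ...
 *	ioctl(fd, VHOST_NET_SET_BACKEND, &backend); // tap/macvtap/raw fd
 */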
#ifdef CONFIG_COMPAT
static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
				   unsigned long arg)
{
	return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_net_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_net_release,
	.unlocked_ioctl = vhost_net_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_net_compat_ioctl,
#endif
	.open           = vhost_net_open,
	.llseek         = noop_llseek,
};

static struct miscdevice vhost_net_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-net",
	&vhost_net_fops,
};

static int vhost_net_init(void)
{
	return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);

static void vhost_net_exit(void)
{
	misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");