/*
 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/virtio_net.h>
#include <linux/wait.h>
#include <linux/nsproxy.h>
#include <linux/sched.h>
#include <linux/if_tun.h>
#include <linux/version.h>
#include <linux/file.h>

/* local KNI definitions: struct kni_dev and the fifo helpers */
#include "kni_dev.h"
#include "kni_fifo.h"

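/*
 * Compatibility helper: on kernels >= 3.7 the file descriptor and backing
 * struct file are set up by hand, while older kernels provide sock_map_fd()
 * directly.
 */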
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
static int kni_sock_map_fd(struct socket *sock)
{
	struct file *file;
	int fd = get_unused_fd_flags(0);

	if (fd < 0)
		return fd;

	file = sock_alloc_file(sock, 0, NULL);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}
	fd_install(fd, file);
	return fd;
}
#else
#define kni_sock_map_fd(s) sock_map_fd(s, 0)
#endif

static struct proto kni_raw_proto = {
	.name = "kni_vhost",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct kni_vhost_queue),
};

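/*
 * kni_vhost_net_tx - push one packet from the vhost socket to the DPDK
 * application: take an mbuf from alloc_q, copy the payload out of the
 * message (iov_iter or iovec depending on the kernel), pad short frames to
 * ETH_ZLEN and hand the mbuf over through tx_q.
 */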
static int
kni_vhost_net_tx(struct kni_dev *kni, struct msghdr *m,
		 uint32_t offset, uint32_t len)
{
	struct rte_kni_mbuf *pkt_kva = NULL;
	struct rte_kni_mbuf *pkt_va = NULL;
	void *data_kva;
	int ret;

	pr_debug("tx offset=%d, len=%d, iovlen=%d\n",
#ifdef HAVE_IOV_ITER_MSGHDR
		 offset, len, (int)m->msg_iter.iov->iov_len);
#else
		 offset, len, (int)m->msg_iov->iov_len);
#endif

	/**
	 * Check if it has at least one free entry in tx_q and
	 * one entry in alloc_q.
	 */
	if (kni_fifo_free_count(kni->tx_q) == 0 ||
	    kni_fifo_count(kni->alloc_q) == 0) {
		/**
		 * If there is no free entry in tx_q or no entry in alloc_q,
		 * drop the packet and go out.
		 */
		goto drop;
	}

	/* dequeue a mbuf from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va, 1);
	if (likely(ret == 1)) {
		pkt_kva = (void *)pkt_va - kni->mbuf_va + kni->mbuf_kva;
		data_kva = pkt_kva->buf_addr + pkt_kva->data_off
			   - kni->mbuf_va + kni->mbuf_kva;

#ifdef HAVE_IOV_ITER_MSGHDR
		copy_from_iter(data_kva, len, &m->msg_iter);
#else
		memcpy_fromiovecend(data_kva, m->msg_iov, offset, len);
#endif /* HAVE_IOV_ITER_MSGHDR */

		/* pad short frames to the minimum Ethernet frame size */
		if (unlikely(len < ETH_ZLEN)) {
			memset(data_kva + len, 0, ETH_ZLEN - len);
			len = ETH_ZLEN;
		}
		pkt_kva->pkt_len = len;
		pkt_kva->data_len = len;

		/* enqueue mbuf into tx_q */
		ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va, 1);
		if (unlikely(ret != 1)) {
			/* Failing should not happen */
			pr_err("Fail to enqueue mbuf into tx_q\n");
			goto drop;
		}
	} else {
		/* Failing should not happen */
		pr_err("Fail to dequeue mbuf from alloc_q\n");
		goto drop;
	}

	/* update statistics */
	kni->stats.tx_bytes += len;
	kni->stats.tx_packets++;

	return 0;

drop:
	/* update statistics */
	kni->stats.tx_dropped++;

	return 0;
}

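/*
 * kni_vhost_net_rx - hand one received packet to the vhost socket: dequeue
 * the skb shell queued by kni_chk_vhost_rx(), return the shell to the rx
 * cache fifo, copy the mbuf payload into the message and give the mbuf back
 * to the application through free_q.
 */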
static int
kni_vhost_net_rx(struct kni_dev *kni, struct msghdr *m,
		 uint32_t offset, uint32_t len)
{
	uint32_t pkt_len = 0;
	struct rte_kni_mbuf *kva;
	struct rte_kni_mbuf *va;
	void *data_kva;
	struct sk_buff *skb;
	struct kni_vhost_queue *q = kni->vhost_queue;

	if (unlikely(q == NULL))
		return 0;

	/* ensure at least one entry in free_q */
	if (unlikely(kni_fifo_free_count(kni->free_q) == 0))
		return 0;

	skb = skb_dequeue(&q->sk.sk_receive_queue);
	if (unlikely(skb == NULL))
		return 0;

	kva = (struct rte_kni_mbuf *)skb->data;

	/* return the skb shell to the cache */
	if (unlikely(kni_fifo_put(q->fifo, (void **)&skb, 1) != 1))
		/* Failing should not happen */
		pr_err("Fail to enqueue entries into rx cache fifo\n");

	pkt_len = kva->data_len;
	if (unlikely(pkt_len > len))
		goto drop;

	pr_debug("rx offset=%d, len=%d, pkt_len=%d, iovlen=%d\n",
#ifdef HAVE_IOV_ITER_MSGHDR
		 offset, len, pkt_len, (int)m->msg_iter.iov->iov_len);
#else
		 offset, len, pkt_len, (int)m->msg_iov->iov_len);
#endif

	data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va + kni->mbuf_kva;
#ifdef HAVE_IOV_ITER_MSGHDR
	if (unlikely(copy_to_iter(data_kva, pkt_len, &m->msg_iter) != pkt_len))
#else
	if (unlikely(memcpy_toiovecend(m->msg_iov, data_kva, offset, pkt_len)))
#endif /* HAVE_IOV_ITER_MSGHDR */
		goto drop;

	/* Update statistics */
	kni->stats.rx_bytes += pkt_len;
	kni->stats.rx_packets++;

	/* enqueue mbufs into free_q */
	va = (void *)kva - kni->mbuf_kva + kni->mbuf_va;
	if (unlikely(kni_fifo_put(kni->free_q, (void **)&va, 1) != 1))
		/* Failing should not happen */
		pr_err("Fail to enqueue entries into free_q\n");

	pr_debug("receive done %d\n", pkt_len);

	return pkt_len;

drop:
	/* Update drop statistics */
	kni->stats.rx_dropped++;

	return 0;
}

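/*
 * kni_sock_poll - poll callback of the dummy socket: report POLLIN while
 * mbufs are pending in rx_q and POLLOUT while the socket is writeable.
 */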
static uint32_t
kni_sock_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct kni_vhost_queue *q =
		container_of(sock->sk, struct kni_vhost_queue, sk);
	struct kni_dev *kni;
	uint32_t mask = 0;

	if (unlikely(q == NULL || q->kni == NULL))
		return POLLERR;

	kni = q->kni;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	pr_debug("start kni_poll on group %d, wq 0x%16llx\n",
		 kni->group_id, (uint64_t)sock->wq);
#else
	pr_debug("start kni_poll on group %d, wait at 0x%16llx\n",
		 kni->group_id, (uint64_t)&sock->wait);
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	poll_wait(file, &sock->wq->wait, wait);
#else
	poll_wait(file, &sock->wait, wait);
#endif

	if (kni_fifo_count(kni->rx_q) > 0)
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
#ifdef SOCKWQ_ASYNC_NOSPACE
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock->flags) &&
	     sock_writeable(&q->sk)))
#else
	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock->flags) &&
	     sock_writeable(&q->sk)))
#endif
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}

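/*
 * kni_vhost_enqueue / kni_vhost_enqueue_burst - wrap mbufs taken from rx_q
 * into cached skb shells (data/len point straight into the mbuf) and queue
 * them on the socket receive queue, one at a time or RX_BURST_SZ at once.
 */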
static inline void
kni_vhost_enqueue(struct kni_dev *kni, struct kni_vhost_queue *q,
		  struct sk_buff *skb, struct rte_kni_mbuf *va)
{
	struct rte_kni_mbuf *kva;

	kva = (void *)(va) - kni->mbuf_va + kni->mbuf_kva;
	(skb)->data = (unsigned char *)kva;
	(skb)->len = kva->data_len;
	skb_queue_tail(&q->sk.sk_receive_queue, skb);
}

static inline void
kni_vhost_enqueue_burst(struct kni_dev *kni, struct kni_vhost_queue *q,
			struct sk_buff **skb, struct rte_kni_mbuf **va)
{
	uint32_t i;

	for (i = 0; i < RX_BURST_SZ; skb++, va++, i++)
		kni_vhost_enqueue(kni, q, *skb, *va);
}

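/*
 * kni_chk_vhost_rx - called from the KNI rx path: move as many mbufs as
 * possible from rx_q to the socket receive queue, in RX_BURST_SZ chunks plus
 * a one-by-one backlog loop, and wake up pollers when data becomes ready.
 */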
static int
kni_chk_vhost_rx(struct kni_dev *kni)
{
	struct kni_vhost_queue *q = kni->vhost_queue;
	uint32_t nb_in, nb_mbuf, nb_skb;
	const uint32_t BURST_MASK = RX_BURST_SZ - 1;
	uint32_t nb_burst, nb_backlog, i;
	struct sk_buff *skb[RX_BURST_SZ];
	struct rte_kni_mbuf *va[RX_BURST_SZ];

	if (unlikely(BE_STOP & kni->vq_status)) {
		kni->vq_status |= BE_FINISH;
		return 0;
	}

	if (unlikely(q == NULL))
		return 0;

	nb_skb = kni_fifo_count(q->fifo);
	nb_mbuf = kni_fifo_count(kni->rx_q);

	nb_in = min(nb_mbuf, nb_skb);
	nb_in = min_t(uint32_t, nb_in, RX_BURST_SZ);
	nb_burst = (nb_in & ~BURST_MASK);
	nb_backlog = (nb_in & BURST_MASK);

	/* enqueue skb_queue per RX_BURST_SZ bulk */
	if (nb_burst != 0) {
		if (unlikely(kni_fifo_get(kni->rx_q, (void **)&va, RX_BURST_SZ)
				!= RX_BURST_SZ))
			goto except;

		if (unlikely(kni_fifo_get(q->fifo, (void **)&skb, RX_BURST_SZ)
				!= RX_BURST_SZ))
			goto except;

		kni_vhost_enqueue_burst(kni, q, skb, va);
	}

	/* all leftover, do one by one */
	for (i = 0; i < nb_backlog; ++i) {
		if (unlikely(kni_fifo_get(kni->rx_q, (void **)&va, 1) != 1))
			goto except;

		if (unlikely(kni_fifo_get(q->fifo, (void **)&skb, 1) != 1))
			goto except;

		kni_vhost_enqueue(kni, q, *skb, *va);
	}

	/* On-demand wake up */
	if ((nb_in == RX_BURST_SZ) || (nb_skb == 0) ||
	    ((nb_mbuf < RX_BURST_SZ) && (nb_mbuf != 0))) {
		wake_up_interruptible_poll(sk_sleep(&q->sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		pr_debug("RX CHK KICK nb_mbuf %d, nb_skb %d, nb_in %d\n",
			 nb_mbuf, nb_skb, nb_in);
	}

	return 0;

except:
	/* Failing should not happen */
	pr_err("Fail to enqueue fifo, it shouldn't happen\n");

	return 0;
}

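/*
 * kni_sock_sndmsg - sendmsg handler of the dummy socket: strip the optional
 * virtio-net header and pass the remaining payload to kni_vhost_net_tx().
 */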
static int
#ifdef HAVE_KIOCB_MSG_PARAM
kni_sock_sndmsg(struct kiocb *iocb, struct socket *sock,
		struct msghdr *m, size_t total_len)
#else
kni_sock_sndmsg(struct socket *sock,
		struct msghdr *m, size_t total_len)
#endif /* HAVE_KIOCB_MSG_PARAM */
{
	struct kni_vhost_queue *q =
		container_of(sock->sk, struct kni_vhost_queue, sk);
	int vnet_hdr_len = 0;
	unsigned long len = total_len;

	if (unlikely(q == NULL || q->kni == NULL))
		return 0;

	pr_debug("kni_sndmsg len %ld, flags 0x%08x, nb_iov %d\n",
#ifdef HAVE_IOV_ITER_MSGHDR
		 len, q->flags, (int)m->msg_iter.iov->iov_len);
#else
		 len, q->flags, (int)m->msg_iovlen);
#endif

#ifdef RTE_KNI_VHOST_VNET_HDR_EN
	if (likely(q->flags & IFF_VNET_HDR)) {
		vnet_hdr_len = q->vnet_hdr_sz;
		if (unlikely(len < vnet_hdr_len))
			return 0;
		len -= vnet_hdr_len;
	}
#endif

	if (unlikely(len < ETH_HLEN + q->vnet_hdr_sz))
		return 0;

	return kni_vhost_net_tx(q->kni, m, vnet_hdr_len, len);
}

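/*
 * kni_sock_rcvmsg - recvmsg handler: fetch one packet via kni_vhost_net_rx()
 * and, when virtio-net headers are enabled, prepend an all-zero header.
 */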
static int
#ifdef HAVE_KIOCB_MSG_PARAM
kni_sock_rcvmsg(struct kiocb *iocb, struct socket *sock,
		struct msghdr *m, size_t len, int flags)
#else
kni_sock_rcvmsg(struct socket *sock,
		struct msghdr *m, size_t len, int flags)
#endif /* HAVE_KIOCB_MSG_PARAM */
{
	int vnet_hdr_len = 0;
	int pkt_len = 0;
	struct kni_vhost_queue *q =
		container_of(sock->sk, struct kni_vhost_queue, sk);
	static struct virtio_net_hdr
		__attribute__ ((unused)) vnet_hdr = {
		.flags = 0,
		.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};

	if (unlikely(q == NULL || q->kni == NULL))
		return 0;

#ifdef RTE_KNI_VHOST_VNET_HDR_EN
	if (likely(q->flags & IFF_VNET_HDR)) {
		vnet_hdr_len = q->vnet_hdr_sz;
		if (unlikely(len < vnet_hdr_len))
			return -EINVAL;
		len -= vnet_hdr_len;
	}
#endif

	pkt_len = kni_vhost_net_rx(q->kni, m, vnet_hdr_len, len);
	if (unlikely(pkt_len == 0))
		return 0;

#ifdef RTE_KNI_VHOST_VNET_HDR_EN
	/* no need to copy hdr when no pkt received */
#ifdef HAVE_IOV_ITER_MSGHDR
	if (unlikely(copy_to_iter((void *)&vnet_hdr, vnet_hdr_len,
				  &m->msg_iter) != vnet_hdr_len))
#else
	if (unlikely(memcpy_toiovecend(m->msg_iov,
		(void *)&vnet_hdr, 0, vnet_hdr_len)))
#endif /* HAVE_IOV_ITER_MSGHDR */
		return -EFAULT;
#endif /* RTE_KNI_VHOST_VNET_HDR_EN */
	pr_debug("kni_rcvmsg expect_len %ld, flags 0x%08x, pkt_len %d\n",
		 (unsigned long)len, q->flags, pkt_len);

	return pkt_len + vnet_hdr_len;
}

/* dummy tap-like ioctl */
static int
kni_sock_ioctl(struct socket *sock, uint32_t cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	uint32_t __user *up = argp;
	struct kni_vhost_queue *q =
		container_of(sock->sk, struct kni_vhost_queue, sk);
	struct kni_dev *kni;
	int __user *sp = argp;
	uint32_t u;
	int s;

	pr_debug("tap ioctl cmd 0x%08x\n", cmd);

		pr_debug("TUNSETIFF\n");
		/* ignore the name, just look at the flags */
		if (get_user(u, &ifr->ifr_flags))

		if ((u & ~IFF_VNET_HDR) != (IFF_NO_PI | IFF_TAP))

		pr_debug("TUNGETIFF\n");
		rcu_read_lock_bh();
		kni = rcu_dereference_bh(q->kni);
		dev_hold(kni->net_dev);
		rcu_read_unlock_bh();

		if (copy_to_user(&ifr->ifr_name, kni->net_dev->name, IFNAMSIZ)
		    || put_user(q->flags, &ifr->ifr_flags))
		dev_put(kni->net_dev);

		pr_debug("TUNGETFEATURES\n");
		u = IFF_TAP | IFF_NO_PI;
#ifdef RTE_KNI_VHOST_VNET_HDR_EN

		pr_debug("TUNSETSNDBUF\n");

	case TUNGETVNETHDRSZ:
		pr_debug("TUNGETVNETHDRSZ %d\n", s);

	case TUNSETVNETHDRSZ:
		if (s < (int)sizeof(struct virtio_net_hdr))
		pr_debug("TUNSETVNETHDRSZ %d\n", s);

		pr_debug("TUNSETOFFLOAD %lx\n", arg);
#ifdef RTE_KNI_VHOST_VNET_HDR_EN
		/* no offloads are supported yet */
		if (!(q->flags & IFF_VNET_HDR))

		pr_debug("NOT SUPPORT\n");

static int
kni_sock_compat_ioctl(struct socket *sock, uint32_t cmd,
		      unsigned long arg)
{
	/* 32-bit app on 64-bit OS to be supported later */
	pr_debug("Not implemented.\n");

	return -EINVAL;
}

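/*
 * kni_sock_release() sets BE_STOP and then waits here for the KNI rx path
 * to finish with the vhost queue before detaching it.
 */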
#define KNI_VHOST_WAIT_WQ_SAFE()				\
	do {							\
		while ((BE_FINISH | BE_STOP) == kni->vq_status)	\
			msleep(1);				\
	} while (0)

kni_sock_release(struct socket *sock)
	struct kni_vhost_queue *q =
		container_of(sock->sk, struct kni_vhost_queue, sk);

	kni->vq_status = BE_STOP;
	KNI_VHOST_WAIT_WQ_SAFE();
	kni->vhost_queue = NULL;

	sk_set_socket(&q->sk, NULL);

	pr_debug("dummy sock release done\n");

static int
kni_sock_getname(struct socket *sock, struct sockaddr *addr,
		 int *sockaddr_len, int peer)
{
	pr_debug("dummy sock getname\n");
	((struct sockaddr_ll *)addr)->sll_family = AF_PACKET;
	return 0;
}

static const struct proto_ops kni_socket_ops = {
	.getname = kni_sock_getname,
	.sendmsg = kni_sock_sndmsg,
	.recvmsg = kni_sock_rcvmsg,
	.release = kni_sock_release,
	.poll = kni_sock_poll,
	.ioctl = kni_sock_ioctl,
	.compat_ioctl = kni_sock_compat_ioctl,
};

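/*
 * kni_sk_write_space - sk_write_space callback: wake up anyone polling for
 * POLLOUT once the socket becomes writeable again.
 */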
static void
kni_sk_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
#ifdef SOCKWQ_ASYNC_NOSPACE
	    !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
#else
	    !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
#endif
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(
			wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}

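/*
 * kni_sk_destruct - sock destructor: drain the receive queue and free the
 * skb cache and its fifo.
 */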
static void
kni_sk_destruct(struct sock *sk)
{
	struct kni_vhost_queue *q =
		container_of(sk, struct kni_vhost_queue, sk);

	/* make sure there's no packet left in the buffer */
	while (skb_dequeue(&sk->sk_receive_queue) != NULL)
		;

	if (q->fifo != NULL) {
		kfree(q->fifo);
		q->fifo = NULL;
	}

	if (q->cache != NULL) {
		kfree(q->cache);
		q->cache = NULL;
	}
}

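/*
 * kni_vhost_backend_init - create the dummy raw socket and its file
 * descriptor, pre-allocate RTE_KNI_VHOST_MAX_CACHE_SIZE skb shells plus the
 * fifo that tracks them, and bind the resulting queue to the kni device.
 */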
static int
kni_vhost_backend_init(struct kni_dev *kni)
{
	struct kni_vhost_queue *q;
	struct net *net = current->nsproxy->net_ns;
	int err, i, sockfd;
	struct rte_kni_fifo *fifo;
	struct sk_buff *elem;

	if (kni->vhost_queue != NULL)
		return -1;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
	q = (struct kni_vhost_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
			&kni_raw_proto, 0);
#else
	q = (struct kni_vhost_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
			&kni_raw_proto);
#endif

	err = sock_create_lite(AF_UNSPEC, SOCK_RAW, IPPROTO_RAW, &q->sock);

	sockfd = kni_sock_map_fd(q->sock);

	/* cache of skb shells handed to the socket receive queue */
	q->cache = kzalloc(
		RTE_KNI_VHOST_MAX_CACHE_SIZE * sizeof(struct sk_buff),
		GFP_KERNEL);

	fifo = kzalloc(RTE_KNI_VHOST_MAX_CACHE_SIZE * sizeof(void *)
			+ sizeof(struct rte_kni_fifo), GFP_KERNEL);

	kni_fifo_init(fifo, RTE_KNI_VHOST_MAX_CACHE_SIZE);

	for (i = 0; i < RTE_KNI_VHOST_MAX_CACHE_SIZE; i++) {
		elem = &q->cache[i];
		kni_fifo_put(fifo, (void **)&elem, 1);
	}
	q->fifo = fifo;

	/* store sockfd in vhost_queue */
	q->sockfd = sockfd;

	q->sock->type = SOCK_RAW;
	q->sock->state = SS_CONNECTED;
	q->sock->ops = &kni_socket_ops;
	sock_init_data(q->sock, &q->sk);

	q->sk.sk_write_space = kni_sk_write_space;
	q->sk.sk_destruct = kni_sk_destruct;
	q->flags = IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
#ifdef RTE_KNI_VHOST_VNET_HDR_EN
	q->flags |= IFF_VNET_HDR;
#endif

	/* bind kni_dev with vhost_queue */
	q->kni = kni;
	kni->vhost_queue = q;

	kni->vq_status = BE_START;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	pr_debug("backend init sockfd=%d, sock->wq=0x%16llx,sk->sk_wq=0x%16llx",
		 q->sockfd, (uint64_t)q->sock->wq,
		 (uint64_t)q->sk.sk_wq);
#else
	pr_debug("backend init sockfd=%d, sock->wait at 0x%16llx,sk->sk_sleep=0x%16llx",
		 q->sockfd, (uint64_t)&q->sock->wait,
		 (uint64_t)q->sk.sk_sleep);
#endif

	return 0;

	/* error unwind path: undo fd, socket and sk allocation */
	put_unused_fd(sockfd);

	kni->vhost_queue = NULL;
	kni->vq_status |= BE_FINISH;
	sock_release(q->sock);

	sk_free((struct sock *)q);

/* kni vhost sock sysfs */
static ssize_t
show_sock_fd(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	struct net_device *net_dev = container_of(dev, struct net_device, dev);
	struct kni_dev *kni = netdev_priv(net_dev);
	int sockfd = -1;

	if (kni->vhost_queue != NULL)
		sockfd = kni->vhost_queue->sockfd;
	return snprintf(buf, 10, "%d\n", sockfd);
}

static ssize_t
show_sock_en(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	struct net_device *net_dev = container_of(dev, struct net_device, dev);
	struct kni_dev *kni = netdev_priv(net_dev);

	return snprintf(buf, 10, "%u\n", (kni->vhost_queue == NULL ? 0 : 1));
}

static ssize_t
set_sock_en(struct device *dev, struct device_attribute *attr,
	    const char *buf, size_t count)
{
	struct net_device *net_dev = container_of(dev, struct net_device, dev);
	struct kni_dev *kni = netdev_priv(net_dev);
	unsigned long en;
	int err = 0;

	if (kstrtoul(buf, 0, &en) != 0)
		return -EINVAL;

	if (en)
		err = kni_vhost_backend_init(kni);

	return err ? err : count;
}

static DEVICE_ATTR(sock_fd, S_IRUGO | S_IRUSR, show_sock_fd, NULL);
static DEVICE_ATTR(sock_en, S_IRUGO | S_IWUSR, show_sock_en, set_sock_en);

static struct attribute *dev_attrs[] = {
	&dev_attr_sock_fd.attr,
	&dev_attr_sock_en.attr,
	NULL,
};

static const struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};

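/*
 * kni_vhost_backend_release - detach the vhost queue from the kni device;
 * the socket itself goes away through the normal socket release path.
 */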
void
kni_vhost_backend_release(struct kni_dev *kni)
{
	struct kni_vhost_queue *q = kni->vhost_queue;

	if (q == NULL)
		return;

	/* detach from kni */
	q->kni = NULL;

	pr_debug("release backend done\n");
}

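/*
 * kni_vhost_init - expose the sock_fd/sock_en sysfs attributes on the KNI
 * net device and start with the backend stopped.
 */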
int
kni_vhost_init(struct kni_dev *kni)
{
	struct net_device *dev = kni->net_dev;

	if (sysfs_create_group(&dev->dev.kobj, &dev_attr_grp))
		sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);

	kni->vq_status = BE_STOP;

	pr_debug("kni_vhost_init done\n");

	return 0;
}