/*
 * GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *   General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *   The full GNU General Public License is included in this distribution
 *   in the file called LICENSE.GPL.
 *
 *   Contact Information:
 *   Intel Corporation
 */
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>
#include <linux/virtio_net.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/nsproxy.h>
#include <linux/sched.h>
#include <linux/if_tun.h>
#include <linux/version.h>
#include <linux/file.h>

#include "compat.h"
#include "kni_dev.h"
#include "kni_fifo.h"

#define RX_BURST_SZ 4
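/*
 * Linux 3.7 removed the exported sock_map_fd() helper, so on newer
 * kernels the descriptor has to be reserved, wrapped in a struct file
 * and installed by hand; older kernels keep calling sock_map_fd().
 */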
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
static int kni_sock_map_fd(struct socket *sock)
{
	struct file *file;
	int fd = get_unused_fd_flags(0);

	if (fd < 0)
		return fd;

	file = sock_alloc_file(sock, 0, NULL);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}
	fd_install(fd, file);
	return fd;
}
#else
#define kni_sock_map_fd(s) sock_map_fd(s, 0)
#endif
static struct proto kni_raw_proto = {
	.name = "kni_vhost",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct kni_vhost_queue),
};
static inline int
kni_vhost_net_tx(struct kni_dev *kni, struct msghdr *m,
		 unsigned int offset, unsigned int len)
{
	struct rte_kni_mbuf *pkt_kva = NULL;
	struct rte_kni_mbuf *pkt_va = NULL;
	int ret;

	KNI_DBG_TX("tx offset=%d, len=%d, iovlen=%d\n",
#ifdef HAVE_IOV_ITER_MSGHDR
		   offset, len, (int)m->msg_iter.iov->iov_len);
#else
		   offset, len, (int)m->msg_iov->iov_len);
#endif

	/*
	 * Check if it has at least one free entry in tx_q and
	 * one entry in alloc_q.
	 */
	if (kni_fifo_free_count(kni->tx_q) == 0 ||
	    kni_fifo_count(kni->alloc_q) == 0) {
		/*
		 * If there is no free entry in tx_q or no entry in
		 * alloc_q, drop the packet and go out.
		 */
		goto drop;
	}
	/* dequeue a mbuf from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va, 1);
	if (likely(ret == 1)) {
		void *data_kva;

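		/*
		 * Pointers in alloc_q are user-space virtual addresses of
		 * mbufs in the shared mbuf region; re-base them against the
		 * kernel mapping (mbuf_kva) before dereferencing.
		 */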
		pkt_kva = (void *)pkt_va - kni->mbuf_va + kni->mbuf_kva;
		data_kva = pkt_kva->buf_addr + pkt_kva->data_off
			   - kni->mbuf_va + kni->mbuf_kva;

#ifdef HAVE_IOV_ITER_MSGHDR
		copy_from_iter(data_kva, len, &m->msg_iter);
#else
		memcpy_fromiovecend(data_kva, m->msg_iov, offset, len);
#endif

		if (unlikely(len < ETH_ZLEN)) {
			memset(data_kva + len, 0, ETH_ZLEN - len);
			len = ETH_ZLEN;
		}

		pkt_kva->pkt_len = len;
		pkt_kva->data_len = len;
		/* enqueue mbuf into tx_q */
		ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va, 1);
		if (unlikely(ret != 1)) {
			/* Failing should not happen */
			KNI_ERR("Fail to enqueue mbuf into tx_q\n");
			goto drop;
		}
	} else {
		/* Failing should not happen */
		KNI_ERR("Fail to dequeue mbuf from alloc_q\n");
		goto drop;
	}

	/* update statistics */
	kni->stats.tx_bytes += len;
	kni->stats.tx_packets++;

	return 0;

drop:
	/* update statistics */
	kni->stats.tx_dropped++;

	return 0;
}
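/*
 * RX path: kni_chk_vhost_rx() has already parked mbuf pointers on the
 * socket receive queue, wrapped in cached skbs; here one packet is
 * copied out to the caller's iov and the mbuf is returned via free_q.
 */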
static inline int
kni_vhost_net_rx(struct kni_dev *kni, struct msghdr *m,
		 unsigned int offset, unsigned int len)
{
	uint32_t pkt_len;
	struct rte_kni_mbuf *kva;
	struct rte_kni_mbuf *va;
	void *data_kva;
	struct sk_buff *skb;
	struct kni_vhost_queue *q = kni->vhost_queue;

	if (unlikely(q == NULL))
		return 0;

	/* ensure at least one free entry in free_q for the returned mbuf */
	if (unlikely(kni_fifo_free_count(kni->free_q) == 0))
		return 0;

	skb = skb_dequeue(&q->sk.sk_receive_queue);
	if (unlikely(skb == NULL))
		return 0;

	kva = (struct rte_kni_mbuf *)skb->data;

	/* free skb to cache */
	skb->data = NULL;
	if (unlikely(kni_fifo_put(q->fifo, (void **)&skb, 1) != 1))
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue entries into rx cache fifo\n");

	pkt_len = kva->data_len;
	if (unlikely(pkt_len > len))
		goto drop;

	KNI_DBG_RX("rx offset=%d, len=%d, pkt_len=%d, iovlen=%d\n",
#ifdef HAVE_IOV_ITER_MSGHDR
		   offset, len, pkt_len, (int)m->msg_iter.iov->iov_len);
#else
		   offset, len, pkt_len, (int)m->msg_iov->iov_len);
#endif

	data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va + kni->mbuf_kva;
#ifdef HAVE_IOV_ITER_MSGHDR
	if (unlikely(copy_to_iter(data_kva, pkt_len, &m->msg_iter) != pkt_len))
#else
	if (unlikely(memcpy_toiovecend(m->msg_iov, data_kva, offset, pkt_len)))
#endif
		goto drop;

	/* Update statistics */
	kni->stats.rx_bytes += pkt_len;
	kni->stats.rx_packets++;

	/* enqueue mbufs into free_q */
	va = (void *)kva - kni->mbuf_kva + kni->mbuf_va;
	if (unlikely(kni_fifo_put(kni->free_q, (void **)&va, 1) != 1))
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue entries into free_q\n");

	KNI_DBG_RX("receive done %d\n", pkt_len);

	return pkt_len;

drop:
	/* Update drop statistics */
	kni->stats.rx_dropped++;

	return 0;
}
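/*
 * poll() backend: readable when DPDK has filled rx_q, writable when the
 * socket send buffer has room. The test_and_set_bit() arms the async
 * "no space" flag that kni_sk_write_space() clears on wakeup.
 */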
static unsigned int
kni_sock_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct kni_vhost_queue *q =
		container_of(sock->sk, struct kni_vhost_queue, sk);
	struct kni_dev *kni;
	unsigned int mask = 0;

	if (unlikely(q == NULL || q->kni == NULL))
		return POLLERR;

	kni = q->kni;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	KNI_DBG("start kni_poll on group %d, wq 0x%16llx\n",
		kni->group_id, (uint64_t)sock->wq);
#else
	KNI_DBG("start kni_poll on group %d, wait at 0x%16llx\n",
		kni->group_id, (uint64_t)&sock->wait);
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	poll_wait(file, &sock->wq->wait, wait);
#else
	poll_wait(file, &sock->wait, wait);
#endif

	if (kni_fifo_count(kni->rx_q) > 0)
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
#ifdef SOCKWQ_ASYNC_NOSPACE
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock->flags) &&
#else
	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock->flags) &&
#endif
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}
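/*
 * Hand a DPDK mbuf to the socket layer. The cached skb is only a
 * carrier: its data pointer is re-aimed at the kernel mapping of the
 * mbuf instead of copying the payload.
 */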
static inline void
kni_vhost_enqueue(struct kni_dev *kni, struct kni_vhost_queue *q,
		  struct sk_buff *skb, struct rte_kni_mbuf *va)
{
	struct rte_kni_mbuf *kva;

	kva = (void *)(va) - kni->mbuf_va + kni->mbuf_kva;
	skb->data = (unsigned char *)kva;
	skb->len = kva->data_len;
	skb_queue_tail(&q->sk.sk_receive_queue, skb);
}
static inline void
kni_vhost_enqueue_burst(struct kni_dev *kni, struct kni_vhost_queue *q,
			struct sk_buff **skb, struct rte_kni_mbuf **va)
{
	int i;

	for (i = 0; i < RX_BURST_SZ; skb++, va++, i++)
		kni_vhost_enqueue(kni, q, *skb, *va);
}
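/*
 * Move received mbufs from rx_q onto the socket receive queue, a full
 * RX_BURST_SZ at a time, then one by one for the remainder. The
 * BURST_MASK arithmetic below relies on RX_BURST_SZ being a power of
 * two.
 */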
int
kni_chk_vhost_rx(struct kni_dev *kni)
{
	struct kni_vhost_queue *q = kni->vhost_queue;
	unsigned int nb_in, nb_mbuf, nb_skb;
	const unsigned int BURST_MASK = RX_BURST_SZ - 1;
	unsigned int nb_burst, nb_backlog, i;
	struct sk_buff *skb[RX_BURST_SZ];
	struct rte_kni_mbuf *va[RX_BURST_SZ];

	if (unlikely(BE_STOP & kni->vq_status)) {
		kni->vq_status |= BE_FINISH;
		return 0;
	}

	if (unlikely(q == NULL))
		return 0;

	nb_skb = kni_fifo_count(q->fifo);
	nb_mbuf = kni_fifo_count(kni->rx_q);

	nb_in = min(nb_mbuf, nb_skb);
	nb_in = min(nb_in, (unsigned int)RX_BURST_SZ);
	nb_burst = (nb_in & ~BURST_MASK);
	nb_backlog = (nb_in & BURST_MASK);

	/* enqueue skb_queue per RX_BURST_SZ bulk */
	if (nb_burst != 0) {
		if (unlikely(kni_fifo_get(kni->rx_q, (void **)&va, RX_BURST_SZ)
				!= RX_BURST_SZ))
			goto except;

		if (unlikely(kni_fifo_get(q->fifo, (void **)&skb, RX_BURST_SZ)
				!= RX_BURST_SZ))
			goto except;

		kni_vhost_enqueue_burst(kni, q, skb, va);
	}

	/* all leftover, do one by one */
	for (i = 0; i < nb_backlog; ++i) {
		if (unlikely(kni_fifo_get(kni->rx_q, (void **)&va, 1) != 1))
			goto except;

		if (unlikely(kni_fifo_get(q->fifo, (void **)&skb, 1) != 1))
			goto except;

		kni_vhost_enqueue(kni, q, *skb, *va);
	}

	/* on-demand wake up */
	if ((nb_in == RX_BURST_SZ) || (nb_skb == 0) ||
	    ((nb_mbuf < RX_BURST_SZ) && (nb_mbuf != 0))) {
		wake_up_interruptible_poll(sk_sleep(&q->sk),
				POLLIN | POLLRDNORM | POLLRDBAND);
		KNI_DBG_RX("RX CHK KICK nb_mbuf %d, nb_skb %d, nb_in %d\n",
			   nb_mbuf, nb_skb, nb_in);
	}

	return 0;

except:
	/* Failing should not happen */
	KNI_ERR("Fail to enqueue fifo, it shouldn't happen\n");

	return 0;
}
static int
#ifdef HAVE_KIOCB_MSG_PARAM
kni_sock_sndmsg(struct kiocb *iocb, struct socket *sock,
		struct msghdr *m, size_t total_len)
#else
kni_sock_sndmsg(struct socket *sock,
		struct msghdr *m, size_t total_len)
#endif /* HAVE_KIOCB_MSG_PARAM */
{
	struct kni_vhost_queue *q =
		container_of(sock->sk, struct kni_vhost_queue, sk);
	int vnet_hdr_len = 0;
	unsigned long len = total_len;

	if (unlikely(q == NULL || q->kni == NULL))
		return 0;

	KNI_DBG_TX("kni_sndmsg len %ld, flags 0x%08x, nb_iov %d\n",
#ifdef HAVE_IOV_ITER_MSGHDR
		   len, q->flags, (int)m->msg_iter.iov->iov_len);
#else
		   len, q->flags, (int)m->msg_iovlen);
#endif

#ifdef RTE_KNI_VHOST_VNET_HDR_EN
	if (likely(q->flags & IFF_VNET_HDR)) {
		vnet_hdr_len = q->vnet_hdr_sz;
		if (unlikely(len < (unsigned long)vnet_hdr_len))
			return 0;
		len -= vnet_hdr_len;
	}
#endif

	if (unlikely(len < ETH_HLEN + q->vnet_hdr_sz))
		return 0;

	return kni_vhost_net_tx(q->kni, m, vnet_hdr_len, len);
}
static int
#ifdef HAVE_KIOCB_MSG_PARAM
kni_sock_rcvmsg(struct kiocb *iocb, struct socket *sock,
		struct msghdr *m, size_t len, int flags)
#else
kni_sock_rcvmsg(struct socket *sock,
		struct msghdr *m, size_t len, int flags)
#endif /* HAVE_KIOCB_MSG_PARAM */
{
	int vnet_hdr_len = 0;
	int pkt_len = 0;
	struct kni_vhost_queue *q =
		container_of(sock->sk, struct kni_vhost_queue, sk);
	static struct virtio_net_hdr
		__attribute__ ((unused)) vnet_hdr = {
		.flags = 0,
		.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};

	if (unlikely(q == NULL || q->kni == NULL))
		return 0;

#ifdef RTE_KNI_VHOST_VNET_HDR_EN
	if (likely(q->flags & IFF_VNET_HDR)) {
		vnet_hdr_len = q->vnet_hdr_sz;
		if (unlikely(len < (size_t)vnet_hdr_len))
			return -EINVAL;
		len -= vnet_hdr_len;
	}
#endif

	pkt_len = kni_vhost_net_rx(q->kni, m, vnet_hdr_len, len);
	if (unlikely(pkt_len == 0))
		return 0;

#ifdef RTE_KNI_VHOST_VNET_HDR_EN
	/* no need to copy hdr when no pkt received */
#ifdef HAVE_IOV_ITER_MSGHDR
	if (unlikely(copy_to_iter((void *)&vnet_hdr, vnet_hdr_len,
				  &m->msg_iter) != (size_t)vnet_hdr_len))
#else
	if (unlikely(memcpy_toiovecend(m->msg_iov,
				       (void *)&vnet_hdr, 0, vnet_hdr_len)))
#endif /* HAVE_IOV_ITER_MSGHDR */
		return -EFAULT;
#endif /* RTE_KNI_VHOST_VNET_HDR_EN */
	KNI_DBG_RX("kni_rcvmsg expect_len %ld, flags 0x%08x, pkt_len %d\n",
		   (unsigned long)len, q->flags, pkt_len);

	return pkt_len + vnet_hdr_len;
}
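/*
 * Tap-compatible ioctl surface: just enough of the TUNSETIFF/TUNGETIFF
 * family is implemented for user space (e.g. QEMU with vhost-net) to
 * drive this socket as if it were a tap device.
 */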
/* dummy tap-like ioctl */
static int
kni_sock_ioctl(struct socket *sock, unsigned int cmd,
	       unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	struct kni_vhost_queue *q =
		container_of(sock->sk, struct kni_vhost_queue, sk);
	struct kni_dev *kni;
	unsigned int u;
	int __user *sp = argp;
	int s;
	int ret;

	KNI_DBG("tap ioctl cmd 0x%08x\n", cmd);

	switch (cmd) {
	case TUNSETIFF:
		KNI_DBG("TUNSETIFF\n");
		/* ignore the name, just look at the flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		if ((u & ~IFF_VNET_HDR) != (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = u;

		return ret;

	case TUNGETIFF:
		KNI_DBG("TUNGETIFF\n");
		rcu_read_lock_bh();
		kni = rcu_dereference_bh(q->kni);
		if (kni)
			dev_hold(kni->net_dev);
		rcu_read_unlock_bh();

		if (!kni)
			return -ENOLINK;

		ret = 0;
		if (copy_to_user(&ifr->ifr_name, kni->net_dev->name, IFNAMSIZ)
		    || put_user(q->flags, &ifr->ifr_flags))
			ret = -EFAULT;
		dev_put(kni->net_dev);
		return ret;
490 KNI_DBG("TUNGETFEATURES\n");
491 u = IFF_TAP | IFF_NO_PI;
492 #ifdef RTE_KNI_VHOST_VNET_HDR_EN
500 KNI_DBG("TUNSETSNDBUF\n");
507 case TUNGETVNETHDRSZ:
511 KNI_DBG("TUNGETVNETHDRSZ %d\n", s);
514 case TUNSETVNETHDRSZ:
517 if (s < (int)sizeof(struct virtio_net_hdr))
520 KNI_DBG("TUNSETVNETHDRSZ %d\n", s);
525 KNI_DBG("TUNSETOFFLOAD %lx\n", arg);
526 #ifdef RTE_KNI_VHOST_VNET_HDR_EN
527 /* not support any offload yet */
528 if (!(q->flags & IFF_VNET_HDR))
537 KNI_DBG("NOT SUPPORT\n");
static int
kni_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
		      unsigned long arg)
{
	/* 32-bit app on 64-bit OS to be supported later */
	KNI_PRINT("Not implemented.\n");

	return -EINVAL;
}
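/*
 * Busy-wait (in 1 ms steps) while vq_status still equals
 * BE_STOP | BE_FINISH, giving the RX path a chance to finish with the
 * queue before it is torn down.
 */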
#define KNI_VHOST_WAIT_WQ_SAFE()                        \
do {                                                    \
	while ((BE_FINISH | BE_STOP) == kni->vq_status) \
		msleep(1);                              \
} while (0)
static int
kni_sock_release(struct socket *sock)
{
	struct kni_vhost_queue *q =
		container_of(sock->sk, struct kni_vhost_queue, sk);
	struct kni_dev *kni;

	if (q == NULL)
		return 0;

	if (NULL != (kni = q->kni)) {
		kni->vq_status = BE_STOP;
		KNI_VHOST_WAIT_WQ_SAFE();
		kni->vhost_queue = NULL;
		q->kni = NULL;
	}

	if (q->sockfd != -1)
		q->sockfd = -1;

	sk_set_socket(&q->sk, NULL);
	sock->sk = NULL;

	sock_put(&q->sk);

	KNI_DBG("dummy sock release done\n");

	return 0;
}
static int
kni_sock_getname(struct socket *sock, struct sockaddr *addr,
		 int *sockaddr_len, int peer)
{
	KNI_DBG("dummy sock getname\n");
	((struct sockaddr_ll *)addr)->sll_family = AF_PACKET;
	return 0;
}
static const struct proto_ops kni_socket_ops = {
	.getname = kni_sock_getname,
	.sendmsg = kni_sock_sndmsg,
	.recvmsg = kni_sock_rcvmsg,
	.release = kni_sock_release,
	.poll    = kni_sock_poll,
	.ioctl   = kni_sock_ioctl,
	.compat_ioctl = kni_sock_compat_ioctl,
};
static void
kni_sk_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
#ifdef SOCKWQ_ASYNC_NOSPACE
	    !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
#else
	    !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
#endif
		return;
	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(
			wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}
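/*
 * Socket destructor: drain any skbs still parked on the receive queue,
 * then free the per-queue skb cache and its fifo.
 */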
static void
kni_sk_destruct(struct sock *sk)
{
	struct kni_vhost_queue *q =
		container_of(sk, struct kni_vhost_queue, sk);

	if (!q)
		return;

	/* make sure there's no packet in buffer */
	while (skb_dequeue(&sk->sk_receive_queue) != NULL)
		;

	mb();

	if (q->fifo != NULL) {
		kfree(q->fifo);
		q->fifo = NULL;
	}

	if (q->cache != NULL) {
		kfree(q->cache);
		q->cache = NULL;
	}
}
static int
kni_vhost_backend_init(struct kni_dev *kni)
{
	struct kni_vhost_queue *q;
	struct net *net = current->nsproxy->net_ns;
	int err, i, sockfd;
	struct rte_kni_fifo *fifo;
	struct sk_buff *elem;

	if (kni->vhost_queue != NULL)
		return -1;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
	q = (struct kni_vhost_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
			&kni_raw_proto, 0);
#else
	q = (struct kni_vhost_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
			&kni_raw_proto);
#endif
	if (!q)
		return -ENOMEM;

	err = sock_create_lite(AF_UNSPEC, SOCK_RAW, IPPROTO_RAW, &q->sock);
	if (err)
		goto free_sk;

	sockfd = kni_sock_map_fd(q->sock);
	if (sockfd < 0) {
		err = sockfd;
		goto free_sock;
	}

	/* cache init */
	q->cache = kzalloc(
		RTE_KNI_VHOST_MAX_CACHE_SIZE * sizeof(struct sk_buff),
		GFP_KERNEL);
	if (!q->cache) {
		err = -ENOMEM;
		goto free_fd;
	}

	fifo = kzalloc(RTE_KNI_VHOST_MAX_CACHE_SIZE * sizeof(void *)
			+ sizeof(struct rte_kni_fifo), GFP_KERNEL);
	if (!fifo) {
		err = -ENOMEM;
		goto free_cache;
	}

	kni_fifo_init(fifo, RTE_KNI_VHOST_MAX_CACHE_SIZE);

	for (i = 0; i < RTE_KNI_VHOST_MAX_CACHE_SIZE; i++) {
		elem = &q->cache[i];
		kni_fifo_put(fifo, (void **)&elem, 1);
	}
	q->fifo = fifo;
	/* store sockfd in vhost_queue */
	q->sockfd = sockfd;

	/* init socket */
	q->sock->type = SOCK_RAW;
	q->sock->state = SS_CONNECTED;
	q->sock->ops = &kni_socket_ops;
	sock_init_data(q->sock, &q->sk);

	/* init sock data */
	q->sk.sk_write_space = kni_sk_write_space;
	q->sk.sk_destruct = kni_sk_destruct;
	q->flags = IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
#ifdef RTE_KNI_VHOST_VNET_HDR_EN
	q->flags |= IFF_VNET_HDR;
#endif

	/* bind kni_dev with vhost_queue */
	q->kni = kni;
	kni->vhost_queue = q;

	wmb();

	kni->vq_status = BE_START;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	KNI_DBG("backend init sockfd=%d, sock->wq=0x%16llx,sk->sk_wq=0x%16llx",
		q->sockfd, (uint64_t)q->sock->wq,
		(uint64_t)q->sk.sk_wq);
#else
	KNI_DBG("backend init sockfd=%d, sock->wait at 0x%16llx,sk->sk_sleep=0x%16llx",
		q->sockfd, (uint64_t)&q->sock->wait,
		(uint64_t)q->sk.sk_sleep);
#endif

	return 0;

free_cache:
	kfree(q->cache);
	q->cache = NULL;

free_fd:
	put_unused_fd(sockfd);

free_sock:
	q->kni = NULL;
	kni->vhost_queue = NULL;
	kni->vq_status |= BE_FINISH;
	sock_release(q->sock);
	q->sock->ops = NULL;
	q->sock = NULL;

free_sk:
	sk_free((struct sock *)q);

	return err;
}
/* kni vhost sock sysfs */
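/*
 * Typical usage from user space (interface name "vEth0" is just an
 * example):
 *   echo 1 > /sys/class/net/vEth0/sock_en   # create the backend socket
 *   cat /sys/class/net/vEth0/sock_fd        # fd to hand to vhost-net
 */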
static ssize_t
show_sock_fd(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	struct net_device *net_dev = container_of(dev, struct net_device, dev);
	struct kni_dev *kni = netdev_priv(net_dev);
	int sockfd = -1;

	if (kni->vhost_queue != NULL)
		sockfd = kni->vhost_queue->sockfd;
	return snprintf(buf, 10, "%d\n", sockfd);
}
static ssize_t
show_sock_en(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	struct net_device *net_dev = container_of(dev, struct net_device, dev);
	struct kni_dev *kni = netdev_priv(net_dev);

	return snprintf(buf, 10, "%u\n", (kni->vhost_queue == NULL ? 0 : 1));
}
static ssize_t
set_sock_en(struct device *dev, struct device_attribute *attr,
	    const char *buf, size_t count)
{
	struct net_device *net_dev = container_of(dev, struct net_device, dev);
	struct kni_dev *kni = netdev_priv(net_dev);
	unsigned long en;
	int err = 0;

	if (kstrtoul(buf, 0, &en) != 0)
		return -EINVAL;

	if (en)
		err = kni_vhost_backend_init(kni);

	return err ? err : count;
}
static DEVICE_ATTR(sock_fd, S_IRUGO | S_IRUSR, show_sock_fd, NULL);
static DEVICE_ATTR(sock_en, S_IRUGO | S_IWUSR, show_sock_en, set_sock_en);

static struct attribute *dev_attrs[] = {
	&dev_attr_sock_fd.attr,
	&dev_attr_sock_en.attr,
	NULL,
};

static const struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
int
kni_vhost_backend_release(struct kni_dev *kni)
{
	struct kni_vhost_queue *q = kni->vhost_queue;

	if (q == NULL)
		return 0;

	/* detach from kni */
	q->kni = NULL;

	KNI_DBG("release backend done\n");

	return 0;
}
int
kni_vhost_init(struct kni_dev *kni)
{
	struct net_device *dev = kni->net_dev;

	if (sysfs_create_group(&dev->dev.kobj, &dev_attr_grp))
		sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);

	kni->vq_status = BE_STOP;

	KNI_DBG("kni_vhost_init done\n");

	return 0;
}