/*-
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 * Intel Corporation
 */

#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>
#include <linux/virtio_net.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/nsproxy.h>
#include <linux/sched.h>
#include <linux/if_tun.h>
#include <linux/version.h>

#include "kni_dev.h"
#include "kni_fifo.h"

/* must be a power of two; 4 matches the upstream KNI default */
#define RX_BURST_SZ 4

extern void put_unused_fd(unsigned int fd);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
extern struct file*
sock_alloc_file(struct socket *sock,
		int flags, const char *dname);

extern int get_unused_fd_flags(unsigned flags);

extern void fd_install(unsigned int fd, struct file *file);

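/*
 * sock_map_fd() is not exported on kernels >= 3.7, so map the socket
 * to a file descriptor by hand: reserve an fd, wrap the socket in a
 * struct file and install it into the caller's fd table.
 */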
static int kni_sock_map_fd(struct socket *sock)
{
	struct file *file;
	int fd = get_unused_fd_flags(0);

	if (fd < 0)
		return fd;

	file = sock_alloc_file(sock, 0, NULL);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}
	fd_install(fd, file);
	return fd;
}
#else
#define kni_sock_map_fd(s) sock_map_fd(s, 0)
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) */

static struct proto kni_raw_proto = {
	.name = "kni_vhost",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct kni_vhost_queue),
};

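/*
 * Transmit path: copy one packet from the vhost iovec into an mbuf
 * taken from alloc_q and hand it to the DPDK application via tx_q.
 * Frames shorter than the minimum Ethernet length are zero-padded.
 */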
static inline int
kni_vhost_net_tx(struct kni_dev *kni, struct iovec *iov,
		 unsigned offset, unsigned len)
{
	struct rte_kni_mbuf *pkt_kva = NULL;
	struct rte_kni_mbuf *pkt_va = NULL;
	int ret;

	KNI_DBG_TX("tx offset=%d, len=%d, iovlen=%d\n",
		   offset, len, (int)iov->iov_len);

	/**
	 * Check that there is at least one free entry in tx_q and
	 * one entry in alloc_q.
	 */
	if (kni_fifo_free_count(kni->tx_q) == 0 ||
	    kni_fifo_count(kni->alloc_q) == 0) {
		/**
		 * If there is no free entry in tx_q or no entry in
		 * alloc_q, drop the packet and return.
		 */
		goto drop;
	}

	/* dequeue a mbuf from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va, 1);
	if (likely(ret == 1)) {
		void *data_kva;

		pkt_kva = (void *)pkt_va - kni->mbuf_va + kni->mbuf_kva;
		data_kva = pkt_kva->buf_addr + pkt_kva->data_off
			- kni->mbuf_va + kni->mbuf_kva;

		memcpy_fromiovecend(data_kva, iov, offset, len);
		if (unlikely(len < ETH_ZLEN)) {
			memset(data_kva + len, 0, ETH_ZLEN - len);
			len = ETH_ZLEN;
		}
		pkt_kva->pkt_len = len;
		pkt_kva->data_len = len;

		/* enqueue mbuf into tx_q */
		ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va, 1);
		if (unlikely(ret != 1)) {
			/* Failing should not happen */
			KNI_ERR("Fail to enqueue mbuf into tx_q\n");
			goto drop;
		}
	} else {
		/* Failing should not happen */
		KNI_ERR("Fail to dequeue mbuf from alloc_q\n");
		goto drop;
	}

	/* update statistics */
	kni->stats.tx_bytes += len;
	kni->stats.tx_packets++;

	return 0;

drop:
	/* update statistics */
	kni->stats.tx_dropped++;

	return 0;
}

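/*
 * Receive path: take one packet off the socket receive queue (where
 * kni_chk_vhost_rx() parked the mbuf pointer), copy the payload into
 * the vhost iovec and return the mbuf to the application via free_q.
 * Returns the packet length, or 0 if nothing was copied.
 */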
static inline int
kni_vhost_net_rx(struct kni_dev *kni, struct iovec *iov,
		 unsigned offset, unsigned len)
{
	uint32_t pkt_len;
	struct rte_kni_mbuf *kva;
	struct rte_kni_mbuf *va;
	void *data_kva;
	struct sk_buff *skb;
	struct kni_vhost_queue *q = kni->vhost_queue;

	if (unlikely(q == NULL))
		return 0;

	/* ensure at least one entry in free_q */
	if (unlikely(kni_fifo_free_count(kni->free_q) == 0))
		return 0;

	skb = skb_dequeue(&q->sk.sk_receive_queue);
	if (unlikely(skb == NULL))
		return 0;

	kva = (struct rte_kni_mbuf *)skb->data;

	/* free skb to cache */
	skb->data = NULL;
	if (unlikely(1 != kni_fifo_put(q->fifo, (void **)&skb, 1)))
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue entries into rx cache fifo\n");

	pkt_len = kva->data_len;
	if (unlikely(pkt_len > len))
		goto drop;

	KNI_DBG_RX("rx offset=%d, len=%d, pkt_len=%d, iovlen=%d\n",
		   offset, len, pkt_len, (int)iov->iov_len);

	data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va + kni->mbuf_kva;
	if (unlikely(memcpy_toiovecend(iov, data_kva, offset, pkt_len)))
		goto drop;

	/* Update statistics */
	kni->stats.rx_bytes += pkt_len;
	kni->stats.rx_packets++;

	/* enqueue mbufs into free_q */
	va = (void *)kva - kni->mbuf_kva + kni->mbuf_va;
	if (unlikely(1 != kni_fifo_put(kni->free_q, (void **)&va, 1)))
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue entries into free_q\n");

	KNI_DBG_RX("receive done %d\n", pkt_len);

	return pkt_len;

drop:
	/* Update drop statistics */
	kni->stats.rx_dropped++;

	return 0;
}

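/*
 * poll() backend for the dummy socket: readable when mbufs are
 * pending in rx_q, writable when the socket send buffer has room.
 */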
static unsigned int
kni_sock_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct kni_vhost_queue *q =
		container_of(sock->sk, struct kni_vhost_queue, sk);
	struct kni_dev *kni;
	unsigned int mask = 0;

	if (unlikely(q == NULL || q->kni == NULL))
		return POLLERR;

	kni = q->kni;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
	KNI_DBG("start kni_poll on group %d, wq 0x%16llx\n",
		kni->group_id, (uint64_t)sock->wq);
#else
	KNI_DBG("start kni_poll on group %d, wait at 0x%16llx\n",
		kni->group_id, (uint64_t)&sock->wait);
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
	poll_wait(file, &sock->wq->wait, wait);
#else
	poll_wait(file, &sock->wait, wait);
#endif

	if (kni_fifo_count(kni->rx_q) > 0)
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock->flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}

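/*
 * Attach one mbuf to a cached skb and queue it on the socket receive
 * queue.  Only the kernel-virtual mbuf pointer is stored; the payload
 * is copied later in kni_vhost_net_rx().
 */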
static inline void
kni_vhost_enqueue(struct kni_dev *kni, struct kni_vhost_queue *q,
		  struct sk_buff *skb, struct rte_kni_mbuf *va)
{
	struct rte_kni_mbuf *kva;

	kva = (void *)(va) - kni->mbuf_va + kni->mbuf_kva;
	(skb)->data = (unsigned char *)kva;
	(skb)->len = kva->data_len;
	skb_queue_tail(&q->sk.sk_receive_queue, skb);
}

static inline void
kni_vhost_enqueue_burst(struct kni_dev *kni, struct kni_vhost_queue *q,
			struct sk_buff **skb, struct rte_kni_mbuf **va)
{
	int i;

	for (i = 0; i < RX_BURST_SZ; skb++, va++, i++)
		kni_vhost_enqueue(kni, q, *skb, *va);
}

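/*
 * Called from the KNI kernel thread: move up to RX_BURST_SZ mbufs per
 * pass from rx_q onto the socket receive queue, using skbs from the
 * per-queue cache, and wake any reader blocked in poll() when data is
 * available.
 */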
int
kni_chk_vhost_rx(struct kni_dev *kni)
{
	struct kni_vhost_queue *q = kni->vhost_queue;
	unsigned nb_in, nb_mbuf, nb_skb;
	const unsigned BURST_MASK = RX_BURST_SZ - 1;
	unsigned nb_burst, nb_backlog, i;
	struct sk_buff *skb[RX_BURST_SZ];
	struct rte_kni_mbuf *va[RX_BURST_SZ];

	if (unlikely(BE_STOP & kni->vq_status)) {
		kni->vq_status |= BE_FINISH;
		return 0;
	}

	if (unlikely(q == NULL))
		return 0;

	nb_skb = kni_fifo_count(q->fifo);
	nb_mbuf = kni_fifo_count(kni->rx_q);

	nb_in = min(nb_mbuf, nb_skb);
	nb_in = min(nb_in, (unsigned)RX_BURST_SZ);
	nb_burst   = (nb_in & ~BURST_MASK);
	nb_backlog = (nb_in & BURST_MASK);

	/* enqueue skb_queue per BURST_SIZE bulk */
	if (0 != nb_burst) {
		if (unlikely(RX_BURST_SZ != kni_fifo_get(
				     kni->rx_q, (void **)&va,
				     RX_BURST_SZ)))
			goto except;

		if (unlikely(RX_BURST_SZ != kni_fifo_get(
				     q->fifo, (void **)&skb,
				     RX_BURST_SZ)))
			goto except;

		kni_vhost_enqueue_burst(kni, q, skb, va);
	}

	/* all leftover, do one by one */
	for (i = 0; i < nb_backlog; ++i) {
		if (unlikely(1 != kni_fifo_get(
				     kni->rx_q, (void **)&va, 1)))
			goto except;

		if (unlikely(1 != kni_fifo_get(
				     q->fifo, (void **)&skb, 1)))
			goto except;

		kni_vhost_enqueue(kni, q, *skb, *va);
	}

	/* On-demand wake up */
	if ((nb_in == RX_BURST_SZ) || (nb_skb == 0) ||
	    ((nb_mbuf < RX_BURST_SZ) && (nb_mbuf != 0))) {
		wake_up_interruptible_poll(sk_sleep(&q->sk),
			POLLIN | POLLRDNORM | POLLRDBAND);
		KNI_DBG_RX("RX CHK KICK nb_mbuf %d, nb_skb %d, nb_in %d\n",
			   nb_mbuf, nb_skb, nb_in);
	}

	return 0;

except:
	/* Failing should not happen */
	KNI_ERR("Fail to enqueue fifo, it shouldn't happen\n");
	BUG_ON(1);

	return 0;
}

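/*
 * sendmsg() on the dummy socket: strip the optional virtio-net header
 * and pass the remaining payload to kni_vhost_net_tx().
 */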
static int
kni_sock_sndmsg(struct kiocb *iocb, struct socket *sock,
		struct msghdr *m, size_t total_len)
{
	struct kni_vhost_queue *q =
		container_of(sock->sk, struct kni_vhost_queue, sk);
	int vnet_hdr_len = 0;
	unsigned long len = total_len;

	if (unlikely(q == NULL || q->kni == NULL))
		return 0;

	KNI_DBG_TX("kni_sndmsg len %ld, flags 0x%08x, nb_iov %d\n",
		   len, q->flags, (int)m->msg_iovlen);

#ifdef RTE_KNI_VHOST_VNET_HDR_EN
	if (likely(q->flags & IFF_VNET_HDR)) {
		vnet_hdr_len = q->vnet_hdr_sz;
		if (unlikely(len < vnet_hdr_len))
			return 0;
		len -= vnet_hdr_len;
	}
#endif

	if (unlikely(len < ETH_HLEN + q->vnet_hdr_sz))
		return 0;

	return kni_vhost_net_tx(q->kni, m->msg_iov, vnet_hdr_len, len);
}

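/*
 * recvmsg() on the dummy socket: fetch one packet through
 * kni_vhost_net_rx() and, when vnet header support is compiled in,
 * prepend an all-zero virtio_net_hdr (no offloads are advertised).
 */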
static int
kni_sock_rcvmsg(struct kiocb *iocb, struct socket *sock,
		struct msghdr *m, size_t len, int flags)
{
	int vnet_hdr_len = 0;
	int pkt_len = 0;
	struct kni_vhost_queue *q =
		container_of(sock->sk, struct kni_vhost_queue, sk);
	static struct virtio_net_hdr
		__attribute__ ((unused)) vnet_hdr = {
		.flags = 0,
		.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};

	if (unlikely(q == NULL || q->kni == NULL))
		return 0;

#ifdef RTE_KNI_VHOST_VNET_HDR_EN
	if (likely(q->flags & IFF_VNET_HDR)) {
		vnet_hdr_len = q->vnet_hdr_sz;
		/* len is unsigned, so check before subtracting */
		if (len < (size_t)vnet_hdr_len)
			return -EINVAL;
		len -= vnet_hdr_len;
	}
#endif

	if (unlikely(0 == (pkt_len = kni_vhost_net_rx(q->kni,
		m->msg_iov, vnet_hdr_len, len))))
		return 0;

#ifdef RTE_KNI_VHOST_VNET_HDR_EN
	/* no need to copy hdr when no pkt received */
	if (unlikely(memcpy_toiovecend(m->msg_iov,
		(void *)&vnet_hdr, 0, vnet_hdr_len)))
		return -EFAULT;
#endif
	KNI_DBG_RX("kni_rcvmsg expect_len %ld, flags 0x%08x, pkt_len %d\n",
		   (unsigned long)len, q->flags, pkt_len);

	return pkt_len + vnet_hdr_len;
}

/* dummy tap-like ioctl */
static int
kni_sock_ioctl(struct socket *sock, unsigned int cmd,
	       unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	struct kni_vhost_queue *q =
		container_of(sock->sk, struct kni_vhost_queue, sk);
	struct kni_dev *kni;
	unsigned int u;
	int __user *sp = argp;
	int s;
	int ret;

	KNI_DBG("tap ioctl cmd 0x%08x\n", cmd);

	switch (cmd) {
	case TUNSETIFF:
		KNI_DBG("TUNSETIFF\n");
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		if ((u & ~IFF_VNET_HDR) != (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = u;

		return ret;

	case TUNGETIFF:
		KNI_DBG("TUNGETIFF\n");
		rcu_read_lock_bh();
		kni = rcu_dereference_bh(q->kni);
		if (kni)
			dev_hold(kni->net_dev);
		rcu_read_unlock_bh();

		if (!kni)
			return -ENOLINK;

		ret = 0;
		if (copy_to_user(&ifr->ifr_name, kni->net_dev->name, IFNAMSIZ) ||
		    put_user(q->flags, &ifr->ifr_flags))
			ret = -EFAULT;
		dev_put(kni->net_dev);
		return ret;

	case TUNGETFEATURES:
		KNI_DBG("TUNGETFEATURES\n");
		u = IFF_TAP | IFF_NO_PI;
#ifdef RTE_KNI_VHOST_VNET_HDR_EN
		u |= IFF_VNET_HDR;
#endif
		if (put_user(u, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		KNI_DBG("TUNSETSNDBUF\n");
		if (get_user(u, up))
			return -EFAULT;

		q->sk.sk_sndbuf = u;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		KNI_DBG("TUNGETVNETHDRSZ %d\n", s);
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		KNI_DBG("TUNSETVNETHDRSZ %d\n", s);
		q->vnet_hdr_sz = s;
		return 0;

	case TUNSETOFFLOAD:
		KNI_DBG("TUNSETOFFLOAD %lx\n", arg);
#ifdef RTE_KNI_VHOST_VNET_HDR_EN
		/* no offload is supported yet */
		if (!(q->flags & IFF_VNET_HDR))
			return -EINVAL;
		return 0;
#else
		return -EINVAL;
#endif

	default:
		KNI_DBG("NOT SUPPORT\n");
		return -EINVAL;
	}
}

static int
kni_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
		      unsigned long arg)
{
	/* 32-bit app on 64-bit OS to be supported later */
	KNI_PRINT("Not implemented.\n");

	return -EINVAL;
}

#define KNI_VHOST_WAIT_WQ_SAFE()                                \
	do {                                                    \
		while ((BE_FINISH | BE_STOP) == kni->vq_status) \
			msleep(1);                              \
	} while (0)

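/*
 * release() for the dummy socket: detach from the kni device, waiting
 * for the kernel thread to acknowledge the stop request, then drop the
 * embedded sock reference.
 */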
static int
kni_sock_release(struct socket *sock)
{
	struct kni_vhost_queue *q =
		container_of(sock->sk, struct kni_vhost_queue, sk);
	struct kni_dev *kni;

	if (q == NULL)
		return 0;

	if (NULL != (kni = q->kni)) {
		kni->vq_status = BE_STOP;
		KNI_VHOST_WAIT_WQ_SAFE();
		kni->vhost_queue = NULL;
		q->kni = NULL;
	}

	if (q->sockfd != -1)
		q->sockfd = -1;

	sk_set_socket(&q->sk, NULL);
	sock->sk = NULL;

	sock_put(&q->sk);

	KNI_DBG("dummy sock release done\n");

	return 0;
}

static int
kni_sock_getname(struct socket *sock,
		 struct sockaddr *addr,
		 int *sockaddr_len, int peer)
{
	KNI_DBG("dummy sock getname\n");
	((struct sockaddr_ll *)addr)->sll_family = AF_PACKET;
	return 0;
}

static const struct proto_ops kni_socket_ops = {
	.getname = kni_sock_getname,
	.sendmsg = kni_sock_sndmsg,
	.recvmsg = kni_sock_rcvmsg,
	.release = kni_sock_release,
	.poll    = kni_sock_poll,
	.ioctl   = kni_sock_ioctl,
	.compat_ioctl = kni_sock_compat_ioctl,
};

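/*
 * sk write-space callback: wake writers blocked in poll() once the
 * socket becomes writable again.
 */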
static void
kni_sk_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCK_ASYNC_NOSPACE,
				&sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(
			wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}

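/*
 * sk destructor: drain any queued skbs and free the per-queue skb
 * cache and its fifo.
 */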
static void
kni_sk_destruct(struct sock *sk)
{
	struct kni_vhost_queue *q =
		container_of(sk, struct kni_vhost_queue, sk);

	if (!q)
		return;

	/* make sure there's no packet in buffer */
	while (skb_dequeue(&sk->sk_receive_queue) != NULL)
		;

	mb();

	if (q->fifo != NULL) {
		kfree(q->fifo);
		q->fifo = NULL;
	}

	if (q->cache != NULL) {
		kfree(q->cache);
		q->cache = NULL;
	}
}

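/*
 * Create the vhost backend for a KNI device: allocate the dummy
 * socket, map it to a file descriptor, pre-allocate the skb cache and
 * its fifo, then publish the queue on the kni device so the kernel
 * thread starts feeding it.
 */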
static int
kni_vhost_backend_init(struct kni_dev *kni)
{
	struct kni_vhost_queue *q;
	struct net *net = current->nsproxy->net_ns;
	int err, i, sockfd;
	struct rte_kni_fifo *fifo;
	struct sk_buff *elem;

	if (kni->vhost_queue != NULL)
		return -1;

	if (!(q = (struct kni_vhost_queue *)sk_alloc(
		      net, AF_UNSPEC, GFP_KERNEL, &kni_raw_proto)))
		return -ENOMEM;

	err = sock_create_lite(AF_UNSPEC, SOCK_RAW, IPPROTO_RAW, &q->sock);
	if (err)
		goto free_sk;

	sockfd = kni_sock_map_fd(q->sock);
	if (sockfd < 0) {
		err = sockfd;
		goto free_sock;
	}

	/* cache init */
	q->cache = (struct sk_buff *)
		kzalloc(RTE_KNI_VHOST_MAX_CACHE_SIZE * sizeof(struct sk_buff),
			GFP_KERNEL);
	if (!q->cache) {
		err = -ENOMEM;
		goto free_fd;
	}

	fifo = (struct rte_kni_fifo *)
		kzalloc(RTE_KNI_VHOST_MAX_CACHE_SIZE * sizeof(void *)
			+ sizeof(struct rte_kni_fifo), GFP_KERNEL);
	if (!fifo) {
		err = -ENOMEM;
		goto free_cache;
	}

	kni_fifo_init(fifo, RTE_KNI_VHOST_MAX_CACHE_SIZE);

	for (i = 0; i < RTE_KNI_VHOST_MAX_CACHE_SIZE; i++) {
		elem = &q->cache[i];
		kni_fifo_put(fifo, (void **)&elem, 1);
	}
	q->fifo = fifo;

	/* store sockfd in vhost_queue */
	q->sockfd = sockfd;

	/* init socket */
	q->sock->type = SOCK_RAW;
	q->sock->state = SS_CONNECTED;
	q->sock->ops = &kni_socket_ops;
	sock_init_data(q->sock, &q->sk);

	/* init sock data */
	q->sk.sk_write_space = kni_sk_write_space;
	q->sk.sk_destruct = kni_sk_destruct;
	q->flags = IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
#ifdef RTE_KNI_VHOST_VNET_HDR_EN
	q->flags |= IFF_VNET_HDR;
#endif

	/* bind kni_dev with vhost_queue */
	q->kni = kni;
	kni->vhost_queue = q;

	wmb();

	kni->vq_status = BE_START;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
	KNI_DBG("backend init sockfd=%d, sock->wq=0x%16llx,"
		"sk->sk_wq=0x%16llx",
		q->sockfd, (uint64_t)q->sock->wq,
		(uint64_t)q->sk.sk_wq);
#else
	KNI_DBG("backend init sockfd=%d, sock->wait at 0x%16llx,"
		"sk->sk_sleep=0x%16llx",
		q->sockfd, (uint64_t)&q->sock->wait,
		(uint64_t)q->sk.sk_sleep);
#endif

	return 0;

free_cache:
	kfree(q->cache);
	q->cache = NULL;

free_fd:
	put_unused_fd(sockfd);

free_sock:
	q->kni = NULL;
	kni->vhost_queue = NULL;
	kni->vq_status |= BE_FINISH;
	sock_release(q->sock);
	q->sock->ops = NULL;
	q->sock = NULL;

free_sk:
	sk_free((struct sock *)q);

	return err;
}

/* kni vhost sock sysfs */
static ssize_t
show_sock_fd(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	struct net_device *net_dev = container_of(dev, struct net_device, dev);
	struct kni_dev *kni = netdev_priv(net_dev);
	int sockfd = -1;

	if (kni->vhost_queue != NULL)
		sockfd = kni->vhost_queue->sockfd;
	return snprintf(buf, 10, "%d\n", sockfd);
}

static ssize_t
show_sock_en(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	struct net_device *net_dev = container_of(dev, struct net_device, dev);
	struct kni_dev *kni = netdev_priv(net_dev);

	return snprintf(buf, 10, "%u\n", (kni->vhost_queue == NULL ? 0 : 1));
}

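/*
 * Writing a non-zero value to sock_en creates the vhost backend for
 * this interface; the resulting fd can then be read back from sock_fd,
 * e.g. (interface name is only an example):
 *   echo 1 > /sys/class/net/vEth0/sock_en
 *   cat /sys/class/net/vEth0/sock_fd
 */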
static ssize_t
set_sock_en(struct device *dev, struct device_attribute *attr,
	    const char *buf, size_t count)
{
	struct net_device *net_dev = container_of(dev, struct net_device, dev);
	struct kni_dev *kni = netdev_priv(net_dev);
	unsigned long en;
	int err = 0;

	if (0 != kstrtoul(buf, 0, &en))
		return -EINVAL;

	if (en)
		err = kni_vhost_backend_init(kni);

	return err ? err : count;
}

static DEVICE_ATTR(sock_fd, S_IRUGO | S_IRUSR, show_sock_fd, NULL);
static DEVICE_ATTR(sock_en, S_IRUGO | S_IWUSR, show_sock_en, set_sock_en);

static struct attribute *dev_attrs[] = {
	&dev_attr_sock_fd.attr,
	&dev_attr_sock_en.attr,
	NULL,
};

static const struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};

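/*
 * Detach the vhost queue from the kni device; the socket itself is
 * torn down later via kni_sock_release()/kni_sk_destruct() when its
 * file descriptor is closed.
 */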
int
kni_vhost_backend_release(struct kni_dev *kni)
{
	struct kni_vhost_queue *q = kni->vhost_queue;

	if (q == NULL)
		return 0;

	/* detach from kni */
	q->kni = NULL;

	KNI_DBG("release backend done\n");

	return 0;
}

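/*
 * Register the sock_fd/sock_en sysfs attributes for the KNI net device
 * and mark the backend as stopped until user space enables it.
 */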
int
kni_vhost_init(struct kni_dev *kni)
{
	struct net_device *dev = kni->net_dev;

	if (sysfs_create_group(&dev->dev.kobj, &dev_attr_grp))
		sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);

	kni->vq_status = BE_STOP;

	KNI_DBG("kni_vhost_init done\n");

	return 0;
}