/*
 * GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *   General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *   The full GNU General Public License is included in this distribution
 *   in the file called LICENSE.GPL.
 *
 *   Contact Information:
 *   Intel Corporation
 */

/*
 * This code is inspired from the book "Linux Device Drivers" by
 * Alessandro Rubini and Jonathan Corbet, published by O'Reilly & Associates
 */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h> /* eth_type_trans */
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/delay.h>

#include <exec-env/rte_kni_common.h>
#include <kni_fifo.h>
#include "kni_dev.h"
#define WD_TIMEOUT 5 /* jiffies */

#define KNI_WAIT_RESPONSE_TIMEOUT 300 /* 3 seconds */
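
/*
 * Note (added for clarity, not in the original sources): the driver and
 * the DPDK application communicate through four shared-memory FIFOs per
 * interface, manipulated below with kni_fifo_get()/kni_fifo_put():
 *   rx_q    - mbufs sent by the application, to be received by the kernel
 *   tx_q    - mbufs transmitted by the kernel, to be read by the application
 *   alloc_q - empty mbufs pre-allocated by the application for kernel tx
 *   free_q  - consumed mbufs returned to the application for recycling
 */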
/* typedef for rx function */
typedef void (*kni_net_rx_t)(struct kni_dev *kni);

static int kni_net_tx(struct sk_buff *skb, struct net_device *dev);
static void kni_net_rx_normal(struct kni_dev *kni);
static void kni_net_rx_lo_fifo(struct kni_dev *kni);
static void kni_net_rx_lo_fifo_skb(struct kni_dev *kni);
static int kni_net_process_request(struct kni_dev *kni,
		struct rte_kni_request *req);

/* kni rx function pointer, with default to normal rx */
static kni_net_rx_t kni_net_rx_func = kni_net_rx_normal;
/* physical address to kernel virtual address */
static void *
pa2kva(void *pa)
{
	return phys_to_virt((unsigned long)pa);
}
/* physical address to virtual address */
static void *
pa2va(void *pa, struct rte_kni_mbuf *m)
{
	void *va;

	va = (void *)((unsigned long)pa +
			(unsigned long)m->buf_addr -
			(unsigned long)m->buf_physaddr);
	return va;
}
/* mbuf data kernel virtual address from mbuf kernel virtual address */
static void *
kva2data_kva(struct rte_kni_mbuf *m)
{
	return phys_to_virt(m->buf_physaddr + m->data_off);
}
/* virtual address to physical address */
static void *
va2pa(void *va, struct rte_kni_mbuf *m)
{
	void *pa;

	pa = (void *)((unsigned long)va -
			((unsigned long)m->buf_addr -
			(unsigned long)m->buf_physaddr));
	return pa;
}
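
/*
 * Illustrative sketch (added for this edit, not part of the original
 * driver): each mbuf records both its userspace virtual address
 * (buf_addr) and its physical address (buf_physaddr), so the two
 * helpers above translate by applying the constant delta between the
 * two mappings. The round trip is therefore the identity, which a
 * debug build could check like this:
 */
static inline int
kni_addr_round_trip_ok(void *va, struct rte_kni_mbuf *m)
{
	/* va -> pa -> va must give back the original pointer */
	return pa2va(va2pa(va, m), m) == va;
}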
/*
 * Open and close
 */
static int
kni_net_open(struct net_device *dev)
{
	int ret;
	struct rte_kni_request req;
	struct kni_dev *kni = netdev_priv(dev);

	netif_start_queue(dev);

	memset(&req, 0, sizeof(req));
	req.req_id = RTE_KNI_REQ_CFG_NETWORK_IF;

	/* Setting if_up to non-zero means up */
	req.if_up = 1;
	ret = kni_net_process_request(kni, &req);

	return (ret == 0) ? req.result : ret;
}
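
/*
 * Note (added for clarity): bringing the interface up (e.g. with
 * `ifconfig <kni_if> up`) lands here; the request is forwarded to the
 * DPDK application through req_q, and the application's answer in
 * req.result becomes the return value of open.
 */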
static int
kni_net_release(struct net_device *dev)
{
	int ret;
	struct rte_kni_request req;
	struct kni_dev *kni = netdev_priv(dev);

	netif_stop_queue(dev); /* can't transmit any more */

	memset(&req, 0, sizeof(req));
	req.req_id = RTE_KNI_REQ_CFG_NETWORK_IF;

	/* Setting if_up to 0 means down */
	req.if_up = 0;
	ret = kni_net_process_request(kni, &req);

	return (ret == 0) ? req.result : ret;
}
/*
 * Configuration changes (passed on by ifconfig)
 */
static int
kni_net_config(struct net_device *dev, struct ifmap *map)
{
	if (dev->flags & IFF_UP) /* can't act on a running interface */
		return -EBUSY;

	/* ignore other fields */
	return 0;
}
/*
 * RX: normal working mode
 */
static void
kni_net_rx_normal(struct kni_dev *kni)
{
	unsigned ret;
	uint32_t len;
	unsigned i, num_rx, num_fq;
	struct rte_kni_mbuf *kva;
	void *data_kva;
	struct sk_buff *skb;
	struct net_device *dev = kni->net_dev;

	/* Get the number of free entries in free_q */
	num_fq = kni_fifo_free_count(kni->free_q);
	if (num_fq == 0) {
		/* No room on the free_q, bail out */
		return;
	}

	/* Calculate the number of entries to dequeue from rx_q */
	num_rx = min(num_fq, (unsigned)MBUF_BURST_SZ);

	/* Burst dequeue from rx_q */
	num_rx = kni_fifo_get(kni->rx_q, kni->pa, num_rx);
	if (num_rx == 0)
		return;

	/* Transfer received packets to netif */
	for (i = 0; i < num_rx; i++) {
		kva = pa2kva(kni->pa[i]);
		len = kva->pkt_len;
		data_kva = kva2data_kva(kva);
		kni->va[i] = pa2va(kni->pa[i], kva);

		skb = dev_alloc_skb(len + 2);
		if (!skb) {
			KNI_ERR("Out of mem, dropping pkts\n");
			/* Update statistics */
			kni->stats.rx_dropped++;
			continue;
		}

		/* Align IP on 16B boundary */
		skb_reserve(skb, 2);

		if (kva->nb_segs == 1) {
			memcpy(skb_put(skb, len), data_kva, len);
		} else {
			int nb_segs;
			int kva_nb_segs = kva->nb_segs;

			for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++) {
				memcpy(skb_put(skb, kva->data_len),
					data_kva, kva->data_len);

				if (!kva->next)
					break;

				kva = pa2kva(va2pa(kva->next, kva));
				data_kva = kva2data_kva(kva);
			}
		}

		skb->dev = dev;
		skb->protocol = eth_type_trans(skb, dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Call netif interface */
		netif_rx_ni(skb);

		/* Update statistics */
		kni->stats.rx_bytes += len;
		kni->stats.rx_packets++;
	}

	/* Burst enqueue mbufs into free_q */
	ret = kni_fifo_put(kni->free_q, kni->va, num_rx);
	if (ret != num_rx)
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue entries into free_q\n");
}
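
/*
 * Note (added for clarity): in the normal mode above, an mbuf makes one
 * trip through the kernel: it is dequeued from rx_q by physical
 * address, copied into a freshly allocated skb handed to the network
 * stack via netif_rx_ni(), and immediately recycled to userspace
 * through free_q. The 2-byte skb_reserve() keeps the IP header
 * 16-byte aligned behind the 14-byte Ethernet header.
 */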
/*
 * RX: loopback with enqueue/dequeue fifos.
 */
static void
kni_net_rx_lo_fifo(struct kni_dev *kni)
{
	unsigned ret;
	uint32_t len;
	unsigned i, num, num_rq, num_tq, num_aq, num_fq;
	struct rte_kni_mbuf *kva;
	void *data_kva;
	struct rte_kni_mbuf *alloc_kva;
	void *alloc_data_kva;

	/* Get the number of entries in rx_q */
	num_rq = kni_fifo_count(kni->rx_q);

	/* Get the number of free entries in tx_q */
	num_tq = kni_fifo_free_count(kni->tx_q);

	/* Get the number of entries in alloc_q */
	num_aq = kni_fifo_count(kni->alloc_q);

	/* Get the number of free entries in free_q */
	num_fq = kni_fifo_free_count(kni->free_q);

	/* Calculate the number of entries to be dequeued from rx_q */
	num = min(num_rq, num_tq);
	num = min(num, num_aq);
	num = min(num, num_fq);
	num = min(num, (unsigned)MBUF_BURST_SZ);

	/* Return if no entry to dequeue from rx_q */
	if (num == 0)
		return;

	/* Burst dequeue from rx_q */
	ret = kni_fifo_get(kni->rx_q, kni->pa, num);
	if (ret == 0)
		return; /* Failing should not happen */

	/* Dequeue entries from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, kni->alloc_pa, num);
	if (ret) {
		num = ret;
		/* Copy mbufs */
		for (i = 0; i < num; i++) {
			kva = pa2kva(kni->pa[i]);
			len = kva->pkt_len;
			data_kva = kva2data_kva(kva);
			kni->va[i] = pa2va(kni->pa[i], kva);

			alloc_kva = pa2kva(kni->alloc_pa[i]);
			alloc_data_kva = kva2data_kva(alloc_kva);
			kni->alloc_va[i] = pa2va(kni->alloc_pa[i], alloc_kva);

			memcpy(alloc_data_kva, data_kva, len);
			alloc_kva->pkt_len = len;
			alloc_kva->data_len = len;

			kni->stats.tx_bytes += len;
			kni->stats.rx_bytes += len;
		}

		/* Burst enqueue mbufs into tx_q */
		ret = kni_fifo_put(kni->tx_q, kni->alloc_va, num);
		if (ret != num)
			/* Failing should not happen */
			KNI_ERR("Fail to enqueue mbufs into tx_q\n");
	}

	/* Burst enqueue mbufs into free_q */
	ret = kni_fifo_put(kni->free_q, kni->va, num);
	if (ret != num)
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue mbufs into free_q\n");

	/**
	 * Update statistics; enqueue/dequeue failure is impossible,
	 * as all queues were checked at first.
	 */
	kni->stats.tx_packets += num;
	kni->stats.rx_packets += num;
}
/*
 * RX: loopback with enqueue/dequeue fifos and sk buffer copies.
 */
static void
kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
{
	unsigned ret;
	uint32_t len;
	unsigned i, num_rq, num_fq, num;
	struct rte_kni_mbuf *kva;
	void *data_kva;
	struct sk_buff *skb;
	struct net_device *dev = kni->net_dev;

	/* Get the number of entries in rx_q */
	num_rq = kni_fifo_count(kni->rx_q);

	/* Get the number of free entries in free_q */
	num_fq = kni_fifo_free_count(kni->free_q);

	/* Calculate the number of entries to dequeue from rx_q */
	num = min(num_rq, num_fq);
	num = min(num, (unsigned)MBUF_BURST_SZ);

	/* Return if no entry to dequeue from rx_q */
	if (num == 0)
		return;

	/* Burst dequeue mbufs from rx_q */
	ret = kni_fifo_get(kni->rx_q, kni->pa, num);
	if (ret == 0)
		return;

	/* Copy mbufs to sk buffer and then call tx interface */
	for (i = 0; i < num; i++) {
		kva = pa2kva(kni->pa[i]);
		len = kva->pkt_len;
		data_kva = kva2data_kva(kva);
		kni->va[i] = pa2va(kni->pa[i], kva);

		skb = dev_alloc_skb(len + 2);
		if (skb == NULL)
			KNI_ERR("Out of mem, dropping pkts\n");
		else {
			/* Align IP on 16B boundary */
			skb_reserve(skb, 2);
			memcpy(skb_put(skb, len), data_kva, len);
			skb->dev = dev;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			dev_kfree_skb(skb);
		}

		/* Simulate real usage, allocate/copy skb twice */
		skb = dev_alloc_skb(len + 2);
		if (skb == NULL) {
			KNI_ERR("Out of mem, dropping pkts\n");
			kni->stats.rx_dropped++;
			continue;
		}

		/* Align IP on 16B boundary */
		skb_reserve(skb, 2);

		if (kva->nb_segs == 1) {
			memcpy(skb_put(skb, len), data_kva, len);
		} else {
			int nb_segs;
			int kva_nb_segs = kva->nb_segs;

			for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++) {
				memcpy(skb_put(skb, kva->data_len),
					data_kva, kva->data_len);

				if (!kva->next)
					break;

				kva = pa2kva(va2pa(kva->next, kva));
				data_kva = kva2data_kva(kva);
			}
		}

		skb->dev = dev;
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		kni->stats.rx_bytes += len;
		kni->stats.rx_packets++;

		/* call tx interface */
		kni_net_tx(skb, dev);
	}

	/* enqueue all the mbufs from rx_q into free_q */
	ret = kni_fifo_put(kni->free_q, kni->va, num);
	if (ret != num)
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue mbufs into free_q\n");
}
/* rx interface */
void
kni_net_rx(struct kni_dev *kni)
{
	/**
	 * No need to check the function pointer for NULL,
	 * as it has a default value.
	 */
	(*kni_net_rx_func)(kni);
}
/*
 * Transmit a packet (called by the kernel)
 */
#ifdef RTE_KNI_VHOST
static int
kni_net_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct kni_dev *kni = netdev_priv(dev);

	dev_kfree_skb(skb);
	kni->stats.tx_dropped++;

	return NETDEV_TX_OK;
}
#else
static int
kni_net_tx(struct sk_buff *skb, struct net_device *dev)
{
	int len = 0;
	unsigned ret;
	struct kni_dev *kni = netdev_priv(dev);
	struct rte_kni_mbuf *pkt_kva = NULL;
	void *pkt_pa = NULL;
	void *pkt_va = NULL;

	/* save the timestamp */
#ifdef HAVE_TRANS_START_HELPER
	netif_trans_update(dev);
#else
	dev->trans_start = jiffies;
#endif

	/* Check if the length of skb is less than mbuf size */
	if (skb->len > kni->mbuf_size)
		goto drop;

	/**
	 * Check if it has at least one free entry in tx_q and
	 * one entry in alloc_q.
	 */
	if (kni_fifo_free_count(kni->tx_q) == 0 ||
			kni_fifo_count(kni->alloc_q) == 0) {
		/**
		 * If no free entry in tx_q or no entry in alloc_q,
		 * drop the skb and go out.
		 */
		goto drop;
	}

	/* dequeue a mbuf from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, &pkt_pa, 1);
	if (likely(ret == 1)) {
		void *data_kva;

		pkt_kva = pa2kva(pkt_pa);
		data_kva = kva2data_kva(pkt_kva);
		pkt_va = pa2va(pkt_pa, pkt_kva);

		len = skb->len;
		memcpy(data_kva, skb->data, len);
		if (unlikely(len < ETH_ZLEN)) {
			memset(data_kva + len, 0, ETH_ZLEN - len);
			len = ETH_ZLEN;
		}
		pkt_kva->pkt_len = len;
		pkt_kva->data_len = len;

		/* enqueue mbuf into tx_q */
		ret = kni_fifo_put(kni->tx_q, &pkt_va, 1);
		if (unlikely(ret != 1)) {
			/* Failing should not happen */
			KNI_ERR("Fail to enqueue mbuf into tx_q\n");
			goto drop;
		}
	} else {
		/* Failing should not happen */
		KNI_ERR("Fail to dequeue mbuf from alloc_q\n");
		goto drop;
	}

	/* Free skb and update statistics */
	dev_kfree_skb(skb);
	kni->stats.tx_bytes += len;
	kni->stats.tx_packets++;

	return NETDEV_TX_OK;

drop:
	/* Free skb and update statistics */
	dev_kfree_skb(skb);
	kni->stats.tx_dropped++;

	return NETDEV_TX_OK;
}
#endif
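
/*
 * Note (added for clarity): the tx path above is the mirror image of
 * rx: an empty mbuf is pulled from alloc_q, the skb payload is copied
 * into it (zero-padded up to the 60-byte ETH_ZLEN minimum frame size),
 * and the filled mbuf is enqueued on tx_q for the DPDK application to
 * transmit.
 */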
/*
 * Deal with a transmit timeout.
 */
static void
kni_net_tx_timeout(struct net_device *dev)
{
	struct kni_dev *kni = netdev_priv(dev);

	KNI_DBG("Transmit timeout at %ld, latency %ld\n", jiffies,
			jiffies - dev_trans_start(dev));

	kni->stats.tx_errors++;
	netif_wake_queue(dev);
}
/*
 * Ioctl commands
 */
static int
kni_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	KNI_DBG("kni_net_ioctl %d\n",
		((struct kni_dev *)netdev_priv(dev))->group_id);

	return 0;
}
static void
kni_net_set_rx_mode(struct net_device *dev)
{
}
static int
kni_net_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct rte_kni_request req;
	struct kni_dev *kni = netdev_priv(dev);

	KNI_DBG("kni_net_change_mtu new mtu %d to be set\n", new_mtu);

	memset(&req, 0, sizeof(req));
	req.req_id = RTE_KNI_REQ_CHANGE_MTU;
	req.new_mtu = new_mtu;
	ret = kni_net_process_request(kni, &req);
	if (ret == 0 && req.result == 0)
		dev->mtu = new_mtu;

	return (ret == 0) ? req.result : ret;
}
/*
 * Checks if the userspace application provided the response message
 */
void
kni_net_poll_resp(struct kni_dev *kni)
{
	if (kni_fifo_count(kni->resp_q))
		wake_up_interruptible(&kni->wq);
}
/*
 * Send a request to userspace and wait for its response.
 */
static int
kni_net_process_request(struct kni_dev *kni, struct rte_kni_request *req)
{
	int ret = -1;
	void *resp_va;
	unsigned num;
	int ret_val;

	if (!kni || !req) {
		KNI_ERR("No kni instance or request\n");
		return -EINVAL;
	}

	mutex_lock(&kni->sync_lock);

	/* Construct data */
	memcpy(kni->sync_kva, req, sizeof(struct rte_kni_request));
	num = kni_fifo_put(kni->req_q, &kni->sync_va, 1);
	if (num < 1) {
		KNI_ERR("Cannot send to req_q\n");
		ret = -EBUSY;
		goto fail;
	}

	ret_val = wait_event_interruptible_timeout(kni->wq,
			kni_fifo_count(kni->resp_q), 3 * HZ);
	if (signal_pending(current) || ret_val <= 0) {
		ret = -ETIME;
		goto fail;
	}
	num = kni_fifo_get(kni->resp_q, (void **)&resp_va, 1);
	if (num != 1 || resp_va != kni->sync_va) {
		/* This should never happen */
		KNI_ERR("No data in resp_q\n");
		ret = -ENODATA;
		goto fail;
	}

	memcpy(req, kni->sync_kva, sizeof(struct rte_kni_request));
	ret = 0;

fail:
	mutex_unlock(&kni->sync_lock);
	return ret;
}
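
/*
 * Rough sketch (illustrative, not part of this module) of the peer
 * logic in userspace, implemented by librte_kni's
 * rte_kni_handle_request(): dequeue the request from req_q, dispatch
 * on req_id to the application-registered callback (e.g. change_mtu or
 * config_network_if), store the callback's return code in req->result,
 * and enqueue the same buffer on resp_q, which wakes the sleeper above.
 */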
/*
 * Return statistics to the caller
 */
static struct net_device_stats *
kni_net_stats(struct net_device *dev)
{
	struct kni_dev *kni = netdev_priv(dev);

	return &kni->stats;
}
/*
 * Fill the eth header
 */
static int
kni_net_header(struct sk_buff *skb, struct net_device *dev,
		unsigned short type, const void *daddr,
		const void *saddr, unsigned int len)
{
	struct ethhdr *eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);

	memcpy(eth->h_source, saddr ? saddr : dev->dev_addr, dev->addr_len);
	memcpy(eth->h_dest, daddr ? daddr : dev->dev_addr, dev->addr_len);
	eth->h_proto = htons(type);

	return dev->hard_header_len;
}
/*
 * Re-fill the eth header
 */
#ifdef HAVE_REBUILD_HEADER
static int
kni_net_rebuild_header(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct ethhdr *eth = (struct ethhdr *) skb->data;

	memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
	memcpy(eth->h_dest, dev->dev_addr, dev->addr_len);

	return 0;
}
#endif /* < 4.1.0 */
/**
 * kni_net_set_mac - Change the Ethernet Address of the KNI NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int kni_net_set_mac(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr((unsigned char *)(addr->sa_data)))
		return -EADDRNOTAVAIL;
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return 0;
}
#ifdef HAVE_CHANGE_CARRIER_CB
static int kni_net_change_carrier(struct net_device *dev, bool new_carrier)
{
	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);

	return 0;
}
#endif
static const struct header_ops kni_net_header_ops = {
	.create = kni_net_header,
#ifdef HAVE_REBUILD_HEADER
	.rebuild = kni_net_rebuild_header,
#endif /* < 4.1.0 */
	.cache = NULL,  /* disable caching */
};
static const struct net_device_ops kni_net_netdev_ops = {
	.ndo_open = kni_net_open,
	.ndo_stop = kni_net_release,
	.ndo_set_config = kni_net_config,
	.ndo_start_xmit = kni_net_tx,
	.ndo_change_mtu = kni_net_change_mtu,
	.ndo_do_ioctl = kni_net_ioctl,
	.ndo_set_rx_mode = kni_net_set_rx_mode,
	.ndo_get_stats = kni_net_stats,
	.ndo_tx_timeout = kni_net_tx_timeout,
	.ndo_set_mac_address = kni_net_set_mac,
#ifdef HAVE_CHANGE_CARRIER_CB
	.ndo_change_carrier = kni_net_change_carrier,
#endif
};
void
kni_net_init(struct net_device *dev)
{
	struct kni_dev *kni = netdev_priv(dev);

	KNI_DBG("kni_net_init\n");

	init_waitqueue_head(&kni->wq);
	mutex_init(&kni->sync_lock);

	ether_setup(dev); /* assign some of the fields */
	dev->netdev_ops = &kni_net_netdev_ops;
	dev->header_ops = &kni_net_header_ops;
	dev->watchdog_timeo = WD_TIMEOUT;
}
void
kni_net_config_lo_mode(char *lo_str)
{
	if (!lo_str) {
		KNI_PRINT("loopback disabled");
		return;
	}

	if (!strcmp(lo_str, "lo_mode_none"))
		KNI_PRINT("loopback disabled");
	else if (!strcmp(lo_str, "lo_mode_fifo")) {
		KNI_PRINT("loopback mode=lo_mode_fifo enabled");
		kni_net_rx_func = kni_net_rx_lo_fifo;
	} else if (!strcmp(lo_str, "lo_mode_fifo_skb")) {
		KNI_PRINT("loopback mode=lo_mode_fifo_skb enabled");
		kni_net_rx_func = kni_net_rx_lo_fifo_skb;
	} else
		KNI_PRINT("Unrecognized parameter, loopback disabled");
}
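
/*
 * Usage sketch (illustrative): the lo_str argument comes from the
 * `lo_mode` module parameter registered in kni_misc.c, e.g.:
 *   # insmod rte_kni.ko lo_mode=lo_mode_fifo
 * which selects kni_net_rx_lo_fifo as the handler invoked by
 * kni_net_rx() above.
 */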