// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(c) 2010-2014 Intel Corporation.
 */

/*
 * This code is inspired from the book "Linux Device Drivers" by
 * Alessandro Rubini and Jonathan Corbet, published by O'Reilly & Associates
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h> /* eth_type_trans */
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/delay.h>

#include <rte_kni_common.h>
#include <kni_fifo.h>

#include "compat.h"
#include "kni_dev.h"

#define WD_TIMEOUT 5 /* jiffies */

#define KNI_WAIT_RESPONSE_TIMEOUT 300 /* 3 seconds */

/* typedef for rx function */
typedef void (*kni_net_rx_t)(struct kni_dev *kni);

static void kni_net_rx_normal(struct kni_dev *kni);

/* kni rx function pointer, with default to normal rx */
static kni_net_rx_t kni_net_rx_func = kni_net_rx_normal;
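
/*
 * The receive path is selected once, at module load time, by
 * kni_net_config_lo_mode() at the bottom of this file; kni_net_rx()
 * dispatches through this pointer, so the indirection costs one call per
 * poll and needs no locking after initialization.
 */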

/* physical address to kernel virtual address */
static void *
pa2kva(void *pa)
{
	return phys_to_virt((unsigned long)pa);
}

/* physical address to userspace virtual address */
static void *
pa2va(void *pa, struct rte_kni_mbuf *m)
{
	void *va;

	va = (void *)((unsigned long)pa +
			(unsigned long)m->buf_addr -
			(unsigned long)m->buf_physaddr);

	return va;
}

/* mbuf data kernel virtual address from mbuf kernel virtual address */
static void *
kva2data_kva(struct rte_kni_mbuf *m)
{
	return phys_to_virt(m->buf_physaddr + m->data_off);
}

/* userspace virtual address to physical address */
static void *
va2pa(void *va, struct rte_kni_mbuf *m)
{
	void *pa;

	pa = (void *)((unsigned long)va -
			((unsigned long)m->buf_addr -
			 (unsigned long)m->buf_physaddr));

	return pa;
}
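
/*
 * Address translation summary: userspace hands mbuf *physical* addresses
 * through the FIFOs. pa2kva() relies on phys_to_virt(), which assumes the
 * buffer lies in the kernel's linear mapping (true for the hugepage-backed
 * mbuf pools KNI is used with). pa2va()/va2pa() convert between a physical
 * address and the matching *userspace* virtual address by applying the
 * constant per-mbuf offset (buf_addr - buf_physaddr).
 *
 * Illustrative values (not from any real system): with
 *   buf_addr     = 0x7f0000101000 (userspace VA)
 *   buf_physaddr = 0x000000101000
 * the offset is 0x7f0000000000, so pa 0x101080 maps to va 0x7f0000101080.
 */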

/*
 * Send a request to userspace and wait synchronously for its response.
 */
static int
kni_net_process_request(struct kni_dev *kni, struct rte_kni_request *req)
{
	int ret = -1;
	void *resp_va;
	uint32_t num;
	int ret_val;

	if (!kni || !req) {
		pr_err("No kni instance or request\n");
		return -EINVAL;
	}

	mutex_lock(&kni->sync_lock);

	/* Construct data */
	memcpy(kni->sync_kva, req, sizeof(struct rte_kni_request));
	num = kni_fifo_put(kni->req_q, &kni->sync_va, 1);
	if (num < 1) {
		pr_err("Cannot send to req_q\n");
		ret = -EBUSY;
		goto fail;
	}

	ret_val = wait_event_interruptible_timeout(kni->wq,
			kni_fifo_count(kni->resp_q), 3 * HZ);
	if (signal_pending(current) || ret_val <= 0) {
		ret = -ETIME;
		goto fail;
	}

	num = kni_fifo_get(kni->resp_q, (void **)&resp_va, 1);
	if (num != 1 || resp_va != kni->sync_va) {
		/* This should never happen */
		pr_err("No data in resp_q\n");
		ret = -ENODATA;
		goto fail;
	}

	memcpy(req, kni->sync_kva, sizeof(struct rte_kni_request));
	ret = 0;

fail:
	mutex_unlock(&kni->sync_lock);
	return ret;
}
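
/*
 * Request/response handshake implemented above (one request in flight at
 * a time, serialized by sync_lock):
 *
 *   kernel                              userspace (DPDK app)
 *   ------                              --------------------
 *   copy *req into sync_kva
 *   put sync_va on req_q        ---->   expected to poll req_q and
 *   sleep on kni->wq                    handle the request
 *                               <----   put sync_va back on resp_q
 *   woken via kni_net_poll_resp()
 *   copy the result from sync_kva back into *req
 *
 * Both directions reuse the single sync_kva/sync_va buffer, which is why
 * sync_lock must be held for the whole round trip.
 */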

/*
 * Open and close
 */
static int
kni_net_open(struct net_device *dev)
{
	int ret;
	struct rte_kni_request req;
	struct kni_dev *kni = netdev_priv(dev);

	netif_start_queue(dev);
	if (dflt_carrier == 1)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);

	memset(&req, 0, sizeof(req));
	req.req_id = RTE_KNI_REQ_CFG_NETWORK_IF;

	/* Setting if_up to non-zero means up */
	req.if_up = 1;
	ret = kni_net_process_request(kni, &req);

	return (ret == 0) ? req.result : ret;
}

static int
kni_net_release(struct net_device *dev)
{
	int ret;
	struct rte_kni_request req;
	struct kni_dev *kni = netdev_priv(dev);

	netif_stop_queue(dev); /* can't transmit any more */
	netif_carrier_off(dev);

	memset(&req, 0, sizeof(req));
	req.req_id = RTE_KNI_REQ_CFG_NETWORK_IF;

	/* Setting if_up to 0 means down */
	req.if_up = 0;
	ret = kni_net_process_request(kni, &req);

	return (ret == 0) ? req.result : ret;
}

static void
kni_fifo_trans_pa2va(struct kni_dev *kni,
	struct rte_kni_fifo *src_pa, struct rte_kni_fifo *dst_va)
{
	uint32_t ret, i, num_dst, num_rx;
	struct rte_kni_mbuf *kva;

	do {
		num_dst = kni_fifo_free_count(dst_va);
		if (num_dst == 0)
			return;

		num_rx = min_t(uint32_t, num_dst, MBUF_BURST_SZ);

		num_rx = kni_fifo_get(src_pa, kni->pa, num_rx);
		if (num_rx == 0)
			return;

		for (i = 0; i < num_rx; i++) {
			kva = pa2kva(kni->pa[i]);
			kni->va[i] = pa2va(kni->pa[i], kva);
		}

		ret = kni_fifo_put(dst_va, kni->va, num_rx);
		if (ret != num_rx) {
			/* Failing should not happen */
			pr_err("Fail to enqueue entries into dst_va\n");
			return;
		}
	} while (1);
}

/* Try to release mbufs when kni release */
void kni_net_release_fifo_phy(struct kni_dev *kni)
{
	/* release rx_q first, because it can't be released in userspace */
	kni_fifo_trans_pa2va(kni, kni->rx_q, kni->free_q);
	/* release alloc_q to speed up kni release in userspace */
	kni_fifo_trans_pa2va(kni, kni->alloc_q, kni->free_q);
}
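
/*
 * rx_q and alloc_q are consumed on the kernel side, so once the device is
 * being torn down only this module can still drain them; every recovered
 * mbuf is parked on free_q, where the userspace application is expected
 * to reclaim it.
 */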

/*
 * Configuration changes (passed on by ifconfig)
 */
static int
kni_net_config(struct net_device *dev, struct ifmap *map)
{
	if (dev->flags & IFF_UP) /* can't act on a running interface */
		return -EBUSY;

	/* ignore other fields */
	return 0;
}

/*
 * Transmit a packet (called by the kernel)
 */
static int
kni_net_tx(struct sk_buff *skb, struct net_device *dev)
{
	int len = 0;
	uint32_t ret;
	struct kni_dev *kni = netdev_priv(dev);
	struct rte_kni_mbuf *pkt_kva = NULL;
	void *pkt_pa = NULL;
	void *pkt_va = NULL;

	/* save the timestamp */
#ifdef HAVE_TRANS_START_HELPER
	netif_trans_update(dev);
#else
	dev->trans_start = jiffies;
#endif

	/* Check if the length of skb is less than mbuf size */
	if (skb->len > kni->mbuf_size)
		goto drop;

	/**
	 * Check if it has at least one free entry in tx_q and
	 * one entry in alloc_q.
	 */
	if (kni_fifo_free_count(kni->tx_q) == 0 ||
			kni_fifo_count(kni->alloc_q) == 0) {
		/**
		 * If there is no free entry in tx_q or no entry in alloc_q,
		 * drop the skb and return.
		 */
		goto drop;
	}

	/* dequeue a mbuf from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, &pkt_pa, 1);
	if (likely(ret == 1)) {
		void *data_kva;

		pkt_kva = pa2kva(pkt_pa);
		data_kva = kva2data_kva(pkt_kva);
		pkt_va = pa2va(pkt_pa, pkt_kva);

		len = skb->len;
		memcpy(data_kva, skb->data, len);
		if (unlikely(len < ETH_ZLEN)) {
			/* pad short frames to the minimum Ethernet length */
			memset(data_kva + len, 0, ETH_ZLEN - len);
			len = ETH_ZLEN;
		}
		pkt_kva->pkt_len = len;
		pkt_kva->data_len = len;

		/* enqueue mbuf into tx_q */
		ret = kni_fifo_put(kni->tx_q, &pkt_va, 1);
		if (unlikely(ret != 1)) {
			/* Failing should not happen */
			pr_err("Fail to enqueue mbuf into tx_q\n");
			goto drop;
		}
	} else {
		/* Failing should not happen */
		pr_err("Fail to dequeue mbuf from alloc_q\n");
		goto drop;
	}

	/* Free skb and update statistics */
	dev_kfree_skb(skb);
	dev->stats.tx_bytes += len;
	dev->stats.tx_packets++;

	return NETDEV_TX_OK;

drop:
	/* Free skb and update statistics */
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}
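
/*
 * FIFO roles, as used by kni_net_tx() above and the receive paths below
 * (directions are from the kernel's point of view):
 *
 *   tx_q    - kernel -> userspace: frames transmitted by the stack
 *   rx_q    - userspace -> kernel: frames to inject into the stack
 *   alloc_q - userspace -> kernel: empty mbufs for kni_net_tx() to fill
 *   free_q  - kernel -> userspace: mbufs the kernel has finished with
 *
 * So transmission stalls (and drops) when userspace has not replenished
 * alloc_q, and every mbuf taken off rx_q must be returned via free_q.
 */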

/*
 * RX: normal working mode
 */
static void
kni_net_rx_normal(struct kni_dev *kni)
{
	uint32_t ret;
	uint32_t len;
	uint32_t i, num_rx, num_fq;
	struct rte_kni_mbuf *kva;
	void *data_kva;
	struct sk_buff *skb;
	struct net_device *dev = kni->net_dev;

	/* Get the number of free entries in free_q */
	num_fq = kni_fifo_free_count(kni->free_q);
	if (num_fq == 0) {
		/* No room on the free_q, bail out */
		return;
	}

	/* Calculate the number of entries to dequeue from rx_q */
	num_rx = min_t(uint32_t, num_fq, MBUF_BURST_SZ);

	/* Burst dequeue from rx_q */
	num_rx = kni_fifo_get(kni->rx_q, kni->pa, num_rx);
	if (num_rx == 0)
		return;

	/* Transfer received packets to netif */
	for (i = 0; i < num_rx; i++) {
		kva = pa2kva(kni->pa[i]);
		len = kva->pkt_len;
		data_kva = kva2data_kva(kva);
		kni->va[i] = pa2va(kni->pa[i], kva);

		skb = netdev_alloc_skb(dev, len);
		if (!skb) {
			/* Update statistics */
			dev->stats.rx_dropped++;
			continue;
		}

		if (kva->nb_segs == 1) {
			memcpy(skb_put(skb, len), data_kva, len);
		} else {
			int nb_segs;
			int kva_nb_segs = kva->nb_segs;

			for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++) {
				memcpy(skb_put(skb, kva->data_len),
					data_kva, kva->data_len);

				if (!kva->next)
					break;

				kva = pa2kva(va2pa(kva->next, kva));
				data_kva = kva2data_kva(kva);
			}
		}

		skb->protocol = eth_type_trans(skb, dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Call netif interface */
		netif_rx_ni(skb);

		/* Update statistics */
		dev->stats.rx_bytes += len;
		dev->stats.rx_packets++;
	}

	/* Burst enqueue mbufs into free_q */
	ret = kni_fifo_put(kni->free_q, kni->va, num_rx);
	if (ret != num_rx)
		/* Failing should not happen */
		pr_err("Fail to enqueue entries into free_q\n");
}
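
/*
 * Note on the multi-segment walk above: kva->next stores a userspace
 * virtual address, so each link has to be translated back through
 * va2pa()/pa2kva() before the kernel can touch the next segment.
 */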

/*
 * RX: loopback with enqueue/dequeue fifos.
 */
static void
kni_net_rx_lo_fifo(struct kni_dev *kni)
{
	uint32_t ret;
	uint32_t len;
	uint32_t i, num, num_rq, num_tq, num_aq, num_fq;
	struct rte_kni_mbuf *kva;
	void *data_kva;
	struct rte_kni_mbuf *alloc_kva;
	void *alloc_data_kva;
	struct net_device *dev = kni->net_dev;

	/* Get the number of entries in rx_q */
	num_rq = kni_fifo_count(kni->rx_q);

	/* Get the number of free entries in tx_q */
	num_tq = kni_fifo_free_count(kni->tx_q);

	/* Get the number of entries in alloc_q */
	num_aq = kni_fifo_count(kni->alloc_q);

	/* Get the number of free entries in free_q */
	num_fq = kni_fifo_free_count(kni->free_q);

	/* Calculate the number of entries to be dequeued from rx_q */
	num = min(num_rq, num_tq);
	num = min(num, num_aq);
	num = min(num, num_fq);
	num = min_t(uint32_t, num, MBUF_BURST_SZ);

	/* Return if no entry to dequeue from rx_q */
	if (num == 0)
		return;

	/* Burst dequeue from rx_q */
	ret = kni_fifo_get(kni->rx_q, kni->pa, num);
	if (ret == 0)
		return; /* Failing should not happen */

	/* Dequeue entries from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, kni->alloc_pa, num);
	if (ret) {
		num = ret;
		/* Copy mbufs */
		for (i = 0; i < num; i++) {
			kva = pa2kva(kni->pa[i]);
			len = kva->data_len;
			data_kva = kva2data_kva(kva);
			kni->va[i] = pa2va(kni->pa[i], kva);

			alloc_kva = pa2kva(kni->alloc_pa[i]);
			alloc_data_kva = kva2data_kva(alloc_kva);
			kni->alloc_va[i] = pa2va(kni->alloc_pa[i], alloc_kva);

			memcpy(alloc_data_kva, data_kva, len);
			alloc_kva->pkt_len = len;
			alloc_kva->data_len = len;

			dev->stats.tx_bytes += len;
			dev->stats.rx_bytes += len;
		}

		/* Burst enqueue mbufs into tx_q */
		ret = kni_fifo_put(kni->tx_q, kni->alloc_va, num);
		if (ret != num)
			/* Failing should not happen */
			pr_err("Fail to enqueue mbufs into tx_q\n");
	}

	/* Burst enqueue mbufs into free_q */
	ret = kni_fifo_put(kni->free_q, kni->va, num);
	if (ret != num)
		/* Failing should not happen */
		pr_err("Fail to enqueue mbufs into free_q\n");

	/**
	 * Update statistics; enqueue/dequeue failures cannot occur here,
	 * as all queue capacities were checked up front.
	 */
	dev->stats.tx_packets += num;
	dev->stats.rx_packets += num;
}
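
/*
 * The fifo loopback above never enters the kernel network stack: each
 * frame from rx_q is copied into a fresh mbuf from alloc_q and sent
 * straight back on tx_q, so this mode mainly serves to measure the FIFO
 * and copy overhead in isolation.
 */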

/*
 * RX: loopback with enqueue/dequeue fifos and sk buffer copies.
 */
static void
kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
{
	uint32_t ret;
	uint32_t len;
	uint32_t i, num_rq, num_fq, num;
	struct rte_kni_mbuf *kva;
	void *data_kva;
	struct sk_buff *skb;
	struct net_device *dev = kni->net_dev;

	/* Get the number of entries in rx_q */
	num_rq = kni_fifo_count(kni->rx_q);

	/* Get the number of free entries in free_q */
	num_fq = kni_fifo_free_count(kni->free_q);

	/* Calculate the number of entries to dequeue from rx_q */
	num = min(num_rq, num_fq);
	num = min_t(uint32_t, num, MBUF_BURST_SZ);

	/* Return if no entry to dequeue from rx_q */
	if (num == 0)
		return;

	/* Burst dequeue mbufs from rx_q */
	ret = kni_fifo_get(kni->rx_q, kni->pa, num);
	if (ret == 0)
		return;

	/* Copy mbufs to sk buffer and then call tx interface */
	for (i = 0; i < num; i++) {
		kva = pa2kva(kni->pa[i]);
		len = kva->data_len;
		data_kva = kva2data_kva(kva);
		kni->va[i] = pa2va(kni->pa[i], kva);

		skb = netdev_alloc_skb(dev, len);
		if (skb) {
			memcpy(skb_put(skb, len), data_kva, len);
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			dev_kfree_skb(skb);
		}

		/* Simulate real usage, allocate/copy skb twice */
		skb = netdev_alloc_skb(dev, len);
		if (skb == NULL) {
			dev->stats.rx_dropped++;
			continue;
		}

		if (kva->nb_segs == 1) {
			memcpy(skb_put(skb, len), data_kva, len);
		} else {
			int nb_segs;
			int kva_nb_segs = kva->nb_segs;

			for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++) {
				memcpy(skb_put(skb, kva->data_len),
					data_kva, kva->data_len);

				if (!kva->next)
					break;

				kva = pa2kva(va2pa(kva->next, kva));
				data_kva = kva2data_kva(kva);
			}
		}

		skb->ip_summed = CHECKSUM_UNNECESSARY;

		dev->stats.rx_bytes += len;
		dev->stats.rx_packets++;

		/* call tx interface */
		kni_net_tx(skb, dev);
	}

	/* enqueue all the mbufs from rx_q into free_q */
	ret = kni_fifo_put(kni->free_q, kni->va, num);
	if (ret != num)
		/* Failing should not happen */
		pr_err("Fail to enqueue mbufs into free_q\n");
}

/* rx interface */
void
kni_net_rx(struct kni_dev *kni)
{
	/**
	 * No NULL check is needed here, as the function pointer
	 * always has a default value.
	 */
	(*kni_net_rx_func)(kni);
}

/*
 * Deal with a transmit timeout.
 */
static void
kni_net_tx_timeout(struct net_device *dev)
{
	pr_debug("Transmit timeout at %ld, latency %ld\n", jiffies,
			jiffies - dev_trans_start(dev));

	dev->stats.tx_errors++;
	netif_wake_queue(dev);
}

static int
kni_net_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct rte_kni_request req;
	struct kni_dev *kni = netdev_priv(dev);

	pr_debug("kni_net_change_mtu new mtu %d to be set\n", new_mtu);

	memset(&req, 0, sizeof(req));
	req.req_id = RTE_KNI_REQ_CHANGE_MTU;
	req.new_mtu = new_mtu;
	ret = kni_net_process_request(kni, &req);
	/* Apply the new MTU only if userspace acknowledged the change */
	if (ret == 0 && req.result == 0)
		dev->mtu = new_mtu;

	return (ret == 0) ? req.result : ret;
}

static void
kni_net_set_promiscusity(struct net_device *netdev, int flags)
{
	struct rte_kni_request req;
	struct kni_dev *kni = netdev_priv(netdev);

	memset(&req, 0, sizeof(req));
	req.req_id = RTE_KNI_REQ_CHANGE_PROMISC;

	if (netdev->flags & IFF_PROMISC)
		req.promiscusity = 1;
	else
		req.promiscusity = 0;

	kni_net_process_request(kni, &req);
}

/*
 * Checks if the userspace application has provided the resp message
 */
void
kni_net_poll_resp(struct kni_dev *kni)
{
	if (kni_fifo_count(kni->resp_q))
		wake_up_interruptible(&kni->wq);
}
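
/*
 * kni_net_poll_resp() is the wake-up half of the handshake in
 * kni_net_process_request(); it is called periodically from the KNI
 * kernel thread (outside this file), and the sleeping waiter re-checks
 * kni_fifo_count(resp_q) itself on wake-up.
 */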

/*
 * Fill the eth header
 */
static int
kni_net_header(struct sk_buff *skb, struct net_device *dev,
		unsigned short type, const void *daddr,
		const void *saddr, uint32_t len)
{
	struct ethhdr *eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);

	memcpy(eth->h_source, saddr ? saddr : dev->dev_addr, dev->addr_len);
	memcpy(eth->h_dest, daddr ? daddr : dev->dev_addr, dev->addr_len);
	eth->h_proto = htons(type);

	return dev->hard_header_len;
}

/*
 * Re-fill the eth header
 */
#ifdef HAVE_REBUILD_HEADER
static int
kni_net_rebuild_header(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct ethhdr *eth = (struct ethhdr *) skb->data;

	memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
	memcpy(eth->h_dest, dev->dev_addr, dev->addr_len);

	return 0;
}
#endif /* < 4.1.0 */

/**
 * kni_net_set_mac - Change the Ethernet Address of the KNI NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int
kni_net_set_mac(struct net_device *netdev, void *p)
{
	int ret;
	struct rte_kni_request req;
	struct kni_dev *kni;
	struct sockaddr *addr = p;

	memset(&req, 0, sizeof(req));
	req.req_id = RTE_KNI_REQ_CHANGE_MAC_ADDR;

	if (!is_valid_ether_addr((unsigned char *)(addr->sa_data)))
		return -EADDRNOTAVAIL;

	memcpy(req.mac_addr, addr->sa_data, netdev->addr_len);
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	kni = netdev_priv(netdev);
	ret = kni_net_process_request(kni, &req);

	return (ret == 0 ? req.result : ret);
}

#ifdef HAVE_CHANGE_CARRIER_CB
static int
kni_net_change_carrier(struct net_device *dev, bool new_carrier)
{
	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}
#endif

static const struct header_ops kni_net_header_ops = {
	.create  = kni_net_header,
	.parse   = eth_header_parse,
#ifdef HAVE_REBUILD_HEADER
	.rebuild = kni_net_rebuild_header,
#endif /* < 4.1.0 */
	.cache   = NULL,  /* disable caching */
};

static const struct net_device_ops kni_net_netdev_ops = {
	.ndo_open = kni_net_open,
	.ndo_stop = kni_net_release,
	.ndo_set_config = kni_net_config,
	.ndo_change_rx_flags = kni_net_set_promiscusity,
	.ndo_start_xmit = kni_net_tx,
	.ndo_change_mtu = kni_net_change_mtu,
	.ndo_tx_timeout = kni_net_tx_timeout,
	.ndo_set_mac_address = kni_net_set_mac,
#ifdef HAVE_CHANGE_CARRIER_CB
	.ndo_change_carrier = kni_net_change_carrier,
#endif
};

static void kni_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->version, KNI_VERSION, sizeof(info->version));
	strlcpy(info->driver, "kni", sizeof(info->driver));
}

static const struct ethtool_ops kni_net_ethtool_ops = {
	.get_drvinfo	= kni_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

void
kni_net_init(struct net_device *dev)
{
	struct kni_dev *kni = netdev_priv(dev);

	init_waitqueue_head(&kni->wq);
	mutex_init(&kni->sync_lock);

	ether_setup(dev); /* assign some of the fields */
	dev->netdev_ops = &kni_net_netdev_ops;
	dev->header_ops = &kni_net_header_ops;
	dev->ethtool_ops = &kni_net_ethtool_ops;
	dev->watchdog_timeo = WD_TIMEOUT;
}

void
kni_net_config_lo_mode(char *lo_str)
{
	if (!lo_str) {
		pr_debug("loopback disabled");
		return;
	}

	if (!strcmp(lo_str, "lo_mode_none"))
		pr_debug("loopback disabled");
	else if (!strcmp(lo_str, "lo_mode_fifo")) {
		pr_debug("loopback mode=lo_mode_fifo enabled");
		kni_net_rx_func = kni_net_rx_lo_fifo;
	} else if (!strcmp(lo_str, "lo_mode_fifo_skb")) {
		pr_debug("loopback mode=lo_mode_fifo_skb enabled");
		kni_net_rx_func = kni_net_rx_lo_fifo_skb;
	} else {
		pr_debug("Unknown loopback parameter, disabled");
	}
}
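
/*
 * Usage sketch (assuming the standard DPDK module name and its lo_mode
 * parameter, both defined outside this file):
 *
 *   insmod rte_kni.ko lo_mode=lo_mode_fifo
 *
 * The module-parameter handler passes the string to
 * kni_net_config_lo_mode(), which selects kni_net_rx_lo_fifo() as the
 * receive hook so FIFO throughput can be exercised without real traffic.
 */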