#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h> /* eth_type_trans */
+#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/delay.h>
/* kni rx function pointer, with default to normal rx */
static kni_net_rx_t kni_net_rx_func = kni_net_rx_normal;
+#ifdef HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
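+/*
+ * In IOVA=VA mode the addresses stored in mbufs are userspace IOVAs.
+ * They are translated in two steps: walk the user task's page tables
+ * to get the physical address (iova_to_phys), then map that into the
+ * kernel linear mapping (phys_to_virt).
+ */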
+/* iova to kernel virtual address */
+static inline void *
+iova2kva(struct kni_dev *kni, void *iova)
+{
+ return phys_to_virt(iova_to_phys(kni->usr_tsk, (unsigned long)iova));
+}
+
+static inline void *
+iova2data_kva(struct kni_dev *kni, struct rte_kni_mbuf *m)
+{
+ return phys_to_virt(iova_to_phys(kni->usr_tsk, m->buf_physaddr) +
+ m->data_off);
+}
+#endif
+
/* physical address to kernel virtual address */
static void *
pa2kva(void *pa)
{
	return phys_to_virt((unsigned long)pa);
}

/* mbuf data kernel virtual address from mbuf kernel virtual address */
static void *
kva2data_kva(struct rte_kni_mbuf *m)
{
	return phys_to_virt(m->buf_physaddr + m->data_off);
}
-/* virtual address to physical address */
-static void *
-va2pa(void *va, struct rte_kni_mbuf *m)
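+/*
+ * Resolve an mbuf pointer taken from a fifo to a kernel virtual
+ * address. In IOVA mode the value is a userspace IOVA; otherwise it
+ * is a physical address.
+ */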
+static inline void *
+get_kva(struct kni_dev *kni, void *pa)
{
- void *pa;
+#ifdef HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
+ if (kni->iova_mode == 1)
+ return iova2kva(kni, pa);
+#endif
+ return pa2kva(pa);
+}
- pa = (void *)((unsigned long)va -
- ((unsigned long)m->buf_addr -
- (unsigned long)m->buf_physaddr));
- return pa;
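+/*
+ * Given the mbuf's kernel virtual address, return the kernel virtual
+ * address of its data area, honouring the device's address mode in
+ * the same way as get_kva().
+ */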
+static inline void *
+get_data_kva(struct kni_dev *kni, void *pkt_kva)
+{
+#ifdef HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
+ if (kni->iova_mode == 1)
+ return iova2data_kva(kni, pkt_kva);
+#endif
+ return kva2data_kva(pkt_kva);
}
/*
struct kni_dev *kni = netdev_priv(dev);
netif_start_queue(dev);
- if (dflt_carrier == 1)
+ if (kni_dflt_carrier == 1)
netif_carrier_on(dev);
else
netif_carrier_off(dev);
kni_fifo_trans_pa2va(struct kni_dev *kni,
	struct rte_kni_fifo *src_pa, struct rte_kni_fifo *dst_va)
{
uint32_t ret, i, num_dst, num_rx;
- void *kva;
+ struct rte_kni_mbuf *kva, *prev_kva;
+ int nb_segs;
+ int kva_nb_segs;
+
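+	/* Move mbufs from the PA fifo to the VA fifo in bursts, stopping
+	 * when the destination fifo runs out of free slots. */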
do {
num_dst = kni_fifo_free_count(dst_va);
if (num_dst == 0)
return;
for (i = 0; i < num_rx; i++) {
- kva = pa2kva(kni->pa[i]);
+ kva = get_kva(kni, kni->pa[i]);
kni->va[i] = pa2va(kni->pa[i], kva);
+
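+			/*
+			 * Walk the segment chain and rewrite each next
+			 * pointer from a physical to a virtual address so
+			 * the application receiving these mbufs on dst_va
+			 * can follow (and free) the whole chain.
+			 */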
+ kva_nb_segs = kva->nb_segs;
+ for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++) {
+ if (!kva->next)
+ break;
+
+ prev_kva = kva;
+ kva = pa2kva(kva->next);
+ /* Convert physical address to virtual address */
+ prev_kva->next = pa2va(prev_kva->next, kva);
+ }
}
ret = kni_fifo_put(dst_va, kni->va, num_rx);
if (likely(ret == 1)) {
void *data_kva;
- pkt_kva = pa2kva(pkt_pa);
- data_kva = kva2data_kva(pkt_kva);
+ pkt_kva = get_kva(kni, pkt_pa);
+ data_kva = get_data_kva(kni, pkt_kva);
pkt_va = pa2va(pkt_pa, pkt_kva);
len = skb->len;
/* Free skb and update statistics */
dev_kfree_skb(skb);
- kni->stats.tx_bytes += len;
- kni->stats.tx_packets++;
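+	/*
+	 * Account in the generic net_device stats: dev_get_stats() falls
+	 * back to dev->stats when no ndo_get_stats handler is set, so the
+	 * private copy (and kni_net_stats) can be dropped.
+	 */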
+ dev->stats.tx_bytes += len;
+ dev->stats.tx_packets++;
return NETDEV_TX_OK;
drop:
/* Free skb and update statistics */
dev_kfree_skb(skb);
- kni->stats.tx_dropped++;
+ dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
uint32_t ret;
uint32_t len;
uint32_t i, num_rx, num_fq;
- struct rte_kni_mbuf *kva;
+ struct rte_kni_mbuf *kva, *prev_kva;
void *data_kva;
struct sk_buff *skb;
struct net_device *dev = kni->net_dev;
/* Transfer received packets to netif */
for (i = 0; i < num_rx; i++) {
- kva = pa2kva(kni->pa[i]);
+ kva = get_kva(kni, kni->pa[i]);
len = kva->pkt_len;
- data_kva = kva2data_kva(kva);
+ data_kva = get_data_kva(kni, kva);
kni->va[i] = pa2va(kni->pa[i], kva);
skb = netdev_alloc_skb(dev, len);
if (!skb) {
/* Update statistics */
- kni->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
continue;
}
if (!kva->next)
break;
- kva = pa2kva(va2pa(kva->next, kva));
+ prev_kva = kva;
+ kva = pa2kva(kva->next);
data_kva = kva2data_kva(kva);
+ /* Convert physical address to virtual address */
+ prev_kva->next = pa2va(prev_kva->next, kva);
}
}
netif_rx_ni(skb);
/* Update statistics */
- kni->stats.rx_bytes += len;
- kni->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ dev->stats.rx_packets++;
}
/* Burst enqueue mbufs into free_q */
uint32_t ret;
uint32_t len;
uint32_t i, num, num_rq, num_tq, num_aq, num_fq;
- struct rte_kni_mbuf *kva;
+ struct rte_kni_mbuf *kva, *next_kva;
void *data_kva;
struct rte_kni_mbuf *alloc_kva;
void *alloc_data_kva;
+ struct net_device *dev = kni->net_dev;
/* Get the number of entries in rx_q */
num_rq = kni_fifo_count(kni->rx_q);
- /* Get the number of free entrie in tx_q */
+ /* Get the number of free entries in tx_q */
num_tq = kni_fifo_free_count(kni->tx_q);
/* Get the number of entries in alloc_q */
num = ret;
/* Copy mbufs */
for (i = 0; i < num; i++) {
- kva = pa2kva(kni->pa[i]);
- len = kva->pkt_len;
- data_kva = kva2data_kva(kva);
+ kva = get_kva(kni, kni->pa[i]);
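+		/* Copy only this segment: data_len is the segment length,
+		 * while pkt_len covers the whole chain and could overrun
+		 * the single destination buffer. */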
+ len = kva->data_len;
+ data_kva = get_data_kva(kni, kva);
kni->va[i] = pa2va(kni->pa[i], kva);
- alloc_kva = pa2kva(kni->alloc_pa[i]);
- alloc_data_kva = kva2data_kva(alloc_kva);
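+		/*
+		 * These mbufs go back to userspace via free_q; rewrite the
+		 * chained next pointers to virtual addresses so every
+		 * segment can be freed.
+		 */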
+ while (kva->next) {
+ next_kva = pa2kva(kva->next);
+ /* Convert physical address to virtual address */
+ kva->next = pa2va(kva->next, next_kva);
+ kva = next_kva;
+ }
+
+ alloc_kva = get_kva(kni, kni->alloc_pa[i]);
+ alloc_data_kva = get_data_kva(kni, alloc_kva);
kni->alloc_va[i] = pa2va(kni->alloc_pa[i], alloc_kva);
memcpy(alloc_data_kva, data_kva, len);
alloc_kva->pkt_len = len;
alloc_kva->data_len = len;
- kni->stats.tx_bytes += len;
- kni->stats.rx_bytes += len;
+ dev->stats.tx_bytes += len;
+ dev->stats.rx_bytes += len;
}
/* Burst enqueue mbufs into tx_q */
* Update statistics. Enqueue/dequeue failure is impossible,
* as all queues were checked first.
*/
- kni->stats.tx_packets += num;
- kni->stats.rx_packets += num;
+ dev->stats.tx_packets += num;
+ dev->stats.rx_packets += num;
}
/*
uint32_t ret;
uint32_t len;
uint32_t i, num_rq, num_fq, num;
- struct rte_kni_mbuf *kva;
+ struct rte_kni_mbuf *kva, *prev_kva;
void *data_kva;
struct sk_buff *skb;
struct net_device *dev = kni->net_dev;
/* Copy mbufs to sk buffer and then call tx interface */
for (i = 0; i < num; i++) {
- kva = pa2kva(kni->pa[i]);
+ kva = get_kva(kni, kni->pa[i]);
len = kva->pkt_len;
- data_kva = kva2data_kva(kva);
+ data_kva = get_data_kva(kni, kva);
kni->va[i] = pa2va(kni->pa[i], kva);
skb = netdev_alloc_skb(dev, len);
/* Simulate real usage, allocate/copy skb twice */
skb = netdev_alloc_skb(dev, len);
if (skb == NULL) {
- kni->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
continue;
}
if (!kva->next)
break;
- kva = pa2kva(va2pa(kva->next, kva));
- data_kva = kva2data_kva(kva);
+ prev_kva = kva;
+ kva = get_kva(kni, kva->next);
+ data_kva = get_data_kva(kni, kva);
+ /* Convert physical address to virtual address */
+ prev_kva->next = pa2va(prev_kva->next, kva);
}
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
- kni->stats.rx_bytes += len;
- kni->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ dev->stats.rx_packets++;
/* call tx interface */
kni_net_tx(skb, dev);
/*
* Deal with a transmit timeout.
*/
+#ifdef HAVE_TX_TIMEOUT_TXQUEUE
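+/* Linux 5.6 added the stalled-queue index to ndo_tx_timeout(). */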
+static void
+kni_net_tx_timeout(struct net_device *dev, unsigned int txqueue)
+#else
static void
kni_net_tx_timeout(struct net_device *dev)
+#endif
{
- struct kni_dev *kni = netdev_priv(dev);
-
pr_debug("Transmit timeout at %ld, latency %ld\n", jiffies,
jiffies - dev_trans_start(dev));
- kni->stats.tx_errors++;
+ dev->stats.tx_errors++;
netif_wake_queue(dev);
}
}
static void
-kni_net_set_promiscusity(struct net_device *netdev, int flags)
+kni_net_change_rx_flags(struct net_device *netdev, int flags)
{
struct rte_kni_request req;
struct kni_dev *kni = netdev_priv(netdev);
memset(&req, 0, sizeof(req));
- req.req_id = RTE_KNI_REQ_CHANGE_PROMISC;
- if (netdev->flags & IFF_PROMISC)
- req.promiscusity = 1;
- else
- req.promiscusity = 0;
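+	/*
+	 * The flags argument carries the flags that changed; their current
+	 * values are read from netdev->flags. Note that if IFF_ALLMULTI
+	 * and IFF_PROMISC flip in the same call, req_id is overwritten and
+	 * only the promiscuity request reaches userspace.
+	 */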
+ if (flags & IFF_ALLMULTI) {
+ req.req_id = RTE_KNI_REQ_CHANGE_ALLMULTI;
+
+ if (netdev->flags & IFF_ALLMULTI)
+ req.allmulti = 1;
+ else
+ req.allmulti = 0;
+ }
+
+ if (flags & IFF_PROMISC) {
+ req.req_id = RTE_KNI_REQ_CHANGE_PROMISC;
+
+ if (netdev->flags & IFF_PROMISC)
+ req.promiscusity = 1;
+ else
+ req.promiscusity = 0;
+ }
+
kni_net_process_request(kni, &req);
}
wake_up_interruptible(&kni->wq);
}
-/*
- * Return statistics to the caller
- */
-static struct net_device_stats *
-kni_net_stats(struct net_device *dev)
-{
- struct kni_dev *kni = netdev_priv(dev);
-
- return &kni->stats;
-}
-
/*
* Fill the eth header
*/
.ndo_open = kni_net_open,
.ndo_stop = kni_net_release,
.ndo_set_config = kni_net_config,
- .ndo_change_rx_flags = kni_net_set_promiscusity,
+ .ndo_change_rx_flags = kni_net_change_rx_flags,
.ndo_start_xmit = kni_net_tx,
.ndo_change_mtu = kni_net_change_mtu,
- .ndo_get_stats = kni_net_stats,
.ndo_tx_timeout = kni_net_tx_timeout,
.ndo_set_mac_address = kni_net_set_mac,
#ifdef HAVE_CHANGE_CARRIER_CB
#endif
};
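+/*
+ * Minimal ethtool support: report the KNI driver name/version and let
+ * ethtool query link state via the carrier flag.
+ */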
+static void kni_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->version, KNI_VERSION, sizeof(info->version));
+ strlcpy(info->driver, "kni", sizeof(info->driver));
+}
+
+static const struct ethtool_ops kni_net_ethtool_ops = {
+ .get_drvinfo = kni_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
void
kni_net_init(struct net_device *dev)
{
ether_setup(dev); /* assign some of the fields */
dev->netdev_ops = &kni_net_netdev_ops;
dev->header_ops = &kni_net_header_ops;
+ dev->ethtool_ops = &kni_net_ethtool_ops;
dev->watchdog_timeo = WD_TIMEOUT;
}
} else if (!strcmp(lo_str, "lo_mode_fifo_skb")) {
pr_debug("loopback mode=lo_mode_fifo_skb enabled");
kni_net_rx_func = kni_net_rx_lo_fifo_skb;
- } else
- pr_debug("Incognizant parameter, loopback disabled");
+ } else {
+ pr_debug("Unknown loopback parameter, disabled");
+ }
}