/* kni rx function pointer, with default to normal rx */
static kni_net_rx_t kni_net_rx_func = kni_net_rx_normal;
+#ifdef HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
+/* iova to kernel virtual address */
+static inline void *
+iova2kva(struct kni_dev *kni, void *iova)
+{
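+	/* resolve the iova through the userspace process (kni->usr_tsk) page tables */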
+ return phys_to_virt(iova_to_phys(kni->usr_tsk, (unsigned long)iova));
+}
+
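+/* iova to kernel virtual address of the mbuf data */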
+static inline void *
+iova2data_kva(struct kni_dev *kni, struct rte_kni_mbuf *m)
+{
+ return phys_to_virt(iova_to_phys(kni->usr_tsk, m->buf_iova) +
+ m->data_off);
+}
+#endif
+
/* physical address to kernel virtual address */
static void *
pa2kva(void *pa)
{
	return phys_to_virt((unsigned long)pa);
}

/* physical address to virtual address */
static void *
pa2va(void *pa, struct rte_kni_mbuf *m)
{
	void *va;
	va = (void *)((unsigned long)pa +
		      (unsigned long)m->buf_addr -
-		      (unsigned long)m->buf_physaddr);
+		      (unsigned long)m->buf_iova);
	return va;
}
static void *
kva2data_kva(struct rte_kni_mbuf *m)
{
- return phys_to_virt(m->buf_physaddr + m->data_off);
+ return phys_to_virt(m->buf_iova + m->data_off);
+}
+
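+/* mbuf PA (or IOVA when iova_mode is set) to kernel virtual address */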
+static inline void *
+get_kva(struct kni_dev *kni, void *pa)
+{
+#ifdef HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
+ if (kni->iova_mode == 1)
+ return iova2kva(kni, pa);
+#endif
+ return pa2kva(pa);
+}
+
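+/* kernel virtual address of the mbuf data, whatever the IOVA mode */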
+static inline void *
+get_data_kva(struct kni_dev *kni, void *pkt_kva)
+{
+#ifdef HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
+ if (kni->iova_mode == 1)
+ return iova2data_kva(kni, pkt_kva);
+#endif
+ return kva2data_kva(pkt_kva);
}
/*
 * Open and close
 */
struct kni_dev *kni = netdev_priv(dev);
netif_start_queue(dev);
- if (dflt_carrier == 1)
+ if (kni_dflt_carrier == 1)
netif_carrier_on(dev);
else
netif_carrier_off(dev);
return;
for (i = 0; i < num_rx; i++) {
- kva = pa2kva(kni->pa[i]);
+ kva = get_kva(kni, kni->pa[i]);
kni->va[i] = pa2va(kni->pa[i], kva);
kva_nb_segs = kva->nb_segs;
if (likely(ret == 1)) {
void *data_kva;
- pkt_kva = pa2kva(pkt_pa);
- data_kva = kva2data_kva(pkt_kva);
+ pkt_kva = get_kva(kni, pkt_pa);
+ data_kva = get_data_kva(kni, pkt_kva);
pkt_va = pa2va(pkt_pa, pkt_kva);
len = skb->len;
/* Transfer received packets to netif */
for (i = 0; i < num_rx; i++) {
- kva = pa2kva(kni->pa[i]);
+ kva = get_kva(kni, kni->pa[i]);
len = kva->pkt_len;
- data_kva = kva2data_kva(kva);
+ data_kva = get_data_kva(kni, kva);
kni->va[i] = pa2va(kni->pa[i], kva);
skb = netdev_alloc_skb(dev, len);
num = ret;
/* Copy mbufs */
for (i = 0; i < num; i++) {
- kva = pa2kva(kni->pa[i]);
- len = kva->pkt_len;
- data_kva = kva2data_kva(kva);
+ kva = get_kva(kni, kni->pa[i]);
+ len = kva->data_len;
+ data_kva = get_data_kva(kni, kva);
kni->va[i] = pa2va(kni->pa[i], kva);
while (kva->next) {
kva = next_kva;
}
- alloc_kva = pa2kva(kni->alloc_pa[i]);
- alloc_data_kva = kva2data_kva(alloc_kva);
+ alloc_kva = get_kva(kni, kni->alloc_pa[i]);
+ alloc_data_kva = get_data_kva(kni, alloc_kva);
kni->alloc_va[i] = pa2va(kni->alloc_pa[i], alloc_kva);
memcpy(alloc_data_kva, data_kva, len);
/* Copy mbufs to sk buffer and then call tx interface */
for (i = 0; i < num; i++) {
- kva = pa2kva(kni->pa[i]);
+ kva = get_kva(kni, kni->pa[i]);
len = kva->pkt_len;
- data_kva = kva2data_kva(kva);
+ data_kva = get_data_kva(kni, kva);
kni->va[i] = pa2va(kni->pa[i], kva);
skb = netdev_alloc_skb(dev, len);
break;
prev_kva = kva;
- kva = pa2kva(kva->next);
- data_kva = kva2data_kva(kva);
+ kva = get_kva(kni, kva->next);
+ data_kva = get_data_kva(kni, kva);
/* Convert physical address to virtual address */
prev_kva->next = pa2va(prev_kva->next, kva);
}
/*
* Deal with a transmit timeout.
*/
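+/* since Linux 5.6, ndo_tx_timeout also receives the index of the stalled queue */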
+#ifdef HAVE_TX_TIMEOUT_TXQUEUE
+static void
+kni_net_tx_timeout(struct net_device *dev, unsigned int txqueue)
+#else
static void
kni_net_tx_timeout(struct net_device *dev)
+#endif
{
pr_debug("Transmit timeout at %ld, latency %ld\n", jiffies,
jiffies - dev_trans_start(dev));
}
static void
-kni_net_set_promiscusity(struct net_device *netdev, int flags)
+kni_net_change_rx_flags(struct net_device *netdev, int flags)
{
struct rte_kni_request req;
struct kni_dev *kni = netdev_priv(netdev);
memset(&req, 0, sizeof(req));
- req.req_id = RTE_KNI_REQ_CHANGE_PROMISC;
- if (netdev->flags & IFF_PROMISC)
- req.promiscusity = 1;
- else
- req.promiscusity = 0;
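+	/* report the changed mode (allmulticast or promiscuous) to the KNI application */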
+ if (flags & IFF_ALLMULTI) {
+ req.req_id = RTE_KNI_REQ_CHANGE_ALLMULTI;
+
+ if (netdev->flags & IFF_ALLMULTI)
+ req.allmulti = 1;
+ else
+ req.allmulti = 0;
+ }
+
+ if (flags & IFF_PROMISC) {
+ req.req_id = RTE_KNI_REQ_CHANGE_PROMISC;
+
+ if (netdev->flags & IFF_PROMISC)
+ req.promiscusity = 1;
+ else
+ req.promiscusity = 0;
+ }
+
kni_net_process_request(kni, &req);
}
.ndo_open = kni_net_open,
.ndo_stop = kni_net_release,
.ndo_set_config = kni_net_config,
- .ndo_change_rx_flags = kni_net_set_promiscusity,
+ .ndo_change_rx_flags = kni_net_change_rx_flags,
.ndo_start_xmit = kni_net_tx,
.ndo_change_mtu = kni_net_change_mtu,
.ndo_tx_timeout = kni_net_tx_timeout,