#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
#define HAVE_SIGNAL_FUNCTIONS_OWN_HEADER
#endif
+
+#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
+
+#define HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
+
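+/*
+ * get_user_pages_remote() argument list across kernel versions:
+ * v4.6 - v4.8: (tsk, mm, start, nr_pages, write, force, pages, vmas)
+ * v4.9.x:      write/force replaced by a single gup_flags argument
+ * v4.10+:      a trailing "locked" argument appended
+ */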
+#if KERNEL_VERSION(4, 9, 0) > LINUX_VERSION_CODE
+#define GET_USER_PAGES_REMOTE_API_V1
+#elif KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE
+#define GET_USER_PAGES_REMOTE_API_V2
+#else
+#define GET_USER_PAGES_REMOTE_API_V3
+#endif
+
+#endif
/* kni list */
struct list_head list;
+ uint8_t iova_mode; /* IOVA mode: 0 = PA, 1 = VA */
+
uint32_t core_id; /* Core ID to bind */
char name[RTE_KNI_NAMESIZE]; /* Network device name */
struct task_struct *pthread;
void *va[MBUF_BURST_SZ];
void *alloc_pa[MBUF_BURST_SZ];
void *alloc_va[MBUF_BURST_SZ];
+
+ struct task_struct *usr_tsk; /* Task owning the IOVA mappings */
};
+#ifdef HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
+static inline phys_addr_t iova_to_phys(struct task_struct *tsk,
+ unsigned long iova)
+{
+ phys_addr_t offset, phys_addr;
+ struct page *page = NULL;
+ long ret;
+
+ offset = iova & (PAGE_SIZE - 1);
+
+ /* Pin one page of the user mapping to get its struct page */
+#ifdef GET_USER_PAGES_REMOTE_API_V3
+ ret = get_user_pages_remote(tsk, tsk->mm, iova, 1,
+ FOLL_TOUCH, &page, NULL, NULL);
+#endif
+#ifdef GET_USER_PAGES_REMOTE_API_V2
+ ret = get_user_pages_remote(tsk, tsk->mm, iova, 1,
+ FOLL_TOUCH, &page, NULL);
+#endif
+#ifdef GET_USER_PAGES_REMOTE_API_V1
+ ret = get_user_pages_remote(tsk, tsk->mm, iova, 1,
+ 0, 0, &page, NULL);
+#endif
+ if (ret < 0)
+ return 0;
+
+ phys_addr = page_to_phys(page) | offset;
+ put_page(page);
+
+ return phys_addr;
+}
+
+static inline void *iova_to_kva(struct task_struct *tsk, unsigned long iova)
+{
+ return phys_to_virt(iova_to_phys(tsk, iova));
+}
+#endif
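+
+/*
+ * Usage sketch, as in the create ioctl handler below: queue addresses
+ * supplied by the user process are translated against the calling
+ * task, which owns the IOVA mappings:
+ *
+ *   kni->tx_q = iova_to_kva(current, dev_info.tx_phys);
+ */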
+
void kni_net_release_fifo_phy(struct kni_dev *kni);
void kni_net_rx(struct kni_dev *kni);
void kni_net_init(struct net_device *dev);
strncpy(kni->name, dev_info.name, RTE_KNI_NAMESIZE);
/* Translate user space info into kernel space info */
- kni->tx_q = phys_to_virt(dev_info.tx_phys);
- kni->rx_q = phys_to_virt(dev_info.rx_phys);
- kni->alloc_q = phys_to_virt(dev_info.alloc_phys);
- kni->free_q = phys_to_virt(dev_info.free_phys);
-
- kni->req_q = phys_to_virt(dev_info.req_phys);
- kni->resp_q = phys_to_virt(dev_info.resp_phys);
- kni->sync_va = dev_info.sync_va;
- kni->sync_kva = phys_to_virt(dev_info.sync_phys);
+ if (dev_info.iova_mode) {
+#ifdef HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
+ kni->tx_q = iova_to_kva(current, dev_info.tx_phys);
+ kni->rx_q = iova_to_kva(current, dev_info.rx_phys);
+ kni->alloc_q = iova_to_kva(current, dev_info.alloc_phys);
+ kni->free_q = iova_to_kva(current, dev_info.free_phys);
+
+ kni->req_q = iova_to_kva(current, dev_info.req_phys);
+ kni->resp_q = iova_to_kva(current, dev_info.resp_phys);
+ kni->sync_va = dev_info.sync_va;
+ kni->sync_kva = iova_to_kva(current, dev_info.sync_phys);
+ kni->usr_tsk = current;
+ kni->iova_mode = 1;
+#else
+ pr_err("KNI module does not support IOVA to VA translation\n");
+ return -EINVAL;
+#endif
+ } else {
+ kni->tx_q = phys_to_virt(dev_info.tx_phys);
+ kni->rx_q = phys_to_virt(dev_info.rx_phys);
+ kni->alloc_q = phys_to_virt(dev_info.alloc_phys);
+ kni->free_q = phys_to_virt(dev_info.free_phys);
+
+ kni->req_q = phys_to_virt(dev_info.req_phys);
+ kni->resp_q = phys_to_virt(dev_info.resp_phys);
+ kni->sync_va = dev_info.sync_va;
+ kni->sync_kva = phys_to_virt(dev_info.sync_phys);
+ kni->iova_mode = 0;
+ }
kni->mbuf_size = dev_info.mbuf_size;
/* kni rx function pointer, with default to normal rx */
static kni_net_rx_t kni_net_rx_func = kni_net_rx_normal;
+#ifdef HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
+/* iova to kernel virtual address */
+static inline void *
+iova2kva(struct kni_dev *kni, void *iova)
+{
+ return phys_to_virt(iova_to_phys(kni->usr_tsk, (unsigned long)iova));
+}
+
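+/*
+ * In IOVA = VA mode the mbuf's buf_physaddr field actually holds an
+ * IOVA, so it too is resolved through the user task's page tables.
+ */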
+static inline void *
+iova2data_kva(struct kni_dev *kni, struct rte_kni_mbuf *m)
+{
+ return phys_to_virt(iova_to_phys(kni->usr_tsk, m->buf_physaddr) +
+ m->data_off);
+}
+#endif
+
/* physical address to kernel virtual address */
static void *
pa2kva(void *pa)
return phys_to_virt(m->buf_physaddr + m->data_off);
}
+static inline void *
+get_kva(struct kni_dev *kni, void *pa)
+{
+#ifdef HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
+ if (kni->iova_mode == 1)
+ return iova2kva(kni, pa);
+#endif
+ return pa2kva(pa);
+}
+
+static inline void *
+get_data_kva(struct kni_dev *kni, void *pkt_kva)
+{
+#ifdef HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
+ if (kni->iova_mode == 1)
+ return iova2data_kva(kni, pkt_kva);
+#endif
+ return kva2data_kva(pkt_kva);
+}
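+
+/*
+ * Usage sketch, as in the RX/TX paths below: every mbuf pointer is
+ * resolved through these helpers, so a single code path serves both
+ * address modes:
+ *
+ *   kva = get_kva(kni, kni->pa[i]);
+ *   data_kva = get_data_kva(kni, kva);
+ */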
+
/*
* It can be called to process the request.
*/
return;
for (i = 0; i < num_rx; i++) {
- kva = pa2kva(kni->pa[i]);
+ kva = get_kva(kni, kni->pa[i]);
kni->va[i] = pa2va(kni->pa[i], kva);
kva_nb_segs = kva->nb_segs;
if (likely(ret == 1)) {
void *data_kva;
- pkt_kva = pa2kva(pkt_pa);
- data_kva = kva2data_kva(pkt_kva);
+ pkt_kva = get_kva(kni, pkt_pa);
+ data_kva = get_data_kva(kni, pkt_kva);
pkt_va = pa2va(pkt_pa, pkt_kva);
len = skb->len;
/* Transfer received packets to netif */
for (i = 0; i < num_rx; i++) {
- kva = pa2kva(kni->pa[i]);
+ kva = get_kva(kni, kni->pa[i]);
len = kva->pkt_len;
- data_kva = kva2data_kva(kva);
+ data_kva = get_data_kva(kni, kva);
kni->va[i] = pa2va(kni->pa[i], kva);
skb = netdev_alloc_skb(dev, len);
num = ret;
/* Copy mbufs */
for (i = 0; i < num; i++) {
- kva = pa2kva(kni->pa[i]);
+ kva = get_kva(kni, kni->pa[i]);
len = kva->data_len;
- data_kva = kva2data_kva(kva);
+ data_kva = get_data_kva(kni, kva);
kni->va[i] = pa2va(kni->pa[i], kva);
while (kva->next) {
kva = next_kva;
}
- alloc_kva = pa2kva(kni->alloc_pa[i]);
- alloc_data_kva = kva2data_kva(alloc_kva);
+ alloc_kva = get_kva(kni, kni->alloc_pa[i]);
+ alloc_data_kva = get_data_kva(kni, alloc_kva);
kni->alloc_va[i] = pa2va(kni->alloc_pa[i], alloc_kva);
memcpy(alloc_data_kva, data_kva, len);
/* Copy mbufs to sk buffer and then call tx interface */
for (i = 0; i < num; i++) {
- kva = pa2kva(kni->pa[i]);
+ kva = get_kva(kni, kni->pa[i]);
len = kva->pkt_len;
- data_kva = kva2data_kva(kva);
+ data_kva = get_data_kva(kni, kva);
kni->va[i] = pa2va(kni->pa[i], kva);
skb = netdev_alloc_skb(dev, len);
break;
prev_kva = kva;
- kva = pa2kva(kva->next);
- data_kva = kva2data_kva(kva);
+ kva = get_kva(kni, kva->next);
+ data_kva = get_data_kva(kni, kva);
/* Convert physical address to virtual address */
prev_kva->next = pa2va(prev_kva->next, kva);
}
unsigned int min_mtu;
unsigned int max_mtu;
uint8_t mac_addr[6];
+ uint8_t iova_mode; /* IOVA mode: 0 = PA, 1 = VA */
};
#define KNI_DEVICE "kni"
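
/*
 * Userspace sketch (assuming DPDK's librte_kni fills this struct):
 * iova_mode is expected to be derived from the EAL IOVA mode, along
 * the lines of:
 *
 *   dev_info.iova_mode = (rte_eal_iova_mode() == RTE_IOVA_VA) ? 1 : 0;
 */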