ethdev: fix max Rx packet length
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index ff48ba2..427b882 100644
@@ -19,6 +19,7 @@
 #include <rte_log.h>
 #include <rte_string_fns.h>
 #include <rte_malloc.h>
+#include <rte_net.h>
 #include <rte_vhost.h>
 #include <rte_ip.h>
 #include <rte_tcp.h>
@@ -43,6 +44,7 @@
 #define BURST_RX_RETRIES 4             /* Number of retries on RX. */
 
 #define JUMBO_FRAME_MAX_SIZE    0x2600
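+/* MTU for JUMBO_FRAME_MAX_SIZE frames, excluding Ethernet header and CRC. */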
+#define MAX_MTU (JUMBO_FRAME_MAX_SIZE - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN))
 
 /* State of virtio device. */
 #define DEVICE_MAC_LEARNING 0
@@ -55,9 +57,6 @@
 
 #define INVALID_PORT_ID 0xFF
 
-/* Maximum long option length for option parsing. */
-#define MAX_LONG_OPT_SZ 64
-
 /* mask of enabled ports */
 static uint32_t enabled_port_mask = 0;
 
@@ -97,7 +96,7 @@ static int builtin_net_driver;
 
 static int async_vhost_driver;
 
-static char dma_type[MAX_LONG_OPT_SZ];
+static char *dma_type;
 
 /* Specify timeout (in useconds) between retries on RX. */
 static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
@@ -201,7 +200,7 @@ struct vhost_bufftable *vhost_txbuff[RTE_MAX_LCORE * MAX_VHOST_DEVICE];
 static inline int
 open_dma(const char *value)
 {
-       if (strncmp(dma_type, "ioat", 4) == 0)
+       if (dma_type != NULL && strncmp(dma_type, "ioat", 4) == 0)
                return open_ioat(value);
 
        return -1;
@@ -371,14 +370,8 @@ port_init(uint16_t port)
 
        RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
        RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
-                       " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
-                       port,
-                       vmdq_ports_eth_addr[port].addr_bytes[0],
-                       vmdq_ports_eth_addr[port].addr_bytes[1],
-                       vmdq_ports_eth_addr[port].addr_bytes[2],
-                       vmdq_ports_eth_addr[port].addr_bytes[3],
-                       vmdq_ports_eth_addr[port].addr_bytes[4],
-                       vmdq_ports_eth_addr[port].addr_bytes[5]);
+               " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
+               port, RTE_ETHER_ADDR_BYTES(&vmdq_ports_eth_addr[port]));
 
        return 0;
 }
@@ -641,8 +634,7 @@ us_vhost_parse_args(int argc, char **argv)
                        if (ret) {
                                vmdq_conf_default.rxmode.offloads |=
                                        DEV_RX_OFFLOAD_JUMBO_FRAME;
-                               vmdq_conf_default.rxmode.max_rx_pkt_len
-                                       = JUMBO_FRAME_MAX_SIZE;
+                               vmdq_conf_default.rxmode.mtu = MAX_MTU;
                        }
                        break;
 
@@ -669,7 +661,7 @@ us_vhost_parse_args(int argc, char **argv)
                        break;
 
                case OPT_DMA_TYPE_NUM:
-                       strcpy(dma_type, optarg);
+                       dma_type = optarg;
                        break;
 
                case OPT_DMAS_NUM:
@@ -765,7 +757,7 @@ link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
        /* Learn MAC address of guest device from packet */
        pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
-       if (find_vhost_dev(&pkt_hdr->s_addr)) {
+       if (find_vhost_dev(&pkt_hdr->src_addr)) {
                RTE_LOG(ERR, VHOST_DATA,
                        "(%d) device is using a registered MAC!\n",
                        vdev->vid);
@@ -773,18 +765,16 @@ link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
        }
 
        for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
-               vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];
+               vdev->mac_address.addr_bytes[i] =
+                       pkt_hdr->src_addr.addr_bytes[i];
 
        /* vlan_tag currently uses the device_id. */
        vdev->vlan_tag = vlan_tags[vdev->vid];
 
        /* Print out VMDQ registration info. */
        RTE_LOG(INFO, VHOST_DATA,
-               "(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
-               vdev->vid,
-               vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
-               vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
-               vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
+               "(%d) mac " RTE_ETHER_ADDR_PRT_FMT " and vlan %d registered\n",
+               vdev->vid, RTE_ETHER_ADDR_BYTES(&vdev->mac_address),
                vdev->vlan_tag);
 
        /* Register the MAC address. */
@@ -853,8 +843,11 @@ complete_async_pkts(struct vhost_dev *vdev)
 
        complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
                                        VIRTIO_RXQ, p_cpl, MAX_PKT_BURST);
-       if (complete_count)
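+       /* Free completed async copies and deduct them from the in-flight count. */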
+       if (complete_count) {
                free_pkts(p_cpl, complete_count);
+               __atomic_sub_fetch(&vdev->pkts_inflight, complete_count, __ATOMIC_SEQ_CST);
+       }
+
 }
 
 static __rte_always_inline void
@@ -890,16 +883,11 @@ drain_vhost(struct vhost_dev *vdev)
        if (builtin_net_driver) {
                ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
        } else if (async_vhost_driver) {
-               uint32_t cpu_cpl_nr = 0;
                uint16_t enqueue_fail = 0;
-               struct rte_mbuf *m_cpu_cpl[nr_xmit];
 
                complete_async_pkts(vdev);
-               ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,
-                                       m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr);
-
-               if (cpu_cpl_nr)
-                       free_pkts(m_cpu_cpl, cpu_cpl_nr);
+               ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, m, nr_xmit);
+               __atomic_add_fetch(&vdev->pkts_inflight, ret, __ATOMIC_SEQ_CST);
 
                enqueue_fail = nr_xmit - ret;
                if (enqueue_fail)
@@ -958,7 +946,7 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
        uint16_t lcore_id = rte_lcore_id();
        pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
-       dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
+       dst_vdev = find_vhost_dev(&pkt_hdr->dst_addr);
        if (!dst_vdev)
                return -1;
 
@@ -1006,7 +994,7 @@ find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
        struct rte_ether_hdr *pkt_hdr =
                rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
-       dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
+       dst_vdev = find_vhost_dev(&pkt_hdr->dst_addr);
        if (!dst_vdev)
                return 0;
 
@@ -1032,33 +1020,34 @@ find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
        return 0;
 }
 
-static uint16_t
-get_psd_sum(void *l3_hdr, uint64_t ol_flags)
-{
-       if (ol_flags & PKT_TX_IPV4)
-               return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
-       else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
-               return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
-}
-
 static void virtio_tx_offload(struct rte_mbuf *m)
 {
+       struct rte_net_hdr_lens hdr_lens;
+       struct rte_ipv4_hdr *ipv4_hdr;
+       struct rte_tcp_hdr *tcp_hdr;
+       uint32_t ptype;
        void *l3_hdr;
-       struct rte_ipv4_hdr *ipv4_hdr = NULL;
-       struct rte_tcp_hdr *tcp_hdr = NULL;
-       struct rte_ether_hdr *eth_hdr =
-               rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
-       l3_hdr = (char *)eth_hdr + m->l2_len;
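+       /* Parse the packet to get L2/L3/L4 header lengths for the offload setup. */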
+       ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
+       m->l2_len = hdr_lens.l2_len;
+       m->l3_len = hdr_lens.l3_len;
+       m->l4_len = hdr_lens.l4_len;
 
-       if (m->ol_flags & PKT_TX_IPV4) {
+       l3_hdr = rte_pktmbuf_mtod_offset(m, void *, m->l2_len);
+       tcp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *,
+               m->l2_len + m->l3_len);
+
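+       /* TSO requested: the TCP checksum must hold the pseudo-header checksum. */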
+       m->ol_flags |= PKT_TX_TCP_SEG;
+       if ((ptype & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) {
+               m->ol_flags |= PKT_TX_IPV4;
+               m->ol_flags |= PKT_TX_IP_CKSUM;
                ipv4_hdr = l3_hdr;
                ipv4_hdr->hdr_checksum = 0;
-               m->ol_flags |= PKT_TX_IP_CKSUM;
+               tcp_hdr->cksum = rte_ipv4_phdr_cksum(l3_hdr, m->ol_flags);
+       } else { /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
+               m->ol_flags |= PKT_TX_IPV6;
+               tcp_hdr->cksum = rte_ipv6_phdr_cksum(l3_hdr, m->ol_flags);
        }
-
-       tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + m->l3_len);
-       tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
 }
 
 static __rte_always_inline void
@@ -1088,7 +1077,7 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
 
 
        nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
-       if (unlikely(rte_is_broadcast_ether_addr(&nh->d_addr))) {
+       if (unlikely(rte_is_broadcast_ether_addr(&nh->dst_addr))) {
                struct vhost_dev *vdev2;
 
                TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
@@ -1151,7 +1140,7 @@ queue2nic:
                m->vlan_tci = vlan_tag;
        }
 
-       if (m->ol_flags & PKT_TX_TCP_SEG)
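+       /* LRO-flagged packets from the guest need TSO set up before Tx. */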
+       if (m->ol_flags & PKT_RX_LRO)
                virtio_tx_offload(m);
 
        tx_q->m_table[tx_q->len++] = m;
@@ -1219,16 +1208,12 @@ drain_eth_rx(struct vhost_dev *vdev)
                enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
                                                pkts, rx_count);
        } else if (async_vhost_driver) {
-               uint32_t cpu_cpl_nr = 0;
                uint16_t enqueue_fail = 0;
-               struct rte_mbuf *m_cpu_cpl[MAX_PKT_BURST];
 
                complete_async_pkts(vdev);
                enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
-                                       VIRTIO_RXQ, pkts, rx_count,
-                                       m_cpu_cpl, &cpu_cpl_nr);
-               if (cpu_cpl_nr)
-                       free_pkts(m_cpu_cpl, cpu_cpl_nr);
+                                       VIRTIO_RXQ, pkts, rx_count);
+               __atomic_add_fetch(&vdev->pkts_inflight, enqueue_count, __ATOMIC_SEQ_CST);
 
                enqueue_fail = rx_count - enqueue_count;
                if (enqueue_fail)
@@ -1398,8 +1383,19 @@ destroy_device(int vid)
                "(%d) device has been removed from data core\n",
                vdev->vid);
 
-       if (async_vhost_driver)
+       if (async_vhost_driver) {
+               uint16_t n_pkt = 0;
+               struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+
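+               /* Drain in-flight async packets before unregistering the channel. */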
+               while (vdev->pkts_inflight) {
+                       n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, VIRTIO_RXQ,
+                                               m_cpl, vdev->pkts_inflight);
+                       free_pkts(m_cpl, n_pkt);
+                       __atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
+               }
+
                rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
+       }
 
        rte_free(vdev);
 }
@@ -1469,19 +1465,50 @@ new_device(int vid)
                vid, vdev->coreid);
 
        if (async_vhost_driver) {
-               struct rte_vhost_async_features f;
+               struct rte_vhost_async_config config = {0};
                struct rte_vhost_async_channel_ops channel_ops;
 
-               if (strncmp(dma_type, "ioat", 4) == 0) {
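+               /* Use the IOAT copy callbacks for the Rx queue's async channel. */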
+               if (dma_type != NULL && strncmp(dma_type, "ioat", 4) == 0) {
                        channel_ops.transfer_data = ioat_transfer_data_cb;
                        channel_ops.check_completed_copies =
                                ioat_check_completed_copies_cb;
 
-                       f.async_inorder = 1;
-                       f.async_threshold = 256;
+                       config.features = RTE_VHOST_ASYNC_INORDER;
 
                        return rte_vhost_async_channel_register(vid, VIRTIO_RXQ,
-                               f.intval, &channel_ops);
+                               config, &channel_ops);
+               }
+       }
+
+       return 0;
+}
+
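+/* Drain in-flight async packets when the guest disables the Rx vring. */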
+static int
+vring_state_changed(int vid, uint16_t queue_id, int enable)
+{
+       struct vhost_dev *vdev = NULL;
+
+       TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+               if (vdev->vid == vid)
+                       break;
+       }
+       if (!vdev)
+               return -1;
+
+       if (queue_id != VIRTIO_RXQ)
+               return 0;
+
+       if (async_vhost_driver) {
+               if (!enable) {
+                       uint16_t n_pkt = 0;
+                       struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+
+                       while (vdev->pkts_inflight) {
+                               n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
+                                                       m_cpl, vdev->pkts_inflight);
+                               free_pkts(m_cpl, n_pkt);
+                               __atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
+                       }
                }
        }
 
@@ -1496,6 +1523,7 @@ static const struct vhost_device_ops virtio_net_device_ops =
 {
        .new_device =  new_device,
        .destroy_device = destroy_device,
+       .vring_state_changed = vring_state_changed,
 };
 
 /*
@@ -1636,7 +1664,7 @@ main(int argc, char *argv[])
        int ret, i;
        uint16_t portid;
        static pthread_t tid;
-       uint64_t flags = 0;
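+       /* Have vhost set mbuf offload flags per the mbuf API (e.g. PKT_RX_LRO). */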
+       uint64_t flags = RTE_VHOST_USER_NET_COMPLIANT_OL_FLAGS;
 
        signal(SIGINT, sigint_handler);