net/enic: add Tx prepare handler
author Hyong Youb Kim <hyonkim@cisco.com>
Tue, 23 Jan 2018 01:05:29 +0000 (17:05 -0800)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Mon, 29 Jan 2018 09:04:28 +0000 (10:04 +0100)
Like most NICs, this hardware (Cisco VIC) requires a partial checksum
to be present in the packet for checksum offload and TSO to work. So,
add a tx_pkt_prepare handler, as other PMDs do.

Technically, the VIC has an offload mode that does not require the
partial checksum for non-TSO packets, but it has no such mode for TSO
packets, which makes tx_pkt_prepare unavoidable.
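
For reference, a minimal sketch (not part of this patch) of how an
application drives the new handler; the helper name and the TSO field
values are illustrative assumptions. rte_eth_tx_prepare() dispatches to
the PMD's tx_pkt_prepare (enic_prep_pkts here), which writes the
partial checksum into each packet before rte_eth_tx_burst() hands the
burst to the VIC:

    #include <stdio.h>
    #include <rte_errno.h>
    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Hypothetical helper: prepare a burst, then transmit whatever
     * passed preparation. For TSO, each mbuf would already carry e.g.:
     *   m->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;
     *   m->l2_len = sizeof(struct ether_hdr);
     *   m->l3_len = sizeof(struct ipv4_hdr);
     *   m->l4_len = sizeof(struct tcp_hdr);
     *   m->tso_segsz = 1448;
     */
    static uint16_t
    xmit_with_prepare(uint16_t port_id, uint16_t queue_id,
                      struct rte_mbuf **pkts, uint16_t nb_pkts)
    {
            uint16_t nb_prep;

            /* Stops at the first bad packet and sets rte_errno. */
            nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
            if (nb_prep < nb_pkts)
                    printf("tx_prepare stopped at %u: %s\n",
                           nb_prep, rte_strerror(rte_errno));

            return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
    }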

Signed-off-by: Hyong Youb Kim <hyonkim@cisco.com>
Reviewed-by: John Daley <johndale@cisco.com>
drivers/net/enic/enic.h
drivers/net/enic/enic_ethdev.c
drivers/net/enic/enic_rxtx.c

diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index c76a8f0..c083985 100644
@@ -268,6 +268,8 @@ uint16_t enic_dummy_recv_pkts(void *rx_queue,
                              uint16_t nb_pkts);
 uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                               uint16_t nb_pkts);
+uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+                       uint16_t nb_pkts);
 int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
 int enic_link_update(struct enic *enic);
 void enic_fdir_info(struct enic *enic);
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 2c356dc..c46d592 100644
@@ -641,6 +641,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
        eth_dev->dev_ops = &enicpmd_eth_dev_ops;
        eth_dev->rx_pkt_burst = &enic_recv_pkts;
        eth_dev->tx_pkt_burst = &enic_xmit_pkts;
+       eth_dev->tx_pkt_prepare = &enic_prep_pkts;
 
        pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
        rte_eth_copy_pci_info(eth_dev, pdev);
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 98902ca..5b743eb 100644
@@ -5,12 +5,22 @@
 
 #include <rte_mbuf.h>
 #include <rte_ethdev_driver.h>
+#include <rte_net.h>
 #include <rte_prefetch.h>
 
 #include "enic_compat.h"
 #include <rte_ip.h>
 #include <rte_tcp.h>
 
+#define        ENIC_TX_OFFLOAD_MASK (                   \
+               PKT_TX_VLAN_PKT |                \
+               PKT_TX_IP_CKSUM |                \
+               PKT_TX_L4_MASK |                 \
+               PKT_TX_TCP_SEG)
+
+#define        ENIC_TX_OFFLOAD_NOTSUP_MASK \
+       (PKT_TX_OFFLOAD_MASK ^ ENIC_TX_OFFLOAD_MASK)
+
 #define RTE_PMD_USE_PREFETCH
 
 #ifdef RTE_PMD_USE_PREFETCH
@@ -433,6 +443,38 @@ unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
        return 0;
 }
 
+uint16_t enic_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+                       uint16_t nb_pkts)
+{
+       int32_t ret;
+       uint16_t i;
+       uint64_t ol_flags;
+       struct rte_mbuf *m;
+
+       for (i = 0; i != nb_pkts; i++) {
+               m = tx_pkts[i];
+               ol_flags = m->ol_flags;
+               if (ol_flags & ENIC_TX_OFFLOAD_NOTSUP_MASK) {
+                       rte_errno = ENOTSUP; /* rte_errno takes positive errno */
+                       return i;
+               }
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+               ret = rte_validate_tx_offload(m);
+               if (ret != 0) {
+                       rte_errno = -ret; /* ret is a negative errno */
+                       return i;
+               }
+#endif
+               ret = rte_net_intel_cksum_prepare(m);
+               if (ret != 0) {
+                       rte_errno = -ret;
+                       return i;
+               }
+       }
+
+       return i;
+}
+
 uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint16_t nb_pkts)
 {
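
A note on ENIC_TX_OFFLOAD_NOTSUP_MASK above: XOR-ing the supported
flags against PKT_TX_OFFLOAD_MASK leaves exactly the offload bits the
PMD cannot handle, so one AND in enic_prep_pkts() rejects any
unsupported request. A standalone sketch, assuming 18.02-era flag names
from rte_mbuf.h (the helper itself is hypothetical):

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_mbuf.h>

    /* Hypothetical mirror of the enic_prep_pkts() mask check. */
    static bool
    tx_offloads_supported(uint64_t ol_flags)
    {
            const uint64_t supported = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM |
                                       PKT_TX_L4_MASK | PKT_TX_TCP_SEG;
            const uint64_t notsup = PKT_TX_OFFLOAD_MASK ^ supported;

            /* e.g. PKT_TX_OUTER_IP_CKSUM fails this check, while
             * PKT_TX_TCP_CKSUM (within PKT_TX_L4_MASK) passes.
             */
            return (ol_flags & notsup) == 0;
    }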