From: Hyong Youb Kim
Date: Tue, 23 Jan 2018 01:05:29 +0000 (-0800)
Subject: net/enic: add Tx prepare handler
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=1e81dbb5321b2bcfdba41a1f237d6534faef2c6e;p=dpdk.git

net/enic: add Tx prepare handler

Like most NICs, this hardware (Cisco VIC) also requires a partial
checksum in the packet for checksum offload and TSO. So, add the
tx_pkt_prepare handler as other PMDs do. Technically, the VIC has an
offload mode that does not require a partial checksum for non-TSO
packets, but it has no such mode for TSO packets, making
tx_pkt_prepare unavoidable.

Signed-off-by: Hyong Youb Kim
Reviewed-by: John Daley
---

diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index c76a8f0af8..c083985ee7 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -268,6 +268,8 @@ uint16_t enic_dummy_recv_pkts(void *rx_queue,
 			      uint16_t nb_pkts);
 uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			uint16_t nb_pkts);
+uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+			uint16_t nb_pkts);
 int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
 int enic_link_update(struct enic *enic);
 void enic_fdir_info(struct enic *enic);
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 2c356dc27d..c46d592ed2 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -641,6 +641,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
 	eth_dev->dev_ops = &enicpmd_eth_dev_ops;
 	eth_dev->rx_pkt_burst = &enic_recv_pkts;
 	eth_dev->tx_pkt_burst = &enic_xmit_pkts;
+	eth_dev->tx_pkt_prepare = &enic_prep_pkts;
 
 	pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	rte_eth_copy_pci_info(eth_dev, pdev);
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 98902caa0f..5b743eb325 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -5,6 +5,7 @@
 
 #include <rte_mbuf.h>
 #include <rte_ethdev_driver.h>
+#include <rte_net.h>
 #include <rte_prefetch.h>
 
 #include "enic_compat.h"
@@ -14,6 +15,15 @@
 #include <rte_ip.h>
 #include <rte_tcp.h>
 
+#define ENIC_TX_OFFLOAD_MASK ( \
+	PKT_TX_VLAN_PKT | \
+	PKT_TX_IP_CKSUM | \
+	PKT_TX_L4_MASK | \
+	PKT_TX_TCP_SEG)
+
+#define ENIC_TX_OFFLOAD_NOTSUP_MASK \
+	(PKT_TX_OFFLOAD_MASK ^ ENIC_TX_OFFLOAD_MASK)
+
 #define RTE_PMD_USE_PREFETCH
 
 #ifdef RTE_PMD_USE_PREFETCH
@@ -433,6 +443,38 @@ unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
 	return 0;
 }
 
+uint16_t enic_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+			uint16_t nb_pkts)
+{
+	int32_t ret;
+	uint16_t i;
+	uint64_t ol_flags;
+	struct rte_mbuf *m;
+
+	for (i = 0; i != nb_pkts; i++) {
+		m = tx_pkts[i];
+		ol_flags = m->ol_flags;
+		if (ol_flags & ENIC_TX_OFFLOAD_NOTSUP_MASK) {
+			rte_errno = ENOTSUP;
+			return i;
+		}
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+		ret = rte_validate_tx_offload(m);
+		if (ret != 0) {
+			rte_errno = -ret;
+			return i;
+		}
+#endif
+		ret = rte_net_intel_cksum_prepare(m);
+		if (ret != 0) {
+			rte_errno = -ret;
+			return i;
+		}
+	}
+
+	return i;
+}
+
 uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t nb_pkts)
 {
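
For context, applications do not call enic_prep_pkts() directly; it runs
when they invoke rte_eth_tx_prepare() before rte_eth_tx_burst(). Below is
a minimal sketch of that call sequence. It is not part of this patch: the
send_burst helper and its parameters are illustrative only.

#include <stdio.h>

#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Illustrative helper: prepare, then transmit, one burst of packets. */
static uint16_t
send_burst(uint16_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t nb)
{
	uint16_t nb_prep;

	/*
	 * Runs the PMD's tx_pkt_prepare handler (enic_prep_pkts for
	 * enic): rejects mbufs with unsupported ol_flags and writes the
	 * partial checksums the NIC expects.
	 */
	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb);
	if (nb_prep < nb)
		printf("pkts[%u] not prepared: %s\n",
		       nb_prep, rte_strerror(rte_errno));

	/* Transmit only the packets that passed preparation. */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}

Note that rte_eth_tx_prepare() stops at the first failing packet and
reports the cause in rte_errno, which is why the sketch transmits only
the first nb_prep packets.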
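
As for what "partial checksum" means here: before the packet reaches the
NIC, the L4 checksum field must already hold the pseudo-header checksum,
which the hardware then extends over the payload.
rte_net_intel_cksum_prepare() performs that fix-up on behalf of the PMD.
A rough equivalent for the IPv4/TCP case follows; it is illustrative
only, seed_tcp4_phdr_cksum is a hypothetical helper, and m->l2_len and
m->l3_len are assumed to be set by the application.

#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_tcp.h>

/* Hypothetical helper (not in this patch): seed the TCP checksum field
 * with the IPv4 pseudo-header checksum, as required when
 * PKT_TX_TCP_CKSUM or PKT_TX_TCP_SEG is set.
 */
static void
seed_tcp4_phdr_cksum(struct rte_mbuf *m)
{
	struct ipv4_hdr *ip;
	struct tcp_hdr *tcp;

	ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
	tcp = (struct tcp_hdr *)((char *)ip + m->l3_len);
	/* For TSO (PKT_TX_TCP_SEG), rte_ipv4_phdr_cksum() leaves the
	 * length out of the sum, since the NIC segments the payload.
	 */
	tcp->cksum = rte_ipv4_phdr_cksum(ip, m->ol_flags);
}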