From 51215925a32fd3632e9963ca7b0349b4472bc59b Mon Sep 17 00:00:00 2001
From: Wei Dai
Date: Thu, 22 Mar 2018 11:41:03 +0800
Subject: [PATCH] net/ixgbe: convert to new Tx offloads API

Ethdev Tx offloads API has changed since:

commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")

This commit adds support for the new Tx offloads API.

Signed-off-by: Wei Dai
Acked-by: Qi Zhang
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 56 +++++++++++-----------
 drivers/net/ixgbe/ixgbe_ipsec.c  |  5 +-
 drivers/net/ixgbe/ixgbe_rxtx.c   | 79 ++++++++++++++++++++++++++++++--
 drivers/net/ixgbe/ixgbe_rxtx.h   |  3 ++
 4 files changed, 108 insertions(+), 35 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 3a531e199e..fbc048f7d8 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2298,6 +2298,7 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 		(struct ixgbe_adapter *)dev->data->dev_private;
 	struct rte_eth_dev_info dev_info;
 	uint64_t rx_offloads;
+	uint64_t tx_offloads;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -2317,6 +2318,13 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 			    rx_offloads, dev_info.rx_offload_capa);
 		return -ENOTSUP;
 	}
+	tx_offloads = dev->data->dev_conf.txmode.offloads;
+	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
+		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
+			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			    tx_offloads, dev_info.tx_offload_capa);
+		return -ENOTSUP;
+	}
 
 	/* set flag to update link status after init */
 	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -3610,28 +3618,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
-
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO;
-
-	if (hw->mac.type == ixgbe_mac_82599EB ||
-	    hw->mac.type == ixgbe_mac_X540)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
-
-	if (hw->mac.type == ixgbe_mac_X550 ||
-	    hw->mac.type == ixgbe_mac_X550EM_x ||
-	    hw->mac.type == ixgbe_mac_X550EM_a)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
-
-#ifdef RTE_LIBRTE_SECURITY
-	if (dev->security_ctx)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
-#endif
+	dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
+	dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -3653,7 +3641,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
 		.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
 		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
-			     ETH_TXQ_FLAGS_NOOFFLOADS,
+			     ETH_TXQ_FLAGS_NOOFFLOADS |
+			     ETH_TXQ_FLAGS_IGNORE,
+		.offloads = 0,
 	};
 
 	dev_info->rx_desc_lim = rx_desc_lim;
@@ -3737,12 +3727,8 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM |
-				DEV_TX_OFFLOAD_UDP_CKSUM |
-				DEV_TX_OFFLOAD_TCP_CKSUM |
-				DEV_TX_OFFLOAD_SCTP_CKSUM |
-				DEV_TX_OFFLOAD_TCP_TSO;
+	dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
+	dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -3764,7 +3750,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
 		.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
 		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
-			     ETH_TXQ_FLAGS_NOOFFLOADS,
+			     ETH_TXQ_FLAGS_NOOFFLOADS |
+			     ETH_TXQ_FLAGS_IGNORE,
+		.offloads = 0,
 	};
 
 	dev_info->rx_desc_lim = rx_desc_lim;
@@ -4892,6 +4880,7 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 		(struct ixgbe_adapter *)dev->data->dev_private;
 	struct rte_eth_dev_info dev_info;
 	uint64_t rx_offloads;
+	uint64_t tx_offloads;
 
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
@@ -4904,6 +4893,13 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 			    rx_offloads, dev_info.rx_offload_capa);
 		return -ENOTSUP;
 	}
+	tx_offloads = dev->data->dev_conf.txmode.offloads;
+	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
+		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
+			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			    tx_offloads, dev_info.tx_offload_capa);
+		return -ENOTSUP;
+	}
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index 29e4728c7c..de7ed36764 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -599,8 +599,11 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t reg;
 	uint64_t rx_offloads;
+	uint64_t tx_offloads;
 
 	rx_offloads = dev->data->dev_conf.rxmode.offloads;
+	tx_offloads = dev->data->dev_conf.txmode.offloads;
+
 	/* sanity checks */
 	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
@@ -634,7 +637,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 			return -1;
 		}
 	}
-	if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+	if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
 		IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
 				IXGBE_SECTXCTRL_STORE_FORWARD);
 		reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index f6198f0696..7511e183f6 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2379,7 +2379,7 @@ void __attribute__((cold))
 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 {
 	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
-	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
+	if ((txq->offloads == 0) &&
 #ifdef RTE_LIBRTE_SECURITY
 			!(txq->using_ipsec) &&
 #endif
@@ -2398,9 +2398,8 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 	} else {
 		PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
 		PMD_INIT_LOG(DEBUG,
-				" - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
-				(unsigned long)txq->txq_flags,
-				(unsigned long)IXGBE_SIMPLE_FLAGS);
+				" - offloads = 0x%" PRIx64,
+				txq->offloads);
 		PMD_INIT_LOG(DEBUG,
 				" - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
 				(unsigned long)txq->tx_rs_thresh,
@@ -2410,6 +2409,60 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 	}
 }
 
+uint64_t
+ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
+{
+	RTE_SET_USED(dev);
+
+	return 0;
+}
+
+uint64_t
+ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
+{
+	uint64_t tx_offload_capa;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	tx_offload_capa =
+		DEV_TX_OFFLOAD_VLAN_INSERT |
+		DEV_TX_OFFLOAD_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM |
+		DEV_TX_OFFLOAD_SCTP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_TSO;
+
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540)
+		tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+
+	if (hw->mac.type == ixgbe_mac_X550 ||
+	    hw->mac.type == ixgbe_mac_X550EM_x ||
+	    hw->mac.type == ixgbe_mac_X550EM_a)
+		tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+	if (dev->security_ctx)
+		tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+#endif
+	return tx_offload_capa;
+}
+
+static int
+ixgbe_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
+	uint64_t queue_supported = ixgbe_get_tx_queue_offloads(dev);
+	uint64_t port_supported = ixgbe_get_tx_port_offloads(dev);
+
+	if ((requested & (queue_supported | port_supported)) != requested)
+		return 0;
+
+	if ((port_offloads ^ requested) & port_supported)
+		return 0;
+
+	return 1;
+}
+
 int __attribute__((cold))
 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2425,6 +2478,22 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	/*
+	 * Don't verify port offloads for applications which
+	 * use the old API.
+	 */
+	if (!ixgbe_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported queue offloads 0x%" PRIx64
+			" or supported port offloads 0x%" PRIx64,
+			(void *)dev, tx_conf->offloads,
+			dev->data->dev_conf.txmode.offloads,
+			ixgbe_get_tx_queue_offloads(dev),
+			ixgbe_get_tx_port_offloads(dev));
+		return -ENOTSUP;
+	}
+
 	/*
 	 * Validate number of transmit descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
@@ -2551,6 +2620,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	txq->port_id = dev->data->port_id;
 	txq->txq_flags = tx_conf->txq_flags;
+	txq->offloads = tx_conf->offloads;
 	txq->ops = &def_txq_ops;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIBRTE_SECURITY
@@ -5371,6 +5441,7 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
 	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
 	qinfo->conf.txq_flags = txq->txq_flags;
+	qinfo->conf.offloads = txq->offloads;
 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
 }
 
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 30095fa4b0..642cf4d82b 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -223,6 +223,7 @@ struct ixgbe_tx_queue {
 	uint8_t hthresh;    /**< Host threshold register. */
 	uint8_t wthresh;    /**< Write-back threshold reg. */
 	uint32_t txq_flags; /**< Holds flags for this TXq */
+	uint64_t offloads;  /**< Tx offload flags of DEV_TX_OFFLOAD_* */
 	uint32_t ctx_curr;  /**< Hardware context states. */
 	/** Hardware context0 history. */
 	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
@@ -307,8 +308,10 @@ uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 				    uint16_t nb_pkts);
 int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
 
+uint64_t ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
 uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
 uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev);
 
 #endif /* RTE_IXGBE_INC_VECTOR */
 #endif /* _IXGBE_RXTX_H_ */
-- 
2.20.1
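
Usage sketch: the snippet below shows how an application drives the new
Tx offloads API that this patch implements. It is a minimal sketch
against a DPDK 18.05-era ethdev, not part of the patch: the function
name setup_port_with_tx_offloads, the chosen offloads, the single
queue, and the 512-descriptor ring are illustrative assumptions, and Rx
queue setup plus rte_eth_dev_start() are omitted.

#include <errno.h>
#include <rte_ethdev.h>

static int
setup_port_with_tx_offloads(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = { 0 };
	struct rte_eth_txconf txq_conf;
	/* Illustrative offload selection, not mandated by the patch. */
	uint64_t wanted = DEV_TX_OFFLOAD_IPV4_CKSUM |
			  DEV_TX_OFFLOAD_TCP_CKSUM;
	int ret;

	/* For ixgbe, tx_offload_capa is now filled from
	 * ixgbe_get_tx_port_offloads(). */
	rte_eth_dev_info_get(port_id, &dev_info);
	if ((wanted & dev_info.tx_offload_capa) != wanted)
		return -ENOTSUP;

	/* Port-level offloads; ixgbe_dev_configure() rejects any bit
	 * the port does not report with -ENOTSUP. */
	port_conf.txmode.offloads = wanted;
	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret != 0)
		return ret;

	/* ETH_TXQ_FLAGS_IGNORE makes the PMD honor tx_conf->offloads
	 * instead of the legacy txq_flags. Since
	 * ixgbe_get_tx_queue_offloads() returns 0 (no queue-only Tx
	 * offloads), the queue offloads must match the port-level ones
	 * to pass ixgbe_check_tx_queue_offloads(). */
	txq_conf = dev_info.default_txconf;
	txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
	txq_conf.offloads = port_conf.txmode.offloads;

	return rte_eth_tx_queue_setup(port_id, 0, 512,
				      rte_eth_dev_socket_id(port_id),
				      &txq_conf);
}

With this flow, a queue configured with offloads == 0 still selects the
simple/vector Tx path in ixgbe_set_tx_function() (the patch replaces
the IXGBE_SIMPLE_FLAGS test with txq->offloads == 0), so applications
that want the fast path simply leave both offload fields empty.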