RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
RTE_ETH_RSS_LEVEL_MASK)
-#define BNXT_DEV_TX_OFFLOAD_SUPPORT (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
- RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
- RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
- RTE_ETH_TX_OFFLOAD_TCP_TSO | \
- RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
- RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
- RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
- RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | \
- RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \
- RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
- RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
-
-#define BNXT_DEV_RX_OFFLOAD_SUPPORT (RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
- RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
- RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
- RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
- RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
- RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | \
- RTE_ETH_RX_OFFLOAD_KEEP_CRC | \
- RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
- RTE_ETH_RX_OFFLOAD_TCP_LRO | \
- RTE_ETH_RX_OFFLOAD_SCATTER | \
- RTE_ETH_RX_OFFLOAD_RSS_HASH)
-
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
struct bnxt_flow_stat_info {
dev_info->min_rx_bufsize = 1;
dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;
- dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
- if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
- dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
- if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
- dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->rx_offload_capa = bnxt_get_rx_port_offloads(bp);
dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
- dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
+ dev_info->tx_offload_capa = bnxt_get_tx_port_offloads(bp) |
dev_info->tx_queue_offload_capa;
- if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
- dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
dev_info->min_rx_bufsize = 1;
dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;
- dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
- if (parent_bp->flags & BNXT_FLAG_PTP_SUPPORTED)
- dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
- dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
+ dev_info->rx_offload_capa = bnxt_get_rx_port_offloads(parent_bp);
+ dev_info->tx_offload_capa = bnxt_get_tx_port_offloads(parent_bp);
dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
dev_info->switch_info.name = eth_dev->device->name;
* RX Queues
*/
+uint64_t bnxt_get_rx_port_offloads(struct bnxt *bp)
+{
+ uint64_t rx_offload_capa;
+
+ rx_offload_capa = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+ RTE_ETH_RX_OFFLOAD_TCP_LRO |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+ rx_offload_capa |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM;
+
+ if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
+ rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+ if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
+ rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+
+ return rx_offload_capa;
+}
+
/* Determine whether the current configuration needs aggregation ring in HW. */
int bnxt_need_agg_ring(struct rte_eth_dev *eth_dev)
{
void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq);
int bnxt_need_agg_ring(struct rte_eth_dev *eth_dev);
void bnxt_free_rxq_mem(struct bnxt_rx_queue *rxq);
+uint64_t bnxt_get_rx_port_offloads(struct bnxt *bp);
#endif
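
For context only, here is a minimal sketch (not part of this patch) of how the mask returned by the new bnxt_get_rx_port_offloads() helper could be checked against the Rx offloads requested at configure time. The helper bnxt_check_rx_offloads() below is hypothetical and exists purely for illustration:

/* Illustrative helper, assuming the driver-internal headers are available. */
#include <stdint.h>
#include <errno.h>
#include "bnxt.h"
#include "bnxt_rxq.h"

static int
bnxt_check_rx_offloads(struct bnxt *bp, uint64_t req_offloads)
{
	/* Capabilities as advertised by bnxt_get_rx_port_offloads(). */
	uint64_t supported = bnxt_get_rx_port_offloads(bp);

	/* Reject any requested offload bit the port does not support. */
	if (req_offloads & ~supported)
		return -ENOTSUP;

	return 0;
}
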
* TX Queues
*/
+uint64_t bnxt_get_tx_port_offloads(struct bnxt *bp)
+{
+ uint64_t tx_offload_capa;
+
+ tx_offload_capa = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
+
+ tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
+
+ if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
+ tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
+
+ return tx_offload_capa;
+}
+
void bnxt_free_txq_stats(struct bnxt_tx_queue *txq)
{
if (txq && txq->cp_ring && txq->cp_ring->hw_stats)
uint16_t nb_desc,
unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
+uint64_t bnxt_get_tx_port_offloads(struct bnxt *bp);
#endif
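
For reference, an illustrative application-side snippet (hypothetical, not part of this patch) showing where the merged capabilities surface: rte_eth_dev_info_get() reports the rx_offload_capa/tx_offload_capa values that the PMD now derives from bnxt_get_rx_port_offloads() and bnxt_get_tx_port_offloads():

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static void
show_port_offloads(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	/* The bnxt PMD fills these fields via the new helpers. */
	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return;

	printf("port %u rx_offload_capa=0x%" PRIx64 " tx_offload_capa=0x%" PRIx64 "\n",
	       port_id, dev_info.rx_offload_capa, dev_info.tx_offload_capa);
}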