X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_rxtx.c;h=c9993abc6d1806bfea9343c59165b4abbea72970;hb=adce1f86f8d25fc10e9ac32fd59fa0bedce608ad;hp=ff6ddb8c8d051d32709be970caacac607780cc33;hpb=a7740dc1303a940fff3b3009fb58dbd43d831ce9;p=dpdk.git

diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index ff6ddb8c8d..c9993abc6d 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1,7 +1,7 @@
 /*-
  * BSD LICENSE
  *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
  * Copyright 2014 6WIND S.A.
  * All rights reserved.
  *
@@ -58,7 +58,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -71,6 +70,7 @@
 #include
 #include
 #include
+#include

 #include "ixgbe_logs.h"
 #include "base/ixgbe_api.h"
@@ -80,24 +80,23 @@
 #include "base/ixgbe_common.h"
 #include "ixgbe_rxtx.h"

+#ifdef RTE_LIBRTE_IEEE1588
+#define IXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#else
+#define IXGBE_TX_IEEE1588_TMST 0
+#endif
 /* Bit Mask to indicate what bits required for building TX context */
 #define IXGBE_TX_OFFLOAD_MASK (		\
 		PKT_TX_VLAN_PKT |	\
 		PKT_TX_IP_CKSUM |	\
 		PKT_TX_L4_MASK |	\
 		PKT_TX_TCP_SEG |	\
-		PKT_TX_OUTER_IP_CKSUM)
-
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check_raw(m, 0);
-	return m;
-}
+		PKT_TX_MACSEC |		\
+		PKT_TX_OUTER_IP_CKSUM |	\
+		IXGBE_TX_IEEE1588_TMST)
+#define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
+		(PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)

 #if 1
 #define RTE_PMD_USE_PREFETCH
@@ -333,7 +332,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,

 	/* update tail pointer */
 	rte_wmb();
-	IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
+	IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);

 	return nb_pkts;
 }
@@ -352,6 +351,7 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
 	nb_tx = 0;
 	while (nb_pkts) {
 		uint16_t ret, n;
+
 		n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
 		ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
 		nb_tx = (uint16_t)(nb_tx + ret);
@@ -478,30 +478,28 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
  */
 static inline uint32_t
 what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
-			union ixgbe_tx_offload tx_offload)
+		   union ixgbe_tx_offload tx_offload)
 {
 	/* If match with the current used context */
 	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
-		(txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
-		(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
-		& tx_offload.data[0])) &&
-		(txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
-		(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
-		& tx_offload.data[1])))) {
-		return txq->ctx_curr;
-	}
+		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+		     & tx_offload.data[0])) &&
+		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+		     & tx_offload.data[1]))))
+		return txq->ctx_curr;

 	/* What if match with the next context  */
 	txq->ctx_curr ^= 1;
 	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
-		(txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
-		(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
-		& tx_offload.data[0])) &&
-		(txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
-		(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
-		&
tx_offload.data[1])))) { - return txq->ctx_curr; - } + (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0] + & tx_offload.data[0])) && + (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1] + & tx_offload.data[1])))) + return txq->ctx_curr; /* Mismatch, use the previous context */ return IXGBE_CTX_NUM; @@ -511,6 +509,7 @@ static inline uint32_t tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags) { uint32_t tmp = 0; + if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM) tmp |= IXGBE_ADVTXD_POPTS_TXSM; if (ol_flags & PKT_TX_IP_CKSUM) @@ -524,12 +523,15 @@ static inline uint32_t tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags) { uint32_t cmdtype = 0; + if (ol_flags & PKT_TX_VLAN_PKT) cmdtype |= IXGBE_ADVTXD_DCMD_VLE; if (ol_flags & PKT_TX_TCP_SEG) cmdtype |= IXGBE_ADVTXD_DCMD_TSE; if (ol_flags & PKT_TX_OUTER_IP_CKSUM) cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT); + if (ol_flags & PKT_TX_MACSEC) + cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC; return cmdtype; } @@ -561,8 +563,7 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq) /* Check to make sure the last descriptor to clean is done */ desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; status = txr[desc_to_clean_to].wb.status; - if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) - { + if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) { PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done" "(port=%d queue=%d)", @@ -909,91 +910,446 @@ end_of_tx: PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u", (unsigned) txq->port_id, (unsigned) txq->queue_id, (unsigned) tx_id, (unsigned) nb_tx); - IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id); + IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id); txq->tx_tail = tx_id; return nb_tx; } +/********************************************************************* + * + * TX prep functions + * + **********************************************************************/ +uint16_t +ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + int i, ret; + uint64_t ol_flags; + struct rte_mbuf *m; + struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue; + + for (i = 0; i < nb_pkts; i++) { + m = tx_pkts[i]; + ol_flags = m->ol_flags; + + /** + * Check if packet meets requirements for number of segments + * + * NOTE: for ixgbe it's always (40 - WTHRESH) for both TSO and + * non-TSO + */ + + if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) { + rte_errno = -EINVAL; + return i; + } + + if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) { + rte_errno = -ENOTSUP; + return i; + } + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + ret = rte_validate_tx_offload(m); + if (ret != 0) { + rte_errno = ret; + return i; + } +#endif + ret = rte_net_intel_cksum_prepare(m); + if (ret != 0) { + rte_errno = ret; + return i; + } + } + + return i; +} + /********************************************************************* * * RX functions * **********************************************************************/ -#define IXGBE_PACKET_TYPE_IPV4 0X01 -#define IXGBE_PACKET_TYPE_IPV4_TCP 0X11 -#define IXGBE_PACKET_TYPE_IPV4_UDP 0X21 -#define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41 -#define IXGBE_PACKET_TYPE_IPV4_EXT 0X03 -#define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43 -#define IXGBE_PACKET_TYPE_IPV6 0X04 -#define IXGBE_PACKET_TYPE_IPV6_TCP 0X14 -#define IXGBE_PACKET_TYPE_IPV6_UDP 0X24 -#define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C -#define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C -#define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C -#define 
IXGBE_PACKET_TYPE_IPV4_IPV6 0X05 -#define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15 -#define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25 -#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D -#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D -#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D + +#define IXGBE_PACKET_TYPE_ETHER 0X00 +#define IXGBE_PACKET_TYPE_IPV4 0X01 +#define IXGBE_PACKET_TYPE_IPV4_TCP 0X11 +#define IXGBE_PACKET_TYPE_IPV4_UDP 0X21 +#define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41 +#define IXGBE_PACKET_TYPE_IPV4_EXT 0X03 +#define IXGBE_PACKET_TYPE_IPV4_EXT_TCP 0X13 +#define IXGBE_PACKET_TYPE_IPV4_EXT_UDP 0X23 +#define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43 +#define IXGBE_PACKET_TYPE_IPV6 0X04 +#define IXGBE_PACKET_TYPE_IPV6_TCP 0X14 +#define IXGBE_PACKET_TYPE_IPV6_UDP 0X24 +#define IXGBE_PACKET_TYPE_IPV6_SCTP 0X44 +#define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C +#define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C +#define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C +#define IXGBE_PACKET_TYPE_IPV6_EXT_SCTP 0X4C +#define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05 +#define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15 +#define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25 +#define IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP 0X45 +#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6 0X07 +#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP 0X17 +#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP 0X27 +#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP 0X47 +#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D +#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D +#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D +#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP 0X4D +#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT 0X0F +#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP 0X1F +#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP 0X2F +#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP 0X4F + +#define IXGBE_PACKET_TYPE_NVGRE 0X00 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4 0X01 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP 0X11 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP 0X21 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP 0X41 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT 0X03 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP 0X13 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP 0X23 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP 0X43 +#define IXGBE_PACKET_TYPE_NVGRE_IPV6 0X04 +#define IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP 0X14 +#define IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP 0X24 +#define IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP 0X44 +#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT 0X0C +#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP 0X1C +#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP 0X2C +#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP 0X4C +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6 0X05 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP 0X15 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP 0X25 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT 0X0D +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP 0X1D +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP 0X2D + +#define IXGBE_PACKET_TYPE_VXLAN 0X80 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4 0X81 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP 0x91 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP 0xA1 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP 0xC1 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT 0x83 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP 0X93 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP 0XA3 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP 0XC3 +#define IXGBE_PACKET_TYPE_VXLAN_IPV6 0X84 +#define IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP 0X94 +#define IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP 0XA4 +#define IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP 0XC4 +#define 
IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT 0X8C +#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP 0X9C +#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP 0XAC +#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP 0XCC +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6 0X85 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP 0X95 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP 0XA5 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT 0X8D +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP 0X9D +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP 0XAD + #define IXGBE_PACKET_TYPE_MAX 0X80 -#define IXGBE_PACKET_TYPE_MASK 0X7F +#define IXGBE_PACKET_TYPE_TN_MAX 0X100 #define IXGBE_PACKET_TYPE_SHIFT 0X04 + +/* @note: fix ixgbe_dev_supported_ptypes_get() if any change here. */ static inline uint32_t -ixgbe_rxd_pkt_info_to_pkt_type(uint16_t pkt_info) +ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask) { + /** + * Use 2 different table for normal packet and tunnel packet + * to save the space. + */ static const uint32_t ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = { + [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER, [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4, + [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP, [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT, + [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP, [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6, - [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6, + [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP, [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT, - [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT, - [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, - [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP, + RTE_PTYPE_INNER_L3_IPV6, [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP, - [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_IPV4_UDP] = 
RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP, - [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP, [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP, - [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] = + RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP, + }; + + static const uint32_t + ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = { + [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER, + [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT, + [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER 
| RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 | + RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 | + RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT | + RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] = + RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 | + RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 | + RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 | + RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT | + RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT | + RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] = + RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 | + RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_UDP, + + 
[IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER, + [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT, + [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] = + RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + 
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] = + RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP, }; + if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF)) return RTE_PTYPE_UNKNOWN; - pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) & - IXGBE_PACKET_TYPE_MASK; + pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) & ptype_mask; + + /* For tunnel packet */ + if (pkt_info & IXGBE_PACKET_TYPE_TUNNEL_BIT) { + /* Remove the tunnel bit to save the space. */ + pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL; + return ptype_table_tn[pkt_info]; + } + + /** + * For x550, if it's not tunnel, + * tunnel type bit should be set to 0. + * Reuse 82599's mask. + */ + pkt_info &= IXGBE_PACKET_TYPE_MASK_82599; return ptype_table[pkt_info]; } @@ -1024,7 +1380,7 @@ ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info) } static inline uint64_t -rx_desc_status_to_pkt_flags(uint32_t rx_status) +rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags) { uint64_t pkt_flags; @@ -1033,7 +1389,7 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status) * Do not check whether L3/L4 rx checksum done by NIC or not, * That can be found from rte_eth_rxmode.hw_ip_checksum flag */ - pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0; + pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? vlan_flags : 0; #ifdef RTE_LIBRTE_IEEE1588 if (rx_status & IXGBE_RXD_STAT_TMST) @@ -1052,7 +1408,9 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status) * Bit 30: L4I, L4I integrity error */ static uint64_t error_to_pkt_flags_map[4] = { - 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD, + PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD, + PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD, + PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD, PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD }; pkt_flags = error_to_pkt_flags_map[(rx_status >> @@ -1087,9 +1445,10 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) uint64_t pkt_flags; int nb_dd; uint32_t s[LOOK_AHEAD]; - uint16_t pkt_info[LOOK_AHEAD]; + uint32_t pkt_info[LOOK_AHEAD]; int i, j, nb_rx = 0; uint32_t status; + uint64_t vlan_flags = rxq->vlan_flags; /* get references to current descriptor and S/W ring entry */ rxdp = &rxq->rx_ring[rxq->rx_tail]; @@ -1105,20 +1464,21 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) * reference packets that are ready to be received. 
*/ for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST; - i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) - { + i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) { /* Read desc statuses backwards to avoid race condition */ - for (j = LOOK_AHEAD-1; j >= 0; --j) + for (j = 0; j < LOOK_AHEAD; j++) s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error); - for (j = LOOK_AHEAD - 1; j >= 0; --j) - pkt_info[j] = rxdp[j].wb.lower.lo_dword. - hs_rss.pkt_info; + rte_smp_rmb(); /* Compute how many status bits were set */ - nb_dd = 0; - for (j = 0; j < LOOK_AHEAD; ++j) - nb_dd += s[j] & IXGBE_RXDADV_STAT_DD; + for (nb_dd = 0; nb_dd < LOOK_AHEAD && + (s[nb_dd] & IXGBE_RXDADV_STAT_DD); nb_dd++) + ; + + for (j = 0; j < nb_dd; j++) + pkt_info[j] = rte_le_to_cpu_32(rxdp[j].wb.lower. + lo_dword.data); nb_rx += nb_dd; @@ -1132,13 +1492,15 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan); /* convert descriptor fields to rte mbuf flags */ - pkt_flags = rx_desc_status_to_pkt_flags(s[j]); + pkt_flags = rx_desc_status_to_pkt_flags(s[j], + vlan_flags); pkt_flags |= rx_desc_error_to_pkt_flags(s[j]); - pkt_flags |= - ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]); + pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags + ((uint16_t)pkt_info[j]); mb->ol_flags = pkt_flags; mb->packet_type = - ixgbe_rxd_pkt_info_to_pkt_type(pkt_info[j]); + ixgbe_rxd_pkt_info_to_pkt_type + (pkt_info[j], rxq->pkt_type_mask); if (likely(pkt_flags & PKT_RX_RSS_HASH)) mb->hash.rss = rte_le_to_cpu_32( @@ -1263,6 +1625,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, if (ixgbe_rx_alloc_bufs(rxq, true) != 0) { int i, j; + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " "queue_id=%u", (unsigned) rxq->port_id, (unsigned) rxq->queue_id); @@ -1284,7 +1647,8 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, /* update tail pointer */ rte_wmb(); - IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, cur_free_trigger); + IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, + cur_free_trigger); } if (rxq->rx_tail >= rxq->nb_rx_desc) @@ -1298,7 +1662,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, } /* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */ -static uint16_t +uint16_t ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { @@ -1314,6 +1678,7 @@ ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts, nb_rx = 0; while (nb_pkts) { uint16_t ret, n; + n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST); ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n); nb_rx = (uint16_t)(nb_rx + ret); @@ -1345,6 +1710,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_rx; uint16_t nb_hold; uint64_t pkt_flags; + uint64_t vlan_flags; nb_rx = 0; nb_hold = 0; @@ -1352,6 +1718,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, rx_id = rxq->rx_tail; rx_ring = rxq->rx_ring; sw_ring = rxq->sw_ring; + vlan_flags = rxq->vlan_flags; while (nb_rx < nb_pkts) { /* * The order of operations here is important as the DD status @@ -1399,7 +1766,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, (unsigned) rx_id, (unsigned) staterr, (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length)); - nmb = rte_rxmbuf_alloc(rxq->mb_pool); + nmb = rte_mbuf_raw_alloc(rxq->mb_pool); if (nmb == NULL) { PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " "queue_id=%u", (unsigned) rxq->port_id, @@ -1457,17 +1824,18 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, rxm->data_len = pkt_len; rxm->port = 
rxq->port_id; - pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.hs_rss. - pkt_info); + pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data); /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */ rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan); - pkt_flags = rx_desc_status_to_pkt_flags(staterr); + pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags); pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); pkt_flags = pkt_flags | - ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info); + ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info); rxm->ol_flags = pkt_flags; - rxm->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info); + rxm->packet_type = + ixgbe_rxd_pkt_info_to_pkt_type(pkt_info, + rxq->pkt_type_mask); if (likely(pkt_flags & PKT_RX_RSS_HASH)) rxm->hash.rss = rte_le_to_cpu_32( @@ -1534,30 +1902,31 @@ ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx) * - error flags * @head HEAD of the packet cluster * @desc HW descriptor to get data from - * @port_id Port ID of the Rx queue + * @rxq Pointer to the Rx queue */ static inline void ixgbe_fill_cluster_head_buf( struct rte_mbuf *head, union ixgbe_adv_rx_desc *desc, - uint8_t port_id, + struct ixgbe_rx_queue *rxq, uint32_t staterr) { - uint16_t pkt_info; + uint32_t pkt_info; uint64_t pkt_flags; - head->port = port_id; + head->port = rxq->port_id; /* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is * set in the pkt_flags field. */ head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan); - pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.hs_rss.pkt_info); - pkt_flags = rx_desc_status_to_pkt_flags(staterr); + pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data); + pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags); pkt_flags |= rx_desc_error_to_pkt_flags(staterr); - pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info); + pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info); head->ol_flags = pkt_flags; - head->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info); + head->packet_type = + ixgbe_rxd_pkt_info_to_pkt_type(pkt_info, rxq->pkt_type_mask); if (likely(pkt_flags & PKT_RX_RSS_HASH)) head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss); @@ -1614,7 +1983,7 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts, struct ixgbe_rx_entry *rxe; struct ixgbe_scattered_rx_entry *sc_entry; struct ixgbe_scattered_rx_entry *next_sc_entry; - struct ixgbe_rx_entry *next_rxe; + struct ixgbe_rx_entry *next_rxe = NULL; struct rte_mbuf *first_seg; struct rte_mbuf *rxm; struct rte_mbuf *nmb; @@ -1668,7 +2037,7 @@ next_desc: rte_le_to_cpu_16(rxd.wb.upper.length)); if (!bulk_alloc) { - nmb = rte_rxmbuf_alloc(rxq->mb_pool); + nmb = rte_mbuf_raw_alloc(rxq->mb_pool); if (nmb == NULL) { PMD_RX_LOG(DEBUG, "RX mbuf alloc failed " "port_id=%u queue_id=%u", @@ -1678,14 +2047,13 @@ next_desc: rx_mbuf_alloc_failed++; break; } - } - else if (nb_hold > rxq->rx_free_thresh) { + } else if (nb_hold > rxq->rx_free_thresh) { uint16_t next_rdt = rxq->rx_free_trigger; if (!ixgbe_rx_alloc_bufs(rxq, false)) { rte_wmb(); - IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, - next_rdt); + IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, + next_rdt); nb_hold -= rxq->rx_free_thresh; } else { PMD_RX_LOG(DEBUG, "RX bulk alloc failed " @@ -1791,7 +2159,7 @@ next_desc: * the pointer to the first mbuf at the NEXTP entry in the * sw_sc_ring and continue to parse the RX ring. 
*/ - if (!eop) { + if (!eop && next_rxe) { rxm->next = next_rxe->mbuf; next_sc_entry->fbuf = first_seg; goto next_desc; @@ -1804,8 +2172,7 @@ next_desc: rxm->next = NULL; /* Initialize the first mbuf of the returned packet */ - ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id, - staterr); + ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr); /* * Deal with the case, when HW CRC srip is disabled. @@ -1857,7 +2224,7 @@ next_desc: rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx); rte_wmb(); - IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, prev_id); + IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id); nb_hold = 0; } @@ -1941,6 +2308,7 @@ ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq) prev = (uint16_t) (txq->nb_tx_desc - 1); for (i = 0; i < txq->nb_tx_desc; i++) { volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i]; + txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD); txe[i].mbuf = NULL; txe[i].last_id = i; @@ -1960,7 +2328,7 @@ ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq) txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1); txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1); txq->ctx_curr = 0; - memset((void*)&txq->ctx_cache, 0, + memset((void *)&txq->ctx_cache, 0, IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info)); } @@ -1981,6 +2349,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq) if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) && (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) { PMD_INIT_LOG(DEBUG, "Using simple tx code path"); + dev->tx_pkt_prepare = NULL; #ifdef RTE_IXGBE_INC_VECTOR if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ && (rte_eal_process_type() != RTE_PROC_PRIMARY || @@ -2001,6 +2370,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq) (unsigned long)txq->tx_rs_thresh, (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST); dev->tx_pkt_burst = ixgbe_xmit_pkts; + dev->tx_pkt_prepare = ixgbe_prep_pkts; } } @@ -2233,6 +2603,7 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq) if (rxq->rx_nb_avail) { for (i = 0; i < rxq->rx_nb_avail; ++i) { struct rte_mbuf *mb; + mb = rxq->rx_stage[rxq->rx_next_avail + i]; rte_pktmbuf_free_seg(mb); } @@ -2283,7 +2654,6 @@ check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq) * rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST * rxq->rx_free_thresh < rxq->nb_rx_desc * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0 - * rxq->nb_rx_desc<(IXGBE_MAX_RING_DESC-RTE_PMD_IXGBE_RX_MAX_BURST) * Scattered packets are not supported. This should be checked * outside of this function. */ @@ -2305,15 +2675,6 @@ check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq) "rxq->rx_free_thresh=%d", rxq->nb_rx_desc, rxq->rx_free_thresh); ret = -EINVAL; - } else if (!(rxq->nb_rx_desc < - (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST))) { - PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " - "rxq->nb_rx_desc=%d, " - "IXGBE_MAX_RING_DESC=%d, " - "RTE_PMD_IXGBE_RX_MAX_BURST=%d", - rxq->nb_rx_desc, IXGBE_MAX_RING_DESC, - RTE_PMD_IXGBE_RX_MAX_BURST); - ret = -EINVAL; } return ret; @@ -2330,12 +2691,7 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq) /* * By default, the Rx queue setup function allocates enough memory for * IXGBE_MAX_RING_DESC. The Rx Burst bulk allocation function requires - * extra memory at the end of the descriptor ring to be zero'd out. 
A - * pre-condition for using the Rx burst bulk alloc function is that the - * number of descriptors is less than or equal to - * (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST). Check all the - * constraints here to see if we need to zero out memory after the end - * of the H/W descriptor ring. + * extra memory at the end of the descriptor ring to be zero'd out. */ if (adapter->rx_bulk_alloc_allowed) /* zero out extra memory */ @@ -2425,6 +2781,21 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, rxq->drop_en = rx_conf->rx_drop_en; rxq->rx_deferred_start = rx_conf->rx_deferred_start; + /* + * The packet type in RX descriptor is different for different NICs. + * Some bits are used for x550 but reserved for other NICS. + * So set different masks for different NICs. + */ + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a || + hw->mac.type == ixgbe_mac_X550_vf || + hw->mac.type == ixgbe_mac_X550EM_x_vf || + hw->mac.type == ixgbe_mac_X550EM_a_vf) + rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_X550; + else + rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_82599; + /* * Allocate RX ring hardware descriptors. A memzone large enough to * handle the maximum ring size is allocated in order to allow for @@ -2440,7 +2811,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, /* * Zero init all the descriptors in the ring. */ - memset (rz->addr, 0, RX_RING_SZ); + memset(rz->addr, 0, RX_RING_SZ); /* * Modified to setup VFRDT for Virtual Function @@ -2454,8 +2825,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx)); rxq->rdh_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx)); - } - else { + } else { rxq->rdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx)); rxq->rdh_reg_addr = @@ -2541,11 +2911,6 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) struct ixgbe_rx_queue *rxq; uint32_t desc = 0; - if (rx_queue_id >= dev->data->nb_rx_queues) { - PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id); - return 0; - } - rxq = dev->data->rx_queues[rx_queue_id]; rxdp = &(rxq->rx_ring[rxq->rx_tail]); @@ -2580,6 +2945,63 @@ ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset) rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)); } +int +ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset) +{ + struct ixgbe_rx_queue *rxq = rx_queue; + volatile uint32_t *status; + uint32_t nb_hold, desc; + + if (unlikely(offset >= rxq->nb_rx_desc)) + return -EINVAL; + +#ifdef RTE_IXGBE_INC_VECTOR + if (rxq->rx_using_sse) + nb_hold = rxq->rxrearm_nb; + else +#endif + nb_hold = rxq->nb_rx_hold; + if (offset >= rxq->nb_rx_desc - nb_hold) + return RTE_ETH_RX_DESC_UNAVAIL; + + desc = rxq->rx_tail + offset; + if (desc >= rxq->nb_rx_desc) + desc -= rxq->nb_rx_desc; + + status = &rxq->rx_ring[desc].wb.upper.status_error; + if (*status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)) + return RTE_ETH_RX_DESC_DONE; + + return RTE_ETH_RX_DESC_AVAIL; +} + +int +ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset) +{ + struct ixgbe_tx_queue *txq = tx_queue; + volatile uint32_t *status; + uint32_t desc; + + if (unlikely(offset >= txq->nb_tx_desc)) + return -EINVAL; + + desc = txq->tx_tail + offset; + /* go to next desc that has the RS bit */ + desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) * + txq->tx_rs_thresh; + if (desc >= txq->nb_tx_desc) { + desc -= txq->nb_tx_desc; + if (desc >= txq->nb_tx_desc) + desc -= txq->nb_tx_desc; + } + + status = 
&txq->tx_ring[desc].wb.status; + if (*status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)) + return RTE_ETH_TX_DESC_DONE; + + return RTE_ETH_TX_DESC_FULL; +} + void __attribute__((cold)) ixgbe_dev_clear_queues(struct rte_eth_dev *dev) { @@ -2591,6 +3013,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_tx_queues; i++) { struct ixgbe_tx_queue *txq = dev->data->tx_queues[i]; + if (txq != NULL) { txq->ops->release_mbufs(txq); txq->ops->reset(txq); @@ -2599,6 +3022,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_rx_queues; i++) { struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i]; + if (rxq != NULL) { ixgbe_rx_queue_release_mbufs(rxq); ixgbe_reset_rx_queue(adapter, rxq); @@ -2914,6 +3338,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev) } for (i = 0; i < nb_tcs; i++) { uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); + rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT)); /* clear 10 bits. */ rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */ @@ -2922,14 +3347,15 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev) /* zero alloc all unused TCs */ for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); - rxpbsize &= (~( 0x3FF << IXGBE_RXPBSIZE_SHIFT )); + + rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT)); /* clear 10 bits. */ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize); } /* MRQC: enable vmdq and dcb */ - mrqc = ((num_pools == ETH_16_POOLS) ? \ - IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN ); + mrqc = (num_pools == ETH_16_POOLS) ? + IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN; IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); /* PFVTCTL: turn on virtualisation and set the default pool */ @@ -2967,7 +3393,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev) } /* VFRE: pool enabling for receive - 16 or 32 */ - IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), \ + IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF); /* @@ -2980,7 +3406,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev) /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */ for (i = 0; i < cfg->nb_pool_maps; i++) { /* set vlan id in VF register and set the valid bit */ - IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \ + IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | (cfg->pool_map[i].vlan_id & 0xFFF))); /* * Put the allowed pools in VFB reg. 
As we only have 16 or 32 @@ -2993,15 +3419,15 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev) /** * ixgbe_dcb_config_tx_hw_config - Configure general DCB TX parameters - * @hw: pointer to hardware structure + * @dev: pointer to eth_dev structure * @dcb_config: pointer to ixgbe_dcb_config structure */ static void -ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) +ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev, + struct ixgbe_dcb_config *dcb_config) { uint32_t reg; - uint32_t q; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); PMD_INIT_FUNC_TRACE(); if (hw->mac.type != ixgbe_mac_82598EB) { @@ -3013,19 +3439,13 @@ ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw, /* Enable DCB for Tx with 8 TCs */ if (dcb_config->num_tcs.pg_tcs == 8) { reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; - } - else { + } else { reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; } if (dcb_config->vt_mode) - reg |= IXGBE_MTQC_VT_ENA; + reg |= IXGBE_MTQC_VT_ENA; IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); - /* Disable drop for all queues */ - for (q = 0; q < 128; q++) - IXGBE_WRITE_REG(hw, IXGBE_QDE, - (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT))); - /* Enable the Tx desc arbiter */ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); reg &= ~IXGBE_RTTDCS_ARBDIS; @@ -3036,7 +3456,6 @@ ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw, reg |= IXGBE_SECTX_DCB; IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); } - return; } /** @@ -3060,25 +3479,23 @@ ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev, vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF); /*Configure general DCB TX parameters*/ - ixgbe_dcb_tx_hw_config(hw,dcb_config); - return; + ixgbe_dcb_tx_hw_config(dev, dcb_config); } static void ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev, - struct ixgbe_dcb_config *dcb_config) + struct ixgbe_dcb_config *dcb_config) { struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; struct ixgbe_dcb_tc_config *tc; - uint8_t i,j; + uint8_t i, j; /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */ - if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS ) { + if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) { dcb_config->num_tcs.pg_tcs = ETH_8_TCS; dcb_config->num_tcs.pfc_tcs = ETH_8_TCS; - } - else { + } else { dcb_config->num_tcs.pg_tcs = ETH_4_TCS; dcb_config->num_tcs.pfc_tcs = ETH_4_TCS; } @@ -3093,19 +3510,18 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev, static void ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev, - struct ixgbe_dcb_config *dcb_config) + struct ixgbe_dcb_config *dcb_config) { struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf; struct ixgbe_dcb_tc_config *tc; - uint8_t i,j; + uint8_t i, j; /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */ - if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ) { + if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) { dcb_config->num_tcs.pg_tcs = ETH_8_TCS; dcb_config->num_tcs.pfc_tcs = ETH_8_TCS; - } - else { + } else { dcb_config->num_tcs.pg_tcs = ETH_4_TCS; dcb_config->num_tcs.pfc_tcs = ETH_4_TCS; } @@ -3117,7 +3533,6 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev, tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = (uint8_t)(1 << j); } - return; } static void @@ -3127,7 +3542,7 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev, struct rte_eth_dcb_rx_conf *rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; struct ixgbe_dcb_tc_config *tc; - uint8_t i,j; + uint8_t i, j; dcb_config->num_tcs.pg_tcs = 
(uint8_t)rx_conf->nb_tcs; dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs; @@ -3148,7 +3563,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev, struct rte_eth_dcb_tx_conf *tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf; struct ixgbe_dcb_tc_config *tc; - uint8_t i,j; + uint8_t i, j; dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs; dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs; @@ -3164,16 +3579,18 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev, /** * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters - * @hw: pointer to hardware structure + * @dev: pointer to eth_dev structure * @dcb_config: pointer to ixgbe_dcb_config structure */ static void -ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) +ixgbe_dcb_rx_hw_config(struct rte_eth_dev *dev, + struct ixgbe_dcb_config *dcb_config) { uint32_t reg; uint32_t vlanctrl; uint8_t i; + uint32_t q; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); PMD_INIT_FUNC_TRACE(); /* @@ -3211,6 +3628,21 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw, } IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg); + + if (RTE_ETH_DEV_SRIOV(dev).active == 0) { + /* Disable drop for all queues in VMDQ mode*/ + for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++) + IXGBE_WRITE_REG(hw, IXGBE_QDE, + (IXGBE_QDE_WRITE | + (q << IXGBE_QDE_IDX_SHIFT))); + } else { + /* Enable drop for all queues in SRIOV mode */ + for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++) + IXGBE_WRITE_REG(hw, IXGBE_QDE, + (IXGBE_QDE_WRITE | + (q << IXGBE_QDE_IDX_SHIFT) | + IXGBE_QDE_ENABLE)); + } } /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ @@ -3229,13 +3661,11 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw, */ reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC; IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); - - return; } static void ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill, - uint16_t *max,uint8_t *bwg_id, uint8_t *tsa, uint8_t *map) + uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map) { switch (hw->mac.type) { case ixgbe_mac_82598EB: @@ -3260,16 +3690,16 @@ ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *m { switch (hw->mac.type) { case ixgbe_mac_82598EB: - ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,tsa); - ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,tsa); + ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa); + ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: - ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,tsa); - ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,tsa, map); + ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa); + ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map); break; default: break; @@ -3290,7 +3720,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, struct ixgbe_dcb_config *dcb_config) { int ret = 0; - uint8_t i,pfc_en,nb_tcs; + uint8_t i, pfc_en, nb_tcs; uint16_t pbsize, rx_buffer_size; uint8_t config_dcb_rx = 0; uint8_t config_dcb_tx = 0; @@ -3304,7 +3734,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - switch(dev->data->dev_conf.rxmode.mq_mode){ + switch (dev->data->dev_conf.rxmode.mq_mode) { case ETH_MQ_RX_VMDQ_DCB: dcb_config->vt_mode = true; if (hw->mac.type != ixgbe_mac_82598EB) { @@ 
-3325,7 +3755,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, /* Get dcb TX configuration parameters from rte_eth_conf */ ixgbe_dcb_rx_config(dev, dcb_config); /*Configure general DCB RX parameters*/ - ixgbe_dcb_rx_hw_config(hw, dcb_config); + ixgbe_dcb_rx_hw_config(dev, dcb_config); break; default: PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration"); @@ -3335,10 +3765,12 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, case ETH_MQ_TX_VMDQ_DCB: dcb_config->vt_mode = true; config_dcb_tx = DCB_TX_CONFIG; - /* get DCB and VT TX configuration parameters from rte_eth_conf */ - ixgbe_dcb_vt_tx_config(dev,dcb_config); + /* get DCB and VT TX configuration parameters + * from rte_eth_conf + */ + ixgbe_dcb_vt_tx_config(dev, dcb_config); /*Configure general VMDQ and DCB TX parameters*/ - ixgbe_vmdq_dcb_hw_tx_config(dev,dcb_config); + ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config); break; case ETH_MQ_TX_DCB: @@ -3347,7 +3779,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, /*get DCB TX configuration parameters from rte_eth_conf*/ ixgbe_dcb_tx_config(dev, dcb_config); /*Configure general DCB TX parameters*/ - ixgbe_dcb_tx_hw_config(hw, dcb_config); + ixgbe_dcb_tx_hw_config(dev, dcb_config); break; default: PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration"); @@ -3361,8 +3793,9 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, /* Avoid un-configured priority mapping to TC0 */ uint8_t j = 4; uint8_t mask = 0xFF; + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++) - mask = (uint8_t)(mask & (~ (1 << map[i]))); + mask = (uint8_t)(mask & (~(1 << map[i]))); for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) { if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES)) map[j++] = i; @@ -3381,6 +3814,15 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0; tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0; } + } else { + /* Re-configure 8 TCs BW */ + for (i = 0; i < nb_tcs; i++) { + tc = &dcb_config->tc_config[i]; + tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = + (uint8_t)(100 / nb_tcs + (i & 1)); + tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = + (uint8_t)(100 / nb_tcs + (i & 1)); + } } switch (hw->mac.type) { @@ -3398,6 +3840,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, /* Set RX buffer size */ pbsize = (uint16_t)(rx_buffer_size / nb_tcs); uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT; + for (i = 0; i < nb_tcs; i++) { IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize); } @@ -3407,9 +3850,12 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, } } if (config_dcb_tx) { - /* Only support an equally distributed Tx packet buffer strategy. */ + /* Only support an equally distributed + * Tx packet buffer strategy. 
+ */ uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs; uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX; + for (i = 0; i < nb_tcs; i++) { IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); @@ -3422,9 +3868,9 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, } /*Calculates traffic class credits*/ - ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame, + ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame, IXGBE_DCB_TX_CONFIG); - ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame, + ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame, IXGBE_DCB_RX_CONFIG); if (config_dcb_rx) { @@ -3434,7 +3880,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid); ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa); /* Configure PG(ETS) RX */ - ixgbe_dcb_hw_arbite_rx_config(hw,refill,max,bwgid,tsa,map); + ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map); } if (config_dcb_tx) { @@ -3444,7 +3890,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); /* Configure PG(ETS) TX */ - ixgbe_dcb_hw_arbite_tx_config(hw,refill,max,bwgid,tsa,map); + ixgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map); } /*Configure queue statistics registers*/ @@ -3458,7 +3904,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, * If the TC count is 8,and the default high_water is 48, * the low_water is 16 as default. */ - hw->fc.high_water[i] = (pbsize * 3 ) / 4; + hw->fc.high_water[i] = (pbsize * 3) / 4; hw->fc.low_water[i] = pbsize / 4; /* Enable pfc for this TC */ tc = &dcb_config->tc_config[i]; @@ -3491,13 +3937,11 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev) (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS)) return; - if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES) + if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES) return; /** Configure DCB hardware **/ ixgbe_dcb_hw_configure(dev, dcb_cfg); - - return; } /* @@ -3562,7 +4006,7 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev) /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */ for (i = 0; i < cfg->nb_pool_maps; i++) { /* set vlan id in VF register and set the valid bit */ - IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \ + IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK))); /* * Put the allowed pools in VFB reg. As we only have 16 or 64 @@ -3570,12 +4014,11 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev) * i.e. 
@@ -3491,13 +3937,11 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
 	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
 		return;

-	if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
+	if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
 		return;

 	/** Configure DCB hardware **/
 	ixgbe_dcb_hw_configure(dev, dcb_cfg);
-
-	return;
 }

 /*
@@ -3562,7 +4006,7 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 	/* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
 	for (i = 0; i < cfg->nb_pool_maps; i++) {
 		/* set vlan id in VF register and set the valid bit */
-		IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
+		IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
 			(cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
 		/*
 		 * Put the allowed pools in VFB reg. As we only have 16 or 64
@@ -3570,12 +4014,11 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 		 * i.e. bits 0-31
 		 */
 		if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
-			IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), \
+			IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i * 2),
 				(cfg->pool_map[i].pools & UINT32_MAX));
 		else
-			IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i*2+1)), \
-				((cfg->pool_map[i].pools >> 32) \
-				& UINT32_MAX));
+			IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i * 2 + 1)),
+				((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
 	}

@@ -3615,7 +4058,7 @@ ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
 	/* Disable drop for all queues */
 	for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
-		  (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+				(IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));

 	/* Enable the Tx desc arbiter */
 	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
@@ -3623,8 +4066,6 @@ ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
 	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

 	IXGBE_WRITE_FLUSH(hw);
-
-	return;
 }

 static int __attribute__((cold))
@@ -3632,12 +4073,13 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
 {
 	struct ixgbe_rx_entry *rxe = rxq->sw_ring;
 	uint64_t dma_addr;
-	unsigned i;
+	unsigned int i;

 	/* Initialize software ring entries */
 	for (i = 0; i < rxq->nb_rx_desc; i++) {
 		volatile union ixgbe_adv_rx_desc *rxd;
-		struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
+		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+
 		if (mbuf == NULL) {
 			PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
 				     (unsigned) rxq->queue_id);
@@ -3758,21 +4200,24 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 			break;
 		}
 	} else {
-		/*
-		 * SRIOV active scheme
-		 * Support RSS together with VMDq & SRIOV
+		/* SRIOV active scheme
+		 * Support RSS together with SRIOV.
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
 		case ETH_MQ_RX_RSS:
 		case ETH_MQ_RX_VMDQ_RSS:
 			ixgbe_config_vf_rss(dev);
 			break;
-
-		/* FIXME if support DCB/RSS together with VMDq & SRIOV */
 		case ETH_MQ_RX_VMDQ_DCB:
+		case ETH_MQ_RX_DCB:
+			/* In SRIOV, the configuration is the same as VMDq case */
+			ixgbe_vmdq_dcb_configure(dev);
+			break;
+		/* DCB/RSS together with SRIOV is not supported */
 		case ETH_MQ_RX_VMDQ_DCB_RSS:
+		case ETH_MQ_RX_DCB_RSS:
 			PMD_INIT_LOG(ERR,
-				"Could not support DCB with VMDq & SRIOV");
+				"Could not support DCB/RSS with VMDq & SRIOV");
 			return -1;
 		default:
 			ixgbe_config_vf_default(dev);
@@ -4028,6 +4473,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
+
 			rxq->rx_using_sse = rx_using_sse;
 		}
 	}
@@ -4080,6 +4526,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 	/* RFCTL configuration  */
 	if (rsc_capable) {
 		uint32_t rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
+
 		if (rx_conf->enable_lro)
 			/*
 			 * Since NFS packets coalescing is not supported - clear
@@ -4273,6 +4720,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	if (hw->mac.type == ixgbe_mac_82599EB) {
 		/* Must setup the PSRTYPE register */
 		uint32_t psrtype;
+
 		psrtype = IXGBE_PSRTYPE_TCPHDR |
 			IXGBE_PSRTYPE_UDPHDR |
 			IXGBE_PSRTYPE_IPV4HDR |
@@ -4372,7 +4820,8 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev)
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

 	/* Enable TX CRC (checksum offload requirement) and hw padding
-	 * (TSO requirement) */
+	 * (TSO requirement)
+	 */
 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
 	hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
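Editor's note: the VLVFB hunk above spreads a 64-bit pool bitmap across two 32-bit registers per VLAN filter: slot (i * 2) holds pools 0-31, slot (i * 2 + 1) holds pools 32-63, and only the half that actually has bits set is written. A standalone sketch of that indexing, with a plain array standing in for the register file (names and values are illustrative, not driver API):

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[128];	/* stand-in for the IXGBE_VLVFB register file */

static void write_pool_map(unsigned int i, uint64_t pools)
{
	if (((pools >> 32) & UINT32_MAX) == 0)
		regs[i * 2] = (uint32_t)(pools & UINT32_MAX);	/* pools 0-31 */
	else
		regs[i * 2 + 1] = (uint32_t)((pools >> 32) & UINT32_MAX);	/* 32-63 */
}

int main(void)
{
	write_pool_map(0, 0x5ULL);		/* pools 0 and 2 -> slot 0 */
	write_pool_map(1, 0x1ULL << 40);	/* pool 40 -> bit 8 of slot 3 */
	printf("slot0=%#x slot3=%#x\n", regs[0], regs[3]);
	return 0;
}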
@@ -4397,26 +4846,26 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev)
 		 * bookkeeping if things aren't delivered in order.
 		 */
 		switch (hw->mac.type) {
-			case ixgbe_mac_82598EB:
-				txctrl = IXGBE_READ_REG(hw,
-						IXGBE_DCA_TXCTRL(txq->reg_idx));
-				txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
-				IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
-						txctrl);
-				break;
+		case ixgbe_mac_82598EB:
+			txctrl = IXGBE_READ_REG(hw,
+					IXGBE_DCA_TXCTRL(txq->reg_idx));
+			txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
+					txctrl);
+			break;

-			case ixgbe_mac_82599EB:
-			case ixgbe_mac_X540:
-			case ixgbe_mac_X550:
-			case ixgbe_mac_X550EM_x:
-			case ixgbe_mac_X550EM_a:
-			default:
-				txctrl = IXGBE_READ_REG(hw,
+		case ixgbe_mac_82599EB:
+		case ixgbe_mac_X540:
+		case ixgbe_mac_X550:
+		case ixgbe_mac_X550EM_x:
+		case ixgbe_mac_X550EM_a:
+		default:
+			txctrl = IXGBE_READ_REG(hw,
 				IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
-				txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
-				IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
-						txctrl);
-				break;
+			txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
+					txctrl);
+			break;
 		}
 	}
@@ -4588,12 +5037,12 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);

-	/* Wait until RX Enable ready */
+	/* Wait until RX Enable bit clear */
 	poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
 	do {
 		rte_delay_ms(1);
 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
-	} while (--poll_ms && (rxdctl | IXGBE_RXDCTL_ENABLE));
+	} while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
 	if (!poll_ms)
 		PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
 			     rx_queue_id);
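Editor's note: the queue-stop hunks here and below fix a genuine predicate bug, not just style. (rxdctl | IXGBE_RXDCTL_ENABLE) is non-zero for every possible register value, so the old loop always burned the full poll budget; (rxdctl & IXGBE_RXDCTL_ENABLE) actually tests whether the enable bit is still set. A compilable sketch of the corrected loop, where read_reg() is a made-up stand-in for the MMIO read:

#include <stdint.h>
#include <stdio.h>

#define ENABLE_BIT 0x02000000u	/* illustrative bit position */

static uint32_t read_reg(void)
{
	static int calls;

	/* pretend the hardware clears the enable bit on the third read */
	return (++calls < 3) ? ENABLE_BIT : 0;
}

int main(void)
{
	int poll_ms = 10;
	uint32_t v;

	do {
		v = read_reg();
	} while (--poll_ms && (v & ENABLE_BIT));	/* '&', not '|' */

	printf(poll_ms ? "disabled\n" : "timed out\n");
	return 0;
}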
@@ -4667,48 +5116,48 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

-	if (tx_queue_id < dev->data->nb_tx_queues) {
-		txq = dev->data->tx_queues[tx_queue_id];
+	if (tx_queue_id >= dev->data->nb_tx_queues)
+		return -1;

-		/* Wait until TX queue is empty */
-		if (hw->mac.type == ixgbe_mac_82599EB) {
-			poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
-			do {
-				rte_delay_us(RTE_IXGBE_WAIT_100_US);
-				txtdh = IXGBE_READ_REG(hw,
-						IXGBE_TDH(txq->reg_idx));
-				txtdt = IXGBE_READ_REG(hw,
-						IXGBE_TDT(txq->reg_idx));
-			} while (--poll_ms && (txtdh != txtdt));
-			if (!poll_ms)
-				PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
-					     "when stopping.", tx_queue_id);
-		}
+	txq = dev->data->tx_queues[tx_queue_id];

-		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
-		txdctl &= ~IXGBE_TXDCTL_ENABLE;
-		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+	/* Wait until TX queue is empty */
+	if (hw->mac.type == ixgbe_mac_82599EB) {
+		poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+		do {
+			rte_delay_us(RTE_IXGBE_WAIT_100_US);
+			txtdh = IXGBE_READ_REG(hw,
+					IXGBE_TDH(txq->reg_idx));
+			txtdt = IXGBE_READ_REG(hw,
+					IXGBE_TDT(txq->reg_idx));
+		} while (--poll_ms && (txtdh != txtdt));
+		if (!poll_ms)
+			PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
+				     "when stopping.", tx_queue_id);
+	}

-		/* Wait until TX Enable ready */
-		if (hw->mac.type == ixgbe_mac_82599EB) {
-			poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
-			do {
-				rte_delay_ms(1);
-				txdctl = IXGBE_READ_REG(hw,
+	txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+	txdctl &= ~IXGBE_TXDCTL_ENABLE;
+	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+
+	/* Wait until TX Enable bit clear */
+	if (hw->mac.type == ixgbe_mac_82599EB) {
+		poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+		do {
+			rte_delay_ms(1);
+			txdctl = IXGBE_READ_REG(hw,
 						IXGBE_TXDCTL(txq->reg_idx));
-		} while (--poll_ms && (txdctl | IXGBE_TXDCTL_ENABLE));
-		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not disable "
-				     "Tx Queue %d", tx_queue_id);
-	}
+		} while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
+		if (!poll_ms)
+			PMD_INIT_LOG(ERR, "Could not disable "
+				     "Tx Queue %d", tx_queue_id);
+	}

-	if (txq->ops != NULL) {
-		txq->ops->release_mbufs(txq);
-		txq->ops->reset(txq);
-	}
-	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
-	} else
-		return -1;
+	if (txq->ops != NULL) {
+		txq->ops->release_mbufs(txq);
+		txq->ops->reset(txq);
+	}
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

 	return 0;
 }
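Editor's note: beyond the '|' to '&' fix, the final hunk is a pure restructuring: the body of ixgbe_dev_tx_queue_stop() moves out of an "if (valid) { ... } else return -1;" wrapper and behind an early-return guard, dropping one indentation level without changing behaviour. A minimal illustration of the same transformation on made-up types (not driver code):

#include <stdio.h>

struct txq { int stopped; };

static int queue_stop(struct txq **queues, int nb_queues, int id)
{
	struct txq *q;

	if (id >= nb_queues)	/* guard clause replaces the trailing else */
		return -1;

	q = queues[id];
	q->stopped = 1;		/* the real work now sits one level shallower */
	return 0;
}

int main(void)
{
	struct txq q = { 0 };
	struct txq *ring[1] = { &q };

	printf("%d %d\n", queue_stop(ring, 1, 0), queue_stop(ring, 1, 5));
	return 0;
}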