X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_rxtx.c;h=d7c80d424263174eea9af35ccae0aeac5ac41dd6;hb=369ce46248c0605d31bd29ebaa4474309a875176;hp=1e0789595f403691f4b40dd291a2cadc4a1059a9;hpb=b826efba6de491659c9c2025e42b1bf9df8d933c;p=dpdk.git diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c index 1e0789595f..d7c80d4242 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx.c +++ b/drivers/net/ixgbe/ixgbe_rxtx.c @@ -1,35 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. - * Copyright 2014 6WIND S.A. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation. + * Copyright 2014 6WIND S.A. 
*/ #include @@ -62,7 +33,8 @@ #include #include #include -#include +#include +#include #include #include #include @@ -71,6 +43,7 @@ #include #include #include +#include #include "ixgbe_logs.h" #include "base/ixgbe_api.h" @@ -81,22 +54,26 @@ #include "ixgbe_rxtx.h" #ifdef RTE_LIBRTE_IEEE1588 -#define IXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST +#define IXGBE_TX_IEEE1588_TMST RTE_MBUF_F_TX_IEEE1588_TMST #else #define IXGBE_TX_IEEE1588_TMST 0 #endif /* Bit Mask to indicate what bits required for building TX context */ -#define IXGBE_TX_OFFLOAD_MASK ( \ - PKT_TX_VLAN_PKT | \ - PKT_TX_IP_CKSUM | \ - PKT_TX_L4_MASK | \ - PKT_TX_TCP_SEG | \ - PKT_TX_MACSEC | \ - PKT_TX_OUTER_IP_CKSUM | \ +#define IXGBE_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_IPV6 | \ + RTE_MBUF_F_TX_OUTER_IPV4 | \ + RTE_MBUF_F_TX_IPV6 | \ + RTE_MBUF_F_TX_IPV4 | \ + RTE_MBUF_F_TX_VLAN | \ + RTE_MBUF_F_TX_IP_CKSUM | \ + RTE_MBUF_F_TX_L4_MASK | \ + RTE_MBUF_F_TX_TCP_SEG | \ + RTE_MBUF_F_TX_MACSEC | \ + RTE_MBUF_F_TX_OUTER_IP_CKSUM | \ + RTE_MBUF_F_TX_SEC_OFFLOAD | \ IXGBE_TX_IEEE1588_TMST) #define IXGBE_TX_OFFLOAD_NOTSUP_MASK \ - (PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK) + (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK) #if 1 #define RTE_PMD_USE_PREFETCH @@ -111,11 +88,6 @@ #define rte_ixgbe_prefetch(p) do {} while (0) #endif -#ifdef RTE_IXGBE_INC_VECTOR -uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts); -#endif - /********************************************************************* * * TX functions @@ -126,7 +98,7 @@ uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, * Check for descriptors with their DD bit set and free mbufs. * Return the total number of buffers freed. */ -static inline int __attribute__((always_inline)) +static __rte_always_inline int ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq) { struct ixgbe_tx_entry *txep; @@ -184,7 +156,7 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts) int i; for (i = 0; i < 4; ++i, ++txdp, ++pkts) { - buf_dma_addr = rte_mbuf_data_dma_addr(*pkts); + buf_dma_addr = rte_mbuf_data_iova(*pkts); pkt_len = (*pkts)->data_len; /* write data to descriptor */ @@ -207,7 +179,7 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts) uint64_t buf_dma_addr; uint32_t pkt_len; - buf_dma_addr = rte_mbuf_data_dma_addr(*pkts); + buf_dma_addr = rte_mbuf_data_iova(*pkts); pkt_len = (*pkts)->data_len; /* write data to descriptor */ @@ -337,7 +309,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, /* update tail pointer */ rte_wmb(); - IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail); + IXGBE_PCI_REG_WC_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail); return nb_pkts; } @@ -368,7 +340,6 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, return nb_tx; } -#ifdef RTE_IXGBE_INC_VECTOR static uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) @@ -390,12 +361,12 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, return nb_tx; } -#endif static inline void ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq, volatile struct ixgbe_adv_tx_context_desc *ctx_txd, - uint64_t ol_flags, union ixgbe_tx_offload tx_offload) + uint64_t ol_flags, union ixgbe_tx_offload tx_offload, + __rte_unused uint64_t *mdata) { uint32_t type_tucmd_mlhl; uint32_t mss_l4len_idx = 0; @@ -412,14 +383,13 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq, /* Specify which HW CTX to upload. 
*/ mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT); - if (ol_flags & PKT_TX_VLAN_PKT) { + if (ol_flags & RTE_MBUF_F_TX_VLAN) tx_offload_mask.vlan_tci |= ~0; - } /* check if TCP segmentation required for this packet */ - if (ol_flags & PKT_TX_TCP_SEG) { + if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { /* implies IP cksum in IPv4 */ - if (ol_flags & PKT_TX_IP_CKSUM) + if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 | IXGBE_ADVTXD_TUCMD_L4T_TCP | IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; @@ -435,31 +405,34 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq, mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT; mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT; } else { /* no TSO, check if hardware checksum is needed */ - if (ol_flags & PKT_TX_IP_CKSUM) { + if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) { type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4; tx_offload_mask.l2_len |= ~0; tx_offload_mask.l3_len |= ~0; } - switch (ol_flags & PKT_TX_L4_MASK) { - case PKT_TX_UDP_CKSUM: + switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) { + case RTE_MBUF_F_TX_UDP_CKSUM: type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP | IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; - mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= sizeof(struct rte_udp_hdr) + << IXGBE_ADVTXD_L4LEN_SHIFT; tx_offload_mask.l2_len |= ~0; tx_offload_mask.l3_len |= ~0; break; - case PKT_TX_TCP_CKSUM: + case RTE_MBUF_F_TX_TCP_CKSUM: type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP | IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; - mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= sizeof(struct rte_tcp_hdr) + << IXGBE_ADVTXD_L4LEN_SHIFT; tx_offload_mask.l2_len |= ~0; tx_offload_mask.l3_len |= ~0; break; - case PKT_TX_SCTP_CKSUM: + case RTE_MBUF_F_TX_SCTP_CKSUM: type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP | IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; - mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= sizeof(struct rte_sctp_hdr) + << IXGBE_ADVTXD_L4LEN_SHIFT; tx_offload_mask.l2_len |= ~0; tx_offload_mask.l3_len |= ~0; break; @@ -470,7 +443,7 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq, } } - if (ol_flags & PKT_TX_OUTER_IP_CKSUM) { + if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) { tx_offload_mask.outer_l2_len |= ~0; tx_offload_mask.outer_l3_len |= ~0; tx_offload_mask.l2_len |= ~0; @@ -479,6 +452,21 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq, seqnum_seed |= tx_offload.l2_len << IXGBE_ADVTXD_TUNNEL_LEN; } +#ifdef RTE_LIB_SECURITY + if (ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) { + union ixgbe_crypto_tx_desc_md *md = + (union ixgbe_crypto_tx_desc_md *)mdata; + seqnum_seed |= + (IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & md->sa_idx); + type_tucmd_mlhl |= md->enc ? 
+ (IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP | + IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0; + type_tucmd_mlhl |= + (md->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK); + tx_offload_mask.sa_idx |= ~0; + tx_offload_mask.sec_pad_len |= ~0; + } +#endif txq->ctx_cache[ctx_idx].flags = ol_flags; txq->ctx_cache[ctx_idx].tx_offload.data[0] = @@ -489,7 +477,7 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq, ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl); vlan_macip_lens = tx_offload.l3_len; - if (ol_flags & PKT_TX_OUTER_IP_CKSUM) + if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) vlan_macip_lens |= (tx_offload.outer_l2_len << IXGBE_ADVTXD_MACLEN_SHIFT); else @@ -539,11 +527,11 @@ tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags) { uint32_t tmp = 0; - if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM) + if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_L4_NO_CKSUM) tmp |= IXGBE_ADVTXD_POPTS_TXSM; - if (ol_flags & PKT_TX_IP_CKSUM) + if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) tmp |= IXGBE_ADVTXD_POPTS_IXSM; - if (ol_flags & PKT_TX_TCP_SEG) + if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) tmp |= IXGBE_ADVTXD_POPTS_TXSM; return tmp; } @@ -553,13 +541,13 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags) { uint32_t cmdtype = 0; - if (ol_flags & PKT_TX_VLAN_PKT) + if (ol_flags & RTE_MBUF_F_TX_VLAN) cmdtype |= IXGBE_ADVTXD_DCMD_VLE; - if (ol_flags & PKT_TX_TCP_SEG) + if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) cmdtype |= IXGBE_ADVTXD_DCMD_TSE; - if (ol_flags & PKT_TX_OUTER_IP_CKSUM) + if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT); - if (ol_flags & PKT_TX_MACSEC) + if (ol_flags & RTE_MBUF_F_TX_MACSEC) cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC; return cmdtype; } @@ -593,11 +581,11 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq) desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; status = txr[desc_to_clean_to].wb.status; if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) { - PMD_TX_FREE_LOG(DEBUG, - "TX descriptor %4u is not done" - "(port=%d queue=%d)", - desc_to_clean_to, - txq->port_id, txq->queue_id); + PMD_TX_LOG(DEBUG, + "TX descriptor %4u is not done" + "(port=%d queue=%d)", + desc_to_clean_to, + txq->port_id, txq->queue_id); /* Failed to clean any descriptors, better luck next time */ return -(1); } @@ -610,11 +598,11 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq) nb_tx_to_clean = (uint16_t)(desc_to_clean_to - last_desc_cleaned); - PMD_TX_FREE_LOG(DEBUG, - "Cleaning %4u TX descriptors: %4u to %4u " - "(port=%d queue=%d)", - nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to, - txq->port_id, txq->queue_id); + PMD_TX_LOG(DEBUG, + "Cleaning %4u TX descriptors: %4u to %4u " + "(port=%d queue=%d)", + nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to, + txq->port_id, txq->queue_id); /* * The last descriptor to clean is done, so that means all the @@ -657,6 +645,9 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint32_t ctx = 0; uint32_t new_ctx; union ixgbe_tx_offload tx_offload; +#ifdef RTE_LIB_SECURITY + uint8_t use_ipsec; +#endif tx_offload.data[0] = 0; tx_offload.data[1] = 0; @@ -684,6 +675,9 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, * are needed for offload functionality. 
*/ ol_flags = tx_pkt->ol_flags; +#ifdef RTE_LIB_SECURITY + use_ipsec = txq->using_ipsec && (ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD); +#endif /* If hardware offload required */ tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK; @@ -695,6 +689,15 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, tx_offload.tso_segsz = tx_pkt->tso_segsz; tx_offload.outer_l2_len = tx_pkt->outer_l2_len; tx_offload.outer_l3_len = tx_pkt->outer_l3_len; +#ifdef RTE_LIB_SECURITY + if (use_ipsec) { + union ixgbe_crypto_tx_desc_md *ipsec_mdata = + (union ixgbe_crypto_tx_desc_md *) + rte_security_dynfield(tx_pkt); + tx_offload.sa_idx = ipsec_mdata->sa_idx; + tx_offload.sec_pad_len = ipsec_mdata->pad_len; + } +#endif /* If new context need be built or reuse the exist ctx. */ ctx = what_advctx_update(txq, tx_ol_req, @@ -745,12 +748,12 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, * nb_used better be less than or equal to txq->tx_rs_thresh */ if (nb_used > txq->nb_tx_free) { - PMD_TX_FREE_LOG(DEBUG, - "Not enough free TX descriptors " - "nb_used=%4u nb_free=%4u " - "(port=%d queue=%d)", - nb_used, txq->nb_tx_free, - txq->port_id, txq->queue_id); + PMD_TX_LOG(DEBUG, + "Not enough free TX descriptors " + "nb_used=%4u nb_free=%4u " + "(port=%d queue=%d)", + nb_used, txq->nb_tx_free, + txq->port_id, txq->queue_id); if (ixgbe_xmit_cleanup(txq) != 0) { /* Could not clean any descriptors */ @@ -761,17 +764,17 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, /* nb_used better be <= txq->tx_rs_thresh */ if (unlikely(nb_used > txq->tx_rs_thresh)) { - PMD_TX_FREE_LOG(DEBUG, - "The number of descriptors needed to " - "transmit the packet exceeds the " - "RS bit threshold. This will impact " - "performance." - "nb_used=%4u nb_free=%4u " - "tx_rs_thresh=%4u. " - "(port=%d queue=%d)", - nb_used, txq->nb_tx_free, - txq->tx_rs_thresh, - txq->port_id, txq->queue_id); + PMD_TX_LOG(DEBUG, + "The number of descriptors needed to " + "transmit the packet exceeds the " + "RS bit threshold. This will impact " + "performance." + "nb_used=%4u nb_free=%4u " + "tx_rs_thresh=%4u. " + "(port=%d queue=%d)", + nb_used, txq->nb_tx_free, + txq->tx_rs_thresh, + txq->port_id, txq->queue_id); /* * Loop here until there are enough TX * descriptors or until the ring cannot be @@ -821,14 +824,14 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; #ifdef RTE_LIBRTE_IEEE1588 - if (ol_flags & PKT_TX_IEEE1588_TMST) + if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) cmd_type_len |= IXGBE_ADVTXD_MAC_1588; #endif olinfo_status = 0; if (tx_ol_req) { - if (ol_flags & PKT_TX_TCP_SEG) { + if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { /* when TSO is on, paylen in descriptor is the * not the packet len but the tcp payload len */ pkt_len -= (tx_offload.l2_len + @@ -855,7 +858,8 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, } ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, - tx_offload); + tx_offload, + rte_security_dynfield(tx_pkt)); txe->last_id = tx_last; tx_id = txe->next_id; @@ -873,6 +877,10 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, } olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT); +#ifdef RTE_LIB_SECURITY + if (use_ipsec) + olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC; +#endif m_seg = tx_pkt; do { @@ -888,7 +896,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, * Set up Transmit Data Descriptor. 
*/ slen = m_seg->data_len; - buf_dma_addr = rte_mbuf_data_dma_addr(m_seg); + buf_dma_addr = rte_mbuf_data_iova(m_seg); txd->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr); txd->read.cmd_type_len = @@ -910,10 +918,10 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, /* Set RS bit only on threshold packets' last descriptor */ if (txq->nb_tx_used >= txq->tx_rs_thresh) { - PMD_TX_FREE_LOG(DEBUG, - "Setting RS bit on TXD id=" - "%4u (port=%d queue=%d)", - tx_last, txq->port_id, txq->queue_id); + PMD_TX_LOG(DEBUG, + "Setting RS bit on TXD id=" + "%4u (port=%d queue=%d)", + tx_last, txq->port_id, txq->queue_id); cmd_type_len |= IXGBE_TXD_CMD_RS; @@ -939,7 +947,7 @@ end_of_tx: PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u", (unsigned) txq->port_id, (unsigned) txq->queue_id, (unsigned) tx_id, (unsigned) nb_tx); - IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id); + IXGBE_PCI_REG_WC_WRITE_RELAXED(txq->tdt_reg_addr, tx_id); txq->tx_tail = tx_id; return nb_tx; @@ -970,25 +978,31 @@ ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) */ if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) { - rte_errno = -EINVAL; + rte_errno = EINVAL; return i; } if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) { - rte_errno = -ENOTSUP; + rte_errno = ENOTSUP; return i; } -#ifdef RTE_LIBRTE_ETHDEV_DEBUG + /* check the size of packet */ + if (m->pkt_len < IXGBE_TX_MIN_PKT_LEN) { + rte_errno = EINVAL; + return i; + } + +#ifdef RTE_ETHDEV_DEBUG_TX ret = rte_validate_tx_offload(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } #endif ret = rte_net_intel_cksum_prepare(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } } @@ -1084,282 +1098,312 @@ ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP 0X9D #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP 0XAD -#define IXGBE_PACKET_TYPE_MAX 0X80 -#define IXGBE_PACKET_TYPE_TN_MAX 0X100 -#define IXGBE_PACKET_TYPE_SHIFT 0X04 +/** + * Use 2 different table for normal packet and tunnel packet + * to save the space. 
+ */ +const uint32_t + ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = { + [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER, + [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4, + [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT, + [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6, + [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6, + [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | 
RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] = + RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP, +}; + +const uint32_t + ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = { + [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER, + [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT, + [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 | + RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 | + RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT | + RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] = + RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 | + RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 | + RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 | + RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | 
RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT | + RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT | + RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] = + RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 | + RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_UDP, + + [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER, + [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT, + [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | 
RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] = + RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] = + RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP, +}; + +static int +ixgbe_monitor_callback(const uint64_t value, + const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused) +{ + const uint64_t m = rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD); + /* + * we expect the DD bit to be set to 1 if this descriptor was already + * written to. + */ + return (value & m) == m ? 
-1 : 0; +} + +int +ixgbe_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc) +{ + volatile union ixgbe_adv_rx_desc *rxdp; + struct ixgbe_rx_queue *rxq = rx_queue; + uint16_t desc; + + desc = rxq->rx_tail; + rxdp = &rxq->rx_ring[desc]; + /* watch for changes in status bit */ + pmc->addr = &rxdp->wb.upper.status_error; + + /* comparison callback */ + pmc->fn = ixgbe_monitor_callback; + + /* the registers are 32-bit */ + pmc->size = sizeof(uint32_t); + + return 0; +} /* @note: fix ixgbe_dev_supported_ptypes_get() if any change here. */ static inline uint32_t ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask) { - /** - * Use 2 different table for normal packet and tunnel packet - * to save the space. - */ - static const uint32_t - ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = { - [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER, - [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4, - [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, - [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP, - [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP, - [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT, - [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP, - [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP, - [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP, - [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6, - [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP, - [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP, - [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP, - [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6_EXT, - [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP, - [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP, - [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP, - [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6, - [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP, - [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6, - [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | - 
RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP, - [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT, - [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP, - [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT, - [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] = - RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP, - }; - - static const uint32_t - ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = { - [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER, - [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT, - [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 | - RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 | - RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT | - RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] = - RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER | - 
RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 | - RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 | - RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 | - RTE_PTYPE_INNER_L4_SCTP, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT | - RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT | - RTE_PTYPE_INNER_L4_SCTP, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] = - RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 | - RTE_PTYPE_INNER_L4_SCTP, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT | - RTE_PTYPE_INNER_L4_SCTP, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT | - RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT | - RTE_PTYPE_INNER_L4_UDP, - - [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER, - [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4_EXT, - [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6_EXT, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4, - 
[IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] = - RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] = - RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN | - RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | - RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP, - }; if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF)) return 
RTE_PTYPE_UNKNOWN; @@ -1387,14 +1431,14 @@ static inline uint64_t ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info) { static uint64_t ip_rss_types_map[16] __rte_cache_aligned = { - 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, - 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, - PKT_RX_RSS_HASH, 0, 0, 0, - 0, 0, 0, PKT_RX_FDIR, + 0, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, + 0, RTE_MBUF_F_RX_RSS_HASH, 0, RTE_MBUF_F_RX_RSS_HASH, + RTE_MBUF_F_RX_RSS_HASH, 0, 0, 0, + 0, 0, 0, RTE_MBUF_F_RX_FDIR, }; #ifdef RTE_LIBRTE_IEEE1588 static uint64_t ip_pkt_etqf_map[8] = { - 0, 0, 0, PKT_RX_IEEE1588_PTP, + 0, 0, 0, RTE_MBUF_F_RX_IEEE1588_PTP, 0, 0, 0, 0, }; @@ -1416,19 +1460,20 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags) /* * Check if VLAN present only. * Do not check whether L3/L4 rx checksum done by NIC or not, - * That can be found from rte_eth_rxmode.hw_ip_checksum flag + * That can be found from rte_eth_rxmode.offloads flag */ pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? vlan_flags : 0; #ifdef RTE_LIBRTE_IEEE1588 if (rx_status & IXGBE_RXD_STAT_TMST) - pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST; + pkt_flags = pkt_flags | RTE_MBUF_F_RX_IEEE1588_TMST; #endif return pkt_flags; } static inline uint64_t -rx_desc_error_to_pkt_flags(uint32_t rx_status) +rx_desc_error_to_pkt_flags(uint32_t rx_status, uint16_t pkt_info, + uint8_t rx_udp_csum_zero_err) { uint64_t pkt_flags; @@ -1437,18 +1482,35 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status) * Bit 30: L4I, L4I integrity error */ static uint64_t error_to_pkt_flags_map[4] = { - PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD, - PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD, - PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD, - PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD + RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD, + RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD, + RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD, + RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD }; pkt_flags = error_to_pkt_flags_map[(rx_status >> IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK]; + /* Mask out the bad UDP checksum error if the hardware has UDP zero + * checksum error issue, so that the software application will then + * have to recompute the checksum itself if needed. 
+ */ + if ((rx_status & IXGBE_RXDADV_ERR_TCPE) && + (pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) && + rx_udp_csum_zero_err) + pkt_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_BAD; + if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) && (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) { - pkt_flags |= PKT_RX_EIP_CKSUM_BAD; + pkt_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD; + } + +#ifdef RTE_LIB_SECURITY + if (rx_status & IXGBE_RXD_STAT_SECP) { + pkt_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD; + if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG) + pkt_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED; } +#endif return pkt_flags; } @@ -1523,7 +1585,9 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) /* convert descriptor fields to rte mbuf flags */ pkt_flags = rx_desc_status_to_pkt_flags(s[j], vlan_flags); - pkt_flags |= rx_desc_error_to_pkt_flags(s[j]); + pkt_flags |= rx_desc_error_to_pkt_flags(s[j], + (uint16_t)pkt_info[j], + rxq->rx_udp_csum_zero_err); pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags ((uint16_t)pkt_info[j]); mb->ol_flags = pkt_flags; @@ -1531,10 +1595,10 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) ixgbe_rxd_pkt_info_to_pkt_type (pkt_info[j], rxq->pkt_type_mask); - if (likely(pkt_flags & PKT_RX_RSS_HASH)) + if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH)) mb->hash.rss = rte_le_to_cpu_32( rxdp[j].wb.lower.hi_dword.rss); - else if (pkt_flags & PKT_RX_FDIR) { + else if (pkt_flags & RTE_MBUF_F_RX_FDIR) { mb->hash.fdir.hash = rte_le_to_cpu_16( rxdp[j].wb.lower.hi_dword.csum_ip.csum) & IXGBE_ATR_HASH_MASK; @@ -1592,7 +1656,7 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf) mb->data_off = RTE_PKTMBUF_HEADROOM; /* populate the descriptors */ - dma_addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mb)); + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb)); rxdp[i].read.hdr_addr = 0; rxdp[i].read.pkt_addr = dma_addr; } @@ -1674,7 +1738,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, /* update tail pointer */ rte_wmb(); - IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, + IXGBE_PCI_REG_WC_WRITE_RELAXED(rxq->rdt_reg_addr, cur_free_trigger); } @@ -1824,7 +1888,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, rxm = rxe->mbuf; rxe->mbuf = nmb; dma_addr = - rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb)); + rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); rxdp->read.hdr_addr = 0; rxdp->read.pkt_addr = dma_addr; @@ -1852,11 +1916,13 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, rxm->port = rxq->port_id; pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data); - /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */ + /* Only valid if RTE_MBUF_F_RX_VLAN set in pkt_flags */ rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan); pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags); - pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); + pkt_flags = pkt_flags | + rx_desc_error_to_pkt_flags(staterr, (uint16_t)pkt_info, + rxq->rx_udp_csum_zero_err); pkt_flags = pkt_flags | ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info); rxm->ol_flags = pkt_flags; @@ -1864,10 +1930,10 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, ixgbe_rxd_pkt_info_to_pkt_type(pkt_info, rxq->pkt_type_mask); - if (likely(pkt_flags & PKT_RX_RSS_HASH)) + if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH)) rxm->hash.rss = rte_le_to_cpu_32( rxd.wb.lower.hi_dword.rss); - else if (pkt_flags & PKT_RX_FDIR) { + else if (pkt_flags & RTE_MBUF_F_RX_FDIR) { rxm->hash.fdir.hash = rte_le_to_cpu_16( rxd.wb.lower.hi_dword.csum_ip.csum) & IXGBE_ATR_HASH_MASK; @@ -1900,7 +1966,7 @@ 
ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, (unsigned) nb_rx); rx_id = (uint16_t) ((rx_id == 0) ? (rxq->nb_rx_desc - 1) : (rx_id - 1)); - IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); + IXGBE_PCI_REG_WC_WRITE(rxq->rdt_reg_addr, rx_id); nb_hold = 0; } rxq->nb_rx_hold = nb_hold; @@ -1943,21 +2009,22 @@ ixgbe_fill_cluster_head_buf( head->port = rxq->port_id; - /* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is + /* The vlan_tci field is only valid when RTE_MBUF_F_RX_VLAN is * set in the pkt_flags field. */ head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan); pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data); pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags); - pkt_flags |= rx_desc_error_to_pkt_flags(staterr); + pkt_flags |= rx_desc_error_to_pkt_flags(staterr, (uint16_t)pkt_info, + rxq->rx_udp_csum_zero_err); pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info); head->ol_flags = pkt_flags; head->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info, rxq->pkt_type_mask); - if (likely(pkt_flags & PKT_RX_RSS_HASH)) + if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH)) head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss); - else if (pkt_flags & PKT_RX_FDIR) { + else if (pkt_flags & RTE_MBUF_F_RX_FDIR) { head->hash.fdir.hash = rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum) & IXGBE_ATR_HASH_MASK; @@ -2009,11 +2076,11 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts, bool eop; struct ixgbe_rx_entry *rxe; struct ixgbe_scattered_rx_entry *sc_entry; - struct ixgbe_scattered_rx_entry *next_sc_entry; + struct ixgbe_scattered_rx_entry *next_sc_entry = NULL; struct ixgbe_rx_entry *next_rxe = NULL; struct rte_mbuf *first_seg; struct rte_mbuf *rxm; - struct rte_mbuf *nmb; + struct rte_mbuf *nmb = NULL; union ixgbe_adv_rx_desc rxd; uint16_t data_len; uint16_t next_id; @@ -2045,8 +2112,7 @@ next_desc: * of the ixgbe PMD. * * TODO: - * - Get rid of "volatile" crap and let the compiler do its - * job. + * - Get rid of "volatile" and let the compiler do its job. * - Use the proper memory barrier (rte_rmb()) to ensure the * memory ordering below. */ @@ -2079,8 +2145,9 @@ next_desc: if (!ixgbe_rx_alloc_bufs(rxq, false)) { rte_wmb(); - IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, - next_rdt); + IXGBE_PCI_REG_WC_WRITE_RELAXED( + rxq->rdt_reg_addr, + next_rdt); nb_hold -= rxq->rx_free_thresh; } else { PMD_RX_LOG(DEBUG, "RX bulk alloc failed " @@ -2118,7 +2185,7 @@ next_desc: if (!bulk_alloc) { __le64 dma = - rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb)); + rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); /* * Update RX descriptor with the physical address of the * new data buffer of the new allocated mbuf. 
@@ -2245,7 +2312,7 @@ next_desc: rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx); rte_wmb(); - IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id); + IXGBE_PCI_REG_WC_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id); nb_hold = 0; } @@ -2273,7 +2340,7 @@ ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts, * **********************************************************************/ -static void __attribute__((cold)) +static void __rte_cold ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq) { unsigned i; @@ -2288,7 +2355,118 @@ ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq) } } -static void __attribute__((cold)) +static int +ixgbe_tx_done_cleanup_full(struct ixgbe_tx_queue *txq, uint32_t free_cnt) +{ + struct ixgbe_tx_entry *swr_ring = txq->sw_ring; + uint16_t i, tx_last, tx_id; + uint16_t nb_tx_free_last; + uint16_t nb_tx_to_clean; + uint32_t pkt_cnt; + + /* Start free mbuf from the next of tx_tail */ + tx_last = txq->tx_tail; + tx_id = swr_ring[tx_last].next_id; + + if (txq->nb_tx_free == 0 && ixgbe_xmit_cleanup(txq)) + return 0; + + nb_tx_to_clean = txq->nb_tx_free; + nb_tx_free_last = txq->nb_tx_free; + if (!free_cnt) + free_cnt = txq->nb_tx_desc; + + /* Loop through swr_ring to count the amount of + * freeable mubfs and packets. + */ + for (pkt_cnt = 0; pkt_cnt < free_cnt; ) { + for (i = 0; i < nb_tx_to_clean && + pkt_cnt < free_cnt && + tx_id != tx_last; i++) { + if (swr_ring[tx_id].mbuf != NULL) { + rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf); + swr_ring[tx_id].mbuf = NULL; + + /* + * last segment in the packet, + * increment packet count + */ + pkt_cnt += (swr_ring[tx_id].last_id == tx_id); + } + + tx_id = swr_ring[tx_id].next_id; + } + + if (txq->tx_rs_thresh > txq->nb_tx_desc - + txq->nb_tx_free || tx_id == tx_last) + break; + + if (pkt_cnt < free_cnt) { + if (ixgbe_xmit_cleanup(txq)) + break; + + nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last; + nb_tx_free_last = txq->nb_tx_free; + } + } + + return (int)pkt_cnt; +} + +static int +ixgbe_tx_done_cleanup_simple(struct ixgbe_tx_queue *txq, + uint32_t free_cnt) +{ + int i, n, cnt; + + if (free_cnt == 0 || free_cnt > txq->nb_tx_desc) + free_cnt = txq->nb_tx_desc; + + cnt = free_cnt - free_cnt % txq->tx_rs_thresh; + + for (i = 0; i < cnt; i += n) { + if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh) + break; + + n = ixgbe_tx_free_bufs(txq); + + if (n == 0) + break; + } + + return i; +} + +static int +ixgbe_tx_done_cleanup_vec(struct ixgbe_tx_queue *txq __rte_unused, + uint32_t free_cnt __rte_unused) +{ + return -ENOTSUP; +} + +int +ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt) +{ + struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue; + if (txq->offloads == 0 && +#ifdef RTE_LIB_SECURITY + !(txq->using_ipsec) && +#endif + txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST) { + if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ && + rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 && + (rte_eal_process_type() != RTE_PROC_PRIMARY || + txq->sw_ring_v != NULL)) { + return ixgbe_tx_done_cleanup_vec(txq, free_cnt); + } else { + return ixgbe_tx_done_cleanup_simple(txq, free_cnt); + } + } + + return ixgbe_tx_done_cleanup_full(txq, free_cnt); +} + +static void __rte_cold ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq) { if (txq != NULL && @@ -2296,24 +2474,25 @@ ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq) rte_free(txq->sw_ring); } -static void __attribute__((cold)) +static void __rte_cold ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq) { if (txq != NULL && 
txq->ops != NULL) { txq->ops->release_mbufs(txq); txq->ops->free_swring(txq); + rte_memzone_free(txq->mz); rte_free(txq); } } -void __attribute__((cold)) -ixgbe_dev_tx_queue_release(void *txq) +void __rte_cold +ixgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) { - ixgbe_tx_queue_release(txq); + ixgbe_tx_queue_release(dev->data->tx_queues[qid]); } /* (Re)set dynamic ixgbe_tx_queue fields to defaults */ -static void __attribute__((cold)) +static void __rte_cold ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq) { static const union ixgbe_adv_tx_desc zeroed_desc = {{0}}; @@ -2363,29 +2542,30 @@ static const struct ixgbe_txq_ops def_txq_ops = { * the queue parameters. Used in tx_queue_setup by primary process and then * in dev_init by secondary process when attaching to an existing ethdev. */ -void __attribute__((cold)) +void __rte_cold ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq) { /* Use a simple Tx queue (no offloads, no multi segs) if possible */ - if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) - && (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) { + if ((txq->offloads == 0) && +#ifdef RTE_LIB_SECURITY + !(txq->using_ipsec) && +#endif + (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) { PMD_INIT_LOG(DEBUG, "Using simple tx code path"); dev->tx_pkt_prepare = NULL; -#ifdef RTE_IXGBE_INC_VECTOR if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ && + rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 && (rte_eal_process_type() != RTE_PROC_PRIMARY || ixgbe_txq_vec_setup(txq) == 0)) { PMD_INIT_LOG(DEBUG, "Vector tx enabled."); dev->tx_pkt_burst = ixgbe_xmit_pkts_vec; } else -#endif dev->tx_pkt_burst = ixgbe_xmit_pkts_simple; } else { PMD_INIT_LOG(DEBUG, "Using full-featured tx code path"); PMD_INIT_LOG(DEBUG, - " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]", - (unsigned long)txq->txq_flags, - (unsigned long)IXGBE_SIMPLE_FLAGS); + " - offloads = 0x%" PRIx64, + txq->offloads); PMD_INIT_LOG(DEBUG, " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]", (unsigned long)txq->tx_rs_thresh, @@ -2395,7 +2575,46 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq) } } -int __attribute__((cold)) +uint64_t +ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev) +{ + RTE_SET_USED(dev); + + return 0; +} + +uint64_t +ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev) +{ + uint64_t tx_offload_capa; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + tx_offload_capa = + RTE_ETH_TX_OFFLOAD_VLAN_INSERT | + RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_UDP_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_CKSUM | + RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_TSO | + RTE_ETH_TX_OFFLOAD_MULTI_SEGS; + + if (hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540) + tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MACSEC_INSERT; + + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) + tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM; + +#ifdef RTE_LIB_SECURITY + if (dev->security_ctx) + tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY; +#endif + return tx_offload_capa; +} + +int __rte_cold ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, @@ -2406,10 +2625,13 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq; struct ixgbe_hw *hw; uint16_t tx_rs_thresh, tx_free_thresh; + uint64_t offloads; PMD_INIT_FUNC_TRACE(); hw = 
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; + /* * Validate number of transmit descriptors. * It must not exceed hardware maximum, and must be multiple @@ -2437,14 +2659,29 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, * tx_rs_thresh must be a divisor of the ring size. * tx_free_thresh must be greater than 0. * tx_free_thresh must be less than the size of the ring minus 3. + * tx_free_thresh + tx_rs_thresh must not exceed nb_desc. * One descriptor in the TX ring is used as a sentinel to avoid a * H/W race condition, hence the maximum threshold constraints. * When set to zero use default values. */ - tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ? - tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH); tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH); + /* force tx_rs_thresh to adapt an aggresive tx_free_thresh */ + tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ? + nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH; + if (tx_conf->tx_rs_thresh > 0) + tx_rs_thresh = tx_conf->tx_rs_thresh; + if (tx_rs_thresh + tx_free_thresh > nb_desc) { + PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not " + "exceed nb_desc. (tx_rs_thresh=%u " + "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)", + (unsigned int)tx_rs_thresh, + (unsigned int)tx_free_thresh, + (unsigned int)nb_desc, + (int)dev->data->port_id, + (int)queue_idx); + return -(EINVAL); + } if (tx_rs_thresh >= (nb_desc - 2)) { PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number " "of TX descriptors minus 2. (tx_rs_thresh=%u " @@ -2525,6 +2762,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, return -ENOMEM; } + txq->mz = tz; txq->nb_tx_desc = nb_desc; txq->tx_rs_thresh = tx_rs_thresh; txq->tx_free_thresh = tx_free_thresh; @@ -2535,9 +2773,13 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ? queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx); txq->port_id = dev->data->port_id; - txq->txq_flags = tx_conf->txq_flags; + txq->offloads = offloads; txq->ops = &def_txq_ops; txq->tx_deferred_start = tx_conf->tx_deferred_start; +#ifdef RTE_LIB_SECURITY + txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads & + RTE_ETH_TX_OFFLOAD_SECURITY); +#endif /* * Modification to set VFTDT for virtual function if vf is detected @@ -2551,7 +2793,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, else txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx)); - txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr); + txq->tx_ring_phys_addr = tz->iova; txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr; /* Allocate software ring */ @@ -2588,10 +2830,10 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, * * @m scattered cluster head */ -static void __attribute__((cold)) +static void __rte_cold ixgbe_free_sc_cluster(struct rte_mbuf *m) { - uint8_t i, nb_segs = m->nb_segs; + uint16_t i, nb_segs = m->nb_segs; struct rte_mbuf *next_seg; for (i = 0; i < nb_segs; i++) { @@ -2601,18 +2843,16 @@ ixgbe_free_sc_cluster(struct rte_mbuf *m) } } -static void __attribute__((cold)) +static void __rte_cold ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq) { unsigned i; -#ifdef RTE_IXGBE_INC_VECTOR /* SSE Vector driver has a different way of releasing mbufs. 
*/ if (rxq->rx_using_sse) { ixgbe_rx_queue_release_mbufs_vec(rxq); return; } -#endif if (rxq->sw_ring != NULL) { for (i = 0; i < rxq->nb_rx_desc; i++) { @@ -2640,21 +2880,22 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq) } } -static void __attribute__((cold)) +static void __rte_cold ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq) { if (rxq != NULL) { ixgbe_rx_queue_release_mbufs(rxq); rte_free(rxq->sw_ring); rte_free(rxq->sw_sc_ring); + rte_memzone_free(rxq->mz); rte_free(rxq); } } -void __attribute__((cold)) -ixgbe_dev_rx_queue_release(void *rxq) +void __rte_cold +ixgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) { - ixgbe_rx_queue_release(rxq); + ixgbe_rx_queue_release(dev->data->rx_queues[qid]); } /* @@ -2665,7 +2906,7 @@ ixgbe_dev_rx_queue_release(void *rxq) * -EINVAL: the preconditions are NOT satisfied and the default Rx burst * function must be used. */ -static inline int __attribute__((cold)) +static inline int __rte_cold check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq) { int ret = 0; @@ -2702,7 +2943,7 @@ check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq) } /* Reset dynamic ixgbe_rx_queue fields back to defaults */ -static void __attribute__((cold)) +static void __rte_cold ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq) { static const union ixgbe_adv_rx_desc zeroed_desc = {{0}}; @@ -2741,16 +2982,96 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq) rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); rxq->rx_tail = 0; rxq->nb_rx_hold = 0; + + if (rxq->pkt_first_seg != NULL) + rte_pktmbuf_free(rxq->pkt_first_seg); + rxq->pkt_first_seg = NULL; rxq->pkt_last_seg = NULL; -#ifdef RTE_IXGBE_INC_VECTOR +#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) rxq->rxrearm_start = 0; rxq->rxrearm_nb = 0; #endif } -int __attribute__((cold)) +static int +ixgbe_is_vf(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + switch (hw->mac.type) { + case ixgbe_mac_82599_vf: + case ixgbe_mac_X540_vf: + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: + return 1; + default: + return 0; + } +} + +uint64_t +ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev) +{ + uint64_t offloads = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->mac.type != ixgbe_mac_82598EB) + offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; + + return offloads; +} + +uint64_t +ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev) +{ + uint64_t offloads; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | + RTE_ETH_RX_OFFLOAD_UDP_CKSUM | + RTE_ETH_RX_OFFLOAD_TCP_CKSUM | + RTE_ETH_RX_OFFLOAD_KEEP_CRC | + RTE_ETH_RX_OFFLOAD_VLAN_FILTER | + RTE_ETH_RX_OFFLOAD_SCATTER | + RTE_ETH_RX_OFFLOAD_RSS_HASH; + + if (hw->mac.type == ixgbe_mac_82598EB) + offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; + + if (ixgbe_is_vf(dev) == 0) + offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; + + /* + * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV + * mode. 
+ */ + if ((hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540 || + hw->mac.type == ixgbe_mac_X550) && + !RTE_ETH_DEV_SRIOV(dev).active) + offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO; + + if (hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540) + offloads |= RTE_ETH_RX_OFFLOAD_MACSEC_STRIP; + + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) + offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM; + +#ifdef RTE_LIB_SECURITY + if (dev->security_ctx) + offloads |= RTE_ETH_RX_OFFLOAD_SECURITY; +#endif + + return offloads; +} + +int __rte_cold ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, @@ -2762,12 +3083,14 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, struct ixgbe_rx_queue *rxq; struct ixgbe_hw *hw; uint16_t len; - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; + uint64_t offloads; PMD_INIT_FUNC_TRACE(); hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; + /* * Validate number of receive descriptors. * It must not exceed hardware maximum, and must be multiple @@ -2797,10 +3120,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ? queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx); rxq->port_id = dev->data->port_id; - rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? - 0 : ETHER_CRC_LEN); + if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) + rxq->crc_len = RTE_ETHER_CRC_LEN; + else + rxq->crc_len = 0; rxq->drop_en = rx_conf->rx_drop_en; rxq->rx_deferred_start = rx_conf->rx_deferred_start; + rxq->offloads = offloads; /* * The packet type in RX descriptor is different for different NICs. @@ -2817,6 +3143,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, else rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_82599; + /* + * 82599 errata, UDP frames with a 0 checksum can be marked as checksum + * errors. + */ + if (hw->mac.type == ixgbe_mac_82599EB) + rxq->rx_udp_csum_zero_err = 1; + /* * Allocate RX ring hardware descriptors. A memzone large enough to * handle the maximum ring size is allocated in order to allow for @@ -2829,6 +3162,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, return -ENOMEM; } + rxq->mz = rz; /* * Zero init all the descriptors in the ring. 
*/ @@ -2853,7 +3187,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx)); } - rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr); + rxq->rx_ring_phys_addr = rz->iova; rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr; /* @@ -2925,14 +3259,14 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, } uint32_t -ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +ixgbe_dev_rx_queue_count(void *rx_queue) { #define IXGBE_RXQ_SCAN_INTERVAL 4 volatile union ixgbe_adv_rx_desc *rxdp; struct ixgbe_rx_queue *rxq; uint32_t desc = 0; - rxq = dev->data->rx_queues[rx_queue_id]; + rxq = rx_queue; rxdp = &(rxq->rx_ring[rxq->rx_tail]); while ((desc < rxq->nb_rx_desc) && @@ -2948,24 +3282,6 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) return desc; } -int -ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset) -{ - volatile union ixgbe_adv_rx_desc *rxdp; - struct ixgbe_rx_queue *rxq = rx_queue; - uint32_t desc; - - if (unlikely(offset >= rxq->nb_rx_desc)) - return 0; - desc = rxq->rx_tail + offset; - if (desc >= rxq->nb_rx_desc) - desc -= rxq->nb_rx_desc; - - rxdp = &rxq->rx_ring[desc]; - return !!(rxdp->wb.upper.status_error & - rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)); -} - int ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset) { @@ -2976,7 +3292,7 @@ ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset) if (unlikely(offset >= rxq->nb_rx_desc)) return -EINVAL; -#ifdef RTE_IXGBE_INC_VECTOR +#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) if (rxq->rx_using_sse) nb_hold = rxq->rxrearm_nb; else @@ -3023,12 +3339,43 @@ ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset) return RTE_ETH_TX_DESC_FULL; } -void __attribute__((cold)) +/* + * Set up link loopback for X540/X550 mode Tx->Rx. 
+ */ +static inline void __rte_cold +ixgbe_setup_loopback_link_x540_x550(struct ixgbe_hw *hw, bool enable) +{ + uint32_t macc; + PMD_INIT_FUNC_TRACE(); + + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; + + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); + macc = IXGBE_READ_REG(hw, IXGBE_MACC); + + if (enable) { + /* datasheet 15.2.1: disable AUTONEG (PHY Bit 7.0.C) */ + autoneg_reg |= IXGBE_MII_AUTONEG_ENABLE; + /* datasheet 15.2.1: MACC.FLU = 1 (force link up) */ + macc |= IXGBE_MACC_FLU; + } else { + autoneg_reg &= ~IXGBE_MII_AUTONEG_ENABLE; + macc &= ~IXGBE_MACC_FLU; + } + + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); + + IXGBE_WRITE_REG(hw, IXGBE_MACC, macc); +} + +void __rte_cold ixgbe_dev_clear_queues(struct rte_eth_dev *dev) { unsigned i; - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); PMD_INIT_FUNC_TRACE(); @@ -3049,6 +3396,14 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev) ixgbe_reset_rx_queue(adapter, rxq); } } + /* If loopback mode was enabled, reconfigure the link accordingly */ + if (dev->data->dev_conf.lpbk_mode != 0) { + if (hw->mac.type == ixgbe_mac_X540 || + hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) + ixgbe_setup_loopback_link_x540_x550(hw, false); + } } void @@ -3059,13 +3414,13 @@ ixgbe_dev_free_queues(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); for (i = 0; i < dev->data->nb_rx_queues; i++) { - ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]); + ixgbe_dev_rx_queue_release(dev, i); dev->data->rx_queues[i] = NULL; } dev->data->nb_rx_queues = 0; for (i = 0; i < dev->data->nb_tx_queues; i++) { - ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]); + ixgbe_dev_tx_queue_release(dev, i); dev->data->tx_queues[i] = NULL; } dev->data->nb_tx_queues = 0; @@ -3150,23 +3505,23 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf) /* Set configured hashing protocols in MRQC register */ rss_hf = rss_conf->rss_hf; mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */ - if (rss_hf & ETH_RSS_IPV4) + if (rss_hf & RTE_ETH_RSS_IPV4) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4; - if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) + if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP; - if (rss_hf & ETH_RSS_IPV6) + if (rss_hf & RTE_ETH_RSS_IPV6) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6; - if (rss_hf & ETH_RSS_IPV6_EX) + if (rss_hf & RTE_ETH_RSS_IPV6_EX) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX; - if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) + if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP; - if (rss_hf & ETH_RSS_IPV6_TCP_EX) + if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP; - if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) + if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; - if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) + if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; - if (rss_hf & ETH_RSS_IPV6_UDP_EX) + if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; IXGBE_WRITE_REG(hw, mrqc_reg, mrqc); } @@ -3248,23 +3603,23 @@ ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev, } rss_hf = 0; if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4) - rss_hf |= ETH_RSS_IPV4; + rss_hf |= RTE_ETH_RSS_IPV4; if (mrqc & 
IXGBE_MRQC_RSS_FIELD_IPV4_TCP) - rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; + rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP; if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6) - rss_hf |= ETH_RSS_IPV6; + rss_hf |= RTE_ETH_RSS_IPV6; if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX) - rss_hf |= ETH_RSS_IPV6_EX; + rss_hf |= RTE_ETH_RSS_IPV6_EX; if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP) - rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; + rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP; if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP) - rss_hf |= ETH_RSS_IPV6_TCP_EX; + rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX; if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP) - rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; + rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP; if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP) - rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; + rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP; if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP) - rss_hf |= ETH_RSS_IPV6_UDP_EX; + rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX; rss_conf->rss_hf = rss_hf; return 0; } @@ -3273,6 +3628,7 @@ static void ixgbe_rss_configure(struct rte_eth_dev *dev) { struct rte_eth_rss_conf rss_conf; + struct ixgbe_adapter *adapter; struct ixgbe_hw *hw; uint32_t reta; uint16_t i; @@ -3281,6 +3637,7 @@ ixgbe_rss_configure(struct rte_eth_dev *dev) uint32_t reta_reg; PMD_INIT_FUNC_TRACE(); + adapter = dev->data->dev_private; hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); sp_reta_size = ixgbe_reta_size_get(hw->mac.type); @@ -3290,16 +3647,18 @@ ixgbe_rss_configure(struct rte_eth_dev *dev) * The byte-swap is needed because NIC registers are in * little-endian order. */ - reta = 0; - for (i = 0, j = 0; i < sp_reta_size; i++, j++) { - reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); - - if (j == dev->data->nb_rx_queues) - j = 0; - reta = (reta << 8) | j; - if ((i & 3) == 3) - IXGBE_WRITE_REG(hw, reta_reg, - rte_bswap32(reta)); + if (adapter->rss_reta_updated == 0) { + reta = 0; + for (i = 0, j = 0; i < sp_reta_size; i++, j++) { + reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); + + if (j == dev->data->nb_rx_queues) + j = 0; + reta = (reta << 8) | j; + if ((i & 3) == 3) + IXGBE_WRITE_REG(hw, reta_reg, + rte_bswap32(reta)); + } } /* @@ -3336,12 +3695,12 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev) cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; num_pools = cfg->nb_queue_pools; /* Check we have a valid number of pools */ - if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) { + if (num_pools != RTE_ETH_16_POOLS && num_pools != RTE_ETH_32_POOLS) { ixgbe_rss_disable(dev); return; } /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */ - nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools); + nb_tcs = (uint8_t)(RTE_ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools); /* * RXPBSIZE @@ -3366,7 +3725,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev) IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize); } /* zero alloc all unused TCs */ - for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + for (i = nb_tcs; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) { uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT)); @@ -3375,7 +3734,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev) } /* MRQC: enable vmdq and dcb */ - mrqc = (num_pools == ETH_16_POOLS) ? + mrqc = (num_pools == RTE_ETH_16_POOLS) ? 
IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN; IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); @@ -3391,7 +3750,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev) /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */ queue_mapping = 0; - for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) + for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) /* * mapping is done with 3 bits per priority, * so shift by i*3 each time @@ -3415,7 +3774,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev) /* VFRE: pool enabling for receive - 16 or 32 */ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), - num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF); + num_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF); /* * MPSAR - allow pools to read specific mac addresses @@ -3497,7 +3856,7 @@ ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev, if (hw->mac.type != ixgbe_mac_82598EB) /*PF VF Transmit Enable*/ IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), - vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF); + vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF); /*Configure general DCB TX parameters*/ ixgbe_dcb_tx_hw_config(dev, dcb_config); @@ -3513,19 +3872,26 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev, uint8_t i, j; /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */ - if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) { - dcb_config->num_tcs.pg_tcs = ETH_8_TCS; - dcb_config->num_tcs.pfc_tcs = ETH_8_TCS; + if (vmdq_rx_conf->nb_queue_pools == RTE_ETH_16_POOLS) { + dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS; + dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS; } else { - dcb_config->num_tcs.pg_tcs = ETH_4_TCS; - dcb_config->num_tcs.pfc_tcs = ETH_4_TCS; + dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS; + dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS; + } + + /* Initialize User Priority to Traffic Class mapping */ + for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0; } + /* User Priority to Traffic Class mapping */ - for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) { j = vmdq_rx_conf->dcb_tc[i]; tc = &dcb_config->tc_config[j]; - tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = - (uint8_t)(1 << j); + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |= + (uint8_t)(1 << i); } } @@ -3539,20 +3905,26 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev, uint8_t i, j; /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */ - if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) { - dcb_config->num_tcs.pg_tcs = ETH_8_TCS; - dcb_config->num_tcs.pfc_tcs = ETH_8_TCS; + if (vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS) { + dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS; + dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS; } else { - dcb_config->num_tcs.pg_tcs = ETH_4_TCS; - dcb_config->num_tcs.pfc_tcs = ETH_4_TCS; + dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS; + dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS; + } + + /* Initialize User Priority to Traffic Class mapping */ + for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0; } /* User Priority to Traffic Class mapping */ - for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) { j = vmdq_tx_conf->dcb_tc[i]; tc = &dcb_config->tc_config[j]; - tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = - (uint8_t)(1 << j); + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |= + (uint8_t)(1 << i); } } 
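The two hunks above (and the matching Rx/Tx ones that follow) change up_to_tc_bitmap from a plain assignment of (1 << j) to a zero-initialisation pass plus |= (1 << i), so each traffic class accumulates one bit per user priority mapped to it instead of being overwritten. A small standalone sketch of the corrected mapping, using simplified stand-in types rather than the real ixgbe_dcb_* structures:

    /* Illustrative sketch, outside the patch: the corrected user
     * priority -> traffic class bitmap built by the hunks above.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_USER_PRIORITIES 8
    #define MAX_TCS             8

    int
    main(void)
    {
        /* example rte_eth_conf-style mapping: priority i -> dcb_tc[i] */
        const uint8_t dcb_tc[NUM_USER_PRIORITIES] = { 0, 0, 1, 1, 2, 2, 3, 3 };
        uint8_t up_to_tc_bitmap[MAX_TCS] = { 0 };   /* one bitmap per TC */
        unsigned int i;

        /* each TC accumulates a bit per user priority mapped to it,
         * i.e. |= (1 << i), not = (1 << j) as before the fix
         */
        for (i = 0; i < NUM_USER_PRIORITIES; i++)
            up_to_tc_bitmap[dcb_tc[i]] |= (uint8_t)(1 << i);

        for (i = 0; i < MAX_TCS; i++)
            printf("TC%u: UP bitmap 0x%02x\n", i, up_to_tc_bitmap[i]);

        return 0;
    }

With the old form, two priorities sharing a TC collapsed to a single bit indexed by the TC number; with the fix, the example prints TC0: 0x03, TC1: 0x0c, TC2: 0x30 and TC3: 0xc0.
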
@@ -3568,12 +3940,18 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev, dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs; dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs; + /* Initialize User Priority to Traffic Class mapping */ + for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0; + } + /* User Priority to Traffic Class mapping */ - for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) { j = rx_conf->dcb_tc[i]; tc = &dcb_config->tc_config[j]; - tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = - (uint8_t)(1 << j); + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |= + (uint8_t)(1 << i); } } @@ -3589,12 +3967,18 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev, dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs; dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs; + /* Initialize User Priority to Traffic Class mapping */ + for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0; + } + /* User Priority to Traffic Class mapping */ - for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) { j = tx_conf->dcb_tc[i]; tc = &dcb_config->tc_config[j]; - tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = - (uint8_t)(1 << j); + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |= + (uint8_t)(1 << i); } } @@ -3751,14 +4135,15 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0}; uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0}; struct ixgbe_dcb_tc_config *tc; - uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + uint32_t max_frame = dev->data->mtu + RTE_ETHER_HDR_LEN + + RTE_ETHER_CRC_LEN; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_bw_conf *bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private); switch (dev->data->dev_conf.rxmode.mq_mode) { - case ETH_MQ_RX_VMDQ_DCB: + case RTE_ETH_MQ_RX_VMDQ_DCB: dcb_config->vt_mode = true; if (hw->mac.type != ixgbe_mac_82598EB) { config_dcb_rx = DCB_RX_CONFIG; @@ -3771,8 +4156,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, ixgbe_vmdq_dcb_configure(dev); } break; - case ETH_MQ_RX_DCB: - case ETH_MQ_RX_DCB_RSS: + case RTE_ETH_MQ_RX_DCB: + case RTE_ETH_MQ_RX_DCB_RSS: dcb_config->vt_mode = false; config_dcb_rx = DCB_RX_CONFIG; /* Get dcb TX configuration parameters from rte_eth_conf */ @@ -3785,7 +4170,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, break; } switch (dev->data->dev_conf.txmode.mq_mode) { - case ETH_MQ_TX_VMDQ_DCB: + case RTE_ETH_MQ_TX_VMDQ_DCB: dcb_config->vt_mode = true; config_dcb_tx = DCB_TX_CONFIG; /* get DCB and VT TX configuration parameters @@ -3796,7 +4181,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config); break; - case ETH_MQ_TX_DCB: + case RTE_ETH_MQ_TX_DCB: dcb_config->vt_mode = false; config_dcb_tx = DCB_TX_CONFIG; /*get DCB TX configuration parameters from rte_eth_conf*/ @@ -3812,15 +4197,15 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, nb_tcs = dcb_config->num_tcs.pfc_tcs; /* Unpack map */ ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map); - if (nb_tcs == ETH_4_TCS) { + if (nb_tcs == RTE_ETH_4_TCS) { /* Avoid un-configured priority mapping to TC0 */ uint8_t j = 4; uint8_t mask = 0xFF; - for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++) + for (i = 0; i < 
RTE_ETH_DCB_NUM_USER_PRIORITIES - 4; i++) mask = (uint8_t)(mask & (~(1 << map[i]))); for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) { - if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES)) + if ((mask & 0x1) && j < RTE_ETH_DCB_NUM_USER_PRIORITIES) map[j++] = i; mask >>= 1; } @@ -3870,9 +4255,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize); } /* zero alloc all unused TCs */ - for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); - } } if (config_dcb_tx) { /* Only support an equally distributed @@ -3886,7 +4270,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); } /* Clear unused TCs, if any, to zero buffer size*/ - for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) { IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); } @@ -3922,7 +4306,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, ixgbe_dcb_config_tc_stats_82599(hw, dcb_config); /* Check if the PFC is supported */ - if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) { + if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) { pbsize = (uint16_t)(rx_buffer_size / nb_tcs); for (i = 0; i < nb_tcs; i++) { /* @@ -3936,7 +4320,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, tc->pfc = ixgbe_dcb_pfc_enabled; } ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en); - if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS) + if (dcb_config->num_tcs.pfc_tcs == RTE_ETH_4_TCS) pfc_en &= 0x0F; ret = ixgbe_dcb_config_pfc(hw, pfc_en, map); } @@ -3957,12 +4341,12 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); /* check support mq_mode for DCB */ - if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) && - (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) && - (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS)) + if (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_DCB && + dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB && + dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB_RSS) return; - if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES) + if (dev->data->nb_rx_queues > RTE_ETH_DCB_NUM_QUEUES) return; /** Configure DCB hardware **/ @@ -4018,7 +4402,7 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev) /* VFRE: pool enabling for receive - 64 */ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX); - if (num_pools == ETH_64_POOLS) + if (num_pools == RTE_ETH_64_POOLS) IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX); /* @@ -4093,7 +4477,7 @@ ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw) IXGBE_WRITE_FLUSH(hw); } -static int __attribute__((cold)) +static int __rte_cold ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq) { struct ixgbe_rx_entry *rxe = rxq->sw_ring; @@ -4111,14 +4495,11 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq) return -ENOMEM; } - rte_mbuf_refcnt_set(mbuf, 1); - mbuf->next = NULL; mbuf->data_off = RTE_PKTMBUF_HEADROOM; - mbuf->nb_segs = 1; mbuf->port = rxq->port_id; dma_addr = - rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf)); + rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); rxd = &rxq->rx_ring[i]; rxd->read.hdr_addr = 0; rxd->read.pkt_addr = dma_addr; @@ -4142,11 +4523,11 @@ ixgbe_config_vf_rss(struct rte_eth_dev *dev) mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); mrqc &= ~IXGBE_MRQC_MRQE_MASK; switch (RTE_ETH_DEV_SRIOV(dev).active) { - case ETH_64_POOLS: + case RTE_ETH_64_POOLS: mrqc |= 
IXGBE_MRQC_VMDQRSS64EN; break; - case ETH_32_POOLS: + case RTE_ETH_32_POOLS: mrqc |= IXGBE_MRQC_VMDQRSS32EN; break; @@ -4167,17 +4548,17 @@ ixgbe_config_vf_default(struct rte_eth_dev *dev) IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); switch (RTE_ETH_DEV_SRIOV(dev).active) { - case ETH_64_POOLS: + case RTE_ETH_64_POOLS: IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQEN); break; - case ETH_32_POOLS: + case RTE_ETH_32_POOLS: IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT4TCEN); break; - case ETH_16_POOLS: + case RTE_ETH_16_POOLS: IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT8TCEN); break; @@ -4204,21 +4585,21 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev) * any DCB/RSS w/o VMDq multi-queue setting */ switch (dev->data->dev_conf.rxmode.mq_mode) { - case ETH_MQ_RX_RSS: - case ETH_MQ_RX_DCB_RSS: - case ETH_MQ_RX_VMDQ_RSS: + case RTE_ETH_MQ_RX_RSS: + case RTE_ETH_MQ_RX_DCB_RSS: + case RTE_ETH_MQ_RX_VMDQ_RSS: ixgbe_rss_configure(dev); break; - case ETH_MQ_RX_VMDQ_DCB: + case RTE_ETH_MQ_RX_VMDQ_DCB: ixgbe_vmdq_dcb_configure(dev); break; - case ETH_MQ_RX_VMDQ_ONLY: + case RTE_ETH_MQ_RX_VMDQ_ONLY: ixgbe_vmdq_rx_hw_configure(dev); break; - case ETH_MQ_RX_NONE: + case RTE_ETH_MQ_RX_NONE: default: /* if mq_mode is none, disable rss mode.*/ ixgbe_rss_disable(dev); @@ -4229,18 +4610,18 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev) * Support RSS together with SRIOV. */ switch (dev->data->dev_conf.rxmode.mq_mode) { - case ETH_MQ_RX_RSS: - case ETH_MQ_RX_VMDQ_RSS: + case RTE_ETH_MQ_RX_RSS: + case RTE_ETH_MQ_RX_VMDQ_RSS: ixgbe_config_vf_rss(dev); break; - case ETH_MQ_RX_VMDQ_DCB: - case ETH_MQ_RX_DCB: + case RTE_ETH_MQ_RX_VMDQ_DCB: + case RTE_ETH_MQ_RX_DCB: /* In SRIOV, the configuration is the same as VMDq case */ ixgbe_vmdq_dcb_configure(dev); break; /* DCB/RSS together with SRIOV is not supported */ - case ETH_MQ_RX_VMDQ_DCB_RSS: - case ETH_MQ_RX_DCB_RSS: + case RTE_ETH_MQ_RX_VMDQ_DCB_RSS: + case RTE_ETH_MQ_RX_DCB_RSS: PMD_INIT_LOG(ERR, "Could not support DCB/RSS with VMDq & SRIOV"); return -1; @@ -4274,7 +4655,7 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev) * SRIOV inactive scheme * any DCB w/o VMDq multi-queue setting */ - if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY) + if (dev->data->dev_conf.txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY) ixgbe_vmdq_tx_hw_configure(hw); else { mtqc = IXGBE_MTQC_64Q_1PB; @@ -4287,13 +4668,13 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev) * SRIOV active scheme * FIXME if support DCB together with VMDq & SRIOV */ - case ETH_64_POOLS: + case RTE_ETH_64_POOLS: mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF; break; - case ETH_32_POOLS: + case RTE_ETH_32_POOLS: mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF; break; - case ETH_16_POOLS: + case RTE_ETH_16_POOLS: mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; break; @@ -4326,7 +4707,7 @@ ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool) /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */ uint16_t maxdesc = - IPV4_MAX_PKT_LEN / + RTE_IPV4_MAX_PKT_LEN / (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM); if (maxdesc >= 16) @@ -4396,22 +4777,21 @@ ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type) } } -void __attribute__((cold)) +void __rte_cold ixgbe_set_rx_function(struct rte_eth_dev *dev) { uint16_t i, rx_using_sse; - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; /* * In order to allow Vector Rx there are a few configuration * conditions 
to be met and Rx Bulk Allocation should be allowed. */ if (ixgbe_rx_vec_dev_conf_condition_check(dev) || - !adapter->rx_bulk_alloc_allowed) { + !adapter->rx_bulk_alloc_allowed || + rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128) { PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx " - "preconditions or RTE_IXGBE_INC_VECTOR is " - "not enabled", + "preconditions", dev->data->port_id); adapter->rx_vec_allowed = false; @@ -4500,6 +4880,10 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev) struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i]; rxq->rx_using_sse = rx_using_sse; +#ifdef RTE_LIB_SECURITY + rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads & + RTE_ETH_RX_OFFLOAD_SECURITY); +#endif } } @@ -4526,10 +4910,10 @@ ixgbe_set_rsc(struct rte_eth_dev *dev) /* Sanity check */ dev->dev_ops->dev_infos_get(dev, &dev_info); - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) + if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO) rsc_capable = true; - if (!rsc_capable && rx_conf->enable_lro) { + if (!rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) { PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't " "support it"); return -EINVAL; @@ -4537,7 +4921,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev) /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */ - if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) { + if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) && + (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) { /* * According to chapter of 4.6.7.2.1 of the Spec Rev. * 3.0 RSC configuration requires HW CRC stripping being @@ -4551,20 +4936,16 @@ ixgbe_set_rsc(struct rte_eth_dev *dev) /* RFCTL configuration */ rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL); - if ((rsc_capable) && (rx_conf->enable_lro)) - /* - * Since NFS packets coalescing is not supported - clear - * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is - * enabled. - */ - rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS | - IXGBE_RFCTL_NFSR_DIS); + if ((rsc_capable) && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) + rfctl &= ~IXGBE_RFCTL_RSC_DIS; else rfctl |= IXGBE_RFCTL_RSC_DIS; + /* disable NFS filtering */ + rfctl |= IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS; IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl); /* If LRO hasn't been requested - we are done here. */ - if (!rx_conf->enable_lro) + if (!(rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) return 0; /* Set RDRXCTL.RSCACKC bit */ @@ -4621,7 +5002,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev) * at most 500us latency for a single RSC aggregation. */ eitr &= ~IXGBE_EITR_ITR_INT_MASK; - eitr |= IXGBE_EITR_INTERVAL_US(500) | IXGBE_EITR_CNT_WDIS; + eitr |= IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT); + eitr |= IXGBE_EITR_CNT_WDIS; IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl); IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl); @@ -4645,7 +5027,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev) /* * Initializes Receive Unit. */ -int __attribute__((cold)) +int __rte_cold ixgbe_dev_rx_init(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; @@ -4661,6 +5043,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) uint16_t buf_size; uint16_t i; struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode; + uint32_t frame_size = dev->data->mtu + IXGBE_ETH_OVERHEAD; int rc; PMD_INIT_FUNC_TRACE(); @@ -4684,34 +5067,44 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) * Configure CRC stripping, if any. 
*/ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); - if (rx_conf->hw_strip_crc) - hlreg0 |= IXGBE_HLREG0_RXCRCSTRP; - else + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP; + else + hlreg0 |= IXGBE_HLREG0_RXCRCSTRP; /* * Configure jumbo frame support, if any. */ - if (rx_conf->jumbo_frame == 1) { + if (dev->data->mtu > RTE_ETHER_MTU) { hlreg0 |= IXGBE_HLREG0_JUMBOEN; maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); maxfrs &= 0x0000FFFF; - maxfrs |= (rx_conf->max_rx_pkt_len << 16); + maxfrs |= (frame_size << 16); IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs); } else hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; /* - * If loopback mode is configured for 82599, set LPBK bit. + * If loopback mode is configured, set LPBK bit. */ - if (hw->mac.type == ixgbe_mac_82599EB && - dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX) + if (dev->data->dev_conf.lpbk_mode != 0) { + rc = ixgbe_check_supported_loopback_mode(dev); + if (rc < 0) { + PMD_INIT_LOG(ERR, "Unsupported loopback mode"); + return rc; + } hlreg0 |= IXGBE_HLREG0_LPBK; - else + } else { hlreg0 &= ~IXGBE_HLREG0_LPBK; + } IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + /* + * Assume no header split and no VLAN strip support + * on any Rx queue first . + */ + rx_conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; /* Setup RX queues */ for (i = 0; i < dev->data->nb_rx_queues; i++) { rxq = dev->data->rx_queues[i]; @@ -4720,7 +5113,10 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) * Reset crc_len in case it was changed after queue setup by a * call to configure. */ - rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN; + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) + rxq->crc_len = RTE_ETHER_CRC_LEN; + else + rxq->crc_len = 0; /* Setup the Base and Length of the Rx Descriptor Rings */ bus_addr = rxq->rx_ring_phys_addr; @@ -4734,28 +5130,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0); /* Configure the SRRCTL register */ -#ifdef RTE_HEADER_SPLIT_ENABLE - /* - * Configure Header Split - */ - if (rx_conf->header_split) { - if (hw->mac.type == ixgbe_mac_82599EB) { - /* Must setup the PSRTYPE register */ - uint32_t psrtype; - - psrtype = IXGBE_PSRTYPE_TCPHDR | - IXGBE_PSRTYPE_UDPHDR | - IXGBE_PSRTYPE_IPV4HDR | - IXGBE_PSRTYPE_IPV6HDR; - IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype); - } - srrctl = ((rx_conf->split_hdr_size << - IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & - IXGBE_SRRCTL_BSIZEHDR_MASK); - srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; - } else -#endif - srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; /* Set if packets are dropped when no descriptors available */ if (rxq->drop_en) @@ -4778,12 +5153,13 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) IXGBE_SRRCTL_BSIZEPKT_SHIFT); /* It adds dual VLAN length for supporting dual VLAN */ - if (dev->data->dev_conf.rxmode.max_rx_pkt_len + - 2 * IXGBE_VLAN_TAG_SIZE > buf_size) + if (frame_size + 2 * RTE_VLAN_HLEN > buf_size) dev->data->scattered_rx = 1; + if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) + rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; } - if (rx_conf->enable_scatter) + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) dev->data->scattered_rx = 1; /* @@ -4798,7 +5174,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) */ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); rxcsum |= IXGBE_RXCSUM_PCSD; - if (rx_conf->hw_ip_checksum) + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM) rxcsum |= IXGBE_RXCSUM_IPPCSE; else rxcsum &= ~IXGBE_RXCSUM_IPPCSE; @@ -4808,10 
+5184,10 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) if (hw->mac.type == ixgbe_mac_82599EB || hw->mac.type == ixgbe_mac_X540) { rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); - if (rx_conf->hw_strip_crc) - rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; - else + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP; + else + rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); } @@ -4828,7 +5204,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) /* * Initializes Transmit Unit. */ -void __attribute__((cold)) +void __rte_cold ixgbe_dev_tx_init(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; @@ -4895,10 +5271,29 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev) ixgbe_dev_mq_tx_configure(dev); } +/* + * Check if requested loopback mode is supported + */ +int +ixgbe_check_supported_loopback_mode(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_TX_RX) + if (hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540 || + hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) + return 0; + + return -ENOTSUP; +} + /* * Set up link for 82599 loopback mode Tx->Rx. */ -static inline void __attribute__((cold)) +static inline void __rte_cold ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw) { PMD_INIT_FUNC_TRACE(); @@ -4926,7 +5321,7 @@ ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw) /* * Start Transmit and Receive Units. */ -int __attribute__((cold)) +int __rte_cold ixgbe_dev_rxtx_start(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; @@ -4982,10 +5377,31 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev) rxctrl |= IXGBE_RXCTRL_RXEN; hw->mac.ops.enable_rx_dma(hw, rxctrl); - /* If loopback mode is enabled for 82599, set up the link accordingly */ - if (hw->mac.type == ixgbe_mac_82599EB && - dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX) - ixgbe_setup_loopback_link_82599(hw); + /* If loopback mode is enabled, set up the link accordingly */ + if (dev->data->dev_conf.lpbk_mode != 0) { + if (hw->mac.type == ixgbe_mac_82599EB) + ixgbe_setup_loopback_link_82599(hw); + else if (hw->mac.type == ixgbe_mac_X540 || + hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) + ixgbe_setup_loopback_link_x540_x550(hw, true); + } + +#ifdef RTE_LIB_SECURITY + if ((dev->data->dev_conf.rxmode.offloads & + RTE_ETH_RX_OFFLOAD_SECURITY) || + (dev->data->dev_conf.txmode.offloads & + RTE_ETH_TX_OFFLOAD_SECURITY)) { + ret = ixgbe_crypto_enable_ipsec(dev); + if (ret != 0) { + PMD_DRV_LOG(ERR, + "ixgbe_crypto_enable_ipsec fails with %d.", + ret); + return ret; + } + } +#endif return 0; } @@ -4993,7 +5409,7 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev) /* * Start Receive Units for specified queue. 
*/ -int __attribute__((cold)) +int __rte_cold ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) { struct ixgbe_hw *hw; @@ -5004,34 +5420,30 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) PMD_INIT_FUNC_TRACE(); hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (rx_queue_id < dev->data->nb_rx_queues) { - rxq = dev->data->rx_queues[rx_queue_id]; - - /* Allocate buffers for descriptor rings */ - if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) { - PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d", - rx_queue_id); - return -1; - } - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); - rxdctl |= IXGBE_RXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl); + rxq = dev->data->rx_queues[rx_queue_id]; - /* Wait until RX Enable ready */ - poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; - do { - rte_delay_ms(1); - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); - } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE)); - if (!poll_ms) - PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", - rx_queue_id); - rte_wmb(); - IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0); - IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1); - dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; - } else + /* Allocate buffers for descriptor rings */ + if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) { + PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d", + rx_queue_id); return -1; + } + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); + rxdctl |= IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl); + + /* Wait until RX Enable ready */ + poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; + do { + rte_delay_ms(1); + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); + } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id); + rte_wmb(); + IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; return 0; } @@ -5039,12 +5451,11 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) /* * Stop Receive Units for specified queue. 
*/ -int __attribute__((cold)) +int __rte_cold ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) { struct ixgbe_hw *hw; - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; struct ixgbe_rx_queue *rxq; uint32_t rxdctl; int poll_ms; @@ -5052,30 +5463,26 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) PMD_INIT_FUNC_TRACE(); hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (rx_queue_id < dev->data->nb_rx_queues) { - rxq = dev->data->rx_queues[rx_queue_id]; + rxq = dev->data->rx_queues[rx_queue_id]; - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); - rxdctl &= ~IXGBE_RXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl); + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); + rxdctl &= ~IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl); - /* Wait until RX Enable bit clear */ - poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; - do { - rte_delay_ms(1); - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); - } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE)); - if (!poll_ms) - PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", - rx_queue_id); + /* Wait until RX Enable bit clear */ + poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; + do { + rte_delay_ms(1); + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); + } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id); - rte_delay_us(RTE_IXGBE_WAIT_100_US); + rte_delay_us(RTE_IXGBE_WAIT_100_US); - ixgbe_rx_queue_release_mbufs(rxq); - ixgbe_reset_rx_queue(adapter, rxq); - dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; - } else - return -1; + ixgbe_rx_queue_release_mbufs(rxq); + ixgbe_reset_rx_queue(adapter, rxq); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; return 0; } @@ -5084,7 +5491,7 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) /* * Start Transmit Units for specified queue. 
*/ -int __attribute__((cold)) +int __rte_cold ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) { struct ixgbe_hw *hw; @@ -5095,30 +5502,27 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) PMD_INIT_FUNC_TRACE(); hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (tx_queue_id < dev->data->nb_tx_queues) { - txq = dev->data->tx_queues[tx_queue_id]; - txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx)); - txdctl |= IXGBE_TXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl); + txq = dev->data->tx_queues[tx_queue_id]; + IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0); + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx)); + txdctl |= IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl); - /* Wait until TX Enable ready */ - if (hw->mac.type == ixgbe_mac_82599EB) { - poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; - do { - rte_delay_ms(1); - txdctl = IXGBE_READ_REG(hw, - IXGBE_TXDCTL(txq->reg_idx)); - } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE)); - if (!poll_ms) - PMD_INIT_LOG(ERR, "Could not enable " - "Tx Queue %d", tx_queue_id); - } - rte_wmb(); - IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0); - IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0); - dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; - } else - return -1; + /* Wait until TX Enable ready */ + if (hw->mac.type == ixgbe_mac_82599EB) { + poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; + do { + rte_delay_ms(1); + txdctl = IXGBE_READ_REG(hw, + IXGBE_TXDCTL(txq->reg_idx)); + } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", + tx_queue_id); + } + rte_wmb(); + IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; return 0; } @@ -5126,7 +5530,7 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) /* * Stop Transmit Units for specified queue. 
*/ -int __attribute__((cold)) +int __rte_cold ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) { struct ixgbe_hw *hw; @@ -5138,9 +5542,6 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) PMD_INIT_FUNC_TRACE(); hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (tx_queue_id >= dev->data->nb_tx_queues) - return -1; - txq = dev->data->tx_queues[tx_queue_id]; /* Wait until TX queue is empty */ @@ -5154,8 +5555,9 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) IXGBE_TDT(txq->reg_idx)); } while (--poll_ms && (txtdh != txtdt)); if (!poll_ms) - PMD_INIT_LOG(ERR, "Tx Queue %d is not empty " - "when stopping.", tx_queue_id); + PMD_INIT_LOG(ERR, + "Tx Queue %d is not empty when stopping.", + tx_queue_id); } txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx)); @@ -5171,8 +5573,8 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) IXGBE_TXDCTL(txq->reg_idx)); } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE)); if (!poll_ms) - PMD_INIT_LOG(ERR, "Could not disable " - "Tx Queue %d", tx_queue_id); + PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d", + tx_queue_id); } if (txq->ops != NULL) { @@ -5199,6 +5601,7 @@ ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; qinfo->conf.rx_drop_en = rxq->drop_en; qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; + qinfo->conf.offloads = rxq->offloads; } void @@ -5217,18 +5620,20 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.tx_free_thresh = txq->tx_free_thresh; qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh; - qinfo->conf.txq_flags = txq->txq_flags; + qinfo->conf.offloads = txq->offloads; qinfo->conf.tx_deferred_start = txq->tx_deferred_start; } /* * [VF] Initializes Receive Unit. */ -int __attribute__((cold)) +int __rte_cold ixgbevf_dev_rx_init(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; struct ixgbe_rx_queue *rxq; + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + uint32_t frame_size = dev->data->mtu + IXGBE_ETH_OVERHEAD; uint64_t bus_addr; uint32_t srrctl, psrtype = 0; uint16_t buf_size; @@ -5265,9 +5670,15 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way, * VF packets received can work in all cases. */ - ixgbevf_rlpml_set_vf(hw, - (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len); + if (ixgbevf_rlpml_set_vf(hw, frame_size) != 0) + PMD_INIT_LOG(ERR, "Set max packet length to %d failed.", + frame_size); + /* + * Assume no header split and no VLAN strip support + * on any Rx queue first . 
+ */ + rxmode->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; /* Setup RX queues */ for (i = 0; i < dev->data->nb_rx_queues; i++) { rxq = dev->data->rx_queues[i]; @@ -5291,18 +5702,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) /* Configure the SRRCTL register */ -#ifdef RTE_HEADER_SPLIT_ENABLE - /* - * Configure Header Split - */ - if (dev->data->dev_conf.rxmode.header_split) { - srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size << - IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & - IXGBE_SRRCTL_BSIZEHDR_MASK); - srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; - } else -#endif - srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; /* Set if packets are dropped when no descriptors available */ if (rxq->drop_en) @@ -5327,24 +5727,17 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) << IXGBE_SRRCTL_BSIZEPKT_SHIFT); - if (dev->data->dev_conf.rxmode.enable_scatter || + if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER || /* It adds dual VLAN length for supporting dual VLAN */ - (dev->data->dev_conf.rxmode.max_rx_pkt_len + - 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) { + (frame_size + 2 * RTE_VLAN_HLEN) > buf_size) { if (!dev->data->scattered_rx) PMD_INIT_LOG(DEBUG, "forcing scatter mode"); dev->data->scattered_rx = 1; } - } -#ifdef RTE_HEADER_SPLIT_ENABLE - if (dev->data->dev_conf.rxmode.header_split) - /* Must setup the PSRTYPE register */ - psrtype = IXGBE_PSRTYPE_TCPHDR | - IXGBE_PSRTYPE_UDPHDR | - IXGBE_PSRTYPE_IPV4HDR | - IXGBE_PSRTYPE_IPV6HDR; -#endif + if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) + rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; + } /* Set RQPL for VF RSS according to max Rx queue */ psrtype |= (dev->data->nb_rx_queues >> 1) << @@ -5359,7 +5752,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) /* * [VF] Initializes Transmit Unit. */ -void __attribute__((cold)) +void __rte_cold ixgbevf_dev_tx_init(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; @@ -5400,7 +5793,7 @@ ixgbevf_dev_tx_init(struct rte_eth_dev *dev) /* * [VF] Start Transmit and Receive Units. 
*/ -void __attribute__((cold)) +void __rte_cold ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; @@ -5461,14 +5854,119 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) } } -/* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */ -int __attribute__((weak)) +int +ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out, + const struct rte_flow_action_rss *in) +{ + if (in->key_len > RTE_DIM(out->key) || + in->queue_num > RTE_DIM(out->queue)) + return -EINVAL; + out->conf = (struct rte_flow_action_rss){ + .func = in->func, + .level = in->level, + .types = in->types, + .key_len = in->key_len, + .queue_num = in->queue_num, + .key = memcpy(out->key, in->key, in->key_len), + .queue = memcpy(out->queue, in->queue, + sizeof(*in->queue) * in->queue_num), + }; + return 0; +} + +int +ixgbe_action_rss_same(const struct rte_flow_action_rss *comp, + const struct rte_flow_action_rss *with) +{ + return (comp->func == with->func && + comp->level == with->level && + comp->types == with->types && + comp->key_len == with->key_len && + comp->queue_num == with->queue_num && + !memcmp(comp->key, with->key, with->key_len) && + !memcmp(comp->queue, with->queue, + sizeof(*with->queue) * with->queue_num)); +} + +int +ixgbe_config_rss_filter(struct rte_eth_dev *dev, + struct ixgbe_rte_flow_rss_conf *conf, bool add) +{ + struct ixgbe_hw *hw; + uint32_t reta; + uint16_t i; + uint16_t j; + uint16_t sp_reta_size; + uint32_t reta_reg; + struct rte_eth_rss_conf rss_conf = { + .rss_key = conf->conf.key_len ? + (void *)(uintptr_t)conf->conf.key : NULL, + .rss_key_len = conf->conf.key_len, + .rss_hf = conf->conf.types, + }; + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + sp_reta_size = ixgbe_reta_size_get(hw->mac.type); + + if (!add) { + if (ixgbe_action_rss_same(&filter_info->rss_info.conf, + &conf->conf)) { + ixgbe_rss_disable(dev); + memset(&filter_info->rss_info, 0, + sizeof(struct ixgbe_rte_flow_rss_conf)); + return 0; + } + return -EINVAL; + } + + if (filter_info->rss_info.conf.queue_num) + return -EINVAL; + /* Fill in redirection table + * The byte-swap is needed because NIC registers are in + * little-endian order. + */ + reta = 0; + for (i = 0, j = 0; i < sp_reta_size; i++, j++) { + reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); + + if (j == conf->conf.queue_num) + j = 0; + reta = (reta << 8) | conf->conf.queue[j]; + if ((i & 3) == 3) + IXGBE_WRITE_REG(hw, reta_reg, + rte_bswap32(reta)); + } + + /* Configure the RSS key and the RSS protocols used to compute + * the RSS hash of input packets. 
+ */ + if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) { + ixgbe_rss_disable(dev); + return 0; + } + if (rss_conf.rss_key == NULL) + rss_conf.rss_key = rss_intel_key; /* Default hash key */ + ixgbe_hw_rss_hash_set(hw, &rss_conf); + + if (ixgbe_rss_conf_init(&filter_info->rss_info, &conf->conf)) + return -EINVAL; + + return 0; +} + +/* Stubs needed for linkage when RTE_ARCH_PPC_64 is set */ +#if defined(RTE_ARCH_PPC_64) +int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev) { return -1; } -uint16_t __attribute__((weak)) +uint16_t ixgbe_recv_pkts_vec( void __rte_unused *rx_queue, struct rte_mbuf __rte_unused **rx_pkts, @@ -5477,7 +5975,7 @@ ixgbe_recv_pkts_vec( return 0; } -uint16_t __attribute__((weak)) +uint16_t ixgbe_recv_scattered_pkts_vec( void __rte_unused *rx_queue, struct rte_mbuf __rte_unused **rx_pkts, @@ -5486,8 +5984,29 @@ ixgbe_recv_scattered_pkts_vec( return 0; } -int __attribute__((weak)) +int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq) { return -1; } + +uint16_t +ixgbe_xmit_fixed_burst_vec(void __rte_unused *tx_queue, + struct rte_mbuf __rte_unused **tx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} + +int +ixgbe_txq_vec_setup(struct ixgbe_tx_queue __rte_unused *txq) +{ + return -1; +} + +void +ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue __rte_unused *rxq) +{ + return; +} +#endif
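Finally, the ixgbe_dev_tx_done_cleanup() callback introduced earlier in this diff is reached through the generic rte_eth_tx_done_cleanup() ethdev call. A hedged usage sketch, not part of the patch; the port/queue values and the free count of 64 are arbitrary placeholders:

    /* Illustrative sketch, outside the patch: requesting that completed
     * Tx mbufs be freed via the ethdev API, which dispatches to
     * ixgbe_dev_tx_done_cleanup() for this PMD.
     */
    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    reclaim_tx_mbufs(uint16_t port_id, uint16_t queue_id)
    {
        /* free up to 64 already-transmitted packets; a count of 0 means
         * "free as many as possible". Returns the number freed or a
         * negative errno (e.g. -ENOTSUP for the vector Tx path stub).
         */
        int freed = rte_eth_tx_done_cleanup(port_id, queue_id, 64);

        if (freed < 0)
            printf("tx done cleanup failed: %d\n", freed);
        else
            printf("freed %d packets on port %u queue %u\n",
                   freed, port_id, queue_id);
    }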