-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * Copyright 2014 6WIND S.A.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation.
+ * Copyright 2014 6WIND S.A.
*/
#include <sys/queue.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_ip.h>
+#include <rte_net.h>
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_common.h"
#include "ixgbe_rxtx.h"
+#ifdef RTE_LIBRTE_IEEE1588
+#define IXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#else
+#define IXGBE_TX_IEEE1588_TMST 0
+#endif
-/* Bit Mask to indicate what bits required for building TX context */
+/* Bit mask to indicate which bits are required for building the TX context */
#define IXGBE_TX_OFFLOAD_MASK ( \
PKT_TX_VLAN_PKT | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG | \
- PKT_TX_OUTER_IP_CKSUM)
+ PKT_TX_MACSEC | \
+ PKT_TX_OUTER_IP_CKSUM | \
+ PKT_TX_SEC_OFFLOAD | \
+ IXGBE_TX_IEEE1588_TMST)
+
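+/* Every flag present in PKT_TX_OFFLOAD_MASK but absent from
+ * IXGBE_TX_OFFLOAD_MASK is unsupported by this PMD; the XOR below
+ * yields exactly those bits, and ixgbe_prep_pkts() rejects any packet
+ * whose ol_flags intersect them.
+ */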
+#define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
#if 1
#define RTE_PMD_USE_PREFETCH
#define rte_ixgbe_prefetch(p) do {} while (0)
#endif
+#ifdef RTE_IXGBE_INC_VECTOR
+uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+#endif
+
/*********************************************************************
*
* TX functions
* Check for descriptors with their DD bit set and free mbufs.
* Return the total number of buffers freed.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
{
struct ixgbe_tx_entry *txep;
for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
/* free buffers one at a time */
- m = __rte_pktmbuf_prefree_seg(txep->mbuf);
+ m = rte_pktmbuf_prefree_seg(txep->mbuf);
txep->mbuf = NULL;
if (unlikely(m == NULL))
int i;
for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
- buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
+ buf_dma_addr = rte_mbuf_data_iova(*pkts);
pkt_len = (*pkts)->data_len;
/* write data to descriptor */
uint64_t buf_dma_addr;
uint32_t pkt_len;
- buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
+ buf_dma_addr = rte_mbuf_data_iova(*pkts);
pkt_len = (*pkts)->data_len;
/* write data to descriptor */
/* update tail pointer */
rte_wmb();
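+ /* rte_wmb() above already orders the descriptor stores against this
+ * doorbell, so a relaxed MMIO write of the tail register is
+ * sufficient and cheaper on weakly ordered platforms.
+ */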
- IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
+ IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);
return nb_pkts;
}
return nb_tx;
}
+#ifdef RTE_IXGBE_INC_VECTOR
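+/* Hand the fixed-burst vector routine at most tx_rs_thresh packets per
+ * call; stop early when a chunk is only partially sent, which means
+ * the ring is full.
+ */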
+static uint16_t
+ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+
+ while (nb_pkts) {
+ uint16_t ret, num;
+
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+ ret = ixgbe_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
+ num);
+ nb_tx += ret;
+ nb_pkts -= ret;
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
+#endif
+
static inline void
ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
- uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
+ uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
+ __rte_unused uint64_t *mdata)
{
uint32_t type_tucmd_mlhl;
uint32_t mss_l4len_idx = 0;
seqnum_seed |= tx_offload.l2_len
<< IXGBE_ADVTXD_TUNNEL_LEN;
}
+#ifdef RTE_LIBRTE_SECURITY
+ if (ol_flags & PKT_TX_SEC_OFFLOAD) {
+ union ixgbe_crypto_tx_desc_md *md =
+ (union ixgbe_crypto_tx_desc_md *)mdata;
+ seqnum_seed |=
+ (IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & md->sa_idx);
+ type_tucmd_mlhl |= md->enc ?
+ (IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
+ IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
+ type_tucmd_mlhl |=
+ (md->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
+ tx_offload_mask.sa_idx |= ~0;
+ tx_offload_mask.sec_pad_len |= ~0;
+ }
+#endif
txq->ctx_cache[ctx_idx].flags = ol_flags;
txq->ctx_cache[ctx_idx].tx_offload.data[0] =
cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
+ if (ol_flags & PKT_TX_MACSEC)
+ cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
return cmdtype;
}
uint32_t ctx = 0;
uint32_t new_ctx;
union ixgbe_tx_offload tx_offload;
+#ifdef RTE_LIBRTE_SECURITY
+ uint8_t use_ipsec;
+#endif
tx_offload.data[0] = 0;
tx_offload.data[1] = 0;
* are needed for offload functionality.
*/
ol_flags = tx_pkt->ol_flags;
+#ifdef RTE_LIBRTE_SECURITY
+ use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
+#endif
/* If hardware offload required */
tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
tx_offload.tso_segsz = tx_pkt->tso_segsz;
tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
+#ifdef RTE_LIBRTE_SECURITY
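+ /* Tx metadata for the security offload travels in the mbuf's
+ * udata64 field; carry the SA index and ESP pad length into
+ * the offload context.
+ */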
+ if (use_ipsec) {
+ union ixgbe_crypto_tx_desc_md *ipsec_mdata =
+ (union ixgbe_crypto_tx_desc_md *)
+ &tx_pkt->udata64;
+ tx_offload.sa_idx = ipsec_mdata->sa_idx;
+ tx_offload.sec_pad_len = ipsec_mdata->pad_len;
+ }
+#endif
/* If new context need be built or reuse the exist ctx. */
ctx = what_advctx_update(txq, tx_ol_req,
}
ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
- tx_offload);
+ tx_offload, &tx_pkt->udata64);
txe->last_id = tx_last;
tx_id = txe->next_id;
}
olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+#ifdef RTE_LIBRTE_SECURITY
+ if (use_ipsec)
+ olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
+#endif
m_seg = tx_pkt;
do {
* Set up Transmit Data Descriptor.
*/
slen = m_seg->data_len;
- buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
txd->read.buffer_addr =
rte_cpu_to_le_64(buf_dma_addr);
txd->read.cmd_type_len =
PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
(unsigned) txq->port_id, (unsigned) txq->queue_id,
(unsigned) tx_id, (unsigned) nb_tx);
- IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
+ IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
txq->tx_tail = tx_id;
return nb_tx;
}
+/*********************************************************************
+ *
+ * TX prep functions
+ *
+ **********************************************************************/
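+/* Callback for rte_eth_tx_prepare(): check each mbuf against the ixgbe
+ * Tx limits and prepare pseudo-header checksums where the requested
+ * offloads need it; on error, set rte_errno and return the index of
+ * the first bad packet.
+ */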
+uint16_t
+ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i, ret;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /**
+ * Check that the packet does not exceed the limit on the number
+ * of segments.
+ *
+ * NOTE: for ixgbe it is always (40 - WTHRESH) for both TSO and
+ * non-TSO.
+ */
+
+ if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) {
+ rte_errno = EINVAL;
+ return i;
+ }
+
+ if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
/*********************************************************************
*
* RX functions
*
**********************************************************************/
-#define IXGBE_PACKET_TYPE_IPV4 0X01
-#define IXGBE_PACKET_TYPE_IPV4_TCP 0X11
-#define IXGBE_PACKET_TYPE_IPV4_UDP 0X21
-#define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41
-#define IXGBE_PACKET_TYPE_IPV4_EXT 0X03
-#define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43
-#define IXGBE_PACKET_TYPE_IPV6 0X04
-#define IXGBE_PACKET_TYPE_IPV6_TCP 0X14
-#define IXGBE_PACKET_TYPE_IPV6_UDP 0X24
-#define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C
-#define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C
-#define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C
-#define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05
-#define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15
-#define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25
-#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
-#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
-#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
+
+#define IXGBE_PACKET_TYPE_ETHER 0X00
+#define IXGBE_PACKET_TYPE_IPV4 0X01
+#define IXGBE_PACKET_TYPE_IPV4_TCP 0X11
+#define IXGBE_PACKET_TYPE_IPV4_UDP 0X21
+#define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41
+#define IXGBE_PACKET_TYPE_IPV4_EXT 0X03
+#define IXGBE_PACKET_TYPE_IPV4_EXT_TCP 0X13
+#define IXGBE_PACKET_TYPE_IPV4_EXT_UDP 0X23
+#define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43
+#define IXGBE_PACKET_TYPE_IPV6 0X04
+#define IXGBE_PACKET_TYPE_IPV6_TCP 0X14
+#define IXGBE_PACKET_TYPE_IPV6_UDP 0X24
+#define IXGBE_PACKET_TYPE_IPV6_SCTP 0X44
+#define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C
+#define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C
+#define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C
+#define IXGBE_PACKET_TYPE_IPV6_EXT_SCTP 0X4C
+#define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP 0X45
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6 0X07
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP 0X17
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP 0X27
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP 0X47
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP 0X4D
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT 0X0F
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP 0X1F
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP 0X2F
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP 0X4F
#define IXGBE_PACKET_TYPE_NVGRE 0X00
#define IXGBE_PACKET_TYPE_NVGRE_IPV4 0X01
#define IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP 0X21
#define IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP 0X41
#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT 0X03
+#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP 0X13
+#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP 0X23
#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP 0X43
#define IXGBE_PACKET_TYPE_NVGRE_IPV6 0X04
#define IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP 0X14
#define IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP 0X24
+#define IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP 0X44
#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT 0X0C
#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP 0X1C
#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP 0X2C
+#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP 0X4C
#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6 0X05
#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP 0X15
#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP 0X25
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP 0xA1
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP 0xC1
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT 0x83
+#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP 0X93
+#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP 0XA3
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP 0XC3
#define IXGBE_PACKET_TYPE_VXLAN_IPV6 0X84
#define IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP 0X94
#define IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP 0XA4
+#define IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP 0XC4
#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT 0X8C
#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP 0X9C
#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP 0XAC
+#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP 0XCC
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6 0X85
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP 0X95
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP 0XA5
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP 0X9D
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP 0XAD
-#define IXGBE_PACKET_TYPE_MAX 0X80
-#define IXGBE_PACKET_TYPE_TN_MAX 0X100
-#define IXGBE_PACKET_TYPE_SHIFT 0X04
+/**
+ * Use two different tables for normal and tunneled packets
+ * to save space.
+ */
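+/* The table index is the packet-type field from the Rx descriptor:
+ * bit 0x01 IPv4, 0x02 IPv4-EXT, 0x04 IPv6, 0x08 IPv6-EXT, 0x10 TCP,
+ * 0x20 UDP, 0x40 SCTP; the tunnel table adds 0x80 for VXLAN entries.
+ */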
+const uint32_t
+ ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
+ [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER,
+ [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4,
+ [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6,
+ [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] =
+ RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
+};
+
+const uint32_t
+ ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = {
+ [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
+ RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] =
+ RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
+ RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] =
+ RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_UDP,
+
+ [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] =
+ RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] =
+ RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
+};
-/* @note: fix ixgbe_dev_supported_ptypes_get() if any change here. */
+/* @note: fix ixgbe_dev_supported_ptypes_get() if anything changes here. */
static inline uint32_t
ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
{
- /**
- * Use 2 different table for normal packet and tunnel packet
- * to save the space.
- */
- static const uint32_t
- ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
- [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4,
- [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT,
- [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6,
- [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6,
- [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6_EXT,
- [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT,
- [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
- [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
- };
-
- static const uint32_t
- ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = {
- [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT,
- [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
- RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
- RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
- RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] =
- RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
- RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
- RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
- RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] =
- RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
- RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
- RTE_PTYPE_INNER_L4_SCTP,
-
- [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT,
- [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] =
- RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] =
- RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP,
- };
if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
return RTE_PTYPE_UNKNOWN;
* Bit 30: L4I, L4I integrity error
*/
static uint64_t error_to_pkt_flags_map[4] = {
- 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
};
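+ /* The two checksum-error bits from the descriptor status index this
+ * map (L4 error in bit 0, IP error in bit 1 after the shift); GOOD
+ * flags are reported explicitly so applications can tell "verified
+ * OK" from "not verified".
+ */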
pkt_flags = error_to_pkt_flags_map[(rx_status >>
pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
}
+#ifdef RTE_LIBRTE_SECURITY
+ if (rx_status & IXGBE_RXD_STAT_SECP) {
+ pkt_flags |= PKT_RX_SEC_OFFLOAD;
+ if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
+ pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+ }
+#endif
+
return pkt_flags;
}
for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
- /* Read desc statuses backwards to avoid race condition */
- for (j = LOOK_AHEAD-1; j >= 0; --j)
+ /* Read desc statuses in order */
+ for (j = 0; j < LOOK_AHEAD; j++)
s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
- for (j = LOOK_AHEAD - 1; j >= 0; --j)
- pkt_info[j] = rte_le_to_cpu_32(rxdp[j].wb.lower.
- lo_dword.data);
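+ /* Ensure the status loads above complete before the pkt_info
+ * loads below on weakly ordered CPUs.
+ */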
+ rte_smp_rmb();
- /* Compute how many status bits were set */
+ /* Count contiguous descriptors, from the first, whose DD bit is set */
- nb_dd = 0;
- for (j = 0; j < LOOK_AHEAD; ++j)
- nb_dd += s[j] & IXGBE_RXDADV_STAT_DD;
+ for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
+ (s[nb_dd] & IXGBE_RXDADV_STAT_DD); nb_dd++)
+ ;
+
+ for (j = 0; j < nb_dd; j++)
+ pkt_info[j] = rte_le_to_cpu_32(rxdp[j].wb.lower.
+ lo_dword.data);
nb_rx += nb_dd;
/* populate the static rte mbuf fields */
mb = rxep[i].mbuf;
if (reset_mbuf) {
- mb->next = NULL;
- mb->nb_segs = 1;
mb->port = rxq->port_id;
}
mb->data_off = RTE_PKTMBUF_HEADROOM;
/* populate the descriptors */
- dma_addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mb));
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
rxdp[i].read.hdr_addr = 0;
rxdp[i].read.pkt_addr = dma_addr;
}
/* update tail pointer */
rte_wmb();
- IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, cur_free_trigger);
+ IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
+ cur_free_trigger);
}
if (rxq->rx_tail >= rxq->nb_rx_desc)
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
rxm->port = rxq->port_id;
pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
- /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
+ /* Only valid if PKT_RX_VLAN set in pkt_flags */
rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags);
head->port = rxq->port_id;
- /* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
+ /* The vlan_tci field is only valid when PKT_RX_VLAN is
* set in the pkt_flags field.
*/
head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
if (!ixgbe_rx_alloc_bufs(rxq, false)) {
rte_wmb();
- IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr,
- next_rdt);
+ IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
+ next_rdt);
nb_hold -= rxq->rx_free_thresh;
} else {
PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
if (!bulk_alloc) {
__le64 dma =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
/*
* Update RX descriptor with the physical address of the
* new data buffer of the new allocated mbuf.
goto next_desc;
}
- /*
- * This is the last buffer of the received packet - return
- * the current cluster to the user.
- */
- rxm->next = NULL;
-
/* Initialize the first mbuf of the returned packet */
ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
rte_wmb();
- IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, prev_id);
+ IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id);
nb_hold = 0;
}
ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
{
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
- if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
- && (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
+ if ((txq->offloads == 0) &&
+#ifdef RTE_LIBRTE_SECURITY
+ !(txq->using_ipsec) &&
+#endif
+ (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
PMD_INIT_LOG(DEBUG, "Using simple tx code path");
+ dev->tx_pkt_prepare = NULL;
#ifdef RTE_IXGBE_INC_VECTOR
if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
(rte_eal_process_type() != RTE_PROC_PRIMARY ||
} else {
PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
PMD_INIT_LOG(DEBUG,
- " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
- (unsigned long)txq->txq_flags,
- (unsigned long)IXGBE_SIMPLE_FLAGS);
+ " - offloads = 0x%" PRIx64,
+ txq->offloads);
PMD_INIT_LOG(DEBUG,
" - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
(unsigned long)txq->tx_rs_thresh,
(unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
dev->tx_pkt_burst = ixgbe_xmit_pkts;
+ dev->tx_pkt_prepare = ixgbe_prep_pkts;
}
}
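+/* All ixgbe Tx offloads are port-wide: nothing can be enabled on a
+ * single queue only, hence the empty per-queue capability set.
+ */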
+uint64_t
+ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+uint64_t
+ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t tx_offload_capa;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540)
+ tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a)
+ tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+ if (dev->security_ctx)
+ tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+#endif
+ return tx_offload_capa;
+}
+
int __attribute__((cold))
ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
struct ixgbe_tx_queue *txq;
struct ixgbe_hw *hw;
uint16_t tx_rs_thresh, tx_free_thresh;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
/*
* Validate number of transmit descriptors.
* It must not exceed hardware maximum, and must be multiple
txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
txq->port_id = dev->data->port_id;
- txq->txq_flags = tx_conf->txq_flags;
+ txq->offloads = offloads;
txq->ops = &def_txq_ops;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
+#ifdef RTE_LIBRTE_SECURITY
+ txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
+ DEV_TX_OFFLOAD_SECURITY);
+#endif
/*
* Modification to set VFTDT for virtual function if vf is detected
else
txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
- txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+ txq->tx_ring_phys_addr = tz->iova;
txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
/* Allocate software ring */
static void __attribute__((cold))
ixgbe_free_sc_cluster(struct rte_mbuf *m)
{
- uint8_t i, nb_segs = m->nb_segs;
+ uint16_t i, nb_segs = m->nb_segs;
struct rte_mbuf *next_seg;
for (i = 0; i < nb_segs; i++) {
* rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
* rxq->rx_free_thresh < rxq->nb_rx_desc
* (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
- * rxq->nb_rx_desc<(IXGBE_MAX_RING_DESC-RTE_PMD_IXGBE_RX_MAX_BURST)
* Scattered packets are not supported. This should be checked
* outside of this function.
*/
"rxq->rx_free_thresh=%d",
rxq->nb_rx_desc, rxq->rx_free_thresh);
ret = -EINVAL;
- } else if (!(rxq->nb_rx_desc <
- (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST))) {
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
- "rxq->nb_rx_desc=%d, "
- "IXGBE_MAX_RING_DESC=%d, "
- "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
- rxq->nb_rx_desc, IXGBE_MAX_RING_DESC,
- RTE_PMD_IXGBE_RX_MAX_BURST);
- ret = -EINVAL;
}
return ret;
/*
* By default, the Rx queue setup function allocates enough memory for
* IXGBE_MAX_RING_DESC. The Rx Burst bulk allocation function requires
- * extra memory at the end of the descriptor ring to be zero'd out. A
- * pre-condition for using the Rx burst bulk alloc function is that the
- * number of descriptors is less than or equal to
- * (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST). Check all the
- * constraints here to see if we need to zero out memory after the end
- * of the H/W descriptor ring.
+ * extra memory at the end of the descriptor ring to be zero'd out.
*/
if (adapter->rx_bulk_alloc_allowed)
/* zero out extra memory */
#endif
}
+static int
+ixgbe_is_vf(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599_vf:
+ case ixgbe_mac_X540_vf:
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ case ixgbe_mac_X550EM_a_vf:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
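+/* Per-queue Rx offload capabilities: only VLAN stripping can be
+ * toggled per queue, and not on 82598 where stripping is a port-wide
+ * setting (see ixgbe_get_rx_port_offloads() below).
+ */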
+uint64_t
+ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t offloads = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type != ixgbe_mac_82598EB)
+ offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ return offloads;
+}
+
+uint64_t
+ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t offloads;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_SCATTER;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ if (ixgbe_is_vf(dev) == 0)
+ offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_VLAN_EXTEND);
+
+ /*
+ * RSC is only supported by 82599 and x540 PF devices in non-SR-IOV
+ * mode.
+ */
+ if ((hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540) &&
+ !RTE_ETH_DEV_SRIOV(dev).active)
+ offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540)
+ offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a)
+ offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+ if (dev->security_ctx)
+ offloads |= DEV_RX_OFFLOAD_SECURITY;
+#endif
+
+ return offloads;
+}
+
int __attribute__((cold))
ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t len;
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
/*
* Validate number of receive descriptors.
* It must not exceed hardware maximum, and must be multiple
rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ rxq->offloads = offloads;
/*
* The packet type in RX descriptor is different for different NICs.
IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
}
- rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+ rxq->rx_ring_phys_addr = rz->iova;
rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
/*
struct ixgbe_rx_queue *rxq;
uint32_t desc = 0;
- if (rx_queue_id >= dev->data->nb_rx_queues) {
- PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
- return 0;
- }
-
rxq = dev->data->rx_queues[rx_queue_id];
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
}
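+/* Report the state of the Rx descriptor 'offset' entries past the
+ * tail: DONE once its DD bit is set, UNAVAIL while it sits in the
+ * not-yet-rearmed window (nb_hold, or rxrearm_nb on the vector path),
+ * AVAIL otherwise.
+ */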
+int
+ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct ixgbe_rx_queue *rxq = rx_queue;
+ volatile uint32_t *status;
+ uint32_t nb_hold, desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+#ifdef RTE_IXGBE_INC_VECTOR
+ if (rxq->rx_using_sse)
+ nb_hold = rxq->rxrearm_nb;
+ else
+#endif
+ nb_hold = rxq->nb_rx_hold;
+ if (offset >= rxq->nb_rx_desc - nb_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].wb.upper.status_error;
+ if (*status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
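+/* Tx descriptors are written back only on RS-bit boundaries, so round
+ * the probed position up to the next descriptor carrying the RS bit
+ * (a multiple of tx_rs_thresh) before testing its DD flag.
+ */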
+int
+ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct ixgbe_tx_queue *txq = tx_queue;
+ volatile uint32_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ /* go to next desc that has the RS bit */
+ desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
+ txq->tx_rs_thresh;
+ if (desc >= txq->nb_tx_desc) {
+ desc -= txq->nb_tx_desc;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+ }
+
+ status = &txq->tx_ring[desc].wb.status;
+ if (*status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
void __attribute__((cold))
ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
/**
* ixgbe_dcb_config_tx_hw_config - Configure general DCB TX parameters
- * @hw: pointer to hardware structure
+ * @dev: pointer to eth_dev structure
* @dcb_config: pointer to ixgbe_dcb_config structure
*/
static void
-ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
+ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
struct ixgbe_dcb_config *dcb_config)
{
uint32_t reg;
- uint32_t q;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
if (hw->mac.type != ixgbe_mac_82598EB) {
reg |= IXGBE_MTQC_VT_ENA;
IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
- /* Disable drop for all queues */
- for (q = 0; q < 128; q++)
- IXGBE_WRITE_REG(hw, IXGBE_QDE,
- (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
-
/* Enable the Tx desc arbiter */
reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
reg &= ~IXGBE_RTTDCS_ARBDIS;
vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
/*Configure general DCB TX parameters*/
- ixgbe_dcb_tx_hw_config(hw, dcb_config);
+ ixgbe_dcb_tx_hw_config(dev, dcb_config);
}
static void
dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
}
+
+ /* Initialize User Priority to Traffic Class mapping */
+ for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
+ }
+
/* User Priority to Traffic Class mapping */
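+ /* Several user priorities may map to the same TC, so OR bit i (the
+ * priority) into the TC's bitmap instead of overwriting it.
+ */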
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
j = vmdq_rx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
- tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
- (uint8_t)(1 << j);
+ tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
+ (uint8_t)(1 << i);
}
}
dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
}
+ /* Initialize User Priority to Traffic Class mapping */
+ for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
+ }
+
/* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
j = vmdq_tx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
- tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
- (uint8_t)(1 << j);
+ tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
+ (uint8_t)(1 << i);
}
}
dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
+ /* Initialize User Priority to Traffic Class mapping */
+ for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
+ }
+
/* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
j = rx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
- tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
- (uint8_t)(1 << j);
+ tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
+ (uint8_t)(1 << i);
}
}
dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
+ /* Initialize User Priority to Traffic Class mapping */
+ for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
+ }
+
/* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
j = tx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
- tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
- (uint8_t)(1 << j);
+ tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
+ (uint8_t)(1 << i);
}
}
/**
* ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
- * @hw: pointer to hardware structure
+ * @dev: pointer to eth_dev structure
* @dcb_config: pointer to ixgbe_dcb_config structure
*/
static void
-ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ixgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
{
uint32_t reg;
uint32_t vlanctrl;
uint8_t i;
+ uint32_t q;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
/*
}
IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
+
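+ /* With SR-IOV active, a VF that stops polling must not be able to
+ * back-pressure the shared Rx path, so drop-on-full is enabled for
+ * every queue; without SR-IOV the default no-drop behaviour is kept.
+ */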
+ if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+ /* Disable drop for all queues in VMDQ mode */
+ for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
+ IXGBE_WRITE_REG(hw, IXGBE_QDE,
+ (IXGBE_QDE_WRITE |
+ (q << IXGBE_QDE_IDX_SHIFT)));
+ } else {
+ /* Enable drop for all queues in SRIOV mode */
+ for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
+ IXGBE_WRITE_REG(hw, IXGBE_QDE,
+ (IXGBE_QDE_WRITE |
+ (q << IXGBE_QDE_IDX_SHIFT) |
+ IXGBE_QDE_ENABLE));
+ }
}
/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_bw_conf *bw_conf =
+ IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
switch (dev->data->dev_conf.rxmode.mq_mode) {
case ETH_MQ_RX_VMDQ_DCB:
/* Get dcb TX configuration parameters from rte_eth_conf */
ixgbe_dcb_rx_config(dev, dcb_config);
/*Configure general DCB RX parameters*/
- ixgbe_dcb_rx_hw_config(hw, dcb_config);
+ ixgbe_dcb_rx_hw_config(dev, dcb_config);
break;
default:
PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
/*get DCB TX configuration parameters from rte_eth_conf*/
ixgbe_dcb_tx_config(dev, dcb_config);
/*Configure general DCB TX parameters*/
- ixgbe_dcb_tx_hw_config(hw, dcb_config);
+ ixgbe_dcb_tx_hw_config(dev, dcb_config);
break;
default:
PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
/* Re-configure 4 TCs BW */
for (i = 0; i < nb_tcs; i++) {
tc = &dcb_config->tc_config[i];
- tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
- (uint8_t)(100 / nb_tcs);
+ if (bw_conf->tc_num != nb_tcs)
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs);
tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
(uint8_t)(100 / nb_tcs);
}
tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
}
+ } else {
+ /* Re-configure 8 TCs BW */
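+ /* 100 does not divide evenly by 8, so alternate 12 and 13
+ * percent across the TCs to make the shares sum to 100.
+ */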
+ for (i = 0; i < nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ if (bw_conf->tc_num != nb_tcs)
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs + (i & 1));
+ tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs + (i & 1));
+ }
}
switch (hw->mac.type) {
(dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
return;
- if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
+ if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
return;
/** Configure DCB hardware **/
return -ENOMEM;
}
- rte_mbuf_refcnt_set(mbuf, 1);
- mbuf->next = NULL;
mbuf->data_off = RTE_PKTMBUF_HEADROOM;
- mbuf->nb_segs = 1;
mbuf->port = rxq->port_id;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
rxd = &rxq->rx_ring[i];
rxd->read.hdr_addr = 0;
rxd->read.pkt_addr = dma_addr;
break;
}
} else {
- /*
- * SRIOV active scheme
- * Support RSS together with VMDq & SRIOV
+ /* SRIOV active scheme
+ * Support RSS together with SRIOV.
*/
switch (dev->data->dev_conf.rxmode.mq_mode) {
case ETH_MQ_RX_RSS:
case ETH_MQ_RX_VMDQ_RSS:
ixgbe_config_vf_rss(dev);
break;
-
- /* FIXME if support DCB/RSS together with VMDq & SRIOV */
case ETH_MQ_RX_VMDQ_DCB:
+ case ETH_MQ_RX_DCB:
+ /* In SRIOV, the configuration is the same as the VMDq case */
+ ixgbe_vmdq_dcb_configure(dev);
+ break;
+ /* DCB/RSS together with SRIOV is not supported */
case ETH_MQ_RX_VMDQ_DCB_RSS:
+ case ETH_MQ_RX_DCB_RSS:
PMD_INIT_LOG(ERR,
- "Could not support DCB with VMDq & SRIOV");
+ "Could not support DCB/RSS with VMDq & SRIOV");
return -1;
default:
ixgbe_config_vf_default(dev);
struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
rxq->rx_using_sse = rx_using_sse;
+#ifdef RTE_LIBRTE_SECURITY
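+ /* The Rx paths consult this flag when parsing received descriptors. */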
+ rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_SECURITY);
+#endif
}
}
bool rsc_capable = false;
uint16_t i;
uint32_t rdrxctl;
+ uint32_t rfctl;
/* Sanity check */
dev->dev_ops->dev_infos_get(dev, &dev_info);
if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
rsc_capable = true;
- if (!rsc_capable && rx_conf->enable_lro) {
+ if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
"support it");
return -EINVAL;
/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
- if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
+ if (!(rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) &&
+ (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
/*
* According to chapter 4.6.7.2.1 of the Spec Rev.
* 3.0 RSC configuration requires HW CRC stripping being
}
/* RFCTL configuration */
- if (rsc_capable) {
- uint32_t rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
-
- if (rx_conf->enable_lro)
- /*
- * Since NFS packets coalescing is not supported - clear
- * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
- * enabled.
- */
- rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
- IXGBE_RFCTL_NFSR_DIS);
- else
- rfctl |= IXGBE_RFCTL_RSC_DIS;
-
- IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
- }
+ rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
+ if (rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+ /*
+ * Since NFS packet coalescing is not supported, clear
+ * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
+ * enabled.
+ */
+ rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
+ IXGBE_RFCTL_NFSR_DIS);
+ else
+ rfctl |= IXGBE_RFCTL_RSC_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
/* If LRO hasn't been requested - we are done here. */
- if (!rx_conf->enable_lro)
+ if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
return 0;
/* Set RDRXCTL.RSCACKC bit */
* at most 500us latency for a single RSC aggregation.
*/
eitr &= ~IXGBE_EITR_ITR_INT_MASK;
- eitr |= IXGBE_EITR_INTERVAL_US(500) | IXGBE_EITR_CNT_WDIS;
+ eitr |= IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
+ eitr |= IXGBE_EITR_CNT_WDIS;
IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
* Configure CRC stripping, if any.
*/
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- if (rx_conf->hw_strip_crc)
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
else
hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
/*
* Configure jumbo frame support, if any.
*/
- if (rx_conf->jumbo_frame == 1) {
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
maxfrs &= 0x0000FFFF;
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ /*
+ * Assume no VLAN strip support on any Rx queue first;
+ * it is turned back on below if any queue requests it.
+ */
+ rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
/* Setup RX queues */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
* Reset crc_len in case it was changed after queue setup by a
* call to configure.
*/
- rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
+ rxq->crc_len = (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) ?
+ 0 : ETHER_CRC_LEN;
/* Setup the Base and Length of the Rx Descriptor Rings */
bus_addr = rxq->rx_ring_phys_addr;
IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
/* Configure the SRRCTL register */
-#ifdef RTE_HEADER_SPLIT_ENABLE
- /*
- * Configure Header Split
- */
- if (rx_conf->header_split) {
- if (hw->mac.type == ixgbe_mac_82599EB) {
- /* Must setup the PSRTYPE register */
- uint32_t psrtype;
-
- psrtype = IXGBE_PSRTYPE_TCPHDR |
- IXGBE_PSRTYPE_UDPHDR |
- IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_IPV6HDR;
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
- }
- srrctl = ((rx_conf->split_hdr_size <<
- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK);
- srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
- } else
-#endif
- srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
/* Set if packets are dropped when no descriptors available */
if (rxq->drop_en)
if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * IXGBE_VLAN_TAG_SIZE > buf_size)
dev->data->scattered_rx = 1;
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
}
- if (rx_conf->enable_scatter)
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
dev->data->scattered_rx = 1;
/*
*/
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
rxcsum |= IXGBE_RXCSUM_PCSD;
- if (rx_conf->hw_ip_checksum)
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
rxcsum |= IXGBE_RXCSUM_IPPCSE;
else
rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
if (hw->mac.type == ixgbe_mac_82599EB ||
hw->mac.type == ixgbe_mac_X540) {
rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- if (rx_conf->hw_strip_crc)
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
else
rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
ixgbe_setup_loopback_link_82599(hw);
+#ifdef RTE_LIBRTE_SECURITY
+ if ((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_SECURITY) ||
+ (dev->data->dev_conf.txmode.offloads &
+ DEV_TX_OFFLOAD_SECURITY)) {
+ ret = ixgbe_crypto_enable_ipsec(dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR,
+ "ixgbe_crypto_enable_ipsec fails with %d.",
+ ret);
+ return ret;
+ }
+ }
+#endif
+
return 0;
}
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.rx_drop_en = rxq->drop_en;
qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
}
void
qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
- qinfo->conf.txq_flags = txq->txq_flags;
+ qinfo->conf.offloads = txq->offloads;
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
{
struct ixgbe_hw *hw;
struct ixgbe_rx_queue *rxq;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
uint64_t bus_addr;
uint32_t srrctl, psrtype = 0;
uint16_t buf_size;
ixgbevf_rlpml_set_vf(hw,
(uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ /*
+ * Assume no VLAN strip support on any Rx queue first;
+ * it is turned back on below if any queue requests it.
+ */
+ rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
/* Setup RX queues */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
/* Configure the SRRCTL register */
-#ifdef RTE_HEADER_SPLIT_ENABLE
- /*
- * Configure Header Split
- */
- if (dev->data->dev_conf.rxmode.header_split) {
- srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK);
- srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
- } else
-#endif
- srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
/* Set if packets are dropped when no descriptors available */
if (rxq->drop_en)
buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
IXGBE_SRRCTL_BSIZEPKT_SHIFT);
- if (dev->data->dev_conf.rxmode.enable_scatter ||
+ if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
/* It adds dual VLAN length for supporting dual VLAN */
- (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ (rxmode->max_rx_pkt_len +
2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->data->scattered_rx = 1;
}
- }
-#ifdef RTE_HEADER_SPLIT_ENABLE
- if (dev->data->dev_conf.rxmode.header_split)
- /* Must setup the PSRTYPE register */
- psrtype = IXGBE_PSRTYPE_TCPHDR |
- IXGBE_PSRTYPE_UDPHDR |
- IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_IPV6HDR;
-#endif
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
/* Set RQPL for VF RSS according to max Rx queue */
psrtype |= (dev->data->nb_rx_queues >> 1) <<
}
}
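+/**
+ * ixgbe_rss_conf_init - deep-copy an RSS action
+ *
+ * Copy the key and queue arrays into the storage embedded in
+ * struct ixgbe_rte_flow_rss_conf so that the configuration remains
+ * valid after the caller's buffers are gone.
+ *
+ * Returns 0 on success, -EINVAL if either array exceeds the embedded
+ * storage.
+ */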
+int
+ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in)
+{
+ if (in->key_len > RTE_DIM(out->key) ||
+ in->queue_num > RTE_DIM(out->queue))
+ return -EINVAL;
+ out->conf = (struct rte_flow_action_rss){
+ .func = in->func,
+ .level = in->level,
+ .types = in->types,
+ .key_len = in->key_len,
+ .queue_num = in->queue_num,
+ .key = memcpy(out->key, in->key, in->key_len),
+ .queue = memcpy(out->queue, in->queue,
+ sizeof(*in->queue) * in->queue_num),
+ };
+ return 0;
+}
+
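+/**
+ * ixgbe_action_rss_same - compare two RSS actions field by field
+ *
+ * Returns non-zero when both actions describe the same hash function,
+ * level, types, key bytes and queue list.
+ */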
+int
+ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with)
+{
+ return (comp->func == with->func &&
+ comp->level == with->level &&
+ comp->types == with->types &&
+ comp->key_len == with->key_len &&
+ comp->queue_num == with->queue_num &&
+ !memcmp(comp->key, with->key, with->key_len) &&
+ !memcmp(comp->queue, with->queue,
+ sizeof(*with->queue) * with->queue_num));
+}
+
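+/**
+ * ixgbe_config_rss_filter - apply or remove an RSS flow filter
+ *
+ * When @add is false, the filter is removed only if it matches the one
+ * currently installed.  Only one RSS filter is supported at a time, so
+ * adding while another is active returns -EINVAL.
+ */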
+int
+ixgbe_config_rss_filter(struct rte_eth_dev *dev,
+ struct ixgbe_rte_flow_rss_conf *conf, bool add)
+{
+ struct ixgbe_hw *hw;
+ uint32_t reta;
+ uint16_t i;
+ uint16_t j;
+ uint16_t sp_reta_size;
+ uint32_t reta_reg;
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = conf->conf.key_len ?
+ (void *)(uintptr_t)conf->conf.key : NULL,
+ .rss_key_len = conf->conf.key_len,
+ .rss_hf = conf->conf.types,
+ };
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+
+ if (!add) {
+ if (ixgbe_action_rss_same(&filter_info->rss_info.conf,
+ &conf->conf)) {
+ ixgbe_rss_disable(dev);
+ memset(&filter_info->rss_info, 0,
+ sizeof(struct ixgbe_rte_flow_rss_conf));
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ if (filter_info->rss_info.conf.queue_num)
+ return -EINVAL;
+ /* Fill in redirection table
+ * The byte-swap is needed because NIC registers are in
+ * little-endian order.
+ */
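+ /*
+ * For example, queues {0, 1, 2, 3} accumulate as reta = 0x00010203;
+ * after rte_bswap32() queue 0 ends up in the lowest byte of the
+ * register, i.e. in RETA entry 0.
+ */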
+ reta = 0;
+ for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
+ reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+
+ if (j == conf->conf.queue_num)
+ j = 0;
+ reta = (reta << 8) | conf->conf.queue[j];
+ if ((i & 3) == 3)
+ IXGBE_WRITE_REG(hw, reta_reg,
+ rte_bswap32(reta));
+ }
+
+ /* Configure the RSS key and the RSS protocols used to compute
+ * the RSS hash of input packets.
+ */
+ if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
+ ixgbe_rss_disable(dev);
+ return -EINVAL;
+ }
+ if (rss_conf.rss_key == NULL)
+ rss_conf.rss_key = rss_intel_key; /* Default hash key */
+ ixgbe_hw_rss_hash_set(hw, &rss_conf);
+
+ if (ixgbe_rss_conf_init(&filter_info->rss_info, &conf->conf))
+ return -EINVAL;
+
+ return 0;
+}
+
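+/*
+ * Usage sketch for the three functions above, assuming the rte_flow
+ * layer translates an RTE_FLOW_ACTION_TYPE_RSS action roughly like
+ * this ("act" being a hypothetical parsed action entry):
+ *
+ *	const struct rte_flow_action_rss *rss = act->conf;
+ *	struct ixgbe_rte_flow_rss_conf conf;
+ *
+ *	if (ixgbe_rss_conf_init(&conf, rss) == 0)
+ *		ret = ixgbe_config_rss_filter(dev, &conf, true);
+ */
+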
/* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
int __attribute__((weak))
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)