-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
*/
#include <stdio.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_udp.h>
+#include <rte_ip.h>
+#include <rte_net.h>
#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
-#define I40E_MIN_RING_DESC 64
-#define I40E_MAX_RING_DESC 4096
-#define I40E_ALIGN 128
#define DEFAULT_TX_RS_THRESH 32
#define DEFAULT_TX_FREE_THRESH 32
-#define I40E_MAX_PKT_TYPE 256
#define I40E_TX_MAX_BURST 32
#define I40E_DMA_MEM_ALIGN 4096
+/* Base address of the HW descriptor ring should be 128B aligned. */
+#define I40E_RING_BASE_ALIGN 128
+
#define I40E_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
ETH_TXQ_FLAGS_NOOFFLOADS)
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+#ifdef RTE_LIBRTE_IEEE1588
+#define I40E_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#else
+#define I40E_TX_IEEE1588_TMST 0
+#endif
+
#define I40E_TX_CKSUM_OFFLOAD_MASK ( \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG | \
PKT_TX_OUTER_IP_CKSUM)
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
- (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
+#define I40E_TX_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_OUTER_IP_CKSUM | \
+ PKT_TX_TCP_SEG | \
+ PKT_TX_QINQ_PKT | \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_TUNNEL_MASK | \
+ I40E_TX_IEEE1588_TMST)
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- ((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
-
-static const struct rte_memzone *
-i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
- const char *ring_name,
- uint16_t queue_id,
- uint32_t ring_size,
- int socket_id);
-static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
- struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+#define I40E_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_MASK)
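+
+/* Any Tx offload flag outside I40E_TX_OFFLOAD_MASK cannot be honoured
+ * by the hardware; i40e_prep_pkts() rejects packets that request one.
+ */
+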
static inline void
i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
{
if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
- mb->ol_flags |= PKT_RX_VLAN_PKT;
+ mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
mb->vlan_tci =
rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &
(1 << I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) {
- mb->ol_flags |= PKT_RX_QINQ_PKT;
+ mb->ol_flags |= PKT_RX_QINQ_STRIPPED;
mb->vlan_tci_outer = mb->vlan_tci;
mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2);
PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
uint64_t error_bits = (qword >> I40E_RXD_QW1_ERROR_SHIFT);
#define I40E_RX_ERR_BITS 0x3f
- if (likely((error_bits & I40E_RX_ERR_BITS) == 0))
- return flags;
- /* If RXE bit set, all other status bits are meaningless */
- if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
- flags |= PKT_RX_MAC_ERR;
+ if (likely((error_bits & I40E_RX_ERR_BITS) == 0)) {
+ flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
return flags;
}
- /* If RECIPE bit set, all other status indications should be ignored */
- if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_RECIPE_SHIFT))) {
- flags |= PKT_RX_RECIP_ERR;
- return flags;
- }
- if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT)))
- flags |= PKT_RX_HBUF_OVERFLOW;
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT)))
flags |= PKT_RX_IP_CKSUM_BAD;
+ else
+ flags |= PKT_RX_IP_CKSUM_GOOD;
+
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)))
flags |= PKT_RX_L4_CKSUM_BAD;
+ else
+ flags |= PKT_RX_L4_CKSUM_GOOD;
+
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))
flags |= PKT_RX_EIP_CKSUM_BAD;
- if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_OVERSIZE_SHIFT)))
- flags |= PKT_RX_OVERSIZE;
return flags;
}
}
#endif
-/* For each value it means, datasheet of hardware can tell more details */
-static inline uint32_t
-i40e_rxd_pkt_type_mapping(uint8_t ptype)
-{
- static const uint32_t ptype_table[UINT8_MAX] __rte_cache_aligned = {
- /* L2 types */
- /* [0] reserved */
- [1] = RTE_PTYPE_L2_ETHER,
- [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
- /* [3] - [5] reserved */
- [6] = RTE_PTYPE_L2_ETHER_LLDP,
- /* [7] - [10] reserved */
- [11] = RTE_PTYPE_L2_ETHER_ARP,
- /* [12] - [21] reserved */
-
- /* Non tunneled IPv4 */
- [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_FRAG,
- [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_NONFRAG,
- [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_UDP,
- /* [25] reserved */
- [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_TCP,
- [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_SCTP,
- [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_ICMP,
-
- /* IPv4 --> IPv4 */
- [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [32] reserved */
- [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv4 --> IPv6 */
- [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [39] reserved */
- [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv4 --> GRE/Teredo/VXLAN */
- [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
- [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [47] reserved */
- [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
- [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [54] reserved */
- [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
- [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
- [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [62] reserved */
- [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
- [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [69] reserved */
- [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */
- [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
- [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [77] reserved */
- [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
- [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [84] reserved */
- [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* Non tunneled IPv6 */
- [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_FRAG,
- [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_NONFRAG,
- [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_UDP,
- /* [91] reserved */
- [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_TCP,
- [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_SCTP,
- [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_ICMP,
-
- /* IPv6 --> IPv4 */
- [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [98] reserved */
- [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv6 --> IPv6 */
- [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [105] reserved */
- [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv6 --> GRE/Teredo/VXLAN */
- [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
- [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [113] reserved */
- [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
- [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [120] reserved */
- [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
- [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
- [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [128] reserved */
- [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
- [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [135] reserved */
- [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */
- [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
- [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [143] reserved */
- [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
- [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [150] reserved */
- [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* All others reserved */
- };
-
- return ptype_table[ptype];
-}
-
#define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK 0x03
#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID 0x01
#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX 0x02
#endif
return flags;
}
+
+static inline void
+i40e_parse_tunneling_params(uint64_t ol_flags,
+ union i40e_tx_offload tx_offload,
+ uint32_t *cd_tunneling)
+{
+ /* EIPT: External (outer) IP header type */
+ if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
+ else if (ol_flags & PKT_TX_OUTER_IPV4)
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+ else if (ol_flags & PKT_TX_OUTER_IPV6)
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+
+ /* EIPLEN: External (outer) IP header length, in DWords */
+ *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+ /* L4TUNT: L4 Tunneling Type */
+ switch (ol_flags & PKT_TX_TUNNEL_MASK) {
+ case PKT_TX_TUNNEL_IPIP:
+ /* for non UDP / GRE tunneling, set to 00b */
+ break;
+ case PKT_TX_TUNNEL_VXLAN:
+ case PKT_TX_TUNNEL_GENEVE:
+ *cd_tunneling |= I40E_TXD_CTX_UDP_TUNNELING;
+ break;
+ case PKT_TX_TUNNEL_GRE:
+ *cd_tunneling |= I40E_TXD_CTX_GRE_TUNNELING;
+ break;
+ default:
+ PMD_TX_LOG(ERR, "Tunnel type not supported");
+ return;
+ }
+
+ /* L4TUNLEN: L4 Tunneling Length, in Words
+ *
+ * We depend on app to set rte_mbuf.l2_len correctly.
+ * For IP in GRE it should be set to the length of the GRE
+ * header;
+ * for MAC in GRE or MAC in UDP it should be set to the length
+ * of the GRE or UDP headers plus the inner MAC up to including
+ * its last Ethertype.
+ */
+ *cd_tunneling |= (tx_offload.l2_len >> 1) <<
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+}
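+
+/* Illustrative sketch, not part of this patch: for a VXLAN-encapsulated
+ * TCP packet the application is expected to fill the mbuf roughly as
+ * follows before these lengths are consumed (MAC-in-UDP rule above):
+ *
+ *   m->ol_flags |= PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
+ *                  PKT_TX_TUNNEL_VXLAN | PKT_TX_IPV4 |
+ *                  PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
+ *   m->outer_l2_len = sizeof(struct ether_hdr);
+ *   m->outer_l3_len = sizeof(struct ipv4_hdr);
+ *   m->l2_len = sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr) +
+ *               sizeof(struct ether_hdr);
+ *   m->l3_len = sizeof(struct ipv4_hdr);
+ *   m->l4_len = sizeof(struct tcp_hdr);
+ */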
+
static inline void
i40e_txd_enable_checksum(uint64_t ol_flags,
uint32_t *td_cmd,
uint32_t *td_offset,
- union i40e_tx_offload tx_offload,
- uint32_t *cd_tunneling)
+ union i40e_tx_offload tx_offload)
{
- /* UDP tunneling packet TX checksum offload */
- if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
-
+ /* Set MACLEN */
+ if (ol_flags & PKT_TX_TUNNEL_MASK)
*td_offset |= (tx_offload.outer_l2_len >> 1)
<< I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
-
- if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
- *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
- else if (ol_flags & PKT_TX_OUTER_IPV4)
- *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
- else if (ol_flags & PKT_TX_OUTER_IPV6)
- *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-
- /* Now set the ctx descriptor fields */
- *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
- I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
- (tx_offload.l2_len >> 1) <<
- I40E_TXD_CTX_QW0_NATLEN_SHIFT;
-
- } else
+ else
*td_offset |= (tx_offload.l2_len >> 1)
<< I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
}
}
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
- struct rte_mbuf *m;
-
- m = __rte_mbuf_raw_alloc(mp);
- __rte_mbuf_sanity_check_raw(m, 0);
-
- return m;
-}
-
/* Construct the tx flags */
static inline uint64_t
i40e_build_ctob(uint32_t td_cmd,
"rxq->rx_free_thresh=%d",
rxq->nb_rx_desc, rxq->rx_free_thresh);
ret = -EINVAL;
- } else if (!(rxq->nb_rx_desc < (I40E_MAX_RING_DESC -
- RTE_PMD_I40E_RX_MAX_BURST))) {
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
- "rxq->nb_rx_desc=%d, "
- "I40E_MAX_RING_DESC=%d, "
- "RTE_PMD_I40E_RX_MAX_BURST=%d",
- rxq->nb_rx_desc, I40E_MAX_RING_DESC,
- RTE_PMD_I40E_RX_MAX_BURST);
- ret = -EINVAL;
}
#else
ret = -EINVAL;
int32_t s[I40E_LOOK_AHEAD], nb_dd;
int32_t i, j, nb_rx = 0;
uint64_t pkt_flags;
+ uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
rxdp = &rxq->rx_ring[rxq->rx_tail];
rxep = &rxq->sw_ring[rxq->rx_tail];
I40E_RXD_QW1_STATUS_SHIFT;
}
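+ /* Read barrier: ensure the DD status words above are read before
+ * any other descriptor fields (matters on weakly ordered CPUs).
+ */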
+ rte_smp_rmb();
+
/* Compute how many status bits were set */
for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++)
nb_dd += s[j] & (1 << I40E_RX_DESC_STATUS_DD_SHIFT);
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
mb->packet_type =
- i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
- I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT));
+ ptype_tbl[(uint8_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT)];
if (pkt_flags & PKT_RX_RSS_HASH)
mb->hash.rss = rte_le_to_cpu_32(\
rxdp[j].wb.qword0.hi_dword.rss);
mb->nb_segs = 1;
mb->port = rxq->port_id;
dma_addr = rte_cpu_to_le_64(\
- RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
+ rte_mbuf_data_iova_default(mb));
rxdp[i].read.hdr_addr = 0;
rxdp[i].read.pkt_addr = dma_addr;
}
/* Update Rx tail register */
rte_wmb();
- I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
+ I40E_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
rxq->rx_free_trigger =
(uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
struct i40e_rx_queue *rxq = (struct i40e_rx_queue *)rx_queue;
+ struct rte_eth_dev *dev;
uint16_t nb_rx = 0;
if (!nb_pkts)
if (i40e_rx_alloc_bufs(rxq) != 0) {
uint16_t i, j;
- PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
- "port_id=%u, queue_id=%u",
- rxq->port_id, rxq->queue_id);
+ dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+ dev->data->rx_mbuf_alloc_failed +=
+ rxq->rx_free_thresh;
+
rxq->rx_nb_avail = 0;
rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
return nb_rx;
}
+#else
+static uint16_t
+i40e_recv_pkts_bulk_alloc(void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
uint16_t
union i40e_rx_desc rxd;
struct i40e_rx_entry *sw_ring;
struct i40e_rx_entry *rxe;
+ struct rte_eth_dev *dev;
struct rte_mbuf *rxm;
struct rte_mbuf *nmb;
uint16_t nb_rx;
uint16_t rx_id, nb_hold;
uint64_t dma_addr;
uint64_t pkt_flags;
+ uint32_t *ptype_tbl;
nb_rx = 0;
nb_hold = 0;
rx_id = rxq->rx_tail;
rx_ring = rxq->rx_ring;
sw_ring = rxq->sw_ring;
+ ptype_tbl = rxq->vsi->adapter->ptype_tbl;
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
- nmb = rte_rxmbuf_alloc(rxq->mp);
- if (unlikely(!nmb))
+ nmb = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!nmb)) {
+ dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+ dev->data->rx_mbuf_alloc_failed++;
break;
- rxd = *rxdp;
+ }
+ rxd = *rxdp;
nb_hold++;
rxe = &sw_ring[rx_id];
rx_id++;
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
rxm->packet_type =
- i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
- I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
+ ptype_tbl[(uint8_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)];
if (pkt_flags & PKT_RX_RSS_HASH)
rxm->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
struct rte_mbuf *nmb, *rxm;
uint16_t rx_id = rxq->rx_tail;
uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
+ struct rte_eth_dev *dev;
uint32_t rx_status;
uint64_t qword1;
uint64_t dma_addr;
uint64_t pkt_flags;
+ uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
- nmb = rte_rxmbuf_alloc(rxq->mp);
- if (unlikely(!nmb))
+ nmb = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!nmb)) {
+ dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+ dev->data->rx_mbuf_alloc_failed++;
break;
+ }
+
rxd = *rxdp;
nb_hold++;
rxe = &sw_ring[rx_id];
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
/* Set data buffer address and data length of the mbuf */
rxdp->read.hdr_addr = 0;
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
first_seg->packet_type =
- i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
- I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
+ ptype_tbl[(uint8_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)];
if (pkt_flags & PKT_RX_RSS_HASH)
- rxm->hash.rss =
+ first_seg->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
if (pkt_flags & PKT_RX_FDIR)
- pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm);
+ pkt_flags |= i40e_rxd_build_fdir(&rxd, first_seg);
#ifdef RTE_LIBRTE_IEEE1588
pkt_flags |= i40e_get_iee15888_flags(first_seg, qword1);
{
static uint64_t mask = PKT_TX_OUTER_IP_CKSUM |
PKT_TX_TCP_SEG |
- PKT_TX_QINQ_PKT;
+ PKT_TX_QINQ_PKT |
+ PKT_TX_TUNNEL_MASK;
#ifdef RTE_LIBRTE_IEEE1588
mask |= PKT_TX_IEEE1588_TMST;
#endif
- return ((flags & mask) ? 1 : 0);
+ return (flags & mask) ? 1 : 0;
}
/* set i40e TSO context descriptor */
}
/**
- * in case of tunneling packet, the outer_l2_len and
+ * in case of a non-tunneling packet, the outer_l2_len and
* outer_l3_len must be 0.
*/
hdr_len = tx_offload.outer_l2_len +
uint16_t nb_tx;
uint32_t td_cmd;
uint32_t td_offset;
- uint32_t tx_flags;
uint32_t td_tag;
uint64_t ol_flags;
uint16_t nb_used;
td_cmd = 0;
td_tag = 0;
td_offset = 0;
- tx_flags = 0;
tx_pkt = *tx_pkts++;
RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
/* Descriptor based VLAN insertion */
if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
- tx_flags |= tx_pkt->vlan_tci <<
- I40E_TX_FLAG_L2TAG1_SHIFT;
- tx_flags |= I40E_TX_FLAG_INSERT_VLAN;
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
- td_tag = (tx_flags & I40E_TX_FLAG_L2TAG1_MASK) >>
- I40E_TX_FLAG_L2TAG1_SHIFT;
+ td_tag = tx_pkt->vlan_tci;
}
/* Always enable CRC offload insertion */
td_cmd |= I40E_TX_DESC_CMD_ICRC;
- /* Enable checksum offloading */
+ /* Fill in tunneling parameters if necessary */
cd_tunneling_params = 0;
- if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK) {
- i40e_txd_enable_checksum(ol_flags, &td_cmd, &td_offset,
- tx_offload, &cd_tunneling_params);
- }
+ if (ol_flags & PKT_TX_TUNNEL_MASK)
+ i40e_parse_tunneling_params(ol_flags, tx_offload,
+ &cd_tunneling_params);
+ /* Enable checksum offloading */
+ if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK)
+ i40e_txd_enable_checksum(ol_flags, &td_cmd,
+ &td_offset, tx_offload);
if (nb_ctx) {
/* Setup TX context descriptor if required */
/* Setup TX Descriptor */
slen = m_seg->data_len;
- buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
"buf_dma_addr: %#"PRIx64";\n"
(unsigned) txq->port_id, (unsigned) txq->queue_id,
(unsigned) tx_id, (unsigned) nb_tx);
- I40E_PCI_REG_WRITE(txq->qtx_tail, tx_id);
+ I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
txq->tx_tail = tx_id;
return nb_tx;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
i40e_tx_free_bufs(struct i40e_tx_queue *txq)
{
struct i40e_tx_entry *txep;
for (i = 0; i < txq->tx_rs_thresh; i++)
rte_prefetch0((txep + i)->mbuf);
- if (!(txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT)) {
+ if (txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) {
for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
rte_mempool_put(txep->mbuf->pool, txep->mbuf);
txep->mbuf = NULL;
uint32_t i;
for (i = 0; i < 4; i++, txdp++, pkts++) {
- dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+ dma_addr = rte_mbuf_data_iova(*pkts);
txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
txdp->cmd_type_offset_bsz =
i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
{
uint64_t dma_addr;
- dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+ dma_addr = rte_mbuf_data_iova(*pkts);
txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
txdp->cmd_type_offset_bsz =
i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
/* Update the tx tail register */
rte_wmb();
- I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+ I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, txq->tx_tail);
return nb_pkts;
}
return nb_tx;
}
+static uint16_t
+i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+
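+ /* i40e_xmit_fixed_burst_vec() handles at most tx_rs_thresh packets
+ * per call, so larger bursts are submitted to it in chunks.
+ */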
+ while (nb_pkts) {
+ uint16_t ret, num;
+
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+ ret = i40e_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
+ num);
+ nb_tx += ret;
+ nb_pkts -= ret;
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
+
+/*********************************************************************
+ *
+ * TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /* Check that m->nb_segs does not exceed the limits. */
+ if (!(ol_flags & PKT_TX_TCP_SEG)) {
+ if (m->nb_segs > I40E_TX_MAX_SEG ||
+ m->nb_segs > I40E_TX_MAX_MTU_SEG) {
+ rte_errno = EINVAL;
+ return i;
+ }
+ } else if ((m->tso_segsz < I40E_MIN_TSO_MSS) ||
+ (m->tso_segsz > I40E_MAX_TSO_MSS)) {
+ /* An MSS outside the range (256B - 9674B) is considered
+ * malicious.
+ */
+ rte_errno = EINVAL;
+ return i;
+ }
+
+ if (ol_flags & I40E_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+#endif
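+
+ /* rte_net_intel_cksum_prepare() writes the pseudo-header checksums
+ * the hardware expects into the packet data, as required for L4
+ * checksum and TSO offload on Intel NICs.
+ */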
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+ }
+ return i;
+}
+
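+/* Usage sketch (application side, not part of this patch): this
+ * callback is reached through rte_eth_tx_prepare(), typically called
+ * right before the transmit burst; handle_unprepared() is a
+ * hypothetical helper:
+ *
+ *   nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
+ *   if (nb_prep < nb_pkts)
+ *       handle_unprepared(pkts[nb_prep], rte_errno);
+ *   nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
+ */
+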
/*
* Find the VSI the queue belongs to. 'queue_idx' is the queue index
* the application uses, which assumes they are sequential. But from the driver's
i40e_rx_queue_release_mbufs(rxq);
i40e_reset_rx_queue(rxq);
- }
+ } else
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
}
return err;
rxq = dev->data->rx_queues[rx_queue_id];
/*
- * rx_queue_id is queue id aplication refers to, while
+ * rx_queue_id is queue id application refers to, while
* rxq->reg_idx is the real queue index.
*/
err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
}
i40e_rx_queue_release_mbufs(rxq);
i40e_reset_rx_queue(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
}
return 0;
txq = dev->data->tx_queues[tx_queue_id];
/*
- * tx_queue_id is queue id aplication refers to, while
+ * tx_queue_id is queue id application refers to, while
* rxq->reg_idx is the real queue index.
*/
err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
if (err)
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
tx_queue_id);
+ else
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
}
return err;
txq = dev->data->tx_queues[tx_queue_id];
/*
- * tx_queue_id is queue id aplication refers to, while
+ * tx_queue_id is queue id application refers to, while
* txq->reg_idx is the real queue index.
*/
err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
i40e_tx_queue_release_mbufs(txq);
i40e_reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
}
return 0;
}
+const uint32_t *
+i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ /* refers to i40e_rxd_pkt_type_mapping() */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_TIMESYNC,
+ RTE_PTYPE_L2_ETHER_LLDP,
+ RTE_PTYPE_L2_ETHER_ARP,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_ICMP,
+ RTE_PTYPE_L4_NONFRAG,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_TUNNEL_GRENAT,
+ RTE_PTYPE_TUNNEL_IP,
+ RTE_PTYPE_INNER_L2_ETHER,
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L4_FRAG,
+ RTE_PTYPE_INNER_L4_ICMP,
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ RTE_PTYPE_INNER_L4_SCTP,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == i40e_recv_pkts ||
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc ||
+#endif
+ dev->rx_pkt_burst == i40e_recv_scattered_pkts ||
+ dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
+ dev->rx_pkt_burst == i40e_recv_pkts_vec ||
+ dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 ||
+ dev->rx_pkt_burst == i40e_recv_pkts_vec_avx2)
+ return ptypes;
+ return NULL;
+}
+
+static int
+i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ struct rte_eth_dev_info dev_info;
+ uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
+ uint64_t supported; /* All per port offloads */
+
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
+ if ((requested & dev_info.rx_offload_capa) != requested)
+ return 0; /* requested range check */
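+ /* rx_offload_capa is a superset of rx_queue_offload_capa, so the
+ * XOR above leaves only the pure per-port offloads; a request is
+ * valid when it agrees with the port configuration on every one of
+ * those bits.
+ */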
+ return !((mandatory ^ requested) & supported);
+}
+
int
i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
- struct i40e_vsi *vsi;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct i40e_vsi *vsi;
+ struct i40e_pf *pf = NULL;
+ struct i40e_vf *vf = NULL;
struct i40e_rx_queue *rxq;
const struct rte_memzone *rz;
uint32_t ring_size;
- uint16_t len;
- int use_def_burst_func = 1;
-
- if (hw->mac.type == I40E_MAC_VF) {
- struct i40e_vf *vf =
- I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint16_t len, i;
+ uint16_t reg_idx, base, bsf, tc_mapping;
+ int q_offset, use_def_burst_func = 1;
+ struct rte_eth_dev_info dev_info;
+
+ if (!i40e_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ (void *)dev, rx_conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ dev_info.rx_offload_capa);
+ return -ENOTSUP;
+ }
+
+ if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
+ vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
vsi = &vf->vsi;
- } else
+ if (!vsi)
+ return -EINVAL;
+ reg_idx = queue_idx;
+ } else {
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
-
- if (vsi == NULL) {
- PMD_DRV_LOG(ERR, "VSI not available or queue "
- "index exceeds the maximum");
- return I40E_ERR_PARAM;
+ if (!vsi)
+ return -EINVAL;
+ q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx);
+ if (q_offset < 0)
+ return -EINVAL;
+ reg_idx = vsi->base_queue + q_offset;
}
- if (((nb_desc * sizeof(union i40e_rx_desc)) % I40E_ALIGN) != 0 ||
- (nb_desc > I40E_MAX_RING_DESC) ||
- (nb_desc < I40E_MIN_RING_DESC)) {
+
+ if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
+ (nb_desc > I40E_MAX_RING_DESC) ||
+ (nb_desc < I40E_MIN_RING_DESC)) {
PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is "
"invalid", nb_desc);
- return I40E_ERR_PARAM;
+ return -EINVAL;
}
/* Free memory if needed */
if (!rxq) {
PMD_DRV_LOG(ERR, "Failed to allocate memory for "
"rx queue data structure");
- return (-ENOMEM);
+ return -ENOMEM;
}
rxq->mp = mp;
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
rxq->queue_id = queue_idx;
- if (hw->mac.type == I40E_MAC_VF)
- rxq->reg_idx = queue_idx;
- else /* PF device */
- rxq->reg_idx = vsi->base_queue +
- i40e_get_queue_offset_by_qindex(pf, queue_idx);
-
+ rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
rxq->vsi = vsi;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
/* Allocate the maximum number of RX ring hardware descriptors. */
- ring_size = sizeof(union i40e_rx_desc) * I40E_MAX_RING_DESC;
- ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
- rz = i40e_ring_dma_zone_reserve(dev,
- "rx_ring",
- queue_idx,
- ring_size,
- socket_id);
+ len = I40E_MAX_RING_DESC;
+
+ /**
+ * Allocate a little more memory because the vectorized/bulk_alloc Rx
+ * functions don't check boundaries on each access.
+ */
+ len += RTE_PMD_I40E_RX_MAX_BURST;
+
+ ring_size = RTE_ALIGN(len * sizeof(union i40e_rx_desc),
+ I40E_DMA_MEM_ALIGN);
+
+ rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+ ring_size, I40E_RING_BASE_ALIGN, socket_id);
if (!rz) {
i40e_dev_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
- return (-ENOMEM);
+ return -ENOMEM;
}
/* Zero all the descriptors in the ring. */
memset(rz->addr, 0, ring_size);
-#ifdef RTE_LIBRTE_XEN_DOM0
- rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#else
- rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
-#endif
-
+ rxq->rx_ring_phys_addr = rz->iova;
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
len = (uint16_t)(nb_desc + RTE_PMD_I40E_RX_MAX_BURST);
-#else
- len = nb_desc;
-#endif
/* Allocate the software ring. */
rxq->sw_ring =
if (!rxq->sw_ring) {
i40e_dev_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring");
- return (-ENOMEM);
+ return -ENOMEM;
}
i40e_reset_rx_queue(rxq);
use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
- if (!use_def_burst_func && !dev->data->scattered_rx) {
+ if (!use_def_burst_func) {
#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
"satisfied. Rx Burst Bulk Alloc function will be "
"used on port=%d, queue=%d.",
rxq->port_id, rxq->queue_id);
- dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
} else {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
"or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
"not enabled on port=%d, queue=%d.",
rxq->port_id, rxq->queue_id);
+ ad->rx_bulk_alloc_allowed = false;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (!(vsi->enabled_tc & (1 << i)))
+ continue;
+ tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
+ base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
+ bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
+
+ if (queue_idx >= base && queue_idx < (base + BIT(bsf)))
+ rxq->dcb_tc = i;
}
return 0;
struct i40e_rx_queue *rxq;
uint16_t desc = 0;
- if (unlikely(rx_queue_id >= dev->data->nb_rx_queues)) {
- PMD_DRV_LOG(ERR, "Invalid RX queue id %u", rx_queue_id);
- return 0;
- }
-
rxq = dev->data->rx_queues[rx_queue_id];
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
while ((desc < rxq->nb_rx_desc) &&
int ret;
if (unlikely(offset >= rxq->nb_rx_desc)) {
- PMD_DRV_LOG(ERR, "Invalid RX queue id %u", offset);
+ PMD_DRV_LOG(ERR, "Invalid RX descriptor id %u", offset);
return 0;
}
return ret;
}
+int
+i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct i40e_rx_queue *rxq = rx_queue;
+ volatile uint64_t *status;
+ uint64_t mask;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+ if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
+ mask = rte_le_to_cpu_64((1ULL << I40E_RX_DESC_STATUS_DD_SHIFT)
+ << I40E_RXD_QW1_STATUS_SHIFT);
+ if (*status & mask)
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct i40e_tx_queue *txq = tx_queue;
+ volatile uint64_t *status;
+ uint64_t mask, expect;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ /* go to next desc that has the RS bit */
+ desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
+ txq->tx_rs_thresh;
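+ /* The round-up can move desc past the ring end by up to
+ * tx_rs_thresh, and tx_tail may already point past it, hence the
+ * possible double wrap below.
+ */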
+ if (desc >= txq->nb_tx_desc) {
+ desc -= txq->nb_tx_desc;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+ }
+
+ status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+ mask = rte_le_to_cpu_64(I40E_TXD_QW1_DTYPE_MASK);
+ expect = rte_cpu_to_le_64(
+ I40E_TX_DESC_DTYPE_DESC_DONE << I40E_TXD_QW1_DTYPE_SHIFT);
+ if ((*status & mask) == expect)
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
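+/* Usage sketch (application side, not part of this patch): both
+ * callbacks are reached through the generic ethdev helpers, e.g.:
+ *
+ *   int st = rte_eth_tx_descriptor_status(port_id, queue_id, offset);
+ *   if (st == RTE_ETH_TX_DESC_FULL)
+ *       ...back off, the hardware still owns that slot...
+ */
+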
+static int
+i40e_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ struct rte_eth_dev_info dev_info;
+ uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
+ uint64_t supported; /* All per port offloads */
+
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ supported = dev_info.tx_offload_capa ^ dev_info.tx_queue_offload_capa;
+ if ((requested & dev_info.tx_offload_capa) != requested)
+ return 0; /* requested range check */
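+ /* Same reasoning as in i40e_check_rx_queue_offloads() above. */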
+ return !((mandatory ^ requested) & supported);
+}
+
int
i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
- struct i40e_vsi *vsi;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *vsi;
+ struct i40e_pf *pf = NULL;
+ struct i40e_vf *vf = NULL;
struct i40e_tx_queue *txq;
const struct rte_memzone *tz;
uint32_t ring_size;
uint16_t tx_rs_thresh, tx_free_thresh;
-
- if (hw->mac.type == I40E_MAC_VF) {
- struct i40e_vf *vf =
- I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint16_t reg_idx, i, base, bsf, tc_mapping;
+ int q_offset;
+ struct rte_eth_dev_info dev_info;
+
+ if (!i40e_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ (void *)dev, tx_conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ dev_info.tx_offload_capa);
+ return -ENOTSUP;
+ }
+
+ if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
+ vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
vsi = &vf->vsi;
- } else
+ if (!vsi)
+ return -EINVAL;
+ reg_idx = queue_idx;
+ } else {
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
-
- if (vsi == NULL) {
- PMD_DRV_LOG(ERR, "VSI is NULL, or queue index (%u) "
- "exceeds the maximum", queue_idx);
- return I40E_ERR_PARAM;
+ if (!vsi)
+ return -EINVAL;
+ q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx);
+ if (q_offset < 0)
+ return -EINVAL;
+ reg_idx = vsi->base_queue + q_offset;
}
- if (((nb_desc * sizeof(struct i40e_tx_desc)) % I40E_ALIGN) != 0 ||
- (nb_desc > I40E_MAX_RING_DESC) ||
- (nb_desc < I40E_MIN_RING_DESC)) {
+ if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
+ (nb_desc > I40E_MAX_RING_DESC) ||
+ (nb_desc < I40E_MIN_RING_DESC)) {
PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is "
"invalid", nb_desc);
- return I40E_ERR_PARAM;
+ return -EINVAL;
}
/**
return I40E_ERR_PARAM;
}
if (tx_free_thresh >= (nb_desc - 3)) {
- PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
- "tx_free_thresh must be less than the "
+ PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
"number of TX descriptors minus 3. "
"(tx_free_thresh=%u port=%d queue=%d)",
(unsigned int)tx_free_thresh,
if (!txq) {
PMD_DRV_LOG(ERR, "Failed to allocate memory for "
"tx queue structure");
- return (-ENOMEM);
+ return -ENOMEM;
}
/* Allocate TX hardware ring descriptors. */
ring_size = sizeof(struct i40e_tx_desc) * I40E_MAX_RING_DESC;
ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
- tz = i40e_ring_dma_zone_reserve(dev,
- "tx_ring",
- queue_idx,
- ring_size,
- socket_id);
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ ring_size, I40E_RING_BASE_ALIGN, socket_id);
if (!tz) {
i40e_dev_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
- return (-ENOMEM);
+ return -ENOMEM;
}
txq->nb_tx_desc = nb_desc;
txq->hthresh = tx_conf->tx_thresh.hthresh;
txq->wthresh = tx_conf->tx_thresh.wthresh;
txq->queue_id = queue_idx;
- if (hw->mac.type == I40E_MAC_VF)
- txq->reg_idx = queue_idx;
- else /* PF device */
- txq->reg_idx = vsi->base_queue +
- i40e_get_queue_offset_by_qindex(pf, queue_idx);
-
+ txq->reg_idx = reg_idx;
txq->port_id = dev->data->port_id;
txq->txq_flags = tx_conf->txq_flags;
txq->vsi = vsi;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
-#ifdef RTE_LIBRTE_XEN_DOM0
- txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#else
- txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
-#endif
+ txq->tx_ring_phys_addr = tz->iova;
txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
/* Allocate software ring */
if (!txq->sw_ring) {
i40e_dev_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
- return (-ENOMEM);
+ return -ENOMEM;
}
i40e_reset_tx_queue(txq);
dev->data->tx_queues[queue_idx] = txq;
/* Use a simple TX queue without offloads or multi segs if possible */
- if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS) &&
- (txq->tx_rs_thresh >= I40E_TX_MAX_BURST)) {
- PMD_INIT_LOG(INFO, "Using simple tx path");
- dev->tx_pkt_burst = i40e_xmit_pkts_simple;
- } else {
- PMD_INIT_LOG(INFO, "Using full-featured tx path");
- dev->tx_pkt_burst = i40e_xmit_pkts;
+ i40e_set_tx_function_flag(dev, txq);
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (!(vsi->enabled_tc & (1 << i)))
+ continue;
+ tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
+ base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
+ bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
+
+ if (queue_idx >= base && queue_idx < (base + BIT(bsf)))
+ txq->dcb_tc = i;
}
return 0;
rte_free(q);
}
-static const struct rte_memzone *
-i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
- const char *ring_name,
- uint16_t queue_id,
- uint32_t ring_size,
- int socket_id)
-{
- char z_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
-
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.name, ring_name,
- dev->data->port_id, queue_id);
- mz = rte_memzone_lookup(z_name);
- if (mz)
- return mz;
-
-#ifdef RTE_LIBRTE_XEN_DOM0
- return rte_memzone_reserve_bounded(z_name, ring_size,
- socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
-#else
- return rte_memzone_reserve_aligned(z_name, ring_size,
- socket_id, 0, I40E_ALIGN);
-#endif
-}
-
const struct rte_memzone *
i40e_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
- const struct rte_memzone *mz = NULL;
+ const struct rte_memzone *mz;
mz = rte_memzone_lookup(name);
if (mz)
return mz;
-#ifdef RTE_LIBRTE_XEN_DOM0
- mz = rte_memzone_reserve_bounded(name, len,
- socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
-#else
- mz = rte_memzone_reserve_aligned(name, len,
- socket_id, 0, I40E_ALIGN);
-#endif
+
+ mz = rte_memzone_reserve_aligned(name, len, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, I40E_RING_BASE_ALIGN);
return mz;
}
{
uint16_t i;
- if (!rxq || !rxq->sw_ring) {
- PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
+ /* SSE Vector driver has a different way of releasing mbufs. */
+ if (rxq->rx_using_sse) {
+ i40e_rx_queue_release_mbufs_vec(rxq);
+ return;
+ }
+
+ if (!rxq->sw_ring) {
+ PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
return;
}
for (i = 0; i < len * sizeof(union i40e_rx_desc); i++)
((volatile char *)rxq->rx_ring)[i] = 0;
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
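+	/* Point the sw_ring entries past the real ring end at a dummy mbuf
+	 * so the bulk RX burst code can over-read without hitting NULL.
+	 */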
memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
for (i = 0; i < RTE_PMD_I40E_RX_MAX_BURST; ++i)
rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
rxq->rx_nb_avail = 0;
rxq->rx_next_avail = 0;
rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
void
i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
{
+ struct rte_eth_dev *dev;
uint16_t i;
+
 	if (!txq || !txq->sw_ring) {
-		PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
+		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
 		return;
 	}
+
+	dev = &rte_eth_devices[txq->port_id];
- for (i = 0; i < txq->nb_tx_desc; i++) {
- if (txq->sw_ring[i].mbuf) {
+	/**
+	 * vPMD tx does not set the sw_ring entry's mbuf to NULL after
+	 * freeing it, so the remaining mbufs must be released more
+	 * carefully.
+	 */
+ if (dev->tx_pkt_burst == i40e_xmit_pkts_vec_avx2 ||
+ dev->tx_pkt_burst == i40e_xmit_pkts_vec) {
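+		/* The still-attached mbufs sit in the window
+		 * [tx_next_dd - tx_rs_thresh + 1, tx_tail); walk that
+		 * window, wrapping at the end of the ring.
+		 */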
+ i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
+ if (txq->tx_tail < i) {
+ for (; i < txq->nb_tx_desc; i++) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ i = 0;
+ }
+ for (; i < txq->tx_tail; i++) {
rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
txq->sw_ring[i].mbuf = NULL;
}
+ } else {
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ }
}
}
#ifdef RTE_LIBRTE_IEEE1588
tx_ctx.timesync_ena = 1;
#endif
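+	/* Use the qset handle of the TC this queue was mapped to. */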
- tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[0]);
+ tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[txq->dcb_tc]);
if (vsi->type == I40E_VSI_FDIR)
tx_ctx.fd_ena = TRUE;
for (i = 0; i < rxq->nb_rx_desc; i++) {
volatile union i40e_rx_desc *rxd;
- struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mp);
+ struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(!mbuf)) {
PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
mbuf->port = rxq->port_id;
dma_addr =
- rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
rxd = &rxq->rx_ring[i];
rxd->read.pkt_addr = dma_addr;
case I40E_FLAG_HEADER_SPLIT_DISABLED:
default:
rxq->rx_hdr_len = 0;
- rxq->rx_buf_len = RTE_ALIGN(buf_size,
+ rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size,
(1 << I40E_RXQ_CTX_DBUFF_SHIFT));
rxq->hs_mode = i40e_header_split_none;
break;
len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
- if (data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
int err = I40E_SUCCESS;
struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(rxq->vsi);
- struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
uint16_t pf_q = rxq->reg_idx;
uint16_t buf_size;
struct i40e_hmc_obj_rxq rx_ctx;
rx_ctx.lrxqthresh = 2;
rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
rx_ctx.l2tsel = 1;
- rx_ctx.showiv = 1;
+	/* showiv indicates whether the inner VLAN of a tunnel packet is
+	 * stripped. When it is set to 1, the VLAN information is stripped
+	 * from the inner header, but the hardware does not report it in
+	 * the descriptor. So keep it at zero by default.
+	 */
+ rx_ctx.showiv = 0;
rx_ctx.prefena = 1;
err = i40e_clear_lan_rx_queue_context(hw, pf_q);
/* Check if scattered RX needs to be used. */
if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
dev_data->scattered_rx = 1;
- dev->rx_pkt_burst = i40e_recv_scattered_pkts;
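+		/* The scattered Rx callback is now chosen centrally in
+		 * i40e_set_rx_function().
+		 */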
}
-	/* Init the RX tail regieter. */
+	/* Init the RX tail register. */
PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (!dev->data->tx_queues[i])
+ continue;
i40e_tx_queue_release_mbufs(dev->data->tx_queues[i]);
i40e_reset_tx_queue(dev->data->tx_queues[i]);
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!dev->data->rx_queues[i])
+ continue;
i40e_rx_queue_release_mbufs(dev->data->rx_queues[i]);
i40e_reset_rx_queue(dev->data->rx_queues[i]);
}
PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!dev->data->rx_queues[i])
+ continue;
i40e_dev_rx_queue_release(dev->data->rx_queues[i]);
dev->data->rx_queues[i] = NULL;
}
dev->data->nb_rx_queues = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (!dev->data->tx_queues[i])
+ continue;
i40e_dev_tx_queue_release(dev->data->tx_queues[i]);
dev->data->tx_queues[i] = NULL;
}
struct i40e_tx_queue *txq;
const struct rte_memzone *tz = NULL;
uint32_t ring_size;
- struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct rte_eth_dev *dev;
if (!pf) {
PMD_DRV_LOG(ERR, "PF is not available");
return I40E_ERR_BAD_PTR;
}
+ dev = pf->adapter->eth_dev;
+
/* Allocate the TX queue data structure. */
txq = rte_zmalloc_socket("i40e fdir tx queue",
sizeof(struct i40e_tx_queue),
ring_size = sizeof(struct i40e_tx_desc) * I40E_FDIR_NUM_TX_DESC;
ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
- tz = i40e_ring_dma_zone_reserve(dev,
- "fdir_tx_ring",
- I40E_FDIR_QUEUE_ID,
- ring_size,
- SOCKET_ID_ANY);
+ tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
+ I40E_FDIR_QUEUE_ID, ring_size,
+ I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
if (!tz) {
i40e_dev_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
txq->vsi = pf->fdir.fdir_vsi;
-#ifdef RTE_LIBRTE_XEN_DOM0
- txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#else
- txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
-#endif
+ txq->tx_ring_phys_addr = tz->iova;
txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
/*
* don't need to allocate software ring and reset for the fdir
struct i40e_rx_queue *rxq;
const struct rte_memzone *rz = NULL;
uint32_t ring_size;
- struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct rte_eth_dev *dev;
if (!pf) {
PMD_DRV_LOG(ERR, "PF is not available");
return I40E_ERR_BAD_PTR;
}
+ dev = pf->adapter->eth_dev;
+
/* Allocate the RX queue data structure. */
rxq = rte_zmalloc_socket("i40e fdir rx queue",
sizeof(struct i40e_rx_queue),
ring_size = sizeof(union i40e_rx_desc) * I40E_FDIR_NUM_RX_DESC;
ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
- rz = i40e_ring_dma_zone_reserve(dev,
- "fdir_rx_ring",
- I40E_FDIR_QUEUE_ID,
- ring_size,
- SOCKET_ID_ANY);
+ rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
+ I40E_FDIR_QUEUE_ID, ring_size,
+ I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
if (!rz) {
i40e_dev_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
rxq->vsi = pf->fdir.fdir_vsi;
-#ifdef RTE_LIBRTE_XEN_DOM0
- rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#else
- rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
-#endif
+ rxq->rx_ring_phys_addr = rz->iova;
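+	/* Zero the ring so stale descriptor state (e.g. DD bits) is not
+	 * mistaken for completed descriptors before the first fill.
+	 */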
+ memset(rz->addr, 0, I40E_FDIR_NUM_RX_DESC * sizeof(union i40e_rx_desc));
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
/*
return I40E_SUCCESS;
}
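+
+/* Callback behind rte_eth_rx_queue_info_get() for this PMD. */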
+void
+i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct i40e_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mp;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = rxq->drop_en;
+ qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
+}
+
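+/* Callback behind rte_eth_tx_queue_info_get() for this PMD. */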
+void
+i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct i40e_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+ qinfo->conf.txq_flags = txq->txq_flags;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
+void __attribute__((cold))
+i40e_set_rx_function(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ uint16_t rx_using_sse, i;
+	/* Vector Rx can only be used when a few device configuration
+	 * conditions are met and Rx Bulk Allocation is allowed.
+ */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (i40e_rx_vec_dev_conf_condition_check(dev) ||
+ !ad->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet"
+ " Vector Rx preconditions",
+ dev->data->port_id);
+
+ ad->rx_vec_allowed = false;
+ }
+ if (ad->rx_vec_allowed) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct i40e_rx_queue *rxq =
+ dev->data->rx_queues[i];
+
+ if (rxq && i40e_rxq_vec_setup(rxq)) {
+ ad->rx_vec_allowed = false;
+ break;
+ }
+ }
+ }
+ }
+
+ if (dev->data->scattered_rx) {
+ /* Set the non-LRO scattered callback: there are Vector and
+ * single allocation versions.
+ */
+ if (ad->rx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
+ "callback (port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec;
+#ifdef RTE_ARCH_X86
+ /*
+			 * Since the AVX frequency can differ from the base
+			 * frequency, limit use of the AVX2 version to later
+			 * platforms, not all those that could theoretically
+ * run it.
+ */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ dev->rx_pkt_burst =
+ i40e_recv_scattered_pkts_vec_avx2;
+#endif
+ } else {
+			PMD_INIT_LOG(DEBUG, "Using Scattered Rx with bulk "
+				"allocation callback (port=%d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+ }
+ /* If parameters allow we are going to choose between the following
+ * callbacks:
+ * - Vector
+ * - Bulk Allocation
+ * - Single buffer allocation (the simplest one)
+ */
+ } else if (ad->rx_vec_allowed) {
+		PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
+			    "burst size is no less than %d (port=%d).",
+ RTE_I40E_DESCS_PER_LOOP,
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_pkts_vec;
+#ifdef RTE_ARCH_X86
+ /*
+	 * Since the AVX frequency can differ from the base
+	 * frequency, limit use of the AVX2 version to later
+	 * platforms, not all those that could theoretically
+ * run it.
+ */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ dev->rx_pkt_burst = i40e_recv_pkts_vec_avx2;
+#endif
+ } else if (ad->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function "
+ "will be used on port=%d.",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
+ "satisfied, or Scattered Rx is requested "
+ "(port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_pkts;
+ }
+
+ /* Propagate information about RX function choice through all queues. */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ rx_using_sse =
+ (dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
+ dev->rx_pkt_burst == i40e_recv_pkts_vec ||
+ dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 ||
+ dev->rx_pkt_burst == i40e_recv_pkts_vec_avx2);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (rxq)
+ rxq->rx_using_sse = rx_using_sse;
+ }
+ }
+}
+
+void __attribute__((cold))
+i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ /* Use a simple Tx queue (no offloads, no multi segs) if possible */
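+	/* Vector Tx additionally requires tx_rs_thresh to be no larger
+	 * than RTE_I40E_TX_MAX_FREE_BUF_SZ, the vector path's free window.
+	 */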
+ if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS)
+ && (txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST)) {
+ if (txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ) {
+			PMD_INIT_LOG(DEBUG,
+				"Vector tx can be enabled on this txq.");
+ } else {
+ ad->tx_vec_allowed = false;
+ }
+ } else {
+ ad->tx_simple_allowed = false;
+ }
+}
+
+void __attribute__((cold))
+i40e_set_tx_function(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ int i;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (ad->tx_vec_allowed) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct i40e_tx_queue *txq =
+ dev->data->tx_queues[i];
+
+ if (txq && i40e_txq_vec_setup(txq)) {
+ ad->tx_vec_allowed = false;
+ break;
+ }
+ }
+ }
+ }
+
+ if (ad->tx_simple_allowed) {
+ if (ad->tx_vec_allowed) {
+			PMD_INIT_LOG(DEBUG, "Vector tx will be used.");
+ dev->tx_pkt_burst = i40e_xmit_pkts_vec;
+#ifdef RTE_ARCH_X86
+ /*
+	 * Since the AVX frequency can differ from the base
+	 * frequency, limit use of the AVX2 version to later
+	 * platforms, not all those that could theoretically
+ * run it.
+ */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ dev->tx_pkt_burst = i40e_xmit_pkts_vec_avx2;
+#endif
+ } else {
+			PMD_INIT_LOG(DEBUG, "Simple tx will be used.");
+ dev->tx_pkt_burst = i40e_xmit_pkts_simple;
+ }
+ dev->tx_pkt_prepare = NULL;
+ } else {
+		PMD_INIT_LOG(DEBUG, "Full-featured tx path will be used.");
+ dev->tx_pkt_burst = i40e_xmit_pkts;
+ dev->tx_pkt_prepare = i40e_prep_pkts;
+ }
+}
+
+void __attribute__((cold))
+i40e_set_default_ptype_table(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
+ ad->ptype_tbl[i] = i40e_get_default_pkt_type(i);
+}
+
+void __attribute__((cold))
+i40e_set_default_pctype_table(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < I40E_FLOW_TYPE_MAX; i++)
+ ad->pctypes_tbl[i] = 0ULL;
+ ad->flow_types_mask = 0ULL;
+ ad->pctypes_mask = 0ULL;
+
+ ad->pctypes_tbl[RTE_ETH_FLOW_FRAG_IPV4] =
+ (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ ad->pctypes_tbl[RTE_ETH_FLOW_FRAG_IPV6] =
+ (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ ad->pctypes_tbl[RTE_ETH_FLOW_L2_PAYLOAD] =
+ (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD);
+
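+	/* X722 defines additional packet classifier types, e.g. separate
+	 * unicast/multicast UDP and TCP SYN-no-ACK pctypes; fold them into
+	 * the matching flow types.
+	 */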
+ if (hw->mac.type == I40E_MAC_X722) {
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
+ }
+
+ for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) {
+ if (ad->pctypes_tbl[i])
+ ad->flow_types_mask |= (1ULL << i);
+ ad->pctypes_mask |= ad->pctypes_tbl[i];
+ }
+}
+
/* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
+int __attribute__((weak))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
+{
+ return -1;
+}
+
uint16_t __attribute__((weak))
i40e_recv_pkts_vec(
void __rte_unused *rx_queue,
return 0;
}
+uint16_t __attribute__((weak))
+i40e_recv_scattered_pkts_vec(
+ void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
int __attribute__((weak))
i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
{
return -1;
}
+int __attribute__((weak))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+{
+ return -1;
+}
+
void __attribute__((weak))
-i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused *rxq)
{
}
uint16_t __attribute__((weak))
-i40e_xmit_pkts_vec(void __rte_unused *tx_queue,
- struct rte_mbuf __rte_unused **tx_pkts,
- uint16_t __rte_unused nb_pkts)
+i40e_xmit_fixed_burst_vec(void __rte_unused *tx_queue,
+ struct rte_mbuf __rte_unused **tx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+i40e_xmit_pkts_vec_avx2(void __rte_unused *tx_queue,
+ struct rte_mbuf __rte_unused **tx_pkts,
+ uint16_t __rte_unused nb_pkts)
{
return 0;
}