X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fi40e%2Fi40e_rxtx.c;h=bc660596b0751116f09473673254550f00f6f89b;hb=4205da3a470974b879d05d337fff2e8bdfec1f70;hp=3a7b68e782c01024622d6bca7e8a34cde4737c19;hpb=9df0826a42e890006c77861c74b79907f33b8201;p=dpdk.git

diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 3a7b68e782..bc660596b0 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
 
 #include <stdio.h>
@@ -46,7 +17,7 @@
 #include <rte_malloc.h>
 #include <rte_mbuf.h>
 #include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_tcp.h>
 #include <rte_sctp.h>
 #include <rte_udp.h>
@@ -99,16 +70,12 @@
 #define I40E_TX_OFFLOAD_NOTSUP_MASK \
 		(PKT_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_MASK)
 
-static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
-				      struct rte_mbuf **tx_pkts,
-				      uint16_t nb_pkts);
-
 static inline void
 i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
 {
 	if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
 		(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
-		mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
 		mb->vlan_tci =
 			rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
 		PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
@@ -589,7 +556,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
 		mb->nb_segs = 1;
 		mb->port = rxq->port_id;
 		dma_addr = rte_cpu_to_le_64(\
-			rte_mbuf_data_dma_addr_default(mb));
+			rte_mbuf_data_iova_default(mb));
 		rxdp[i].read.hdr_addr = 0;
 		rxdp[i].read.pkt_addr = dma_addr;
 	}
@@ -752,7 +719,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rxm = rxe->mbuf;
 		rxe->mbuf = nmb;
 		dma_addr =
-			rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
 		rxdp->read.hdr_addr = 0;
 		rxdp->read.pkt_addr = dma_addr;
 
@@ -869,7 +836,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
 		rxm = rxe->mbuf;
 		rxe->mbuf = nmb;
 		dma_addr =
-			rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
 
 		/* Set data buffer address and data length of the mbuf */
 		rxdp->read.hdr_addr = 0;
@@ -1202,7 +1169,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 			/* Setup TX Descriptor */
 			slen = m_seg->data_len;
-			buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
+			buf_dma_addr = rte_mbuf_data_iova(m_seg);
 
 			PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
 				"buf_dma_addr: %#"PRIx64";\n"
@@ -1301,7 +1268,7 @@ tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
 	uint32_t i;
 
 	for (i = 0; i < 4; i++, txdp++, pkts++) {
-		dma_addr = rte_mbuf_data_dma_addr(*pkts);
+		dma_addr = rte_mbuf_data_iova(*pkts);
 		txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
 		txdp->cmd_type_offset_bsz =
 			i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
@@ -1315,7 +1282,7 @@ tx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
 {
 	uint64_t dma_addr;
 
-	dma_addr = rte_mbuf_data_dma_addr(*pkts);
+	dma_addr = rte_mbuf_data_iova(*pkts);
 	txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
 	txdp->cmd_type_offset_bsz =
 		i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
@@ -1473,13 +1440,10 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 		m = tx_pkts[i];
 		ol_flags = m->ol_flags;
 
-		/**
-		 * m->nb_segs is uint8_t, so nb_segs is always less than
-		 * I40E_TX_MAX_SEG.
-		 * We check only a condition for nb_segs > I40E_TX_MAX_MTU_SEG.
-		 */
+		/* Check that m->nb_segs does not exceed the limits. */
 		if (!(ol_flags & PKT_TX_TCP_SEG)) {
-			if (m->nb_segs > I40E_TX_MAX_MTU_SEG) {
+			if (m->nb_segs > I40E_TX_MAX_SEG ||
+			    m->nb_segs > I40E_TX_MAX_MTU_SEG) {
 				rte_errno = -EINVAL;
 				return i;
 			}
@@ -1721,11 +1685,27 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 #endif
 	    dev->rx_pkt_burst == i40e_recv_scattered_pkts ||
 	    dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
-	    dev->rx_pkt_burst == i40e_recv_pkts_vec)
+	    dev->rx_pkt_burst == i40e_recv_pkts_vec ||
+	    dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 ||
+	    dev->rx_pkt_burst == i40e_recv_pkts_vec_avx2)
 		return ptypes;
 	return NULL;
 }
 
+static int
+i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	struct rte_eth_dev_info dev_info;
+	uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
+	uint64_t supported; /* All per port offloads */
+
+	dev->dev_ops->dev_infos_get(dev, &dev_info);
+	supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
+	if ((requested & dev_info.rx_offload_capa) != requested)
+		return 0; /* requested range check */
+	return !((mandatory ^ requested) & supported);
+}
+
 int
 i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
@@ -1746,6 +1726,18 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t len, i;
 	uint16_t reg_idx, base, bsf, tc_mapping;
 	int q_offset, use_def_burst_func = 1;
+	struct rte_eth_dev_info dev_info;
+
+	if (!i40e_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+		dev->dev_ops->dev_infos_get(dev, &dev_info);
+		PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported offloads 0x%" PRIx64,
+			(void *)dev, rx_conf->offloads,
+			dev->data->dev_conf.rxmode.offloads,
+			dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
 
 	if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
 		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1794,8 +1786,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->queue_id = queue_idx;
 	rxq->reg_idx = reg_idx;
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
-							0 : ETHER_CRC_LEN);
+	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+		DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->vsi = vsi;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
@@ -1823,7 +1815,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
 	/* Zero all the descriptors in the ring. */
 	memset(rz->addr, 0, ring_size);
-	rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+	rxq->rx_ring_phys_addr = rz->iova;
 	rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
 
 	len = (uint16_t)(nb_desc + RTE_PMD_I40E_RX_MAX_BURST);
@@ -2006,6 +1998,20 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
 	return RTE_ETH_TX_DESC_FULL;
 }
 
+static int
+i40e_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	struct rte_eth_dev_info dev_info;
+	uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
+	uint64_t supported; /* All per port offloads */
+
+	dev->dev_ops->dev_infos_get(dev, &dev_info);
+	supported = dev_info.tx_offload_capa ^ dev_info.tx_queue_offload_capa;
+	if ((requested & dev_info.tx_offload_capa) != requested)
+		return 0; /* requested range check */
+	return !((mandatory ^ requested) & supported);
+}
+
 int
 i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
@@ -2023,6 +2029,18 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t tx_rs_thresh, tx_free_thresh;
 	uint16_t reg_idx, i, base, bsf, tc_mapping;
 	int q_offset;
+	struct rte_eth_dev_info dev_info;
+
+	if (!i40e_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+		dev->dev_ops->dev_infos_get(dev, &dev_info);
+		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported offloads 0x%" PRIx64,
+			(void *)dev, tx_conf->offloads,
+			dev->data->dev_conf.txmode.offloads,
+			dev_info.tx_offload_capa);
+		return -ENOTSUP;
+	}
 
 	if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
 		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -2161,7 +2177,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->vsi = vsi;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
-	txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+	txq->tx_ring_phys_addr = tz->iova;
 	txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
 
 	/* Allocate software ring */
@@ -2223,12 +2239,8 @@ i40e_memzone_reserve(const char *name, uint32_t len, int socket_id)
 	if (mz)
 		return mz;
 
-	if (rte_xen_dom0_supported())
-		mz = rte_memzone_reserve_bounded(name, len,
-				socket_id, 0, I40E_RING_BASE_ALIGN, RTE_PGSIZE_2M);
-	else
-		mz = rte_memzone_reserve_aligned(name, len,
-				socket_id, 0, I40E_RING_BASE_ALIGN);
+	mz = rte_memzone_reserve_aligned(name, len, socket_id,
+			RTE_MEMZONE_IOVA_CONTIG, I40E_RING_BASE_ALIGN);
 	return mz;
 }
 
@@ -2309,18 +2321,41 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
 void
 i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
 {
+	struct rte_eth_dev *dev;
 	uint16_t i;
 
 	if (!txq || !txq->sw_ring) {
 		PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
 		return;
 	}
 
-	for (i = 0; i < txq->nb_tx_desc; i++) {
-		if (txq->sw_ring[i].mbuf) {
+	dev = &rte_eth_devices[txq->port_id];
+
+	/**
+	 * vPMD tx will not set sw_ring's mbuf to NULL after free,
+	 * so we need to free the remaining mbufs more carefully.
+	 */
+	if (dev->tx_pkt_burst == i40e_xmit_pkts_vec_avx2 ||
+	    dev->tx_pkt_burst == i40e_xmit_pkts_vec) {
+		i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
+		if (txq->tx_tail < i) {
+			for (; i < txq->nb_tx_desc; i++) {
+				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+				txq->sw_ring[i].mbuf = NULL;
+			}
+			i = 0;
+		}
+		for (; i < txq->tx_tail; i++) {
 			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
 			txq->sw_ring[i].mbuf = NULL;
 		}
+	} else {
+		for (i = 0; i < txq->nb_tx_desc; i++) {
+			if (txq->sw_ring[i].mbuf) {
+				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+				txq->sw_ring[i].mbuf = NULL;
+			}
+		}
 	}
 }
 
@@ -2433,7 +2468,7 @@ i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
 		mbuf->port = rxq->port_id;
 
 		dma_addr =
-			rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
 
 		rxd = &rxq->rx_ring[i];
 		rxd->read.pkt_addr = dma_addr;
@@ -2484,7 +2519,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
 	len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
 	rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
 
-	if (data->dev_conf.rxmode.jumbo_frame == 1) {
+	if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
 			rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -2677,7 +2712,7 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
 	txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
 	txq->vsi = pf->fdir.fdir_vsi;
 
-	txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+	txq->tx_ring_phys_addr = tz->iova;
 	txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
 	/*
 	 * don't need to allocate software ring and reset for the fdir
@@ -2733,7 +2768,8 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
 	rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
 	rxq->vsi = pf->fdir.fdir_vsi;
 
-	rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+	rxq->rx_ring_phys_addr = rz->iova;
+	memset(rz->addr, 0, I40E_FDIR_NUM_RX_DESC * sizeof(union i40e_rx_desc));
 	rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
 
 	/*
@@ -2761,6 +2797,7 @@ i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
 	qinfo->conf.rx_drop_en = rxq->drop_en;
 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+	qinfo->conf.offloads = rxq->offloads;
 }
 
 void
@@ -2824,6 +2861,17 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
 				     dev->data->port_id);
 
 			dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec;
+#ifdef RTE_ARCH_X86
+			/*
+			 * since AVX frequency can be different to base
+			 * frequency, limit use of AVX2 version to later
+			 * platforms, not all those that could theoretically
+			 * run it.
+			 */
+			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+				dev->rx_pkt_burst =
+					i40e_recv_scattered_pkts_vec_avx2;
+#endif
 		} else {
 			PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
 				     "allocation callback (port=%d).",
@@ -2843,6 +2891,16 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
 				     dev->data->port_id);
 
 			dev->rx_pkt_burst = i40e_recv_pkts_vec;
+#ifdef RTE_ARCH_X86
+			/*
+			 * since AVX frequency can be different to base
+			 * frequency, limit use of AVX2 version to later
+			 * platforms, not all those that could theoretically
+			 * run it.
+			 */
+			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+				dev->rx_pkt_burst = i40e_recv_pkts_vec_avx2;
+#endif
 		} else if (ad->rx_bulk_alloc_allowed) {
 			PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
 				     "satisfied. Rx Burst Bulk Alloc function "
@@ -2863,7 +2921,9 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
 		rx_using_sse =
 			(dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
-			 dev->rx_pkt_burst == i40e_recv_pkts_vec);
+			 dev->rx_pkt_burst == i40e_recv_pkts_vec ||
+			 dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 ||
+			 dev->rx_pkt_burst == i40e_recv_pkts_vec_avx2);
 
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
@@ -2920,6 +2980,16 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
 		if (ad->tx_vec_allowed) {
 			PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
 			dev->tx_pkt_burst = i40e_xmit_pkts_vec;
+#ifdef RTE_ARCH_X86
+			/*
+			 * since AVX frequency can be different to base
+			 * frequency, limit use of AVX2 version to later
+			 * platforms, not all those that could theoretically
+			 * run it.
+			 */
+			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+				dev->tx_pkt_burst = i40e_xmit_pkts_vec_avx2;
+#endif
 		} else {
 			PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
 			dev->tx_pkt_burst = i40e_xmit_pkts_simple;
@@ -2943,6 +3013,64 @@ i40e_set_default_ptype_table(struct rte_eth_dev *dev)
 		ad->ptype_tbl[i] = i40e_get_default_pkt_type(i);
 }
 
+void __attribute__((cold))
+i40e_set_default_pctype_table(struct rte_eth_dev *dev)
+{
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int i;
+
+	for (i = 0; i < I40E_FLOW_TYPE_MAX; i++)
+		ad->pctypes_tbl[i] = 0ULL;
+	ad->flow_types_mask = 0ULL;
+	ad->pctypes_mask = 0ULL;
+
+	ad->pctypes_tbl[RTE_ETH_FLOW_FRAG_IPV4] =
+				(1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4);
+	ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
+				(1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
+	ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] =
+				(1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+	ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] =
+				(1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP);
+	ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] =
+				(1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+	ad->pctypes_tbl[RTE_ETH_FLOW_FRAG_IPV6] =
+				(1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6);
+	ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] =
+				(1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
+	ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] =
+				(1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+	ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] =
+				(1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP);
+	ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] =
+				(1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+	ad->pctypes_tbl[RTE_ETH_FLOW_L2_PAYLOAD] =
+				(1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD);
+
+	if (hw->mac.type == I40E_MAC_X722) {
+		ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |=
+			(1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP);
+		ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |=
+			(1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
+		ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] |=
+			(1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+		ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] |=
+			(1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP);
+		ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] |=
+			(1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
+		ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] |=
+			(1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
+	}
+
+	for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) {
+		if (ad->pctypes_tbl[i])
+			ad->flow_types_mask |= (1ULL << i);
+		ad->pctypes_mask |= ad->pctypes_tbl[i];
+	}
+}
+
 /* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
 int __attribute__((weak))
 i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
@@ -2968,6 +3096,22 @@ i40e_recv_scattered_pkts_vec(
 	return 0;
 }
 
+uint16_t __attribute__((weak))
+i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
+			struct rte_mbuf __rte_unused **rx_pkts,
+			uint16_t __rte_unused nb_pkts)
+{
+	return 0;
+}
+
+uint16_t __attribute__((weak))
+i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue,
+			struct rte_mbuf __rte_unused **rx_pkts,
+			uint16_t __rte_unused nb_pkts)
+{
+	return 0;
+}
+
 int __attribute__((weak))
 i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
 {
@@ -2993,3 +3137,11 @@ i40e_xmit_fixed_burst_vec(void __rte_unused * tx_queue,
 {
 	return 0;
 }
+
+uint16_t __attribute__((weak))
+i40e_xmit_pkts_vec_avx2(void __rte_unused * tx_queue,
+			struct rte_mbuf __rte_unused **tx_pkts,
+			uint16_t __rte_unused nb_pkts)
+{
+	return 0;
+}
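
The i40e_check_rx_queue_offloads()/i40e_check_tx_queue_offloads() helpers added in this diff rest on a small mask trick: XOR-ing the full capability set with the per-queue capability set leaves exactly the offloads that can only be configured at port level, and a queue request is then valid only if it agrees with the port configuration on those bits. Below is a minimal standalone sketch of that logic; the capability masks and values are made up for illustration and are not the driver's real DEV_RX_OFFLOAD_* flags.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical capability masks, for illustration only. */
#define CAPA_ALL        0x0fULL	/* everything the device supports */
#define CAPA_PER_QUEUE  0x03ULL	/* subset that may differ per queue */

static int
check_queue_offloads(uint64_t port_conf, uint64_t requested)
{
	/* Bits settable only at port level: full capa XOR queue capa. */
	uint64_t port_only = CAPA_ALL ^ CAPA_PER_QUEUE;

	/* Reject anything outside the advertised capabilities. */
	if ((requested & CAPA_ALL) != requested)
		return 0;
	/* Port-only bits must match the port configuration exactly. */
	return !((port_conf ^ requested) & port_only);
}

int main(void)
{
	/* 0x04 is a port-only bit here, so the queue must request it too. */
	printf("%d\n", check_queue_offloads(0x04, 0x04)); /* 1: matches port */
	printf("%d\n", check_queue_offloads(0x04, 0x01)); /* 0: drops 0x04 */
	printf("%d\n", check_queue_offloads(0x00, 0x10)); /* 0: unsupported */
	return 0;
}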
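
The two-loop walk added to i40e_tx_queue_release_mbufs() frees the vector PMD's still-outstanding mbufs, which live in the index range from the first not-yet-freed descriptor up to tx_tail; that range can wrap past the end of the ring, hence the reset to zero between the loops. The same wrap-around pattern on a toy ring, with invented names and sizes purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8

/* Clear every slot in [first, tail) on a ring of RING_SIZE entries,
 * wrapping past the end of the array when first > tail. */
static void
free_range_wrapped(int *slots, uint16_t first, uint16_t tail)
{
	uint16_t i = first;

	if (tail < i) {
		for (; i < RING_SIZE; i++)
			slots[i] = 0;	/* stands in for rte_pktmbuf_free_seg() */
		i = 0;
	}
	for (; i < tail; i++)
		slots[i] = 0;
}

int main(void)
{
	int slots[RING_SIZE] = {1, 1, 1, 1, 1, 1, 1, 1};
	int i;

	free_range_wrapped(slots, 6, 2);	/* frees slots 6, 7, 0, 1 */
	for (i = 0; i < RING_SIZE; i++)
		printf("%d", slots[i]);
	printf("\n");	/* prints 00111100 */
	return 0;
}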
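
The __attribute__((weak)) stubs at the end of the diff exist so the driver still links when the vector sources are compiled out (CONFIG_RTE_I40E_INC_VECTOR set to 'n'): if a strong definition of the same symbol is present in a vector object file, the linker picks it and the weak stub is discarded. A self-contained sketch of the mechanism, using a hypothetical fast_path() function rather than the real i40e symbols:

#include <stdio.h>

/* Weak fallback, linked unconditionally -- mirrors the i40e stubs
 * that simply return 0 ("no packets handled"). */
__attribute__((weak)) int
fast_path(void)
{
	return 0;
}

int main(void)
{
	/* Prints 0 unless another object file provides a strong
	 * definition, e.g.:  int fast_path(void) { return 1; }  */
	printf("fast_path() = %d\n", fast_path());
	return 0;
}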