-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#include <stdint.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include "base/i40e_prototype.h"
/* Initialize the mbufs in vector, process 2 mbufs in one loop */
for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
__m128i vaddr0, vaddr1;
- uintptr_t p0, p1;
mb0 = rxep[0].mbuf;
mb1 = rxep[1].mbuf;
- /* Flush mbuf with pkt template.
- * Data to be rearmed is 6 bytes long.
- */
- p0 = (uintptr_t)&mb0->rearm_data;
- *(uint64_t *)p0 = rxq->mbuf_initializer;
- p1 = (uintptr_t)&mb1->rearm_data;
- *(uint64_t *)p1 = rxq->mbuf_initializer;
-
- /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
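/* A minimal scalar sketch of what the paired load above relies on
 * (illustrative, not the driver's code path): buf_iova directly follows
 * buf_addr, so one unaligned 16-byte load fills both vector lanes:
 *
 *   lane 0 of vaddr0 = (uint64_t)mb0->buf_addr;
 *   lane 1 of vaddr0 = mb0->buf_iova;
 *
 * The RTE_BUILD_BUG_ON above makes this layout assumption explicit.
 */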
I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}
-/* Handling the offload flags (olflags) field takes computation
- * time when receiving packets. Therefore we provide a flag to disable
- * the processing of the olflags field when they are not needed. This
- * gives improved performance, at the cost of losing the offload info
- * in the received packet
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+/* SSE version of FDIR mark extraction for 4 32B descriptors at a time */
+static inline __m128i
+descs_to_fdir_32b(volatile union i40e_rx_desc *rxdp, struct rte_mbuf **rx_pkt)
+{
+ /* 32B descriptors: Load 2nd half of descriptors for FDIR ID data */
+ __m128i desc0_qw23, desc1_qw23, desc2_qw23, desc3_qw23;
+ desc0_qw23 = _mm_loadu_si128((__m128i *)&(rxdp + 0)->wb.qword2);
+ desc1_qw23 = _mm_loadu_si128((__m128i *)&(rxdp + 1)->wb.qword2);
+ desc2_qw23 = _mm_loadu_si128((__m128i *)&(rxdp + 2)->wb.qword2);
+ desc3_qw23 = _mm_loadu_si128((__m128i *)&(rxdp + 3)->wb.qword2);
+
+ /* FDIR ID data: move last u32 of each desc to 4 u32 lanes */
+ __m128i v_unpack_01, v_unpack_23;
+ v_unpack_01 = _mm_unpackhi_epi32(desc0_qw23, desc1_qw23);
+ v_unpack_23 = _mm_unpackhi_epi32(desc2_qw23, desc3_qw23);
+ __m128i v_fdir_ids = _mm_unpackhi_epi64(v_unpack_01, v_unpack_23);
+
+ /* Extended Status: extract from each lower 32 bits, to u32 lanes */
+ v_unpack_01 = _mm_unpacklo_epi32(desc0_qw23, desc1_qw23);
+ v_unpack_23 = _mm_unpacklo_epi32(desc2_qw23, desc3_qw23);
+ __m128i v_flt_status = _mm_unpacklo_epi64(v_unpack_01, v_unpack_23);
+
+ /* Shift u32 left and right to "mask away" bits not required.
+ * Data required is 4:5 (zero based), so left shift by 26 (32-6)
+ * and then right shift by 30 (32 - 2 bits required).
+ */
+ v_flt_status = _mm_slli_epi32(v_flt_status, 26);
+ v_flt_status = _mm_srli_epi32(v_flt_status, 30);
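+
+ /* Worked scalar equivalent of the two shifts (a sketch for clarity):
+ *
+ *   flexbh = (status << 26) >> 30;   == (status >> 4) & 0x3
+ *
+ * e.g. status = 0x10 (bit 4 set) gives flexbh = 1, the FLEXBH_FD_ID
+ * encoding tested below.
+ */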
+
+ /* Generate constant 1 in all u32 lanes and compare */
+ RTE_BUILD_BUG_ON(I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID != 1);
+ __m128i v_zeros = _mm_setzero_si128();
+ __m128i v_ffff = _mm_cmpeq_epi32(v_zeros, v_zeros);
+ __m128i v_u32_one = _mm_srli_epi32(v_ffff, 31);
+
+ /* per desc mask, bits set if FDIR ID is valid */
+ __m128i v_fd_id_mask = _mm_cmpeq_epi32(v_flt_status, v_u32_one);
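+
+ /* The cmpeq(zero, zero) trick above materializes all-ones without a
+ * memory load, and srli by 31 leaves exactly 1 in each u32 lane; the
+ * compare then yields 0xFFFFFFFF in lanes whose FLEXBH field equals
+ * FLEXBH_FD_ID (== 1), and 0 elsewhere.
+ */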
+
+ /* Mask ID data to zero if the FD_ID bit is not set in the desc */
+ v_fdir_ids = _mm_and_si128(v_fdir_ids, v_fd_id_mask);
+
+ /* Extract and store as u32. No advantage to combining into SSE
+ * stores, as there are no surrounding stores around fdir.hi
+ */
+ rx_pkt[0]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 0);
+ rx_pkt[1]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 1);
+ rx_pkt[2]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 2);
+ rx_pkt[3]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 3);
+
+ /* Convert fdir_id_mask into a single bit, then shift as required for
+ * the correct location in mbuf->ol_flags
+ */
+ const uint32_t FDIR_ID_BIT_SHIFT = 13;
+ RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << FDIR_ID_BIT_SHIFT));
+ v_fd_id_mask = _mm_srli_epi32(v_fd_id_mask, 31);
+ v_fd_id_mask = _mm_slli_epi32(v_fd_id_mask, FDIR_ID_BIT_SHIFT);
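+
+ /* Worked example (sketch): a lane with a valid FDIR ID holds
+ * 0xFFFFFFFF; (0xFFFFFFFF >> 31) == 1 and (1 << 13) == 0x2000, which
+ * is exactly PKT_RX_FDIR_ID as verified by the RTE_BUILD_BUG_ON above.
+ */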
+
+ /* The returned value must be combined into each mbuf. This is already
+ * being done for RSS and VLAN mbuf ol_flags, so return bits to OR in.
+ */
+ return v_fd_id_mask;
+}
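+
+/* For reference, a scalar sketch of the per-descriptor work the SSE code
+ * above performs four-wide (illustrative only; field and macro names
+ * follow the i40e 32B writeback layout used by the scalar driver):
+ *
+ *   uint32_t flexbh = (rxdp->wb.qword2.ext_status >>
+ *                      I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
+ *                     I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK;
+ *   if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
+ *           mb->hash.fdir.hi = rxdp->wb.qword3.hi_dword.fd_id;
+ *           mb->ol_flags |= PKT_RX_FDIR_ID;
+ *   }
+ */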
+
+#else /* 32 or 16B FDIR ID handling */
+
+/* Handle 16B descriptor FDIR ID flag setting based on FLM. See the scalar
+ * driver for the equivalent implementation of this functionality.
*/
-#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+static inline __m128i
+descs_to_fdir_16b(__m128i fltstat, __m128i descs[4], struct rte_mbuf **rx_pkt)
+{
+ /* unpack filter-status data from descriptors */
+ __m128i v_tmp_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
+ __m128i v_tmp_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
+ __m128i v_fdir_ids = _mm_unpackhi_epi64(v_tmp_01, v_tmp_23);
+
+ /* Generate one bit in each u32 lane */
+ __m128i v_zeros = _mm_setzero_si128();
+ __m128i v_ffff = _mm_cmpeq_epi32(v_zeros, v_zeros);
+ __m128i v_111_mask = _mm_srli_epi32(v_ffff, 29);
+ __m128i v_11_mask = _mm_srli_epi32(v_ffff, 30);
+
+ /* Top lane ones mask for FDIR isolation */
+ __m128i v_desc_fdir_mask = _mm_insert_epi32(v_zeros, UINT32_MAX, 1);
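+
+ /* v_desc_fdir_mask is {0, 0xFFFFFFFF, 0, 0}: within one 16B descriptor
+ * register the FDIR ID occupies u32 lane 1, so ANDing with this mask
+ * isolates that lane in the shifted per-descriptor masks below.
+ */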
+
+ /* Compare and mask away FDIR ID data if bit not set */
+ __m128i v_u32_bits = _mm_and_si128(v_111_mask, fltstat);
+ __m128i v_fdir_id_mask = _mm_cmpeq_epi32(v_u32_bits, v_11_mask);
+ v_fdir_ids = _mm_and_si128(v_fdir_id_mask, v_fdir_ids);
+
+ /* Store data to fdir.hi in mbuf */
+ rx_pkt[0]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 0);
+ rx_pkt[1]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 1);
+ rx_pkt[2]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 2);
+ rx_pkt[3]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 3);
+
+ /* Move fdir_id_mask to correct lane, blend RSS to zero on hits */
+ __m128i v_desc3_shift = _mm_alignr_epi8(v_zeros, v_fdir_id_mask, 8);
+ __m128i v_desc3_mask = _mm_and_si128(v_desc_fdir_mask, v_desc3_shift);
+ descs[3] = _mm_blendv_epi8(descs[3], _mm_setzero_si128(), v_desc3_mask);
+
+ __m128i v_desc2_shift = _mm_alignr_epi8(v_zeros, v_fdir_id_mask, 4);
+ __m128i v_desc2_mask = _mm_and_si128(v_desc_fdir_mask, v_desc2_shift);
+ descs[2] = _mm_blendv_epi8(descs[2], _mm_setzero_si128(), v_desc2_mask);
+
+ __m128i v_desc1_shift = v_fdir_id_mask;
+ __m128i v_desc1_mask = _mm_and_si128(v_desc_fdir_mask, v_desc1_shift);
+ descs[1] = _mm_blendv_epi8(descs[1], _mm_setzero_si128(), v_desc1_mask);
+
+ __m128i v_desc0_shift = _mm_alignr_epi8(v_fdir_id_mask, v_zeros, 12);
+ __m128i v_desc0_mask = _mm_and_si128(v_desc_fdir_mask, v_desc0_shift);
+ descs[0] = _mm_blendv_epi8(descs[0], _mm_setzero_si128(), v_desc0_mask);
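+
+ /* The _mm_alignr_epi8() calls above act as whole-register byte shifts,
+ * moving each descriptor's compare result into u32 lane 1, where
+ * v_desc_fdir_mask isolates it. A sketch of the lane movement, with
+ * v_fdir_id_mask lanes written {M0, M1, M2, M3}:
+ *
+ *   alignr(zeros, mask, 8)  -> {M2, M3, 0, 0}   lane 1 = M3 (desc 3)
+ *   alignr(zeros, mask, 4)  -> {M1, M2, M3, 0}  lane 1 = M2 (desc 2)
+ *   mask unshifted          -> {M0, M1, M2, M3} lane 1 = M1 (desc 1)
+ *   alignr(mask, zeros, 12) -> {0, M0, M1, M2}  lane 1 = M0 (desc 0)
+ */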
+
+ /* Shift to 1 or 0 bit per u32 lane, then to PKT_RX_FDIR_ID offset */
+ const uint32_t FDIR_ID_BIT_SHIFT = 13;
+ RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << FDIR_ID_BIT_SHIFT));
+ __m128i v_mask_one_bit = _mm_srli_epi32(v_fdir_id_mask, 31);
+ return _mm_slli_epi32(v_mask_one_bit, FDIR_ID_BIT_SHIFT);
+}
+#endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */
static inline void
-desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+desc_to_olflags_v(struct i40e_rx_queue *rxq, volatile union i40e_rx_desc *rxdp,
+ __m128i descs[4], struct rte_mbuf **rx_pkts)
{
+ const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
+ __m128i rearm0, rearm1, rearm2, rearm3;
+
__m128i vlan0, vlan1, rss, l3_l4e;
/* mask everything except RSS, flow director and VLAN flags */
/* map rss and vlan type to rss hash and vlan flag */
const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
- 0, 0, 0, PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
+ 0, 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
0, 0, 0, 0);
const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
PKT_RX_IP_CKSUM_BAD >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
+ /* Unpack "status" from quadword 1, bits 0:32 */
vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]);
vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]);
vlan0 = _mm_unpacklo_epi64(vlan0, vlan1);
vlan1 = _mm_and_si128(vlan0, rss_vlan_msk);
vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1);
- rss = _mm_srli_epi32(vlan1, 11);
- rss = _mm_shuffle_epi8(rss_flags, rss);
+ const __m128i desc_fltstat = _mm_srli_epi32(vlan1, 11);
+ rss = _mm_shuffle_epi8(rss_flags, desc_fltstat);
l3_l4e = _mm_srli_epi32(vlan1, 22);
l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
vlan0 = _mm_or_si128(vlan0, rss);
vlan0 = _mm_or_si128(vlan0, l3_l4e);
- rx_pkts[0]->ol_flags = _mm_extract_epi16(vlan0, 0);
- rx_pkts[1]->ol_flags = _mm_extract_epi16(vlan0, 2);
- rx_pkts[2]->ol_flags = _mm_extract_epi16(vlan0, 4);
- rx_pkts[3]->ol_flags = _mm_extract_epi16(vlan0, 6);
-}
+ /* Extract FDIR ID only if FDIR is enabled to avoid useless work */
+ if (rxq->fdir_enabled) {
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+ __m128i v_fdir_ol_flags = descs_to_fdir_32b(rxdp, rx_pkts);
#else
-#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
+ (void)rxdp; /* rxdp not required for 16B desc mode */
+ __m128i v_fdir_ol_flags = descs_to_fdir_16b(desc_fltstat,
+ descs, rx_pkts);
#endif
+ /* OR in ol_flags bits after descriptor-specific extraction */
+ vlan0 = _mm_or_si128(vlan0, v_fdir_ol_flags);
+ }
+
+ /*
+ * At this point, we have the 4 sets of flags in the low 16 bits
+ * of each 32-bit value in vlan0.
+ * We want to extract these, and merge them with the mbuf init data
+ * so we can do a single 16-byte write to the mbuf to set the flags
+ * and all the other initialization fields. Extracting the
+ * appropriate flags means that we have to do a shift and blend for
+ * each mbuf before we do the write.
+ */
+ rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 8), 0x10);
+ rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 4), 0x10);
+ rearm2 = _mm_blend_epi16(mbuf_init, vlan0, 0x10);
+ rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(vlan0, 4), 0x10);
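+
+ /* _mm_blend_epi16() with mask 0x10 replaces only 16-bit element 4
+ * (bytes 8-9) of mbuf_init, i.e. the low word of ol_flags, which sits
+ * 8 bytes after rearm_data; the byte shifts line each packet's flags
+ * up with that element. E.g. for rearm2 the flags are already in
+ * place, so vlan0 is blended unshifted.
+ */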
+
+ /* write the rearm data and ol_flags in one 16-byte write */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+ offsetof(struct rte_mbuf, rearm_data) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+ RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
+ _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
+ _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
+ _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
+ _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
+}
#define PKTLEN_SHIFT 10
static inline void
-desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
+ uint32_t *ptype_tbl)
{
__m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]);
__m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]);
ptype0 = _mm_srli_epi64(ptype0, 30);
ptype1 = _mm_srli_epi64(ptype1, 30);
- rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 0));
- rx_pkts[1]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 8));
- rx_pkts[2]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 0));
- rx_pkts[3]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 8));
+ rx_pkts[0]->packet_type = ptype_tbl[_mm_extract_epi8(ptype0, 0)];
+ rx_pkts[1]->packet_type = ptype_tbl[_mm_extract_epi8(ptype0, 8)];
+ rx_pkts[2]->packet_type = ptype_tbl[_mm_extract_epi8(ptype1, 0)];
+ rx_pkts[3]->packet_type = ptype_tbl[_mm_extract_epi8(ptype1, 8)];
}
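
/* The ptype_tbl lookup above replaces the former per-packet
 * i40e_rxd_pkt_type_mapping() call: the adapter pre-computes a table
 * indexed by the 8-bit hardware PTYPE extracted from each descriptor,
 * so the hot path costs one load per packet. A minimal usage sketch,
 * assuming the adapter's table has been initialized:
 *
 *   uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 *   uint32_t ptype = ptype_tbl[hw_ptype];   // hw_ptype in 0..255
 */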
int pos;
uint64_t var;
__m128i shuf_msk;
+ uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
__m128i crc_adjust = _mm_set_epi16(
0, 0, 0, /* ignore non-length fields */
-rxq->crc_len, /* sub crc on pkt_len */
0, 0 /* ignore pkt_type field */
);
+ /*
+ * compile-time check the above crc_adjust layout is correct.
+ * NOTE: the first field (lowest address) is given last in set_epi16
+ * call above.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
__m128i dd_check, eop_check;
/* nb_pkts shall be less than or equal to RTE_I40E_MAX_RX_BURST */
0xFF, 0xFF, /* pkt_type set as unknown */
0xFF, 0xFF /* pkt_type set as unknown */
);
+ /*
+ * Compile-time verify the shuffle mask
+ * NOTE: some field positions already verified above, but duplicated
+ * here for completeness in case of future modifications.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
/* Cache is empty -> need to scan the buffer rings, but first move
* the next 'n' mbufs into the cache */
__m128i descs[RTE_I40E_DESCS_PER_LOOP];
__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
- __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+ /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+ __m128i mbp1;
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
- /* B.1 load 1 mbuf point */
+ /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf pointers */
mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
/* Read desc statuses backwards to avoid race condition */
/* A.1 load 4 pkts desc */
descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
- /* B.2 copy 2 mbuf point into rx_pkts */
+ /* B.2 copy 2 (64 bit) or 4 (32 bit) mbuf pointers into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
- /* B.1 load 1 mbuf point */
+#if defined(RTE_ARCH_X86_64)
+ /* B.1 load 2 64 bit mbuf pointers */
mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
+#endif
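+ /* On 32-bit builds a single 16-byte load (mbp1) already carries all
+ * four mbuf pointers, so the second load/store pair is compiled out;
+ * on 64-bit builds mbp1 and mbp2 each carry two pointers.
+ */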
descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
rte_compiler_barrier();
descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+#if defined(RTE_ARCH_X86_64)
/* B.2 copy 2 mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+#endif
if (split_packet) {
rte_mbuf_prefetch_part2(rx_pkts[pos]);
descs[3] = _mm_blend_epi16(descs[3], len3, 0x80);
descs[2] = _mm_blend_epi16(descs[2], len2, 0x80);
- /* D.1 pkt 3,4 convert format from desc to pktmbuf */
- pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
- pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
-
/* C.1 4=>2 filter staterr info only */
sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
/* C.1 4=>2 filter staterr info only */
sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
- desc_to_olflags_v(descs, &rx_pkts[pos]);
+ desc_to_olflags_v(rxq, rxdp, descs, &rx_pkts[pos]);
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
+ pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
pkt_mb2);
_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
pkt_mb1);
- desc_to_ptype_v(descs, &rx_pkts[pos]);
+ desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
/* C.4 calc available number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
nb_pkts_recd += var;
i++;
if (i == nb_bufs)
return nb_bufs;
+ rxq->pkt_first_seg = rx_pkts[i];
}
return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
&split_flags[i]);
((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
__m128i descriptor = _mm_set_epi64x(high_qw,
- pkt->buf_physaddr + pkt->data_off);
+ pkt->buf_iova + pkt->data_off);
_mm_store_si128((__m128i *)txdp, descriptor);
}
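
/* A scalar sketch of the descriptor built above (illustrative; struct
 * i40e_tx_desc has two 64-bit words, composed here in one
 * _mm_set_epi64x() and written with a single 16-byte store):
 *
 *   txdp->buffer_addr = pkt->buf_iova + pkt->data_off;  // low qword
 *   txdp->cmd_type_offset_bsz = high_qw;                // high qword
 */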
int __attribute__((cold))
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
-#ifndef RTE_LIBRTE_IEEE1588
- /* need SSE4.1 support */
- if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
- return -1;
-#endif
-
return i40e_rx_vec_dev_conf_condition_check_default(dev);
}