ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}
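+/**
+ * Convert the packed FDIR IDs of 8 descriptors into PKT_RX_FDIR /
+ * PKT_RX_FDIR_ID flag words; a flow_id of 0xFFFFFFFF marks a
+ * flow director mismatch.
+ */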
+static inline __m256i
+ice_flex_rxd_to_fdir_flags_vec_avx512(const __m256i fdir_id0_7)
+{
+#define FDID_MIS_MAGIC 0xFFFFFFFF
+ RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
+ RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
+ const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
+ PKT_RX_FDIR_ID);
+ /* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
+ const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
+ __m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
+ fdir_mis_mask);
+ /* this XOR op inverts the fdir_mask (all-ones where the ID is valid) */
+ fdir_mask = _mm256_xor_si256(fdir_mask, fdir_mis_mask);
+ const __m256i fdir_flags = _mm256_and_si256(fdir_mask, pkt_fdir_bit);
+
+ return fdir_flags;
+}
+
static inline uint16_t
_ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
struct rte_mbuf **rx_pkts,
* bit13 is for VLAN indication.
*/
const __m256i flags_mask =
- _mm256_set1_epi32((7 << 4) | (1 << 12) | (1 << 13));
+ _mm256_set1_epi32((0xF << 4) | (1 << 12) | (1 << 13));
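+ /* bits 4-7 carry the IP, L4, outer-IP and outer-L4 checksum
+ * error status; taking all four bits yields a 16-entry index
+ * into the shuffle table below
+ */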
/**
* data to be shuffled by the result of the flags mask shifted by 4
* bits. This gives us the l3_l4 flags.
*/
- const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
- /* shift right 1 bit to make sure it not exceed 255 */
- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- /* 2nd 128-bits */
- 0, 0, 0, 0, 0, 0, 0, 0,
- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1);
+ const __m256i l3_l4_flags_shuf =
+ _mm256_set_epi8((PKT_RX_OUTER_L4_CKSUM_BAD >> 20 |
+ PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_IP_CKSUM_GOOD) >> 1,
+ /**
+ * second 128-bits
+ * shift right 20 bits to use the low two bits to indicate
+ * outer checksum status
+ * shift right 1 bit to make sure it does not exceed 255
+ */
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_IP_CKSUM_GOOD) >> 1);
const __m256i cksum_mask =
- _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_EIP_CKSUM_BAD);
+ _mm256_set1_epi32(PKT_RX_IP_CKSUM_MASK |
+ PKT_RX_L4_CKSUM_MASK |
+ PKT_RX_OUTER_IP_CKSUM_BAD |
+ PKT_RX_OUTER_L4_CKSUM_MASK);
/**
* data to be shuffled by the result of the flags mask, shifted down 12.
* If RSS(bit12)/VLAN(bit13) are set,
__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
_mm256_srli_epi32(flag_bits, 4));
l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
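+ /* the shuffle table packed the outer L4 status into bits 1-2,
+ * since the real flags (bits 21-22) do not fit in a byte;
+ * extract those two bits and move them back up by 20
+ */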
+ __m256i l4_outer_mask = _mm256_set1_epi32(0x6);
+ __m256i l4_outer_flags =
+ _mm256_and_si256(l3_l4_flags, l4_outer_mask);
+ l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20);
+
+ __m256i l3_l4_mask = _mm256_set1_epi32(~0x6);
+ l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask);
+ l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags);
l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
/* set rss and vlan flags */
const __m256i rss_vlan_flag_bits =
rss_vlan_flag_bits);
/* merge flags */
- const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
- rss_vlan_flags);
+ __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
+ rss_vlan_flags);
+
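+ /* gather the FDIR ID dwords of all 8 descriptors by
+ * interleaving the high dwords of each descriptor pair
+ */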
+ if (rxq->fdir_enabled) {
+ const __m256i fdir_id4_7 =
+ _mm256_unpackhi_epi32(raw_desc6_7, raw_desc4_5);
+
+ const __m256i fdir_id0_3 =
+ _mm256_unpackhi_epi32(raw_desc2_3, raw_desc0_1);
+
+ const __m256i fdir_id0_7 =
+ _mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);
+
+ const __m256i fdir_flags =
+ ice_flex_rxd_to_fdir_flags_vec_avx512
+ (fdir_id0_7);
+
+ /* merge with fdir_flags */
+ mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags);
+
+ /* write to mbuf: have to use scalar store here */
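+ /* the unpack steps above leave the IDs in the permuted
+ * lane order 3,7,2,6,1,5,0,4 for packets 0..7
+ */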
+ rx_pkts[i + 0]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 3);
+
+ rx_pkts[i + 1]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 7);
+
+ rx_pkts[i + 2]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 2);
+
+ rx_pkts[i + 3]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 6);
+
+ rx_pkts[i + 4]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 1);
+
+ rx_pkts[i + 5]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 5);
+
+ rx_pkts[i + 6]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 0);
+
+ rx_pkts[i + 7]->hash.fdir.hi =
+ _mm256_extract_epi32(fdir_id0_7, 4);
+ } /* if() on fdir_enabled */
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
/**
rx_pkts + retval, nb_pkts);
}
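+/**
+ * Free the mbufs of tx_rs_thresh completed descriptors once the
+ * hardware has set the DD bit on the threshold descriptor; returns
+ * the number of freed entries, or 0 if that descriptor is not done.
+ */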
+static __rte_always_inline int
+ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
+{
+ struct ice_vec_tx_entry *txep;
+ uint32_t n;
+ uint32_t i;
+ int nb_free = 0;
+ struct rte_mbuf *m, *free[ICE_TX_MAX_FREE_BUF_SZ];
+
+ /* check DD bits on threshold descriptor */
+ if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
+ rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
+ return 0;
+
+ n = txq->tx_rs_thresh;
+
+ /* first buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_rs_thresh - 1)
+ */
+ txep = (void *)txq->sw_ring;
+ txep += txq->tx_next_dd - (n - 1);
+
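+ /* with MBUF_FAST_FREE every mbuf comes from the same pool and
+ * has a reference count of 1, so the raw pointers can be
+ * recycled in bulk; (n & 31) == 0 keeps the 32-entry vector
+ * copy loop below exact
+ */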
+ if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+ struct rte_mempool *mp = txep[0].mbuf->pool;
+ void **cache_objs;
+ struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
+ rte_lcore_id());
+
+ if (!cache || cache->len == 0)
+ goto normal;
+
+ cache_objs = &cache->objs[cache->len];
+
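+ /* a burst larger than the cache can ever hold goes straight
+ * to the mempool ring
+ */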
+ if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
+ rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
+ goto done;
+ }
+
+ /* The cache works as follows:
+ * 1. Add the objects to the cache.
+ * 2. Once cache->len crosses the flush threshold, everything
+ * above cache->size is flushed back to the ring.
+ */
+ /* Add elements back into the cache */
+ uint32_t copied = 0;
+ /* n is multiple of 32 */
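+ /* each 64B register moves 8 pointer-sized entries, so one
+ * iteration copies 4 * 8 = 32 software-ring entries
+ */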
+ while (copied < n) {
+ const __m512i a = _mm512_loadu_si512(&txep[copied]);
+ const __m512i b = _mm512_loadu_si512(&txep[copied + 8]);
+ const __m512i c = _mm512_loadu_si512(&txep[copied + 16]);
+ const __m512i d = _mm512_loadu_si512(&txep[copied + 24]);
+
+ _mm512_storeu_si512(&cache_objs[copied], a);
+ _mm512_storeu_si512(&cache_objs[copied + 8], b);
+ _mm512_storeu_si512(&cache_objs[copied + 16], c);
+ _mm512_storeu_si512(&cache_objs[copied + 24], d);
+ copied += 32;
+ }
+ cache->len += n;
+
+ if (cache->len >= cache->flushthresh) {
+ rte_mempool_ops_enqueue_bulk
+ (mp, &cache->objs[cache->size],
+ cache->len - cache->size);
+ cache->len = cache->size;
+ }
+ goto done;
+ }
+
+normal:
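+ /* scalar fallback: group mbufs belonging to the same pool and
+ * free them with a single rte_mempool_put_bulk() call
+ */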
+ m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
+ if (likely(m)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < n; i++) {
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (likely(m)) {
+ if (likely(m->pool == free[0]->pool)) {
+ free[nb_free++] = m;
+ } else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void *)free,
+ nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < n; i++) {
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (m)
+ rte_mempool_put(m->pool, m);
+ }
+ }
+
+done:
+ /* buffers were freed, update counters */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ return txq->tx_rs_thresh;
+}
+
static inline void
ice_vtx1(volatile struct ice_tx_desc *txdp,
struct rte_mbuf *pkt, uint64_t flags)
const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA |
((uint64_t)flags << ICE_TXD_QW1_CMD_S));
- /* if unaligned on 32-bit boundary, do one to align */
- if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) {
- ice_vtx1(txdp, *pkt, flags);
- nb_pkts--, txdp++, pkt++;
- }
-
- /* do two at a time while possible, in bursts */
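+ /* do four packets at a time; a single unaligned 64B store
+ * writes all four descriptors, so no alignment prologue is
+ * needed any more
+ */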
for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
uint64_t hi_qw3 =
hi_qw_tmpl |
((uint64_t)pkt[0]->data_len <<
ICE_TXD_QW1_TX_BUF_SZ_S);
- __m256i desc2_3 =
- _mm256_set_epi64x
+ __m512i desc0_3 =
+ _mm512_set_epi64
(hi_qw3,
pkt[3]->buf_iova + pkt[3]->data_off,
hi_qw2,
- pkt[2]->buf_iova + pkt[2]->data_off);
- __m256i desc0_1 =
- _mm256_set_epi64x
- (hi_qw1,
+ pkt[2]->buf_iova + pkt[2]->data_off,
+ hi_qw1,
pkt[1]->buf_iova + pkt[1]->data_off,
hi_qw0,
pkt[0]->buf_iova + pkt[0]->data_off);
- _mm256_store_si256((void *)(txdp + 2), desc2_3);
- _mm256_store_si256((void *)txdp, desc0_1);
+ _mm512_storeu_si512((void *)txdp, desc0_3);
}
/* do any last ones */
}
}
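+/**
+ * Stash the transmitted mbufs in the software ring so that
+ * ice_tx_free_bufs_avx512() can release them later.
+ */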
+static __rte_always_inline void
+ice_tx_backlog_entry_avx512(struct ice_vec_tx_entry *txep,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i;
+
+ for (i = 0; i < (int)nb_pkts; ++i)
+ txep[i].mbuf = tx_pkts[i];
+}
+
static inline uint16_t
ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
volatile struct ice_tx_desc *txdp;
- struct ice_tx_entry *txep;
+ struct ice_vec_tx_entry *txep;
uint16_t n, nb_commit, tx_id;
uint64_t flags = ICE_TD_CMD;
uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD;
nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
if (txq->nb_tx_free < txq->tx_free_thresh)
- ice_tx_free_bufs(txq);
+ ice_tx_free_bufs_avx512(txq);
nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
if (unlikely(nb_pkts == 0))
tx_id = txq->tx_tail;
txdp = &txq->tx_ring[tx_id];
- txep = &txq->sw_ring[tx_id];
+ txep = (void *)txq->sw_ring;
+ txep += tx_id;
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
n = (uint16_t)(txq->nb_tx_desc - tx_id);
if (nb_commit >= n) {
- ice_tx_backlog_entry(txep, tx_pkts, n);
+ ice_tx_backlog_entry_avx512(txep, tx_pkts, n);
ice_vtx(txdp, tx_pkts, n - 1, flags);
tx_pkts += (n - 1);
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reaching the end of the ring */
- txdp = &txq->tx_ring[tx_id];
- txep = &txq->sw_ring[tx_id];
+ txdp = txq->tx_ring;
+ txep = (void *)txq->sw_ring;
}
- ice_tx_backlog_entry(txep, tx_pkts, nb_commit);
+ ice_tx_backlog_entry_avx512(txep, tx_pkts, nb_commit);
ice_vtx(txdp, tx_pkts, nb_commit, flags);