const __m128i cksum_mask = _mm_set_epi32(PKT_RX_IP_CKSUM_MASK |
PKT_RX_L4_CKSUM_MASK |
PKT_RX_OUTER_L4_CKSUM_MASK |
- PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_OUTER_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_MASK |
PKT_RX_L4_CKSUM_MASK |
PKT_RX_OUTER_L4_CKSUM_MASK |
- PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_OUTER_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_MASK |
PKT_RX_L4_CKSUM_MASK |
PKT_RX_OUTER_L4_CKSUM_MASK |
- PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_OUTER_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_MASK |
PKT_RX_L4_CKSUM_MASK |
PKT_RX_OUTER_L4_CKSUM_MASK |
- PKT_RX_EIP_CKSUM_BAD);
+ PKT_RX_OUTER_IP_CKSUM_BAD);
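Note: the only change in this mask is the flag rename; PKT_RX_OUTER_IP_CKSUM_BAD reports the same condition (bad outer/tunnel IP header checksum) that PKT_RX_EIP_CKSUM_BAD did, under a clearer name. As a minimal sketch of the consumer side (the helper name below is ours, not part of the patch), an application would test the renamed bit in ol_flags:

#include <rte_mbuf.h>

/* Illustrative helper, not from the patch: report whether the outer IP
 * header checksum of a received packet was flagged as bad. */
static inline int
outer_ip_cksum_is_bad(const struct rte_mbuf *m)
{
	return (m->ol_flags & PKT_RX_OUTER_IP_CKSUM_BAD) != 0;
}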
/* map the checksum, rss and vlan fields to the checksum, rss
* and vlan flag
*/
const __m128i cksum_flags =
_mm_set_epi8((PKT_RX_OUTER_L4_CKSUM_BAD >> 20 |
- PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD |
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD |
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD |
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
/* shift right 20 bits to use the low two bits to indicate
 * outer checksum status
 * shift right 1 bit to make sure it does not exceed 255
 */
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD |
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD |
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD |
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD |
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
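The shift trick in this table deserves a spelled-out check. Assuming the usual mbuf flag bit positions (inner L4/IP and outer-IP status in bits 3 to 8, outer L4 status in bits 21 and 22; the values are hard-coded below as assumptions, not copied from the patch), shifting the outer L4 flag right by 20 moves it into bits 1 and 2, but the largest combination still sets bit 8; the trailing right shift by 1 is what makes every entry fit into the byte-sized lanes that _mm_set_epi8 expects:

#include <assert.h>
#include <stdint.h>

/* Assumed flag bit positions (sketch, not taken from the driver). */
#define L4_BAD        (1u << 3)
#define IP_BAD        (1u << 4)
#define OUTER_IP_BAD  (1u << 5)
#define IP_GOOD       (1u << 7)
#define L4_GOOD       (1u << 8)
#define OUTER_L4_GOOD (1u << 22)

int main(void)
{
	/* Largest table entry: outer L4 good, outer IP bad, inner IP/L4 good. */
	uint32_t v = (OUTER_L4_GOOD >> 20) | OUTER_IP_BAD | L4_GOOD | IP_GOOD;

	assert(v == 420);        /* 4 | 32 | 256 | 128: needs 9 bits       */
	assert((v >> 1) == 210); /* the final >> 1 brings it below 255     */
	return 0;
}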
/* B.1 load 2 (64 bit) or 4 (32 bit) mbuf pointers */
mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
/* Read desc statuses backwards to avoid race condition */
- /* A.1 load 4 pkts desc */
+ /* A.1 load desc[3] */
descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]);
#endif
+ /* A.1 load desc[2-0] */
descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
- /* B.1 load 2 mbuf point */
descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
rte_compiler_barrier();
descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
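The four descriptor loads are issued from desc 3 down to desc 0, with a compiler barrier between each pair, so the optimizer cannot merge or reorder them; that ordering is what the "read backwards to avoid race condition" comment relies on. For reference, rte_compiler_barrier() constrains only the compiler, not the CPU, and its generic form is essentially an empty asm statement with a memory clobber, roughly:

/* Rough equivalent of the barrier used above (sketch, not the DPDK source). */
#define compiler_barrier() do { \
	asm volatile ("" : : : "memory"); \
} while (0)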
/* needs to load 2nd 16B of each desc for RSS hash parsing,
 * will cause a performance drop to get into this context.
 */
- if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
+ if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_RSS_HASH) {
/* load bottom half of every 32B desc */
const __m128i raw_desc_bh3 =
/* and with mask to extract bits, flipping 1-0 */
__m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
/* the staterr values are not in order, as the count
- * count of dd bits doesn't care. However, for end of
+ * of dd bits doesn't care. However, for end of
* packet tracking, we do care, so shuffle. This also
* compresses the 32-bit values to 8-bit
*/
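For readers less familiar with the intrinsic: _mm_andnot_si128(a, b) computes (~a) & b, which is the "flipping 1-0" the comment refers to; any bit set in staterr clears the corresponding bit of the EOP check. A tiny standalone illustration of that semantics:

#include <assert.h>
#include <stdint.h>
#include <emmintrin.h>

int main(void)
{
	__m128i staterr = _mm_set1_epi32(0x0000000F); /* pretend status bits  */
	__m128i check   = _mm_set1_epi32(0x000000FF); /* bits we test against */
	__m128i r = _mm_andnot_si128(staterr, check); /* (~staterr) & check   */

	uint32_t out[4];
	_mm_storeu_si128((__m128i *)out, r);
	assert(out[0] == 0x000000F0); /* low four bits flipped away */
	return 0;
}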
nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
if (txq->nb_tx_free < txq->tx_free_thresh)
- ice_tx_free_bufs(txq);
+ ice_tx_free_bufs_vec(txq);
nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
nb_commit = nb_pkts;
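The order of the clamping above matters: the burst is first capped at tx_rs_thresh, completed buffers are reclaimed (ice_tx_free_bufs_vec) only once the free count has dropped below tx_free_thresh, and the commit size is then bounded by what is actually free. A condensed sketch of that control flow, using a stand-in struct rather than the driver's real queue type:

#include <stdint.h>

/* Stand-in for the driver's TX queue; field names mirror the snippet above
 * but this is an illustration, not the real structure. */
struct txq_sketch {
	uint16_t nb_tx_free;     /* descriptors currently free           */
	uint16_t tx_free_thresh; /* reclaim completed buffers below this */
	uint16_t tx_rs_thresh;   /* max packets handled per burst        */
};

static uint16_t
clamp_tx_burst(struct txq_sketch *txq, uint16_t nb_pkts)
{
	if (nb_pkts > txq->tx_rs_thresh)
		nb_pkts = txq->tx_rs_thresh;
	if (txq->nb_tx_free < txq->tx_free_thresh) {
		/* the real path calls ice_tx_free_bufs_vec(txq) here to
		 * return completed mbufs and raise nb_tx_free */
	}
	if (nb_pkts > txq->nb_tx_free)
		nb_pkts = txq->nb_tx_free;
	return nb_pkts;
}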