* @param txq
* Pointer to TX queue structure.
* @param dseg
- * Pointer to buffer descriptor to be writen.
+ * Pointer to buffer descriptor to be written.
* @param pkts
* Pointer to array of packets to be sent.
* @param n
txq->wqe_ci += (nb_dword_in_hdr + pkts_n + (nb_dword_per_wqebb - 1)) /
nb_dword_per_wqebb;
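/* The dword count above (header + one dseg per packet) rounds up to WQEBBs. */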
/* Ring QP doorbell. */
- mlx5_tx_dbrec(txq, wqe);
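+ /* The extra write barrier is requested only for a partial burst; a full
+  * burst is assumed to be followed shortly by more packets and doorbells.
+  */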
+ mlx5_tx_dbrec_cond_wmb(txq, wqe, pkts_n < MLX5_VPMD_TX_MAX_BURST);
return pkts_n;
}
10, 11, 2, 3);
#endif
- /*
- * Not to overflow elts array. Decompress next time after mbuf
- * replenishment.
- */
- if (unlikely(mcqe_n + MLX5_VPMD_DESCS_PER_LOOP >
- (uint16_t)(rxq->rq_ci - rxq->cq_ci)))
- return;
/*
* A. load mCQEs into a 128bit register.
* B. store rearm data to mbuf.
{
__m128i pinfo0, pinfo1;
__m128i pinfo, ptype;
- __m128i ol_flags = _mm_set1_epi32(rxq->rss_hash * PKT_RX_RSS_HASH);
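+ /* rss_hash and hw_timestamp hold 0 or 1, so each multiply sets its
+  * offload flag only when the feature is enabled, without a branch.
+  */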
+ __m128i ol_flags = _mm_set1_epi32(rxq->rss_hash * PKT_RX_RSS_HASH |
+ rxq->hw_timestamp * PKT_RX_TIMESTAMP);
__m128i cv_flags;
const __m128i zero = _mm_setzero_si128();
const __m128i ptype_mask =
(uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1),
0,
(uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1),
- (uint8_t)(PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED),
+ (uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
0);
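/* cv_mask replicates the checksum/VLAN flags to all four lanes, one per packet. */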
const __m128i cv_mask =
_mm_set_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
const __m128i mbuf_init =
_mm_loadl_epi64((__m128i *)&rxq->mbuf_initializer);
__m128i rearm0, rearm1, rearm2, rearm3;
}
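/* rq_pi is masked so elts_idx wraps within the elts ring. */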
elts_idx = rxq->rq_pi & q_mask;
elts = &(*rxq->elts)[elts_idx];
- pkts_n = RTE_MIN(pkts_n - rcvd_pkt,
- (uint16_t)(rxq->rq_ci - rxq->cq_ci));
- /* Not to overflow pkts/elts array. */
- pkts_n = RTE_ALIGN_FLOOR(pkts_n, MLX5_VPMD_DESCS_PER_LOOP);
+ /* Not to overflow pkts array. */
+ pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
/* Not to cross queue end. */
pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
if (!pkts_n)
rxq->pending_err |= !!_mm_cvtsi128_si64(opcode);
/* D.5 fill in mbuf - rearm_data and packet_type. */
rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
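+ /* CQE timestamps are big-endian; convert each to host byte order. */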
+ if (rxq->hw_timestamp) {
+ pkts[pos]->timestamp =
+ rte_be_to_cpu_64(cq[pos].timestamp);
+ pkts[pos + 1]->timestamp =
+ rte_be_to_cpu_64(cq[pos + p1].timestamp);
+ pkts[pos + 2]->timestamp =
+ rte_be_to_cpu_64(cq[pos + p2].timestamp);
+ pkts[pos + 3]->timestamp =
+ rte_be_to_cpu_64(cq[pos + p3].timestamp);
+ }
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Add up received bytes count. */
byte_cnt = _mm_shuffle_epi8(op_own, len_shuf_mask);