mlx5_rx_poll_len() returns the Rx hash result extracted from either a
mini CQE or a regular CQE. Because a mini CQE may not contain the hash
result if configured otherwise, the function must not assume the first
DWORD of a mini CQE always holds the hash result. mlx5_rx_poll_len() is
therefore changed to return a pointer to the mini CQE if the CQEs are
compressed, leaving extraction of the hash result to the caller.
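
In short, callers now receive a pointer to the mini CQE (or NULL) and
resolve the hash result themselves. A minimal sketch of the new calling
convention, using only names that appear in this patch:

    volatile struct mlx5_mini_cqe8 *mcqe = NULL;
    uint32_t rss_hash_res;
    int len;

    len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
    /* mcqe is set only when the CQE is compressed; otherwise the
     * hash result is read from the regular CQE.
     */
    rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ? cqe->rx_hash_res :
                                    mcqe->rx_hash_result);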
Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
Acked-by: Shahaf Shuler <shahafs@mellanox.com>
static __rte_always_inline int
mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
- uint16_t cqe_cnt, uint32_t *rss_hash);
+ uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
static __rte_always_inline uint32_t
rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
* Pointer to RX queue.
* @param cqe
* CQE to process.
- * @param[out] rss_hash
- * Packet RSS Hash result.
+ * @param[out] mcqe
+ * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
+ * written.
*
* @return
* Packet size in bytes (0 if there is none), -1 in case of completion
* with error.
*/
static inline int
mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
- uint16_t cqe_cnt, uint32_t *rss_hash)
+ uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
{
struct rxq_zip *zip = &rxq->zip;
uint16_t cqe_n = cqe_cnt + 1;
(uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info);
len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
- *rss_hash = rte_be_to_cpu_32((*mc)[zip->ai & 7].rx_hash_result);
+ *mcqe = &(*mc)[zip->ai & 7];
if ((++zip->ai & 7) == 0) {
/* Invalidate consumed CQEs */
idx = zip->ca;
zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
/* Get packet size to return. */
len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
- *rss_hash = rte_be_to_cpu_32((*mc)[0].rx_hash_result);
+ *mcqe = &(*mc)[0];
zip->ai = 1;
/* Prefetch all the entries to be invalidated */
idx = zip->ca;
}
} else {
len = rte_be_to_cpu_32(cqe->byte_cnt);
- *rss_hash = rte_be_to_cpu_32(cqe->rx_hash_res);
}
/* Error while receiving packet. */
if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
volatile struct mlx5_wqe_data_seg *wqe =
&((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
struct rte_mbuf *rep = (*rxq->elts)[idx];
- uint32_t rss_hash_res = 0;
+ volatile struct mlx5_mini_cqe8 *mcqe = NULL;
+ uint32_t rss_hash_res;
if (pkt)
NEXT(seg) = rep;
}
if (!pkt) {
cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
- len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt,
- &rss_hash_res);
+ len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
if (!len) {
rte_mbuf_raw_free(rep);
break;
pkt = seg;
assert(len >= (rxq->crc_present << 2));
pkt->ol_flags = 0;
+ /* If compressed, take hash result from mini-CQE. */
+ rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
+ cqe->rx_hash_res :
+ mcqe->rx_hash_result);
rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
if (rxq->crc_present)
len -= ETHER_CRC_LEN;
uint16_t consumed_strd;
uint32_t offset;
uint32_t byte_cnt;
- uint32_t rss_hash_res = 0;
+ volatile struct mlx5_mini_cqe8 *mcqe = NULL;
+ uint32_t rss_hash_res;
if (strd_idx == strd_n) {
/* Replace WQE only if the buffer is still in use. */
buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
}
cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
- ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &rss_hash_res);
+ ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
if (!ret)
break;
if (unlikely(ret == -1)) {
+ /* If compressed, take hash result from mini-CQE. */
+ rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
+ cqe->rx_hash_res :
+ mcqe->rx_hash_result);
rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
PKT_LEN(pkt) = len;
DATA_LEN(pkt) = len;