mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
struct mlx5_rxq_data *rxq = dpdk_rxq;
- uint16_t nb_rx;
+ uint16_t nb_rx = 0;
+ uint16_t tn = 0;
uint64_t err = 0;
-
- nb_rx = rxq_burst_v(rxq, pkts, pkts_n, &err);
- if (unlikely(err | rxq->err_state))
- nb_rx = rxq_handle_pending_error(rxq, pkts, nb_rx);
- return nb_rx;
+ bool no_cq = false;
+
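+ /* Loop until pkts_n packets are received or no new CQE is seen. */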
+ do {
+ nb_rx = rxq_burst_v(rxq, pkts + tn, pkts_n - tn, &err, &no_cq);
+ if (unlikely(err | rxq->err_state))
+ nb_rx = rxq_handle_pending_error(rxq, pkts + tn, nb_rx);
+ tn += nb_rx;
+ if (unlikely(no_cq))
+ break;
+ } while (tn != pkts_n);
+ return tn;
}
/**
* @param[out] err
* Pointer to a flag. Set non-zero value if pkts array has at least one error
* packet to handle.
+ * @param[out] no_cq
+ * Pointer to a boolean. Set to true if no new CQE is seen.
*
* @return
* Number of packets received including errors (<= pkts_n).
*/
static inline uint16_t
rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
- uint64_t *err)
+ uint64_t *err, bool *no_cq)
{
const uint16_t q_n = 1 << rxq->cqe_n;
const uint16_t q_mask = q_n - 1;
/* Not to cross queue end. */
pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
- if (!pkts_n)
+ if (!pkts_n) {
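+ /* CQ not polled; report no_cq unless leftover packets were returned. */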
+ *no_cq = !rcvd_pkt;
return rcvd_pkt;
+ }
/* At this point, there shouldn't be any remaining packets. */
MLX5_ASSERT(rxq->decompressed == 0);
break;
}
/* If no new CQE seen, return without updating cq_db. */
- if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
+ if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP)) {
+ *no_cq = true;
return rcvd_pkt;
+ }
/* Update the consumer indexes for non-compressed CQEs. */
MLX5_ASSERT(nocmp_n <= pkts_n);
rxq->cq_ci += nocmp_n;
}
rte_compiler_barrier();
*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
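+ /* Report no_cq only when no packets were received in this call. */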
+ *no_cq = !rcvd_pkt;
return rcvd_pkt;
}
* @param[out] err
* Pointer to a flag. Set non-zero value if pkts array has at least one error
* packet to handle.
+ * @param[out] no_cq
+ * Pointer to a boolean. Set to true if no new CQE is seen.
*
* @return
* Number of packets received including errors (<= pkts_n).
*/
static inline uint16_t
rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
- uint64_t *err)
+ uint64_t *err, bool *no_cq)
{
const uint16_t q_n = 1 << rxq->cqe_n;
const uint16_t q_mask = q_n - 1;
/* Not to cross queue end. */
pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
- if (!pkts_n)
+ if (!pkts_n) {
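+ /* CQ not polled; report no_cq unless leftover packets were returned. */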
+ *no_cq = !rcvd_pkt;
return rcvd_pkt;
+ }
/* At this point, there shouldn't be any remaining packets. */
MLX5_ASSERT(rxq->decompressed == 0);
/*
break;
}
/* If no new CQE seen, return without updating cq_db. */
- if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
+ if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP)) {
+ *no_cq = true;
return rcvd_pkt;
+ }
/* Update the consumer indexes for non-compressed CQEs. */
MLX5_ASSERT(nocmp_n <= pkts_n);
rxq->cq_ci += nocmp_n;
}
rte_cio_wmb();
*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
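+ /* Report no_cq only when no packets were received in this call. */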
+ *no_cq = !rcvd_pkt;
return rcvd_pkt;
}
* @param[out] err
* Pointer to a flag. Set non-zero value if pkts array has at least one error
* packet to handle.
+ * @param[out] no_cq
+ * Pointer to a boolean. Set to true if no new CQE is seen.
*
* @return
* Number of packets received including errors (<= pkts_n).
*/
static inline uint16_t
rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
- uint64_t *err)
+ uint64_t *err, bool *no_cq)
{
const uint16_t q_n = 1 << rxq->cqe_n;
const uint16_t q_mask = q_n - 1;
/* Not to cross queue end. */
pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
- if (!pkts_n)
+ if (!pkts_n) {
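+ /* CQ not polled; report no_cq unless leftover packets were returned. */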
+ *no_cq = !rcvd_pkt;
return rcvd_pkt;
+ }
/* At this point, there shouldn't be any remaining packets. */
MLX5_ASSERT(rxq->decompressed == 0);
/*
break;
}
/* If no new CQE seen, return without updating cq_db. */
- if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
+ if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP)) {
+ *no_cq = true;
return rcvd_pkt;
+ }
/* Update the consumer indexes for non-compressed CQEs. */
MLX5_ASSERT(nocmp_n <= pkts_n);
rxq->cq_ci += nocmp_n;
}
rte_compiler_barrier();
*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
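+ /* Report no_cq only when no packets were received in this call. */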
+ *no_cq = !rcvd_pkt;
return rcvd_pkt;
}