_mm_store_si128((void *)&mbuf[3]->rx_descriptor_fields1, t0);
}
-uint16_t
-bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+static uint16_t
+recv_burst_vec_sse(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
struct bnxt_rx_queue *rxq = rx_queue;
const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
if (rxq->rxrearm_nb >= rxq->rx_free_thresh)
bnxt_rxq_rearm(rxq, rxr);
- /* Return no more than RTE_BNXT_MAX_RX_BURST per call. */
- nb_pkts = RTE_MIN(nb_pkts, RTE_BNXT_MAX_RX_BURST);
-
cons = raw_cons & (cp_ring_size - 1);
mbcons = (raw_cons / 2) & (rx_ring_size - 1);
	/*
	 * If we are at the end of the ring, ensure that descriptors after the
	 * last valid entry are not treated as valid. Otherwise, force the
	 * maximum number of packets to receive to be a multiple of the per-
	 * loop count.
	 */
- if (nb_pkts < RTE_BNXT_DESCS_PER_LOOP)
- desc_valid_mask >>= 16 * (RTE_BNXT_DESCS_PER_LOOP - nb_pkts);
- else
- nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_BNXT_DESCS_PER_LOOP);
+ if (nb_pkts < BNXT_RX_DESCS_PER_LOOP_VEC128) {
+ desc_valid_mask >>=
+ 16 * (BNXT_RX_DESCS_PER_LOOP_VEC128 - nb_pkts);
+ } else {
+ nb_pkts =
+ RTE_ALIGN_FLOOR(nb_pkts, BNXT_RX_DESCS_PER_LOOP_VEC128);
+ }
/* Handle RX burst request */
- for (i = 0; i < nb_pkts; i += RTE_BNXT_DESCS_PER_LOOP,
- cons += RTE_BNXT_DESCS_PER_LOOP * 2,
- mbcons += RTE_BNXT_DESCS_PER_LOOP) {
- __m128i rxcmp1[RTE_BNXT_DESCS_PER_LOOP];
- __m128i rxcmp[RTE_BNXT_DESCS_PER_LOOP];
+ for (i = 0; i < nb_pkts; i += BNXT_RX_DESCS_PER_LOOP_VEC128,
+ cons += BNXT_RX_DESCS_PER_LOOP_VEC128 * 2,
+ mbcons += BNXT_RX_DESCS_PER_LOOP_VEC128) {
+ __m128i rxcmp1[BNXT_RX_DESCS_PER_LOOP_VEC128];
+ __m128i rxcmp[BNXT_RX_DESCS_PER_LOOP_VEC128];
__m128i tmp0, tmp1, info3_v;
uint32_t num_valid;
#endif
/* Prefetch four descriptor pairs for next iteration. */
- if (i + RTE_BNXT_DESCS_PER_LOOP < nb_pkts) {
+ if (i + BNXT_RX_DESCS_PER_LOOP_VEC128 < nb_pkts) {
rte_prefetch0(&cp_desc_ring[cons + 8]);
rte_prefetch0(&cp_desc_ring[cons + 12]);
}
rxr);
nb_rx_pkts += num_valid;
- if (num_valid < RTE_BNXT_DESCS_PER_LOOP)
+ if (num_valid < BNXT_RX_DESCS_PER_LOOP_VEC128)
break;
}
rxq->rxrearm_nb += nb_rx_pkts;
cpr->cp_raw_cons += 2 * nb_rx_pkts;
- cpr->valid =
- !!(cpr->cp_raw_cons & cpr->cp_ring_struct->ring_size);
bnxt_db_cq(cpr);
}
return nb_rx_pkts;
}
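+
+/*
+ * Public receive entry point: split requests larger than
+ * RTE_BNXT_MAX_RX_BURST into repeated calls to the SSE burst handler.
+ */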
+uint16_t
+bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ uint16_t cnt = 0;
+
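+	/* Receive full bursts while more than one burst's worth remains. */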
+ while (nb_pkts > RTE_BNXT_MAX_RX_BURST) {
+ uint16_t burst;
+
+ burst = recv_burst_vec_sse(rx_queue, rx_pkts + cnt,
+ RTE_BNXT_MAX_RX_BURST);
+
+ cnt += burst;
+ nb_pkts -= burst;
+
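+		/* A short burst means no more completions are ready. */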
+ if (burst < RTE_BNXT_MAX_RX_BURST)
+ return cnt;
+ }
+
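+	/* Receive the remainder, which is at most one full burst. */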
+ return cnt + recv_burst_vec_sse(rx_queue, rx_pkts + cnt, nb_pkts);
+}
+
static void
bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
{
cons = RING_CMPL(ring_mask, raw_cons);
txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];
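+		/* Stop at the first completion not yet written back by HW. */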
- if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
+ if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1))
break;
if (likely(CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2))
raw_cons = NEXT_RAW_CMP(raw_cons);
} while (nb_tx_pkts < ring_mask);
- cpr->valid = !!(raw_cons & cp_ring_struct->ring_size);
if (nb_tx_pkts) {
if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
/* Handle TX burst request */
to_send = nb_pkts;
- while (to_send >= RTE_BNXT_DESCS_PER_LOOP) {
+ while (to_send >= BNXT_TX_DESCS_PER_LOOP) {
/* Prefetch next transmit buffer descriptors. */
rte_prefetch0(txbd + 4);
rte_prefetch0(txbd + 7);
bnxt_xmit_one(tx_pkts[2], txbd++, tx_buf++);
bnxt_xmit_one(tx_pkts[3], txbd++, tx_buf++);
- to_send -= RTE_BNXT_DESCS_PER_LOOP;
- tx_pkts += RTE_BNXT_DESCS_PER_LOOP;
+ to_send -= BNXT_TX_DESCS_PER_LOOP;
+ tx_pkts += BNXT_TX_DESCS_PER_LOOP;
}
while (to_send) {