--- a/drivers/net/bnxt/bnxt_rxtx_vec_common.h
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_common.h
	rxq->rxrearm_start = 0;
	return 0;
}
+
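+/*
+ * Allocate mbufs for the software Rx ring and refill the hardware Rx
+ * descriptors starting at rxq->rxrearm_start, then advance the producer
+ * doorbell. Replaces the per-architecture NEON/SSE rearm functions
+ * removed below.
+ */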
+static inline void
+bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)
+{
+	struct rx_prod_pkt_bd *rxbds = &rxr->rx_desc_ring[rxq->rxrearm_start];
+	struct rte_mbuf **rx_bufs = &rxr->rx_buf_ring[rxq->rxrearm_start];
+	int nb, i;
+
+	/*
+	 * Number of mbufs to allocate must be a multiple of four. The
+	 * allocation must not go past the end of the ring.
+	 */
+	nb = RTE_MIN(rxq->rxrearm_nb & ~0x3,
+		     rxq->nb_rx_desc - rxq->rxrearm_start);
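+	/*
+	 * Any remainder below a multiple of four stays in rxrearm_nb and
+	 * is picked up by a later rearm call.
+	 */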
+
+	/* Allocate new mbufs into the software ring. */
+	if (rte_mempool_get_bulk(rxq->mb_pool, (void *)rx_bufs, nb) < 0) {
+		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += nb;
+
+		return;
+	}
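+	/*
+	 * On failure nothing is rolled back: rxrearm_start and rxrearm_nb
+	 * are unchanged, so the same range is retried next time.
+	 */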
+
+	/* Initialize the Rx descriptors from the new mbufs, four per loop. */
+	for (i = 0; i < nb; i += 4) {
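+		/*
+		 * rte_mbuf_data_iova_default() is the buffer IOVA plus
+		 * RTE_PKTMBUF_HEADROOM, matching the hdr_room adjustment
+		 * the NEON/SSE versions below applied with vector adds.
+		 */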
+		rxbds[0].address = rte_mbuf_data_iova_default(rx_bufs[0]);
+		rxbds[1].address = rte_mbuf_data_iova_default(rx_bufs[1]);
+		rxbds[2].address = rte_mbuf_data_iova_default(rx_bufs[2]);
+		rxbds[3].address = rte_mbuf_data_iova_default(rx_bufs[3]);
+
+		rxbds += 4;
+		rx_bufs += 4;
+	}
+
+	rxq->rxrearm_start += nb;
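+	/*
+	 * Ring the Rx producer doorbell: rxrearm_start - 1 is the index of
+	 * the last descriptor just filled.
+	 */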
+	bnxt_db_write(&rxr->rx_db, rxq->rxrearm_start - 1);
+	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+		rxq->rxrearm_start = 0;
+
+	rxq->rxrearm_nb -= nb;
+}
#endif /* _BNXT_RXTX_VEC_COMMON_H_ */
--- a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
/*
 * RX Ring handling
 */
-static inline void
-bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)
-{
-	struct rx_prod_pkt_bd *rxbds = &rxr->rx_desc_ring[rxq->rxrearm_start];
-	struct rte_mbuf **rx_bufs = &rxr->rx_buf_ring[rxq->rxrearm_start];
-	struct rte_mbuf *mb0, *mb1;
-	int nb, i;
-
-	const uint64x2_t hdr_room = {0, RTE_PKTMBUF_HEADROOM};
-	const uint64x2_t addrmask = {0, UINT64_MAX};
-
-	/*
-	 * Number of mbufs to allocate must be a multiple of two. The
-	 * allocation must not go past the end of the ring.
-	 */
-	nb = RTE_MIN(rxq->rxrearm_nb & ~0x1,
-		     rxq->nb_rx_desc - rxq->rxrearm_start);
-
-	/* Allocate new mbufs into the software ring */
-	if (rte_mempool_get_bulk(rxq->mb_pool, (void *)rx_bufs, nb) < 0) {
-		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += nb;
-
-		return;
-	}
-
-	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
-	for (i = 0; i < nb; i += 2, rx_bufs += 2) {
-		uint64x2_t buf_addr0, buf_addr1;
-		uint64x2_t rxbd0, rxbd1;
-
-		mb0 = rx_bufs[0];
-		mb1 = rx_bufs[1];
-
-		/* Load address fields from both mbufs */
-		buf_addr0 = vld1q_u64((uint64_t *)&mb0->buf_addr);
-		buf_addr1 = vld1q_u64((uint64_t *)&mb1->buf_addr);
-
-		/* Load both rx descriptors (preserving some existing fields) */
-		rxbd0 = vld1q_u64((uint64_t *)(rxbds + 0));
-		rxbd1 = vld1q_u64((uint64_t *)(rxbds + 1));
-
-		/* Add default offset to buffer address. */
-		buf_addr0 = vaddq_u64(buf_addr0, hdr_room);
-		buf_addr1 = vaddq_u64(buf_addr1, hdr_room);
-
-		/* Clear all fields except address. */
-		buf_addr0 = vandq_u64(buf_addr0, addrmask);
-		buf_addr1 = vandq_u64(buf_addr1, addrmask);
-
-		/* Clear address field in descriptor. */
-		rxbd0 = vbicq_u64(rxbd0, addrmask);
-		rxbd1 = vbicq_u64(rxbd1, addrmask);
-
-		/* Set address field in descriptor. */
-		rxbd0 = vaddq_u64(rxbd0, buf_addr0);
-		rxbd1 = vaddq_u64(rxbd1, buf_addr1);
-
-		/* Store descriptors to memory. */
-		vst1q_u64((uint64_t *)(rxbds++), rxbd0);
-		vst1q_u64((uint64_t *)(rxbds++), rxbd1);
-	}
-
-	rxq->rxrearm_start += nb;
-	bnxt_db_write(&rxr->rx_db, rxq->rxrearm_start - 1);
-	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
-		rxq->rxrearm_start = 0;
-
-	rxq->rxrearm_nb -= nb;
-}
-
static uint32_t
bnxt_parse_pkt_type(uint32x4_t mm_rxcmp, uint32x4_t mm_rxcmp1)
{
--- a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
/*
 * RX Ring handling
 */
-static inline void
-bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)
-{
-	struct rx_prod_pkt_bd *rxbds = &rxr->rx_desc_ring[rxq->rxrearm_start];
-	struct rte_mbuf **rx_bufs = &rxr->rx_buf_ring[rxq->rxrearm_start];
-	struct rte_mbuf *mb0, *mb1;
-	int nb, i;
-
-	const __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM, 0);
-	const __m128i addrmask = _mm_set_epi64x(UINT64_MAX, 0);
-
-	/*
-	 * Number of mbufs to allocate must be a multiple of two. The
-	 * allocation must not go past the end of the ring.
-	 */
-	nb = RTE_MIN(rxq->rxrearm_nb & ~0x1,
-		     rxq->nb_rx_desc - rxq->rxrearm_start);
-
-	/* Allocate new mbufs into the software ring */
-	if (rte_mempool_get_bulk(rxq->mb_pool, (void *)rx_bufs, nb) < 0) {
-		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += nb;
-
-		return;
-	}
-
-	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
-	for (i = 0; i < nb; i += 2, rx_bufs += 2) {
-		__m128i buf_addr0, buf_addr1;
-		__m128i rxbd0, rxbd1;
-
-		mb0 = rx_bufs[0];
-		mb1 = rx_bufs[1];
-
-		/* Load address fields from both mbufs */
-		buf_addr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
-		buf_addr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
-
-		/* Load both rx descriptors (preserving some existing fields) */
-		rxbd0 = _mm_loadu_si128((__m128i *)(rxbds + 0));
-		rxbd1 = _mm_loadu_si128((__m128i *)(rxbds + 1));
-
-		/* Add default offset to buffer address. */
-		buf_addr0 = _mm_add_epi64(buf_addr0, hdr_room);
-		buf_addr1 = _mm_add_epi64(buf_addr1, hdr_room);
-
-		/* Clear all fields except address. */
-		buf_addr0 = _mm_and_si128(buf_addr0, addrmask);
-		buf_addr1 = _mm_and_si128(buf_addr1, addrmask);
-
-		/* Clear address field in descriptor. */
-		rxbd0 = _mm_andnot_si128(addrmask, rxbd0);
-		rxbd1 = _mm_andnot_si128(addrmask, rxbd1);
-
-		/* Set address field in descriptor. */
-		rxbd0 = _mm_add_epi64(rxbd0, buf_addr0);
-		rxbd1 = _mm_add_epi64(rxbd1, buf_addr1);
-
-		/* Store descriptors to memory. */
-		_mm_store_si128((__m128i *)(rxbds++), rxbd0);
-		_mm_store_si128((__m128i *)(rxbds++), rxbd1);
-	}
-
-	rxq->rxrearm_start += nb;
-	bnxt_db_write(&rxr->rx_db, rxq->rxrearm_start - 1);
-	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
-		rxq->rxrearm_start = 0;
-
-	rxq->rxrearm_nb -= nb;
-}
-
static __m128i
bnxt_parse_pkt_type(__m128i mm_rxcmp, __m128i mm_rxcmp1)
{