 nix_vlan_update(const uint64_t w2, uint64_t ol_flags, uint8x16_t *f)
 {
         if (w2 & BIT_ULL(21) /* vtag0_gone */) {
-                ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+                ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
                 *f = vsetq_lane_u16((uint16_t)(w2 >> 32), *f, 5);
         }
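The rename above is mechanical; the interesting part of the hunk is the lane insert: the stripped VLAN TCI sits in bits 47:32 of CQE word2 and is written into 16-bit lane 5 of the descriptor-fields vector, which lines up with mbuf->vlan_tci. A standalone sketch of that insert, assuming only the bit positions visible in the code above; the sample w2 value and the explicit vreinterpret casts are mine, for illustration:

#include <stdint.h>
#include <stdio.h>
#include <arm_neon.h>

#define BIT_ULL(n) (1ULL << (n))

int main(void)
{
        /* fabricated CQE word2: vtag0_gone set, TCI 0x0123 in bits 47:32 */
        uint64_t w2 = BIT_ULL(21) | (0x0123ULL << 32);
        uint8x16_t f = vdupq_n_u8(0);   /* stands in for rx_descriptor_fields1 */

        if (w2 & BIT_ULL(21)) {
                /* view the 16B vector as 8 x u16 and set lane 5 (vlan_tci) */
                uint16x8_t f16 = vreinterpretq_u16_u8(f);
                f16 = vsetq_lane_u16((uint16_t)(w2 >> 32), f16, 5);
                f = vreinterpretq_u8_u16(f16);
        }
        printf("vlan_tci lane = 0x%04x\n",
               vgetq_lane_u16(vreinterpretq_u16_u8(f), 5));
        return 0;
}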
 nix_qinq_update(const uint64_t w2, uint64_t ol_flags, struct rte_mbuf *mbuf)
 {
         if (w2 & BIT_ULL(23) /* vtag1_gone */) {
-                ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
+                ol_flags |= RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_RX_QINQ_STRIPPED;
                 mbuf->vlan_tci_outer = (uint16_t)(w2 >> 48);
         }
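Taken together, the two hunks imply this CQE word2 layout: bit 21 (vtag0_gone) with the vtag0 TCI in bits 47:32 going to mbuf->vlan_tci (via the vector above), and bit 23 (vtag1_gone) with the vtag1 TCI in bits 63:48 going to mbuf->vlan_tci_outer. A scalar sketch of that decode; the struct and flag values are stand-ins, not the real rte_mbuf or RTE_MBUF_F_* definitions:

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

/* stand-in for the mbuf fields the driver fills */
struct vlan_info {
        uint16_t vlan_tci;              /* vtag0, set via the f vector */
        uint16_t vlan_tci_outer;        /* vtag1, set directly on the mbuf */
        uint64_t ol_flags;
};

static void decode_w2(uint64_t w2, struct vlan_info *vi)
{
        if (w2 & BIT_ULL(21)) {         /* vtag0_gone */
                vi->ol_flags |= 1ULL << 0;      /* "VLAN + stripped" stand-in */
                vi->vlan_tci = (uint16_t)(w2 >> 32);
        }
        if (w2 & BIT_ULL(23)) {         /* vtag1_gone */
                vi->ol_flags |= 1ULL << 1;      /* "QINQ + stripped" stand-in */
                vi->vlan_tci_outer = (uint16_t)(w2 >> 48);
        }
}

int main(void)
{
        struct vlan_info vi = { 0, 0, 0 };

        decode_w2(BIT_ULL(21) | BIT_ULL(23) |
                  (0x0064ULL << 32) | (0x00c8ULL << 48), &vi);
        printf("tci=0x%04x outer=0x%04x flags=0x%llx\n", vi.vlan_tci,
               vi.vlan_tci_outer, (unsigned long long)vi.ol_flags);
        return 0;
}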
                         f1 = vsetq_lane_u32(cq1_w0, f1, 3);
                         f2 = vsetq_lane_u32(cq2_w0, f2, 3);
                         f3 = vsetq_lane_u32(cq3_w0, f3, 3);
-                        ol_flags0 = PKT_RX_RSS_HASH;
-                        ol_flags1 = PKT_RX_RSS_HASH;
-                        ol_flags2 = PKT_RX_RSS_HASH;
-                        ol_flags3 = PKT_RX_RSS_HASH;
+                        ol_flags0 = RTE_MBUF_F_RX_RSS_HASH;
+                        ol_flags1 = RTE_MBUF_F_RX_RSS_HASH;
+                        ol_flags2 = RTE_MBUF_F_RX_RSS_HASH;
+                        ol_flags3 = RTE_MBUF_F_RX_RSS_HASH;
                 } else {
                         ol_flags0 = 0; ol_flags1 = 0;
                         ol_flags2 = 0; ol_flags3 = 0;
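These assignments run under the RSS-offload branch; the else arm simply zeroes the flags. Lane 3 of the 32-bit view is not arbitrary: the 16-byte rx_descriptor_fields1 region of struct rte_mbuf is laid out as packet_type (4B), pkt_len (4B), data_len (2B), vlan_tci (2B), hash.rss (4B), so u32 lane 3 is the RSS hash, just as u16 lane 5 was the VLAN TCI earlier. A sketch of that correspondence, using a stand-in struct that assumes the standard rte_mbuf field order:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arm_neon.h>

/* stand-in mirroring the rx_descriptor_fields1 region of rte_mbuf */
struct desc_fields {
        uint32_t packet_type;
        uint32_t pkt_len;
        uint16_t data_len;
        uint16_t vlan_tci;
        uint32_t rss_hash;
};

int main(void)
{
        uint32_t cq_w0 = 0xdeadbeef;    /* RSS hash as found in CQE word0 */
        uint32x4_t f = vdupq_n_u32(0);
        struct desc_fields d;

        f = vsetq_lane_u32(cq_w0, f, 3);        /* lane 3 <=> rss_hash */
        memcpy(&d, &f, sizeof(d));              /* one 16B store in the driver */
        printf("rss_hash = 0x%08x\n", d.rss_hash);
        return 0;
}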
                 vst1q_u64((uint64_t *)mbuf2->rearm_data, rearm2);
                 vst1q_u64((uint64_t *)mbuf3->rearm_data, rearm3);
+                /* Mark that there are no more segments */
+                mbuf0->next = NULL;
+                mbuf1->next = NULL;
+                mbuf2->next = NULL;
+                mbuf3->next = NULL;
+
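The explicit next = NULL is needed because the 16-byte rearm store only rewrites the rearm_data region (data_off, refcnt, nb_segs, port, ol_flags in the standard layout); mbuf->next lives outside it and would otherwise keep a stale chain pointer from an earlier multi-segment use. A sketch under that layout assumption (little-endian, stand-in struct, fabricated rearm value):

#include <stdint.h>
#include <stdio.h>
#include <arm_neon.h>

/* stand-in for the head of struct rte_mbuf (assumed layout) */
struct mini_mbuf {
        uint16_t data_off, refcnt, nb_segs, port;
        uint64_t ol_flags;
        struct mini_mbuf *next; /* beyond the 16B rearm region */
};

int main(void)
{
        struct mini_mbuf m = { .next = &m };    /* stale chain pointer */
        /* data_off=128, refcnt=1, nb_segs=1, port=0, packed little-endian */
        uint64x2_t rearm = { 0x0000000100010080ULL, 0 /* ol_flags */ };

        vst1q_u64((uint64_t *)&m, rearm);       /* rewrites first 16B only */
        m.next = NULL;                          /* still required, as above */
        printf("nb_segs=%u next=%p\n", m.nb_segs, (void *)m.next);
        return 0;
}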
                 /* Store the mbufs to rx_pkts */
                 vst1q_u64((uint64_t *)&rx_pkts[packets], mbuf01);
                 vst1q_u64((uint64_t *)&rx_pkts[packets + 2], mbuf23);
                 otx2_prefetch_store_keep(mbuf3);
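mbuf01 and mbuf23 are 2 x 64-bit vectors holding the mbuf pointers themselves, so each vst1q_u64 publishes two entries of rx_pkts at once. A minimal sketch, assuming 64-bit pointers:

#include <stdint.h>
#include <stdio.h>
#include <arm_neon.h>

int main(void)
{
        int a = 1, b = 2;
        void *rx_pkts[2];
        uint64x2_t mbuf01 = { (uint64_t)(uintptr_t)&a,
                              (uint64_t)(uintptr_t)&b };

        vst1q_u64((uint64_t *)rx_pkts, mbuf01); /* two pointers per store */
        printf("%d %d\n", *(int *)rx_pkts[0], *(int *)rx_pkts[1]);
        return 0;
}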
                 /* Mark mempool obj as "get" as it is alloc'ed by NIX */
-                __mempool_check_cookies(mbuf0->pool, (void **)&mbuf0, 1, 1);
-                __mempool_check_cookies(mbuf1->pool, (void **)&mbuf1, 1, 1);
-                __mempool_check_cookies(mbuf2->pool, (void **)&mbuf2, 1, 1);
-                __mempool_check_cookies(mbuf3->pool, (void **)&mbuf3, 1, 1);
+                RTE_MEMPOOL_CHECK_COOKIES(mbuf0->pool, (void **)&mbuf0, 1, 1);
+                RTE_MEMPOOL_CHECK_COOKIES(mbuf1->pool, (void **)&mbuf1, 1, 1);
+                RTE_MEMPOOL_CHECK_COOKIES(mbuf2->pool, (void **)&mbuf2, 1, 1);
+                RTE_MEMPOOL_CHECK_COOKIES(mbuf3->pool, (void **)&mbuf3, 1, 1);
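RTE_MEMPOOL_CHECK_COOKIES is purely a debug hook: without RTE_LIBRTE_MEMPOOL_DEBUG it expands to nothing, and with it enabled it updates the per-object cookie so the allocator's sanity checks agree these objects are out. It is called here because the NIX hardware hands out the mbufs without going through rte_mempool_get(). A usage sketch; the wrapper name is hypothetical:

#include <rte_mbuf.h>
#include <rte_mempool.h>

/* hypothetical wrapper around the driver's pattern */
static inline void
nix_mark_mbuf_alloced(struct rte_mbuf *m)
{
        /* record a "get" for an object handed out by hardware, so the
         * mempool debug cookie checks do not see it as still free */
        RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 1);
}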
                 /* Advance head pointer and packets */
                 head += NIX_DESCS_PER_LOOP; head &= qmask;
         rxq->head = head;
         rxq->available -= packets;
 
-        rte_cio_wmb();
+        rte_io_wmb();
         /* Free all the CQs that we've processed */
         otx2_write64((rxq->wdata | packets), rxq->cq_door);
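The bookkeeping above relies on the completion-queue size being a power of two (qmask = size - 1), and the doorbell write returns the consumed CQE count to hardware with the queue-identifying bits from wdata ORed in. A schematic of that arithmetic; the struct and the otx2_write64() stand-in are illustrative only:

#include <stdint.h>
#include <stdio.h>

struct rxq_state {
        uint32_t head, qmask;   /* qmask = qsize - 1, qsize a power of 2 */
        uint32_t available;
        uint64_t wdata, cq_door;
};

static void consume_cqes(struct rxq_state *q, uint16_t packets)
{
        q->head = (q->head + packets) & q->qmask;       /* wrap-around */
        q->available -= packets;
        /* the driver issues rte_io_wmb() first so all mbuf writes are
         * visible before hardware may recycle the CQ entries */
        q->cq_door = q->wdata | packets;        /* stands in for otx2_write64() */
}

int main(void)
{
        struct rxq_state q = { .head = 1022, .qmask = 1023,
                               .available = 8, .wdata = 1ULL << 32 };

        consume_cqes(&q, 4);    /* head wraps from 1022 to 2 */
        printf("head=%u available=%u door=0x%llx\n", q.head, q.available,
               (unsigned long long)q.cq_door);
        return 0;
}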
         /* When PTP is enabled, the scalar Rx function should be chosen,
          * since most PTP apps are implemented to Rx-burst one packet at
          * a time.
          */
-        if (dev->scalar_ena || dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+        if (dev->scalar_ena || dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
                 pick_rx_func(eth_dev, nix_eth_rx_burst);
         else
                 pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
 
-        if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+        if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
                 pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
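The selection logic reduces to: PTP timestamping (or an explicit scalar request) forces the scalar burst function, and the scatter offload then overrides either choice with the multi-segment variant. A condensed sketch of that decision; the names and bit values are stand-ins for the driver's function pointers and the RTE_ETH_RX_OFFLOAD_* bits:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RX_OFFLOAD_TIMESTAMP (1ULL << 0)        /* stand-in bits */
#define RX_OFFLOAD_SCATTER   (1ULL << 1)

static const char *pick_rx(bool scalar_ena, uint64_t offloads)
{
        const char *fn;

        /* PTP needs the scalar path: apps typically Rx one pkt per burst */
        fn = (scalar_ena || (offloads & RX_OFFLOAD_TIMESTAMP)) ?
             "nix_eth_rx_burst" : "nix_eth_rx_vec_burst";
        /* scattered Rx always takes the multi-segment variant */
        if (offloads & RX_OFFLOAD_SCATTER)
                fn = "nix_eth_rx_burst_mseg";
        return fn;
}

int main(void)
{
        printf("%s\n", pick_rx(false, RX_OFFLOAD_SCATTER));
        return 0;
}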
         /* Copy the multi-seg version with no offloads for the teardown sequence */