nix_vlan_update(const uint64_t w2, uint64_t ol_flags, uint8x16_t *f)
{
if (w2 & BIT_ULL(21) /* vtag0_gone */) {
- ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
*f = vsetq_lane_u16((uint16_t)(w2 >> 32), *f, 5);
}
nix_qinq_update(const uint64_t w2, uint64_t ol_flags, struct rte_mbuf *mbuf)
{
if (w2 & BIT_ULL(23) /* vtag1_gone */) {
- ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
+ ol_flags |= RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_RX_QINQ_STRIPPED;
mbuf->vlan_tci_outer = (uint16_t)(w2 >> 48);
}
f1 = vsetq_lane_u32(cq1_w0, f1, 3);
f2 = vsetq_lane_u32(cq2_w0, f2, 3);
f3 = vsetq_lane_u32(cq3_w0, f3, 3);
- ol_flags0 = PKT_RX_RSS_HASH;
- ol_flags1 = PKT_RX_RSS_HASH;
- ol_flags2 = PKT_RX_RSS_HASH;
- ol_flags3 = PKT_RX_RSS_HASH;
+ ol_flags0 = RTE_MBUF_F_RX_RSS_HASH;
+ ol_flags1 = RTE_MBUF_F_RX_RSS_HASH;
+ ol_flags2 = RTE_MBUF_F_RX_RSS_HASH;
+ ol_flags3 = RTE_MBUF_F_RX_RSS_HASH;
} else {
ol_flags0 = 0; ol_flags1 = 0;
ol_flags2 = 0; ol_flags3 = 0;
vst1q_u64((uint64_t *)mbuf2->rearm_data, rearm2);
vst1q_u64((uint64_t *)mbuf3->rearm_data, rearm3);
+ /* Mark that there are no more segments */
+ mbuf0->next = NULL;
+ mbuf1->next = NULL;
+ mbuf2->next = NULL;
+ mbuf3->next = NULL;
+
/* Store the mbufs to rx_pkts */
vst1q_u64((uint64_t *)&rx_pkts[packets], mbuf01);
vst1q_u64((uint64_t *)&rx_pkts[packets + 2], mbuf23);
otx2_prefetch_store_keep(mbuf3);
/* Mark mempool obj as "get" as it is alloc'ed by NIX */
- __mempool_check_cookies(mbuf0->pool, (void **)&mbuf0, 1, 1);
- __mempool_check_cookies(mbuf1->pool, (void **)&mbuf1, 1, 1);
- __mempool_check_cookies(mbuf2->pool, (void **)&mbuf2, 1, 1);
- __mempool_check_cookies(mbuf3->pool, (void **)&mbuf3, 1, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf0->pool, (void **)&mbuf0, 1, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf1->pool, (void **)&mbuf1, 1, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf2->pool, (void **)&mbuf2, 1, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf3->pool, (void **)&mbuf3, 1, 1);
/* Advance head pointer and packets */
head += NIX_DESCS_PER_LOOP; head &= qmask;
rxq->head = head;
rxq->available -= packets;
- rte_cio_wmb();
+ rte_io_wmb();
/* Free all the CQs that we've processed */
otx2_write64((rxq->wdata | packets), rxq->cq_door);
#endif
-#define R(name, f5, f4, f3, f2, f1, f0, flags) \
-static uint16_t __rte_noinline __hot \
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+static uint16_t __rte_noinline __rte_hot \
otx2_nix_recv_pkts_ ## name(void *rx_queue, \
struct rte_mbuf **rx_pkts, uint16_t pkts) \
{ \
return nix_recv_pkts(rx_queue, rx_pkts, pkts, (flags)); \
} \
\
-static uint16_t __rte_noinline __hot \
+static uint16_t __rte_noinline __rte_hot \
otx2_nix_recv_pkts_mseg_ ## name(void *rx_queue, \
struct rte_mbuf **rx_pkts, uint16_t pkts) \
{ \
(flags) | NIX_RX_MULTI_SEG_F); \
} \
\
-static uint16_t __rte_noinline __hot \
+static uint16_t __rte_noinline __rte_hot \
otx2_nix_recv_pkts_vec_ ## name(void *rx_queue, \
struct rte_mbuf **rx_pkts, uint16_t pkts) \
{ \
static inline void
pick_rx_func(struct rte_eth_dev *eth_dev,
- const eth_rx_burst_t rx_burst[2][2][2][2][2][2])
+ const eth_rx_burst_t rx_burst[2][2][2][2][2][2][2])
{
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
- /* [TSTMP] [MARK] [VLAN] [CKSUM] [PTYPE] [RSS] */
+ /* [SEC] [TSTMP] [MARK] [VLAN] [CKSUM] [PTYPE] [RSS] */
eth_dev->rx_pkt_burst = rx_burst
+ [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_SECURITY_F)]
[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_TSTAMP_F)]
[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
{
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
- const eth_rx_burst_t nix_eth_rx_burst[2][2][2][2][2][2] = {
-#define R(name, f5, f4, f3, f2, f1, f0, flags) \
- [f5][f4][f3][f2][f1][f0] = otx2_nix_recv_pkts_ ## name,
+ const eth_rx_burst_t nix_eth_rx_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = otx2_nix_recv_pkts_ ## name,
NIX_RX_FASTPATH_MODES
#undef R
};
- const eth_rx_burst_t nix_eth_rx_burst_mseg[2][2][2][2][2][2] = {
-#define R(name, f5, f4, f3, f2, f1, f0, flags) \
- [f5][f4][f3][f2][f1][f0] = otx2_nix_recv_pkts_mseg_ ## name,
+ const eth_rx_burst_t nix_eth_rx_burst_mseg[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = otx2_nix_recv_pkts_mseg_ ## name,
NIX_RX_FASTPATH_MODES
#undef R
};
- const eth_rx_burst_t nix_eth_rx_vec_burst[2][2][2][2][2][2] = {
-#define R(name, f5, f4, f3, f2, f1, f0, flags) \
- [f5][f4][f3][f2][f1][f0] = otx2_nix_recv_pkts_vec_ ## name,
+ const eth_rx_burst_t nix_eth_rx_vec_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = otx2_nix_recv_pkts_vec_ ## name,
NIX_RX_FASTPATH_MODES
#undef R
/* For PTP enabled, scalar rx function should be chosen as most of the
* PTP apps are implemented to rx burst 1 pkt.
*/
- if (dev->scalar_ena || dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ if (dev->scalar_ena || dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
pick_rx_func(eth_dev, nix_eth_rx_burst);
else
pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
- if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
/* Copy multi seg version with no offload for tear down sequence */
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
dev->rx_pkt_burst_no_offload =
- nix_eth_rx_burst_mseg[0][0][0][0][0][0];
+ nix_eth_rx_burst_mseg[0][0][0][0][0][0][0];
rte_mb();
}