/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */
#include "otx2_ethdev.h"

#define NIX_DESCS_PER_LOOP	4
#define CQE_CAST(x)		((struct nix_cqe_hdr_s *)(x))
#define CQE_SZ(x)		((x) * NIX_CQ_ENTRY_SZ)
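
/*
 * Return how many completions can be consumed by this burst: use the cached
 * available count when it covers the request, otherwise refresh it from a
 * CQ_OP_STATUS head/tail snapshot and clamp the result to the request size.
 */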
static inline uint16_t
nix_rx_nb_pkts(struct otx2_eth_rxq *rxq, const uint64_t wdata,
	       const uint16_t pkts, const uint32_t qmask)
{
	uint32_t available = rxq->available;

	/* Update the available count if cached value is not enough */
	if (unlikely(available < pkts)) {
		uint64_t reg, head, tail;

		/* Use LDADDA version to avoid reorder */
		reg = otx2_atomic64_add_sync(wdata, rxq->cq_status);
		/* CQ_OP_STATUS operation error */
		if (reg & BIT_ULL(CQ_OP_STAT_OP_ERR) ||
		    reg & BIT_ULL(CQ_OP_STAT_CQ_ERR))
			return 0;

		tail = reg & 0xFFFFF;
		head = (reg >> 20) & 0xFFFFF;
		if (tail < head)
			available = tail - head + qmask + 1;
		else
			available = tail - head;

		rxq->available = available;
	}

	return RTE_MIN(pkts, available);
}
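
/*
 * Scalar Rx burst routine: walk the completion queue, convert each CQE to an
 * mbuf according to the compile-time 'flags' offload mask, then ring the CQ
 * doorbell to release the processed entries back to hardware.
 */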
static __rte_always_inline uint16_t
nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	      uint16_t pkts, const uint16_t flags)
{
	struct otx2_eth_rxq *rxq = rx_queue;
	const uint64_t mbuf_init = rxq->mbuf_initializer;
	const void *lookup_mem = rxq->lookup_mem;
	const uint64_t data_off = rxq->data_off;
	const uintptr_t desc = rxq->desc;
	const uint64_t wdata = rxq->wdata;
	const uint32_t qmask = rxq->qmask;
	uint16_t packets = 0, nb_pkts;
	uint32_t head = rxq->head;
	struct nix_cqe_hdr_s *cq;
	struct rte_mbuf *mbuf;

	nb_pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);
	while (packets < nb_pkts) {
		/* Prefetch N desc ahead */
		rte_prefetch_non_temporal((void *)(desc + (CQE_SZ(head + 2))));
		cq = (struct nix_cqe_hdr_s *)(desc + CQE_SZ(head));

		mbuf = nix_get_mbuf_from_cqe(cq, data_off);

		otx2_nix_cqe_to_mbuf(cq, cq->tag, mbuf, lookup_mem, mbuf_init,
				     flags);
		otx2_nix_mbuf_to_tstamp(mbuf, rxq->tstamp, flags);
		rx_pkts[packets++] = mbuf;
		otx2_prefetch_store_keep(mbuf);
		head++;
		head &= qmask;
	}

	rxq->head = head;
	rxq->available -= nb_pkts;

	/* Free all the CQs that we've processed */
	otx2_write64((wdata | nb_pkts), rxq->cq_door);

	return nb_pkts;
}
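
/*
 * Expand one single-segment and one multi-segment Rx burst function per
 * offload combination listed in NIX_RX_FASTPATH_MODES. Each variant calls
 * nix_recv_pkts() with a constant 'flags' mask so the compiler can drop the
 * unused offload paths.
 */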
#define R(name, f5, f4, f3, f2, f1, f0, flags)				       \
static uint16_t __rte_noinline	__hot					       \
otx2_nix_recv_pkts_ ## name(void *rx_queue,				       \
			struct rte_mbuf **rx_pkts, uint16_t pkts)	       \
{									       \
	return nix_recv_pkts(rx_queue, rx_pkts, pkts, (flags));	       \
}									       \
									       \
static uint16_t __rte_noinline	__hot					       \
otx2_nix_recv_pkts_mseg_ ## name(void *rx_queue,			       \
			struct rte_mbuf **rx_pkts, uint16_t pkts)	       \
{									       \
	return nix_recv_pkts(rx_queue, rx_pkts, pkts,			       \
			     (flags) | NIX_RX_MULTI_SEG_F);		       \
}

NIX_RX_FASTPATH_MODES
#undef R
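
/*
 * Select the Rx burst handler matching the currently enabled Rx offload
 * flags by indexing the caller-supplied [TSTMP][MARK][VLAN][CKSUM][PTYPE]
 * [RSS] lookup table.
 */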
static inline void
pick_rx_func(struct rte_eth_dev *eth_dev,
	     const eth_rx_burst_t rx_burst[2][2][2][2][2][2])
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

	/* [TSTMP] [MARK] [VLAN] [CKSUM] [PTYPE] [RSS] */
	eth_dev->rx_pkt_burst = rx_burst
		[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_TSTAMP_F)]
		[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
		[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
		[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_CHECKSUM_F)]
		[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_PTYPE_F)]
		[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_RSS_F)];
}
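
/*
 * Install the Rx burst function for this port: build the single-segment and
 * multi-segment lookup tables from NIX_RX_FASTPATH_MODES, pick the variant
 * matching the configured offloads, and keep a no-offload multi-segment
 * handler around for the teardown sequence.
 */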
void
otx2_eth_set_rx_function(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

	const eth_rx_burst_t nix_eth_rx_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
	[f5][f4][f3][f2][f1][f0] =  otx2_nix_recv_pkts_ ## name,

NIX_RX_FASTPATH_MODES
#undef R
	};

	const eth_rx_burst_t nix_eth_rx_burst_mseg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
	[f5][f4][f3][f2][f1][f0] =  otx2_nix_recv_pkts_mseg_ ## name,

NIX_RX_FASTPATH_MODES
#undef R
	};

	pick_rx_func(eth_dev, nix_eth_rx_burst);

	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
		pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);

	/* Copy multi seg version with no offload for tear down sequence */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		dev->rx_pkt_burst_no_offload =
			nix_eth_rx_burst_mseg[0][0][0][0][0][0];
}
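
/*
 * Usage sketch (illustrative only, not part of the driver): once
 * otx2_eth_set_rx_function() has installed eth_dev->rx_pkt_burst, an
 * application drains the queue through the generic ethdev burst API,
 * e.g. with hypothetical port_id/queue_id values:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *
 * rte_eth_rx_burst() then dispatches to whichever per-offload variant was
 * selected above.
 */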