/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#ifndef __CN9K_RX_H__
#define __CN9K_RX_H__

#include <rte_ether.h>
#include <rte_vect.h>

#define NIX_RX_OFFLOAD_NONE	     (0)
#define NIX_RX_OFFLOAD_RSS_F	     BIT(0)
#define NIX_RX_OFFLOAD_PTYPE_F	     BIT(1)
#define NIX_RX_OFFLOAD_CHECKSUM_F    BIT(2)
#define NIX_RX_OFFLOAD_MARK_UPDATE_F BIT(3)

/* Flags to control cqe_to_mbuf conversion function.
 * Defined from the MSB end to denote that they are
 * not used as offload flags when picking the Rx function.
 */
#define NIX_RX_MULTI_SEG_F BIT(15)

#define CNXK_NIX_CQ_ENTRY_SZ 128
#define NIX_DESCS_PER_LOOP   4
#define CQE_CAST(x)	     ((struct nix_cqe_hdr_s *)(x))
#define CQE_SZ(x)	     ((x) * CNXK_NIX_CQ_ENTRY_SZ)

union mbuf_initializer {
	struct {
		uint16_t data_off;
		uint16_t refcnt;
		uint16_t nb_segs;
		uint16_t port;
	} fields;
	uint64_t value;
};

static __rte_always_inline uint64_t
nix_clear_data_off(uint64_t oldval)
{
	union mbuf_initializer mbuf_init = {.value = oldval};

	mbuf_init.fields.data_off = 0;
	return mbuf_init.value;
}
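
/*
 * Worked example (illustrative, assuming the little-endian layout above
 * where data_off is the low 16 bits of the rearm word): with port = 0,
 * nb_segs = 1, refcnt = 1 and data_off = 128, the value
 * 0x0000000100010080 becomes 0x0000000100010000 after the clear.
 */
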
static __rte_always_inline struct rte_mbuf *
nix_get_mbuf_from_cqe(void *cq, const uint64_t data_off)
{
	rte_iova_t buff;

	/* Skip CQE, NIX_RX_PARSE_S and SG HDR (9 DWORDs) and peek buff addr */
	buff = *((rte_iova_t *)((uint64_t *)cq + 9));
	return (struct rte_mbuf *)(buff - data_off);
}
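
/*
 * Note: the subtraction above relies on data_off being the constant
 * distance from the start of the mbuf to the packet buffer handed to the
 * NIX (mbuf header, private area and headroom), so the mbuf pointer is
 * recovered from the buffer address without any per-packet lookup.
 */
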
static __rte_always_inline uint32_t
nix_ptype_get(const void *const lookup_mem, const uint64_t in)
{
	const uint16_t *const ptype = lookup_mem;
	const uint16_t lh_lg_lf = (in & 0xFFF0000000000000) >> 52;
	const uint16_t tu_l2 = ptype[(in & 0x000FFFF000000000) >> 36];
	const uint16_t il4_tu = ptype[PTYPE_NON_TUNNEL_ARRAY_SZ + lh_lg_lf];

	return (il4_tu << PTYPE_NON_TUNNEL_WIDTH) | tu_l2;
}
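
/*
 * Both lookups above index a single software table: entries
 * [0, PTYPE_NON_TUNNEL_ARRAY_SZ) translate the outer layer-type bits of
 * the parse word (w1 bits [51:36]) into the L2..tunnel portion of the
 * ptype (tu_l2), and the entries after that translate the LF/LG/LH layer
 * types (w1 bits [63:52]) into the tunnel..inner-L4 portion (il4_tu);
 * the two halves are then OR-ed into one RTE_PTYPE_* value.
 */
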
static __rte_always_inline uint32_t
nix_rx_olflags_get(const void *const lookup_mem, const uint64_t in)
{
	const uint32_t *const ol_flags =
		(const uint32_t *)((const uint8_t *)lookup_mem +
				   PTYPE_ARRAY_SZ);

	return ol_flags[(in & 0xfff00000) >> 20];
}

static inline uint64_t
nix_update_match_id(const uint16_t match_id, uint64_t ol_flags,
		    struct rte_mbuf *mbuf)
{
	/* There is no separate bit to check whether match_id is valid,
	 * and no flag to tell an RTE_FLOW_ACTION_TYPE_FLAG action apart
	 * from an RTE_FLOW_ACTION_TYPE_MARK action. The former is
	 * addressed by treating 0 as an invalid match_id and by
	 * incrementing/decrementing the match_id pair when MARK is
	 * activated. The latter is addressed by reserving
	 * CNXK_FLOW_ACTION_FLAG_DEFAULT as the match_id for
	 * RTE_FLOW_ACTION_TYPE_FLAG. This translates to not using
	 * CNXK_FLOW_ACTION_FLAG_DEFAULT - 1 and
	 * CNXK_FLOW_ACTION_FLAG_DEFAULT for match_id, i.e. valid mark
	 * IDs range from 0 to CNXK_FLOW_ACTION_FLAG_DEFAULT - 2.
	 */
	if (likely(match_id)) {
		ol_flags |= PKT_RX_FDIR;
		if (match_id != CNXK_FLOW_ACTION_FLAG_DEFAULT) {
			ol_flags |= PKT_RX_FDIR_ID;
			mbuf->hash.fdir.hi = match_id - 1;
		}
	}

	return ol_flags;
}
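
/*
 * Worked example (illustrative): a MARK action programmed with id 5
 * arrives as match_id = 6, so the code above reports
 * mbuf->hash.fdir.hi = 5 with PKT_RX_FDIR | PKT_RX_FDIR_ID set, while a
 * FLAG action arrives as CNXK_FLOW_ACTION_FLAG_DEFAULT and only
 * PKT_RX_FDIR is set.
 */
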
static __rte_always_inline void
nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
		    uint64_t rearm)
{
	const rte_iova_t *iova_list;
	struct rte_mbuf *head;
	const rte_iova_t *eol;
	uint8_t nb_segs;
	uint64_t sg;

	sg = *(const uint64_t *)(rx + 1);
	nb_segs = (sg >> 48) & 0x3;
	mbuf->nb_segs = nb_segs;
	mbuf->data_len = sg & 0xFFFF;
	sg = sg >> 16;

	eol = ((const rte_iova_t *)(rx + 1) +
	       ((rx->cn9k.desc_sizem1 + 1) << 1));
	/* Skip SG_S and first IOVA */
	iova_list = ((const rte_iova_t *)(rx + 1)) + 2;
	nb_segs--;

	rearm = rearm & ~0xFFFF;

	head = mbuf;
	while (nb_segs) {
		mbuf->next = ((struct rte_mbuf *)*iova_list) - 1;
		mbuf = mbuf->next;

		/* Mark mempool obj as "get" as it is alloc'ed by NIX */
		__mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);

		mbuf->data_len = sg & 0xFFFF;
		sg = sg >> 16;
		*(uint64_t *)(&mbuf->rearm_data) = rearm;
		nb_segs--;
		iova_list++;

		if (!nb_segs && (iova_list + 1 < eol)) {
			sg = *(const uint64_t *)(iova_list);
			nb_segs = (sg >> 48) & 0x3;
			head->nb_segs += nb_segs;
			iova_list = (const rte_iova_t *)(iova_list + 1);
		}
	}
	mbuf->next = NULL;
}
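
/*
 * Layout sketch (illustrative): each SG word packs up to three 16-bit
 * segment lengths in bits [47:0] and the segment count in bits [49:48],
 * and is followed by one IOVA per segment. A packet with more than three
 * segments continues with another SG word, which the trailing if-block
 * above picks up until eol is reached.
 */
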
static __rte_always_inline void
cn9k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
		     struct rte_mbuf *mbuf, const void *lookup_mem,
		     const uint64_t val, const uint16_t flag)
{
	const union nix_rx_parse_u *rx =
		(const union nix_rx_parse_u *)((const uint64_t *)cq + 1);
	const uint16_t len = rx->cn9k.pkt_lenm1 + 1;
	const uint64_t w1 = *(const uint64_t *)rx;
	uint64_t ol_flags = 0;

	/* Mark mempool obj as "get" as it is alloc'ed by NIX */
	__mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);

	if (flag & NIX_RX_OFFLOAD_PTYPE_F)
		mbuf->packet_type = nix_ptype_get(lookup_mem, w1);
	else
		mbuf->packet_type = 0;

	if (flag & NIX_RX_OFFLOAD_RSS_F) {
		mbuf->hash.rss = tag;
		ol_flags |= PKT_RX_RSS_HASH;
	}

	if (flag & NIX_RX_OFFLOAD_CHECKSUM_F)
		ol_flags |= nix_rx_olflags_get(lookup_mem, w1);

	if (flag & NIX_RX_OFFLOAD_MARK_UPDATE_F)
		ol_flags =
			nix_update_match_id(rx->cn9k.match_id, ol_flags, mbuf);

	mbuf->ol_flags = ol_flags;
	*(uint64_t *)(&mbuf->rearm_data) = val;
	mbuf->pkt_len = len;

	if (flag & NIX_RX_MULTI_SEG_F) {
		nix_cqe_xtract_mseg(rx, mbuf, val);
	} else {
		mbuf->data_len = len;
		mbuf->next = NULL;
	}
}
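
/*
 * Note: every caller passes a compile-time constant for flag and the
 * function is force-inlined, so the compiler discards the untaken
 * branches above and each Rx fastpath variant carries only the offload
 * code it was built for.
 */
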
static inline uint16_t
nix_rx_nb_pkts(struct cn9k_eth_rxq *rxq, const uint64_t wdata,
	       const uint16_t pkts, const uint32_t qmask)
{
	uint32_t available = rxq->available;

	/* Update the available count if cached value is not enough */
	if (unlikely(available < pkts)) {
		uint64_t reg, head, tail;

		/* Use LDADDA version to avoid reorder */
		reg = roc_atomic64_add_sync(wdata, rxq->cq_status);
		/* CQ_OP_STATUS operation error */
		if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
		    reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
			return 0;

		tail = reg & 0xFFFFF;
		head = (reg >> 20) & 0xFFFFF;
		if (tail < head)
			available = tail - head + qmask + 1;
		else
			available = tail - head;

		rxq->available = available;
	}

	return RTE_MIN(pkts, available);
}
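
/*
 * Worked example (illustrative): with qmask = 1023, head = 1020 and
 * tail = 5 the CQ ring has wrapped, so
 * available = 5 - 1020 + 1023 + 1 = 9 pending CQEs.
 */
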
static __rte_always_inline uint16_t
cn9k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
		   const uint16_t flags)
{
	struct cn9k_eth_rxq *rxq = rx_queue;
	const uint64_t mbuf_init = rxq->mbuf_initializer;
	const void *lookup_mem = rxq->lookup_mem;
	const uint64_t data_off = rxq->data_off;
	const uintptr_t desc = rxq->desc;
	const uint64_t wdata = rxq->wdata;
	const uint32_t qmask = rxq->qmask;
	uint16_t packets = 0, nb_pkts;
	uint32_t head = rxq->head;
	struct nix_cqe_hdr_s *cq;
	struct rte_mbuf *mbuf;

	nb_pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);

	while (packets < nb_pkts) {
		/* Prefetch N desc ahead */
		rte_prefetch_non_temporal(
			(void *)(desc + (CQE_SZ((head + 2) & qmask))));
		cq = (struct nix_cqe_hdr_s *)(desc + CQE_SZ(head));

		mbuf = nix_get_mbuf_from_cqe(cq, data_off);

		cn9k_nix_cqe_to_mbuf(cq, cq->tag, mbuf, lookup_mem, mbuf_init,
				     flags);
		rx_pkts[packets++] = mbuf;
		roc_prefetch_store_keep(mbuf);
		head++;
		head &= qmask;
	}

	rxq->head = head;
	rxq->available -= nb_pkts;

	/* Free all the CQs that we've processed */
	plt_write64((wdata | nb_pkts), rxq->cq_door);

	return nb_pkts;
}
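
/*
 * Usage sketch (illustrative): applications do not call this helper
 * directly; the R() variants declared below pass a compile-time flags
 * value and are installed as the device's rx_pkt_burst callback. A
 * direct call from driver code would look like:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb = cn9k_nix_recv_pkts(rxq, pkts, 32,
 *					 NIX_RX_OFFLOAD_RSS_F);
 */
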
#define RSS_F	NIX_RX_OFFLOAD_RSS_F
#define PTYPE_F NIX_RX_OFFLOAD_PTYPE_F
#define CKSUM_F NIX_RX_OFFLOAD_CHECKSUM_F
#define MARK_F	NIX_RX_OFFLOAD_MARK_UPDATE_F

/* [MARK] [CKSUM] [PTYPE] [RSS] */
#define NIX_RX_FASTPATH_MODES						       \
R(no_offload,		0, 0, 0, 0, NIX_RX_OFFLOAD_NONE)		       \
R(rss,			0, 0, 0, 1, RSS_F)				       \
R(ptype,		0, 0, 1, 0, PTYPE_F)				       \
R(ptype_rss,		0, 0, 1, 1, PTYPE_F | RSS_F)			       \
R(cksum,		0, 1, 0, 0, CKSUM_F)				       \
R(cksum_rss,		0, 1, 0, 1, CKSUM_F | RSS_F)			       \
R(cksum_ptype,		0, 1, 1, 0, CKSUM_F | PTYPE_F)			       \
R(cksum_ptype_rss,	0, 1, 1, 1, CKSUM_F | PTYPE_F | RSS_F)		       \
R(mark,			1, 0, 0, 0, MARK_F)				       \
R(mark_rss,		1, 0, 0, 1, MARK_F | RSS_F)			       \
R(mark_ptype,		1, 0, 1, 0, MARK_F | PTYPE_F)			       \
R(mark_ptype_rss,	1, 0, 1, 1, MARK_F | PTYPE_F | RSS_F)		       \
R(mark_cksum,		1, 1, 0, 0, MARK_F | CKSUM_F)			       \
R(mark_cksum_rss,	1, 1, 0, 1, MARK_F | CKSUM_F | RSS_F)		       \
R(mark_cksum_ptype,	1, 1, 1, 0, MARK_F | CKSUM_F | PTYPE_F)		       \
R(mark_cksum_ptype_rss,	1, 1, 1, 1, MARK_F | CKSUM_F | PTYPE_F | RSS_F)

#define R(name, f3, f2, f1, f0, flags)					       \
	uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_##name(	       \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);     \
									       \
	uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_mseg_##name(      \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);

NIX_RX_FASTPATH_MODES
#undef R
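
/*
 * Expansion example: the R(rss, 0, 0, 0, 1, RSS_F) entry above declares
 * the pair
 *
 *	uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_rss(
 *		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);
 *	uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_mseg_rss(
 *		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);
 *
 * whose out-of-line definitions are expected to invoke
 * cn9k_nix_recv_pkts() with RSS_F (plus NIX_RX_MULTI_SEG_F for the mseg
 * variant) as the compile-time flags.
 */
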
#endif /* __CN9K_RX_H__ */