/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#ifndef __OTX2_TX_H__
#define __OTX2_TX_H__

#define NIX_TX_OFFLOAD_NONE		(0)
#define NIX_TX_OFFLOAD_L3_L4_CSUM_F	BIT(0)
#define NIX_TX_OFFLOAD_OL3_OL4_CSUM_F	BIT(1)
#define NIX_TX_OFFLOAD_VLAN_QINQ_F	BIT(2)
#define NIX_TX_OFFLOAD_MBUF_NOFF_F	BIT(3)
#define NIX_TX_OFFLOAD_TSTAMP_F		BIT(4)

/* Flags to control the xmit_prepare function.
 * Defined from the top of the bit range to denote that it is
 * not used as an offload flag when picking the Tx function.
 */
#define NIX_TX_MULTI_SEG_F		BIT(15)

#define NIX_TX_NEED_SEND_HDR_W1 \
	(NIX_TX_OFFLOAD_L3_L4_CSUM_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F | \
	 NIX_TX_OFFLOAD_VLAN_QINQ_F)

#define NIX_TX_NEED_EXT_HDR \
	(NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSTAMP_F)

/* Function to determine the number of extra Tx subdescriptors required
 * when the extended subdescriptor is enabled.
 */
static __rte_always_inline int
otx2_nix_tx_ext_subs(const uint16_t flags)
{
	return (flags & NIX_TX_OFFLOAD_TSTAMP_F) ? 2 :
		((flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) ? 1 : 0);
}

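/* For reference, the extra subdescriptors selected above map onto the LMTST
 * command roughly as follows (a sketch inferred from the flag handling in
 * this file, not an exhaustive layout description):
 *
 *   no VLAN/TSTAMP : [SEND_HDR][SG..]                      -> 0 extra
 *   VLAN/QinQ      : [SEND_HDR][SEND_EXT][SG..]            -> 1 extra
 *   TSTAMP         : [SEND_HDR][SEND_EXT][SG..][SEND_MEM]  -> 2 extra
 */
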
static __rte_always_inline void
otx2_nix_xmit_prepare_tstamp(uint64_t *cmd, const uint64_t *send_mem_desc,
			     const uint64_t ol_flags, const uint16_t no_segdw,
			     const uint16_t flags)
{
	if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
		struct nix_send_mem_s *send_mem;
		uint16_t off = (no_segdw - 1) << 1;
		const uint8_t is_ol_tstamp = !(ol_flags & PKT_TX_IEEE1588_TMST);

		send_mem = (struct nix_send_mem_s *)(cmd + off);
		if (flags & NIX_TX_MULTI_SEG_F) {
			/* Retrieving the default desc values */
			cmd[off] = send_mem_desc[6];

			/* Using a compiler barrier to avoid violation of C
			 * aliasing rules.
			 */
			rte_compiler_barrier();
		}

		/* For packets for which PKT_TX_IEEE1588_TMST is not set, the
		 * Tx tstamp should not be recorded, hence change the alg type
		 * to NIX_SENDMEMALG_SET and point the send mem addr field to
		 * the next 8 bytes so that it does not corrupt the actual
		 * registered Tx tstamp address.
		 */
		send_mem->alg = NIX_SENDMEMALG_SETTSTMP - (is_ol_tstamp);

		send_mem->addr = (rte_iova_t)((uint64_t *)send_mem_desc[7] +
					      (is_ol_tstamp));
	}
}

static __rte_always_inline uint64_t
otx2_pktmbuf_detach(struct rte_mbuf *m)
{
	struct rte_mempool *mp = m->pool;
	uint32_t mbuf_size, buf_len;
	struct rte_mbuf *md;
	uint16_t priv_size;
	uint16_t refcount;

	/* Update refcount of direct mbuf */
	md = rte_mbuf_from_indirect(m);
	refcount = rte_mbuf_refcnt_update(md, -1);

	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
	buf_len = rte_pktmbuf_data_room_size(mp);

	m->priv_size = priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
	m->buf_len = (uint16_t)buf_len;
	rte_pktmbuf_reset_headroom(m);
	m->data_len = 0;
	m->ol_flags = 0;

	/* Now indirect mbuf is safe to free */
	rte_pktmbuf_free(m);

	if (refcount == 0) {
		rte_mbuf_refcnt_set(md, 1);
		return 0;
	}

	return 1;
}

static __rte_always_inline uint64_t
otx2_nix_prefree_seg(struct rte_mbuf *m)
{
	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
		if (!RTE_MBUF_DIRECT(m))
			return otx2_pktmbuf_detach(m);

		m->next = NULL;
		return 0;
	} else if (rte_mbuf_refcnt_update(m, -1) == 0) {
		if (!RTE_MBUF_DIRECT(m))
			return otx2_pktmbuf_detach(m);

		rte_mbuf_refcnt_set(m, 1);
		m->next = NULL;
		return 0;
	}

	/* Mbuf has a refcount greater than 1, so it need not be freed */
	return 1;
}

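/* Return value convention for the two helpers above, as consumed by the
 * SEND descriptor setup below: 0 means NIX hardware may free the buffer
 * after transmit (DF/invert-DF left clear), 1 means software still holds
 * a reference and hardware must not free it.
 */
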
static __rte_always_inline void
otx2_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
{
	struct nix_send_ext_s *send_hdr_ext;
	struct nix_send_hdr_s *send_hdr;
	uint64_t ol_flags = 0, mask;
	union nix_send_hdr_w1_u w1;
	union nix_send_sg_s *sg;

	send_hdr = (struct nix_send_hdr_s *)cmd;
	if (flags & NIX_TX_NEED_EXT_HDR) {
		send_hdr_ext = (struct nix_send_ext_s *)(cmd + 2);
		sg = (union nix_send_sg_s *)(cmd + 4);
		/* Clear previous markings */
		send_hdr_ext->w0.lso = 0;
		send_hdr_ext->w1.u = 0;
	} else {
		sg = (union nix_send_sg_s *)(cmd + 2);
	}

	if (flags & NIX_TX_NEED_SEND_HDR_W1) {
		ol_flags = m->ol_flags;
		w1.u = 0;
	}

	if (!(flags & NIX_TX_MULTI_SEG_F)) {
		send_hdr->w0.total = m->data_len;
		send_hdr->w0.aura =
			npa_lf_aura_handle_to_aura(m->pool->pool_id);
	}

	/* L3type:  2 => IPV4
	 *          3 => IPV4 with csum
	 *          4 => IPV6
	 * L3type and L3ptr need to be set for either
	 * L3 csum or L4 csum or LSO.
	 */

	if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
	    (flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F)) {
		const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
		const uint8_t ol3type =
			((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
			((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
			!!(ol_flags & PKT_TX_OUTER_IP_CKSUM);

		/* Outer L3 */
		w1.ol3type = ol3type;
		mask = 0xffffull << ((!!ol3type) << 4);
		w1.ol3ptr = ~mask & m->outer_l2_len;
		w1.ol4ptr = ~mask & (w1.ol3ptr + m->outer_l3_len);

		/* Outer L4 */
		w1.ol4type = csum + (csum << 1);

		/* Inner L3 */
		w1.il3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
			((!!(ol_flags & PKT_TX_IPV6)) << 2);
		w1.il3ptr = w1.ol4ptr + m->l2_len;
		w1.il4ptr = w1.il3ptr + m->l3_len;
		/* Increment it by 1 if it is IPV4 as 3 is with csum */
		w1.il3type = w1.il3type + !!(ol_flags & PKT_TX_IP_CKSUM);

		/* Inner L4 */
		w1.il4type = (ol_flags & PKT_TX_L4_MASK) >> 52;

		/* In case of no tunnel header, shift the IL3/IL4 fields
		 * into the OL3/OL4 positions so they are used for the
		 * header checksum.
		 */
		mask = !ol3type;
		w1.u = ((w1.u & 0xFFFFFFFF00000000) >> (mask << 3)) |
			((w1.u & 0X00000000FFFFFFFF) >> (mask << 4));

	} else if (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) {
		const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
		const uint8_t outer_l2_len = m->outer_l2_len;

		/* Outer L3 */
		w1.ol3ptr = outer_l2_len;
		w1.ol4ptr = outer_l2_len + m->outer_l3_len;
		/* Increment it by 1 if it is IPV4 as 3 is with csum */
		w1.ol3type = ((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
			((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
			!!(ol_flags & PKT_TX_OUTER_IP_CKSUM);

		/* Outer L4 */
		w1.ol4type = csum + (csum << 1);

	} else if (flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) {
		const uint8_t l2_len = m->l2_len;

		/* Always use OLXPTR and OLXTYPE when only
		 * one header is present.
		 */

		/* Inner L3 */
		w1.ol3ptr = l2_len;
		w1.ol4ptr = l2_len + m->l3_len;
		/* Increment it by 1 if it is IPV4 as 3 is with csum */
		w1.ol3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
			((!!(ol_flags & PKT_TX_IPV6)) << 2) +
			!!(ol_flags & PKT_TX_IP_CKSUM);

		/* Inner L4 */
		w1.ol4type = (ol_flags & PKT_TX_L4_MASK) >> 52;
	}

	if (flags & NIX_TX_NEED_EXT_HDR &&
	    flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
		send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & PKT_TX_VLAN);
		/* HW will update ptr after vlan0 update */
		send_hdr_ext->w1.vlan1_ins_ptr = 12;
		send_hdr_ext->w1.vlan1_ins_tci = m->vlan_tci;

		send_hdr_ext->w1.vlan0_ins_ena = !!(ol_flags & PKT_TX_QINQ);
		/* 2B before end of l2 header */
		send_hdr_ext->w1.vlan0_ins_ptr = 12;
		send_hdr_ext->w1.vlan0_ins_tci = m->vlan_tci_outer;
	}

	if (flags & NIX_TX_NEED_SEND_HDR_W1)
		send_hdr->w1.u = w1.u;

	if (!(flags & NIX_TX_MULTI_SEG_F)) {
		sg->seg1_size = m->data_len;
		*(rte_iova_t *)(++sg) = rte_mbuf_data_iova(m);

		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
			/* DF bit = 1 if refcount of current mbuf or parent
			 * mbuf is greater than 1.
			 * DF bit = 0 otherwise.
			 */
			send_hdr->w0.df = otx2_nix_prefree_seg(m);
		}
		/* Mark mempool object as "put" since it is freed by NIX */
		if (!send_hdr->w0.df)
			__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
	}
}

static __rte_always_inline void
otx2_nix_xmit_one(uint64_t *cmd, void *lmt_addr,
		  const rte_iova_t io_addr, const uint32_t flags)
{
	uint64_t lmt_status;

	do {
		otx2_lmt_mov(lmt_addr, cmd, otx2_nix_tx_ext_subs(flags));
		lmt_status = otx2_lmt_submit(io_addr);
	} while (lmt_status == 0);
}

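/* Usage sketch (illustrative only, not part of this header): the driver's
 * single-segment burst routine composes the helpers above roughly as below,
 * assuming a Tx queue with lmt_addr/io_addr and a default command template
 * in txq->cmd[]:
 *
 *	for (i = 0; i < pkts; i++) {
 *		otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags);
 *		otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
 *					     tx_pkts[i]->ol_flags, 4, flags);
 *		otx2_nix_xmit_one(cmd, txq->lmt_addr, txq->io_addr, flags);
 *	}
 */
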
static __rte_always_inline uint16_t
otx2_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
{
	struct nix_send_hdr_s *send_hdr;
	union nix_send_sg_s *sg;
	struct rte_mbuf *m_next;
	uint64_t *slist, sg_u;
	uint64_t nb_segs;
	uint64_t segdw;
	uint8_t off, i = 0;

	send_hdr = (struct nix_send_hdr_s *)cmd;
	send_hdr->w0.total = m->pkt_len;
	send_hdr->w0.aura = npa_lf_aura_handle_to_aura(m->pool->pool_id);

	if (flags & NIX_TX_NEED_EXT_HDR)
		off = 2;
	else
		off = 0;

	sg = (union nix_send_sg_s *)&cmd[2 + off];
	/* Clear sg->u header before use */
	sg->u &= 0xFC00000000000000;
	sg_u = sg->u;
	slist = &cmd[3 + off];
	nb_segs = m->nb_segs;

	/* Fill mbuf segments */
	do {
		m_next = m->next;
		sg_u = sg_u | ((uint64_t)m->data_len << (i << 4));
		*slist = rte_mbuf_data_iova(m);
		/* Set invert df if buffer is not to be freed by H/W */
		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
			sg_u |= (otx2_nix_prefree_seg(m) << (i + 55));
		/* Mark mempool object as "put" since it is freed by NIX */
		if (!(sg_u & (1ULL << (i + 55)))) {
			m->next = NULL;
			__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
		}
		slist++;
		i++;
		nb_segs--;
		if (i > 2 && nb_segs) {
			i = 0;
			/* Next SG subdesc */
			*(uint64_t *)slist = sg_u & 0xFC00000000000000;
			sg->u = sg_u;
			sg->segs = 3;
			sg = (union nix_send_sg_s *)slist;
			sg_u = sg->u;
			slist++;
		}
		m = m_next;
	} while (nb_segs);

	sg->u = sg_u;
	sg->segs = i;
	segdw = (uint64_t *)slist - (uint64_t *)&cmd[2 + off];
	/* Roundup extra dwords to multiple of 2 */
	segdw = (segdw >> 1) + (segdw & 0x1);
	/* Default dwords */
	segdw += (off >> 1) + 1 + !!(flags & NIX_TX_OFFLOAD_TSTAMP_F);
	send_hdr->w0.sizem1 = segdw - 1;

	return segdw;
}

static __rte_always_inline void
otx2_nix_xmit_mseg_one(uint64_t *cmd, void *lmt_addr,
		       rte_iova_t io_addr, uint16_t segdw)
{
	uint64_t lmt_status;

	do {
		otx2_lmt_mov_seg(lmt_addr, (const void *)cmd, segdw);
		lmt_status = otx2_lmt_submit(io_addr);
	} while (lmt_status == 0);
}

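/* Multi-segment usage sketch (illustrative only): otx2_nix_prepare_mseg()
 * returns the descriptor size (segdw), which is then fed both to the
 * timestamp helper and to the LMTST submit:
 *
 *	otx2_nix_xmit_prepare(m, cmd, flags);
 *	segdw = otx2_nix_prepare_mseg(m, cmd, flags);
 *	otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0], m->ol_flags,
 *				     segdw, flags);
 *	otx2_nix_xmit_mseg_one(cmd, txq->lmt_addr, txq->io_addr, segdw);
 */
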
#define L3L4CSUM_F	NIX_TX_OFFLOAD_L3_L4_CSUM_F
#define OL3OL4CSUM_F	NIX_TX_OFFLOAD_OL3_OL4_CSUM_F
#define VLAN_F		NIX_TX_OFFLOAD_VLAN_QINQ_F
#define NOFF_F		NIX_TX_OFFLOAD_MBUF_NOFF_F
#define TSP_F		NIX_TX_OFFLOAD_TSTAMP_F

/* [TSTMP] [NOFF] [VLAN] [OL3OL4CSUM] [L3L4CSUM] */
#define NIX_TX_FASTPATH_MODES \
T(no_offload,				0, 0, 0, 0, 0,	4, \
		NIX_TX_OFFLOAD_NONE) \
T(l3l4csum,				0, 0, 0, 0, 1,	4, \
		L3L4CSUM_F) \
T(ol3ol4csum,				0, 0, 0, 1, 0,	4, \
		OL3OL4CSUM_F) \
T(ol3ol4csum_l3l4csum,			0, 0, 0, 1, 1,	4, \
		OL3OL4CSUM_F | L3L4CSUM_F) \
T(vlan,					0, 0, 1, 0, 0,	6, \
		VLAN_F) \
T(vlan_l3l4csum,			0, 0, 1, 0, 1,	6, \
		VLAN_F | L3L4CSUM_F) \
T(vlan_ol3ol4csum,			0, 0, 1, 1, 0,	6, \
		VLAN_F | OL3OL4CSUM_F) \
T(vlan_ol3ol4csum_l3l4csum,		0, 0, 1, 1, 1,	6, \
		VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
T(noff,					0, 1, 0, 0, 0,	4, \
		NOFF_F) \
T(noff_l3l4csum,			0, 1, 0, 0, 1,	4, \
		NOFF_F | L3L4CSUM_F) \
T(noff_ol3ol4csum,			0, 1, 0, 1, 0,	4, \
		NOFF_F | OL3OL4CSUM_F) \
T(noff_ol3ol4csum_l3l4csum,		0, 1, 0, 1, 1,	4, \
		NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
T(noff_vlan,				0, 1, 1, 0, 0,	6, \
		NOFF_F | VLAN_F) \
T(noff_vlan_l3l4csum,			0, 1, 1, 0, 1,	6, \
		NOFF_F | VLAN_F | L3L4CSUM_F) \
T(noff_vlan_ol3ol4csum,			0, 1, 1, 1, 0,	6, \
		NOFF_F | VLAN_F | OL3OL4CSUM_F) \
T(noff_vlan_ol3ol4csum_l3l4csum,	0, 1, 1, 1, 1,	6, \
		NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
T(ts,					1, 0, 0, 0, 0,	8, \
		TSP_F) \
T(ts_l3l4csum,				1, 0, 0, 0, 1,	8, \
		TSP_F | L3L4CSUM_F) \
T(ts_ol3ol4csum,			1, 0, 0, 1, 0,	8, \
		TSP_F | OL3OL4CSUM_F) \
T(ts_ol3ol4csum_l3l4csum,		1, 0, 0, 1, 1,	8, \
		TSP_F | OL3OL4CSUM_F | L3L4CSUM_F) \
T(ts_vlan,				1, 0, 1, 0, 0,	8, \
		TSP_F | VLAN_F) \
T(ts_vlan_l3l4csum,			1, 0, 1, 0, 1,	8, \
		TSP_F | VLAN_F | L3L4CSUM_F) \
T(ts_vlan_ol3ol4csum,			1, 0, 1, 1, 0,	8, \
		TSP_F | VLAN_F | OL3OL4CSUM_F) \
T(ts_vlan_ol3ol4csum_l3l4csum,		1, 0, 1, 1, 1,	8, \
		TSP_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
T(ts_noff,				1, 1, 0, 0, 0,	8, \
		TSP_F | NOFF_F) \
T(ts_noff_l3l4csum,			1, 1, 0, 0, 1,	8, \
		TSP_F | NOFF_F | L3L4CSUM_F) \
T(ts_noff_ol3ol4csum,			1, 1, 0, 1, 0,	8, \
		TSP_F | NOFF_F | OL3OL4CSUM_F) \
T(ts_noff_ol3ol4csum_l3l4csum,		1, 1, 0, 1, 1,	8, \
		TSP_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
T(ts_noff_vlan,				1, 1, 1, 0, 0,	8, \
		TSP_F | NOFF_F | VLAN_F) \
T(ts_noff_vlan_l3l4csum,		1, 1, 1, 0, 1,	8, \
		TSP_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
T(ts_noff_vlan_ol3ol4csum,		1, 1, 1, 1, 0,	8, \
		TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
T(ts_noff_vlan_ol3ol4csum_l3l4csum,	1, 1, 1, 1, 1,	8, \
		TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)
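
/* Each T() entry names one specialized Tx function variant together with its
 * flag combination and the number of command words it needs. A sketch of how
 * the table is typically consumed (illustrative; the real declarations and
 * definitions live in the driver sources):
 *
 *	#define T(name, f4, f3, f2, f1, f0, sz, flags) \
 *	uint16_t otx2_nix_xmit_pkts_ ## name(void *tx_queue, \
 *			struct rte_mbuf **tx_pkts, uint16_t pkts);
 *
 *	NIX_TX_FASTPATH_MODES
 *	#undef T
 */
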
#endif /* __OTX2_TX_H__ */