/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#ifndef __OTX2_TX_H__
#define __OTX2_TX_H__

#define NIX_TX_OFFLOAD_NONE		(0)
#define NIX_TX_OFFLOAD_L3_L4_CSUM_F	BIT(0)
#define NIX_TX_OFFLOAD_OL3_OL4_CSUM_F	BIT(1)
#define NIX_TX_OFFLOAD_VLAN_QINQ_F	BIT(2)
#define NIX_TX_OFFLOAD_MBUF_NOFF_F	BIT(3)
#define NIX_TX_OFFLOAD_TSTAMP_F		BIT(4)

/* Flags to control the xmit_prepare function.
 * Defined from the top bit downwards to denote that they are
 * not used as offload flags to select the Tx burst function.
 */
#define NIX_TX_MULTI_SEG_F		BIT(15)

#define NIX_TX_NEED_SEND_HDR_W1 \
	(NIX_TX_OFFLOAD_L3_L4_CSUM_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F | \
	 NIX_TX_OFFLOAD_VLAN_QINQ_F)

#define NIX_TX_NEED_EXT_HDR \
	(NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSTAMP_F)
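
/* A minimal sketch, not part of the original header: a hypothetical flags
 * combination for a variant with inner/outer checksum and VLAN offload.
 * Both NIX_TX_NEED_SEND_HDR_W1 and NIX_TX_NEED_EXT_HDR test non-zero
 * against it, so the prepare step fills send header word 1 and emits an
 * extended sub-descriptor.
 */
#if 0	/* illustrative only */
static const uint16_t example_tx_flags = NIX_TX_OFFLOAD_L3_L4_CSUM_F |
					 NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
					 NIX_TX_OFFLOAD_VLAN_QINQ_F;
#endif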

/* Function to determine the number of Tx sub-descriptors required when
 * the extended sub-descriptor is enabled.
 */
static __rte_always_inline int
otx2_nix_tx_ext_subs(const uint16_t flags)
{
	return (flags & NIX_TX_OFFLOAD_TSTAMP_F) ? 2 :
		((flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) ? 1 : 0);
}

static __rte_always_inline void
otx2_nix_xmit_prepare_tstamp(uint64_t *cmd, const uint64_t *send_mem_desc,
			     const uint64_t ol_flags, const uint16_t no_segdw,
			     const uint16_t flags)
{
	if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
		struct nix_send_mem_s *send_mem;
		uint16_t off = (no_segdw - 1) << 1;

		send_mem = (struct nix_send_mem_s *)(cmd + off);
		if (flags & NIX_TX_MULTI_SEG_F)
			/* Retrieve the default descriptor values */
			cmd[off] = send_mem_desc[6];

		/* For packets without PKT_TX_IEEE1588_TMST set, the Tx
		 * timestamp must not be written to the registered timestamp
		 * address; instead, a dummy address eight bytes ahead is
		 * updated.
		 */
		send_mem->addr = (rte_iova_t)((uint64_t *)send_mem_desc[7] +
				!(ol_flags & PKT_TX_IEEE1588_TMST));
	}
}

static __rte_always_inline void
otx2_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
{
	struct nix_send_ext_s *send_hdr_ext;
	struct nix_send_hdr_s *send_hdr;
	uint64_t ol_flags = 0, mask;
	union nix_send_hdr_w1_u w1;
	union nix_send_sg_s *sg;

	send_hdr = (struct nix_send_hdr_s *)cmd;
	if (flags & NIX_TX_NEED_EXT_HDR) {
		send_hdr_ext = (struct nix_send_ext_s *)(cmd + 2);
		sg = (union nix_send_sg_s *)(cmd + 4);
		/* Clear previous markings */
		send_hdr_ext->w0.lso = 0;
		send_hdr_ext->w1.u = 0;
	} else {
		sg = (union nix_send_sg_s *)(cmd + 2);
	}

	if (flags & NIX_TX_NEED_SEND_HDR_W1) {
		ol_flags = m->ol_flags;
		w1.u = 0;
	}

	if (!(flags & NIX_TX_MULTI_SEG_F)) {
		send_hdr->w0.total = m->data_len;
		send_hdr->w0.aura =
			npa_lf_aura_handle_to_aura(m->pool->pool_id);
	}

	/* L3 type: 2 => IPv4, 3 => IPv4 with csum, 4 => IPv6.
	 * L3type and L3ptr need to be set for either
	 * L3 csum or L4 csum or LSO.
	 */

	if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
	    (flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F)) {
		const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
		const uint8_t ol3type =
			((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
			((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
			!!(ol_flags & PKT_TX_OUTER_IP_CKSUM);

		/* Outer L3 */
		w1.ol3type = ol3type;
		mask = 0xffffull << ((!!ol3type) << 4);
		w1.ol3ptr = ~mask & m->outer_l2_len;
		w1.ol4ptr = ~mask & (w1.ol3ptr + m->outer_l3_len);

		/* Outer L4 */
		w1.ol4type = csum + (csum << 1);

		/* Inner L3 */
		w1.il3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
			((!!(ol_flags & PKT_TX_IPV6)) << 2);
		w1.il3ptr = w1.ol4ptr + m->l2_len;
		w1.il4ptr = w1.il3ptr + m->l3_len;
		/* Increment it by 1 if it is IPV4 as 3 is with csum */
		w1.il3type = w1.il3type + !!(ol_flags & PKT_TX_IP_CKSUM);

		/* Inner L4 */
		w1.il4type = (ol_flags & PKT_TX_L4_MASK) >> 52;

		/* When there is no tunnel header, shift the inner IL3/IL4
		 * fields down so that OL3/OL4 carry the header checksum
		 * information.
		 */
		mask = !ol3type;
		w1.u = ((w1.u & 0xFFFFFFFF00000000) >> (mask << 3)) |
			((w1.u & 0x00000000FFFFFFFF) >> (mask << 4));

	} else if (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) {
		const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
		const uint8_t outer_l2_len = m->outer_l2_len;

		/* Outer L3 */
		w1.ol3ptr = outer_l2_len;
		w1.ol4ptr = outer_l2_len + m->outer_l3_len;
		/* Increment it by 1 if it is IPV4 as 3 is with csum */
		w1.ol3type = ((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
			((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
			!!(ol_flags & PKT_TX_OUTER_IP_CKSUM);

		/* Outer L4 */
		w1.ol4type = csum + (csum << 1);

	} else if (flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) {
		const uint8_t l2_len = m->l2_len;

		/* Always use OLXPTR and OLXTYPE when only one header
		 * is present.
		 */

		/* Inner L3 */
		w1.ol3ptr = l2_len;
		w1.ol4ptr = l2_len + m->l3_len;
		/* Increment it by 1 if it is IPV4 as 3 is with csum */
		w1.ol3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
			((!!(ol_flags & PKT_TX_IPV6)) << 2) +
			!!(ol_flags & PKT_TX_IP_CKSUM);

		/* Inner L4 */
		w1.ol4type = (ol_flags & PKT_TX_L4_MASK) >> 52;
	}

	if (flags & NIX_TX_NEED_EXT_HDR &&
	    flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
		send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & PKT_TX_VLAN);
		/* HW will update ptr after vlan0 update */
		send_hdr_ext->w1.vlan1_ins_ptr = 12;
		send_hdr_ext->w1.vlan1_ins_tci = m->vlan_tci;

		send_hdr_ext->w1.vlan0_ins_ena = !!(ol_flags & PKT_TX_QINQ);
		/* 2 bytes before the end of the L2 header */
		send_hdr_ext->w1.vlan0_ins_ptr = 12;
		send_hdr_ext->w1.vlan0_ins_tci = m->vlan_tci_outer;
	}

	if (flags & NIX_TX_NEED_SEND_HDR_W1)
		send_hdr->w1.u = w1.u;

	if (!(flags & NIX_TX_MULTI_SEG_F)) {
		sg->seg1_size = m->data_len;
		*(rte_iova_t *)(++sg) = rte_mbuf_data_iova(m);

		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
			/* Set don't-free bit if reference count > 1 */
			if (rte_pktmbuf_prefree_seg(m) == NULL)
				send_hdr->w0.df = 1; /* SET DF */
		}
		/* Mark mempool object as "put" since it is freed by NIX */
		if (!send_hdr->w0.df)
			__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
	}
}
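
/* For reference (derived from the code above): with an extended header the
 * LMT command is laid out as cmd[0..1] = send_hdr, cmd[2..3] = send_ext,
 * cmd[4..5] = sg + iova; without it, cmd[2..3] = sg + iova. When the
 * timestamp offload is enabled, a send_mem sub-descriptor occupies the
 * final two dwords.
 */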

static __rte_always_inline void
otx2_nix_xmit_one(uint64_t *cmd, void *lmt_addr,
		  const rte_iova_t io_addr, const uint32_t flags)
{
	uint64_t lmt_status;

	do {
		otx2_lmt_mov(lmt_addr, cmd, otx2_nix_tx_ext_subs(flags));
		lmt_status = otx2_lmt_submit(io_addr);
	} while (lmt_status == 0);
}
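
/* A minimal usage sketch, not part of the original header: a single-segment
 * Tx burst loop built from the helpers above. The example_txq structure and
 * its fields are hypothetical stand-ins for the driver's real Tx queue
 * state.
 */
#if 0	/* illustrative only */
struct example_txq {
	uint64_t cmd[32];	/* staged LMT command + default descriptors */
	void *lmt_addr;		/* mapped LMT line */
	rte_iova_t io_addr;	/* LMTST submit address */
};

static inline uint16_t
example_xmit_pkts(struct example_txq *txq, struct rte_mbuf **tx_pkts,
		  uint16_t pkts, const uint16_t flags)
{
	uint16_t i;

	for (i = 0; i < pkts; i++) {
		otx2_nix_xmit_prepare(tx_pkts[i], txq->cmd, flags);
		/* no_segdw = 4 assumes the fixed single-segment command
		 * layout of four 2-dword sub-descriptors (send_hdr,
		 * send_ext, sg, send_mem).
		 */
		otx2_nix_xmit_prepare_tstamp(txq->cmd, &txq->cmd[0],
					     tx_pkts[i]->ol_flags, 4, flags);
		otx2_nix_xmit_one(txq->cmd, txq->lmt_addr, txq->io_addr,
				  flags);
	}

	return pkts;
}
#endif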

static __rte_always_inline uint16_t
otx2_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
{
	struct nix_send_hdr_s *send_hdr;
	union nix_send_sg_s *sg;
	struct rte_mbuf *m_next;
	uint64_t *slist, sg_u;
	uint64_t nb_segs;
	uint64_t segdw;
	uint8_t off, i;

	send_hdr = (struct nix_send_hdr_s *)cmd;
	send_hdr->w0.total = m->pkt_len;
	send_hdr->w0.aura = npa_lf_aura_handle_to_aura(m->pool->pool_id);

	if (flags & NIX_TX_NEED_EXT_HDR)
		off = 2;
	else
		off = 0;

	sg = (union nix_send_sg_s *)&cmd[2 + off];
	sg_u = sg->u;
	slist = &cmd[3 + off];
	i = 0;
	nb_segs = m->nb_segs;

	/* Fill mbuf segments */
	do {
		m_next = m->next;
		sg_u = sg_u | ((uint64_t)m->data_len << (i << 4));
		*slist = rte_mbuf_data_iova(m);
		/* Set invert df if reference count > 1 */
		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
			sg_u |=
			((uint64_t)(rte_pktmbuf_prefree_seg(m) == NULL) <<
			 (i + 55));
		/* Mark mempool object as "put" since it is freed by NIX */
		if (!(sg_u & (1ULL << (i + 55)))) {
			m->next = NULL;
			__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
		}
		slist++;
		i++;
		nb_segs--;
		if (i > 2 && nb_segs) {
			i = 0;
			/* Next SG subdesc */
			*(uint64_t *)slist = sg_u & 0xFC00000000000000;
			sg->u = sg_u;
			sg->segs = 3;
			sg = (union nix_send_sg_s *)slist;
			sg_u = sg->u;
			slist++;
		}
		m = m_next;
	} while (nb_segs);

	sg->u = sg_u;
	sg->segs = i;
	segdw = (uint64_t *)slist - (uint64_t *)&cmd[2 + off];
	/* Round up extra dwords to a multiple of 2 */
	segdw = (segdw >> 1) + (segdw & 0x1);
	/* Default dwords */
	segdw += (off >> 1) + 1 + !!(flags & NIX_TX_OFFLOAD_TSTAMP_F);
	send_hdr->w0.sizem1 = segdw - 1;

	return segdw;
}

static __rte_always_inline void
otx2_nix_xmit_mseg_one(uint64_t *cmd, void *lmt_addr,
		       rte_iova_t io_addr, uint16_t segdw)
{
	uint64_t lmt_status;

	do {
		otx2_lmt_mov_seg(lmt_addr, (const void *)cmd, segdw);
		lmt_status = otx2_lmt_submit(io_addr);
	} while (lmt_status == 0);
}
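
/* A minimal multi-segment usage sketch, not part of the original header.
 * It reuses the hypothetical example_txq from the sketch above and assumes
 * flags already contains NIX_TX_MULTI_SEG_F, as it would in a generated
 * multi-segment variant.
 */
#if 0	/* illustrative only */
static inline uint16_t
example_xmit_pkts_mseg(struct example_txq *txq, struct rte_mbuf **tx_pkts,
		       uint16_t pkts, const uint16_t flags)
{
	uint16_t i, segdw;

	for (i = 0; i < pkts; i++) {
		otx2_nix_xmit_prepare(tx_pkts[i], txq->cmd, flags);
		segdw = otx2_nix_prepare_mseg(tx_pkts[i], txq->cmd, flags);
		otx2_nix_xmit_prepare_tstamp(txq->cmd, &txq->cmd[0],
					     tx_pkts[i]->ol_flags, segdw,
					     flags);
		otx2_nix_xmit_mseg_one(txq->cmd, txq->lmt_addr, txq->io_addr,
				       segdw);
	}

	return pkts;
}
#endif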

#define L3L4CSUM_F	NIX_TX_OFFLOAD_L3_L4_CSUM_F
#define OL3OL4CSUM_F	NIX_TX_OFFLOAD_OL3_OL4_CSUM_F
#define VLAN_F		NIX_TX_OFFLOAD_VLAN_QINQ_F
#define NOFF_F		NIX_TX_OFFLOAD_MBUF_NOFF_F
#define TSP_F		NIX_TX_OFFLOAD_TSTAMP_F

/* [TSTMP] [NOFF] [VLAN] [OL3OL4CSUM] [L3L4CSUM] */
#define NIX_TX_FASTPATH_MODES					\
T(no_offload,				0, 0, 0, 0, 0, 4,	\
	NIX_TX_OFFLOAD_NONE)					\
T(l3l4csum,				0, 0, 0, 0, 1, 4,	\
	L3L4CSUM_F)						\
T(ol3ol4csum,				0, 0, 0, 1, 0, 4,	\
	OL3OL4CSUM_F)						\
T(ol3ol4csum_l3l4csum,			0, 0, 0, 1, 1, 4,	\
	OL3OL4CSUM_F | L3L4CSUM_F)				\
T(vlan,					0, 0, 1, 0, 0, 6,	\
	VLAN_F)							\
T(vlan_l3l4csum,			0, 0, 1, 0, 1, 6,	\
	VLAN_F | L3L4CSUM_F)					\
T(vlan_ol3ol4csum,			0, 0, 1, 1, 0, 6,	\
	VLAN_F | OL3OL4CSUM_F)					\
T(vlan_ol3ol4csum_l3l4csum,		0, 0, 1, 1, 1, 6,	\
	VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)			\
T(noff,					0, 1, 0, 0, 0, 4,	\
	NOFF_F)							\
T(noff_l3l4csum,			0, 1, 0, 0, 1, 4,	\
	NOFF_F | L3L4CSUM_F)					\
T(noff_ol3ol4csum,			0, 1, 0, 1, 0, 4,	\
	NOFF_F | OL3OL4CSUM_F)					\
T(noff_ol3ol4csum_l3l4csum,		0, 1, 0, 1, 1, 4,	\
	NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F)			\
T(noff_vlan,				0, 1, 1, 0, 0, 6,	\
	NOFF_F | VLAN_F)					\
T(noff_vlan_l3l4csum,			0, 1, 1, 0, 1, 6,	\
	NOFF_F | VLAN_F | L3L4CSUM_F)				\
T(noff_vlan_ol3ol4csum,			0, 1, 1, 1, 0, 6,	\
	NOFF_F | VLAN_F | OL3OL4CSUM_F)				\
T(noff_vlan_ol3ol4csum_l3l4csum,	0, 1, 1, 1, 1, 6,	\
	NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)		\
T(ts,					1, 0, 0, 0, 0, 8,	\
	TSP_F)							\
T(ts_l3l4csum,				1, 0, 0, 0, 1, 8,	\
	TSP_F | L3L4CSUM_F)					\
T(ts_ol3ol4csum,			1, 0, 0, 1, 0, 8,	\
	TSP_F | OL3OL4CSUM_F)					\
T(ts_ol3ol4csum_l3l4csum,		1, 0, 0, 1, 1, 8,	\
	TSP_F | OL3OL4CSUM_F | L3L4CSUM_F)			\
T(ts_vlan,				1, 0, 1, 0, 0, 8,	\
	TSP_F | VLAN_F)						\
T(ts_vlan_l3l4csum,			1, 0, 1, 0, 1, 8,	\
	TSP_F | VLAN_F | L3L4CSUM_F)				\
T(ts_vlan_ol3ol4csum,			1, 0, 1, 1, 0, 8,	\
	TSP_F | VLAN_F | OL3OL4CSUM_F)				\
T(ts_vlan_ol3ol4csum_l3l4csum,		1, 0, 1, 1, 1, 8,	\
	TSP_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)		\
T(ts_noff,				1, 1, 0, 0, 0, 8,	\
	TSP_F | NOFF_F)						\
T(ts_noff_l3l4csum,			1, 1, 0, 0, 1, 8,	\
	TSP_F | NOFF_F | L3L4CSUM_F)				\
T(ts_noff_ol3ol4csum,			1, 1, 0, 1, 0, 8,	\
	TSP_F | NOFF_F | OL3OL4CSUM_F)				\
T(ts_noff_ol3ol4csum_l3l4csum,		1, 1, 0, 1, 1, 8,	\
	TSP_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F)		\
T(ts_noff_vlan,				1, 1, 1, 0, 0, 8,	\
	TSP_F | NOFF_F | VLAN_F)				\
T(ts_noff_vlan_l3l4csum,		1, 1, 1, 0, 1, 8,	\
	TSP_F | NOFF_F | VLAN_F | L3L4CSUM_F)			\
T(ts_noff_vlan_ol3ol4csum,		1, 1, 1, 1, 0, 8,	\
	TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F)			\
T(ts_noff_vlan_ol3ol4csum_l3l4csum,	1, 1, 1, 1, 1, 8,	\
	TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)
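
/* Usage sketch, not part of the original header: a consumer stamps out one
 * Tx function per mode by defining T() and expanding the table. The shape
 * below is illustrative of how a .c file would typically consume
 * NIX_TX_FASTPATH_MODES; the function name is hypothetical.
 */
#if 0	/* illustrative only */
#define T(name, f4, f3, f2, f1, f0, sz, flags)				\
uint16_t example_nix_xmit_pkts_ ## name(void *tx_queue,			\
					struct rte_mbuf **tx_pkts,	\
					uint16_t pkts);

NIX_TX_FASTPATH_MODES
#undef T
#endif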

#endif /* __OTX2_TX_H__ */