/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef __OCTEONTX_RXTX_H__
#define __OCTEONTX_RXTX_H__

#include <ethdev_driver.h>

#define OFFLOAD_FLAGS                                   \
        uint16_t rx_offload_flags;                      \
        uint16_t tx_offload_flags
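
/*
 * OFFLOAD_FLAGS is meant to be spliced into a driver structure so the fast
 * path can branch cheaply on the negotiated offloads. An illustrative use
 * (hypothetical struct name, not part of this driver):
 */
struct occtx_offload_flags_example {
        OFFLOAD_FLAGS; /* expands to rx_offload_flags and tx_offload_flags */
};
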
#define BIT(nr) (1UL << (nr))

#define OCCTX_RX_OFFLOAD_NONE           (0)
#define OCCTX_RX_MULTI_SEG_F            BIT(0)
#define OCCTX_RX_OFFLOAD_CSUM_F         BIT(1)
#define OCCTX_RX_VLAN_FLTR_F            BIT(2)

#define OCCTX_TX_OFFLOAD_NONE           (0)
#define OCCTX_TX_MULTI_SEG_F            BIT(0)
#define OCCTX_TX_OFFLOAD_L3_L4_CSUM_F   BIT(1)
#define OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F BIT(2)
#define OCCTX_TX_OFFLOAD_MBUF_NOFF_F    BIT(3)

/* Packet type table */
#define PTYPE_SIZE      OCCTX_PKI_LTYPE_LAST

/* octeontx send header sub descriptor structure */
RTE_STD_C11
union octeontx_send_hdr_w0_u {
        uint64_t u;
        struct {
                uint64_t total   : 16;
                uint64_t markptr : 8;
                uint64_t l3ptr   : 8;
                uint64_t l4ptr   : 8;
                uint64_t ii      : 1;
                uint64_t shp_dis : 1;
                uint64_t ckle    : 1;
                uint64_t cklf    : 2;
                uint64_t ckl3    : 1;
                uint64_t ckl4    : 2;
                uint64_t p       : 1;
                uint64_t format  : 7;
                uint64_t tstamp  : 1;
                uint64_t tso_eom : 1;
                uint64_t df      : 1;
                uint64_t tso     : 1;
                uint64_t n2      : 1;
                uint64_t scntn1  : 3;
        };
};

RTE_STD_C11
union octeontx_send_hdr_w1_u {
        uint64_t u;
        struct {
                uint64_t tso_mss : 14;
                uint64_t shp_ra  : 2;
                uint64_t tso_sb  : 8;
                uint64_t leptr   : 8;
                uint64_t lfptr   : 8;
                uint64_t shp_chg : 9;
                uint64_t tso_fn  : 7;
                uint64_t l2len   : 8;
        };
};

struct octeontx_send_hdr_s {
        union octeontx_send_hdr_w0_u w0;
        union octeontx_send_hdr_w1_u w1;
};

static const uint32_t __rte_cache_aligned
ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
        [LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
        [LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
        [LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
        [LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
        [LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
        [LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
        [LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
        [LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
        [LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
        [LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,

        [LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
        [LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_ESP,
        [LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
        [LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
        [LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
        [LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
        [LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
        [LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
        [LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
        [LC_IPV4][LE_NONE][LF_NVGRE] =
                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,

        [LC_IPV4_OPT][LE_NONE][LF_NONE] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
        [LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_ESP,
        [LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
        [LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
        [LC_IPV4_OPT][LE_NONE][LF_TCP] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
        [LC_IPV4_OPT][LE_NONE][LF_UDP] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
        [LC_IPV4_OPT][LE_NONE][LF_GRE] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
        [LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
        [LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
        [LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,

        [LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
        [LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
                                RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_ESP,
        [LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
        [LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
        [LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
        [LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
        [LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
        [LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
                                RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
        [LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
                                RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
        [LC_IPV6][LE_NONE][LF_NVGRE] =
                                RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_NVGRE,

        [LC_IPV6_OPT][LE_NONE][LF_NONE] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
        [LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_ESP,
        [LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
        [LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
        [LC_IPV6_OPT][LE_NONE][LF_TCP] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
        [LC_IPV6_OPT][LE_NONE][LF_UDP] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
        [LC_IPV6_OPT][LE_NONE][LF_GRE] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
        [LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
        [LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
        [LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,
};
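
/*
 * Rx fast-path usage (illustrative; hypothetical variable names): the PKI
 * parser reports the per-layer types (LC/LE/LF) in the receive descriptor,
 * which index straight into this table:
 *
 *      rx_pkt->packet_type = ptype_table[lcty][lety][lfty];
 */
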
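/*
 * Detach an indirect mbuf from its direct (backing) mbuf: restore the
 * indirect mbuf's own buffer, drop one reference on the direct mbuf and
 * report the direct mbuf to the caller through @m_tofree.
 *
 * Return 0 if the direct mbuf reached refcount zero and may now be freed
 * (e.g. by PKO), 1 if other references remain.
 */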
static __rte_always_inline uint64_t
octeontx_pktmbuf_detach(struct rte_mbuf *m, struct rte_mbuf **m_tofree)
{
        struct rte_mempool *mp = m->pool;
        uint32_t mbuf_size, buf_len;
        struct rte_mbuf *md;
        uint16_t priv_size;
        uint16_t refcount;

        /* Update refcount of direct mbuf */
        md = rte_mbuf_from_indirect(m);
        /* The real data is in the direct buffer; report it to the caller */
        *m_tofree = md;
        refcount = rte_mbuf_refcnt_update(md, -1);

        priv_size = rte_pktmbuf_priv_size(mp);
        mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
        buf_len = rte_pktmbuf_data_room_size(mp);

        m->priv_size = priv_size;
        m->buf_addr = (char *)m + mbuf_size;
        m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
        m->buf_len = (uint16_t)buf_len;
        rte_pktmbuf_reset_headroom(m);
        m->data_len = 0;
        m->ol_flags = 0;
        m->next = NULL;
        m->nb_segs = 1;

        /* Now the indirect mbuf is safe to free */
        rte_pktmbuf_free(m);

        if (refcount == 0) {
                rte_mbuf_refcnt_set(md, 1);
                md->data_len = 0;
                md->ol_flags = 0;
                md->next = NULL;
                md->nb_segs = 1;
                return 0;
        } else {
                return 1;
        }
}

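/*
 * Decide whether the segment's backing buffer may be freed by PKO after
 * transmission. May update @m_tofree to the direct mbuf that actually owns
 * the data.
 *
 * Return 0 if hardware may free the buffer, 1 if it must not be freed
 * because other references remain.
 */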
static __rte_always_inline uint64_t
octeontx_prefree_seg(struct rte_mbuf *m, struct rte_mbuf **m_tofree)
{
        if (likely(rte_mbuf_refcnt_read(m) == 1)) {
                if (!RTE_MBUF_DIRECT(m))
                        return octeontx_pktmbuf_detach(m, m_tofree);

                m->next = NULL;
                m->nb_segs = 1;
                return 0;
        } else if (rte_mbuf_refcnt_update(m, -1) == 0) {
                if (!RTE_MBUF_DIRECT(m))
                        return octeontx_pktmbuf_detach(m, m_tofree);

                rte_mbuf_refcnt_set(m, 1);
                m->next = NULL;
                m->nb_segs = 1;
                return 0;
        }

        /* The mbuf still has a refcount above 1, so it must not be freed */
        return 1;
}

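/*
 * Fill the PKO send header checksum fields (l3ptr/l4ptr, leptr/lfptr and the
 * ckl3/ckl4/ckle/cklf enables) from the mbuf Tx offload flags, covering the
 * inner-only, outer-only and inner+outer checksum modes.
 */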
static __rte_always_inline void
octeontx_tx_checksum_offload(uint64_t *cmd_buf, const uint16_t flags,
                             struct rte_mbuf *m)
{
        struct octeontx_send_hdr_s *send_hdr =
                                (struct octeontx_send_hdr_s *)cmd_buf;
        uint64_t ol_flags = m->ol_flags;

        /* PKO Checksum L4 Algorithm Enumeration
         * 0x0 - No checksum
         * 0x1 - UDP L4 checksum
         * 0x2 - TCP L4 checksum
         * 0x3 - SCTP L4 checksum
         */
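        /* RTE_MBUF_F_TX_L4_MASK occupies bits [53:52] of ol_flags. XOR-ing
         * with one specific checksum flag clears those two bits only on an
         * exact match, so each term below contributes its PKO enumeration
         * value exactly when that L4 checksum type is requested.
         */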
        const uint8_t csum = (!(((ol_flags ^ RTE_MBUF_F_TX_UDP_CKSUM) >> 52) & 0x3) +
                      (!(((ol_flags ^ RTE_MBUF_F_TX_TCP_CKSUM) >> 52) & 0x3) * 2) +
                      (!(((ol_flags ^ RTE_MBUF_F_TX_SCTP_CKSUM) >> 52) & 0x3) * 3));

        const uint8_t is_tunnel_parsed = (!!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GTP) ||
                                      !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE) ||
                                      !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN) ||
                                      !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GRE) ||
                                      !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GENEVE) ||
                                      !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_IP) ||
                                      !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_IPIP));

        const uint8_t csum_outer = (!!(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) ||
                                    !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_UDP));
        const uint8_t outer_l2_len = m->outer_l2_len;
        const uint8_t l2_len = m->l2_len;

        if ((flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
            (flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F)) {
                if (is_tunnel_parsed) {
                        /* Outer L3 */
                        send_hdr->w0.l3ptr = outer_l2_len;
                        send_hdr->w0.l4ptr = outer_l2_len + m->outer_l3_len;
                        /* Set ckl3 for PKO to calculate IPV4 header checksum */
                        send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4);

                        /* Outer L4 */
                        send_hdr->w0.ckl4 = csum_outer;

                        /* Inner L3 */
                        send_hdr->w1.leptr = send_hdr->w0.l4ptr + l2_len;
                        send_hdr->w1.lfptr = send_hdr->w1.leptr + m->l3_len;
                        /* Set ckle for PKO to calculate inner IPV4 header
                         * checksum.
                         */
                        send_hdr->w0.ckle = !!(ol_flags & RTE_MBUF_F_TX_IPV4);

                        /* Inner L4 */
                        send_hdr->w0.cklf = csum;
                } else {
                        /* Inner L3 */
                        send_hdr->w0.l3ptr = l2_len;
                        send_hdr->w0.l4ptr = l2_len + m->l3_len;
                        /* Set ckl3 for PKO to calculate IPV4 header checksum */
                        send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_IPV4);

                        /* Inner L4 */
                        send_hdr->w0.ckl4 = csum;
                }
        } else if (flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) {
                /* Outer L3 */
                send_hdr->w0.l3ptr = outer_l2_len;
                send_hdr->w0.l4ptr = outer_l2_len + m->outer_l3_len;
                /* Set ckl3 for PKO to calculate IPV4 header checksum */
                send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4);

                /* Outer L4 */
                send_hdr->w0.ckl4 = csum_outer;
        } else if (flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F) {
                /* Inner L3 */
                send_hdr->w0.l3ptr = l2_len;
                send_hdr->w0.l4ptr = l2_len + m->l3_len;
                /* Set ckl3 for PKO to calculate IPV4 header checksum */
                send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_IPV4);

                /* Inner L4 */
                send_hdr->w0.ckl4 = csum;
        }
}

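/*
 * Build the PKO command for a single-segment packet: a PKO_SEND_HDR_S
 * followed by one PKO_SEND_BUFLINK_S. Return the number of 64-bit command
 * words written (4 on this path).
 */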
static __rte_always_inline uint16_t
__octeontx_xmit_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
                        const uint16_t flag)
{
        uint16_t gaura_id, nb_desc = 0;
        struct rte_mbuf *m_tofree;
        rte_iova_t iova;
        uint16_t data_len;

        m_tofree = tx_pkt;

        data_len = tx_pkt->data_len;
        iova = rte_mbuf_data_iova(tx_pkt);

        /* Setup PKO_SEND_HDR_S */
        cmd_buf[nb_desc++] = tx_pkt->data_len & 0xffff;
        cmd_buf[nb_desc++] = 0x0;

        /* Enable tx checksum offload */
        if ((flag & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) ||
            (flag & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F))
                octeontx_tx_checksum_offload(cmd_buf, flag, tx_pkt);

        /* SEND_HDR[DF] controls whether the buffer is freed, as
         * SG_DESC[I] and SEND_HDR[II] are clear.
         */
        if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)
                cmd_buf[0] |= (octeontx_prefree_seg(tx_pkt, &m_tofree) <<
                               58);

        /* Mark the mempool object as "put" since it is freed by PKO */
        if (!(cmd_buf[0] & (1ULL << 58)))
                RTE_MEMPOOL_CHECK_COOKIES(m_tofree->pool, (void **)&m_tofree,
                                        1, 0);
        /* Get the gaura Id */
        gaura_id =
                octeontx_fpa_bufpool_gaura((uintptr_t)m_tofree->pool->pool_id);

        /* Setup PKO_SEND_BUFLINK_S */
        cmd_buf[nb_desc++] = PKO_SEND_BUFLINK_SUBDC |
                PKO_SEND_BUFLINK_LDTYPE(0x1ull) |
                PKO_SEND_BUFLINK_GAUAR((long)gaura_id) |
                data_len;
        cmd_buf[nb_desc++] = iova;

        return nb_desc;
}

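/*
 * Build the PKO command for a multi-segment packet: a PKO_SEND_HDR_S
 * followed by one PKO_SEND_GATHER_S per segment. Return the number of
 * 64-bit command words written (2 + 2 per segment).
 */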
static __rte_always_inline uint16_t
__octeontx_xmit_mseg_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
                        const uint16_t flag)
{
        uint16_t nb_segs, nb_desc = 0;
        uint16_t gaura_id;
        struct rte_mbuf *m_next = NULL, *m_tofree;
        rte_iova_t iova;
        uint16_t data_len;

        nb_segs = tx_pkt->nb_segs;
        /* Setup PKO_SEND_HDR_S */
        cmd_buf[nb_desc++] = tx_pkt->pkt_len & 0xffff;
        cmd_buf[nb_desc++] = 0x0;

        /* Enable tx checksum offload */
        if ((flag & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) ||
            (flag & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F))
                octeontx_tx_checksum_offload(cmd_buf, flag, tx_pkt);

        do {
                m_next = tx_pkt->next;
                /* Get TX parameters up front; octeontx_prefree_seg() might
                 * change them.
                 */
                m_tofree = tx_pkt;
                data_len = tx_pkt->data_len;
                iova = rte_mbuf_data_iova(tx_pkt);

                /* Start PKO_SEND_GATHER_S */
                cmd_buf[nb_desc] = 0;

                /* SG_DESC[I] controls whether the buffer is freed, as
                 * SEND_HDR[DF] and SEND_HDR[II] are clear.
                 */
                if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F) {
                        cmd_buf[nb_desc] |=
                                (octeontx_prefree_seg(tx_pkt, &m_tofree) << 57);
                }

                /* Handle the case where segments belong to different pools,
                 * e.g. after fragmentation.
                 */
                gaura_id = octeontx_fpa_bufpool_gaura((uintptr_t)
                                        m_tofree->pool->pool_id);

                /* Setup PKO_SEND_GATHER_S */
                cmd_buf[nb_desc] |= PKO_SEND_GATHER_SUBDC                |
                                   PKO_SEND_GATHER_LDTYPE(0x1ull)        |
                                   PKO_SEND_GATHER_GAUAR((long)gaura_id) |
                                   data_len;

                /* Mark the mempool object as "put" since it is freed by
                 * PKO.
                 */
                if (!(cmd_buf[nb_desc] & (1ULL << 57))) {
                        tx_pkt->next = NULL;
                        RTE_MEMPOOL_CHECK_COOKIES(m_tofree->pool,
                                                (void **)&m_tofree, 1, 0);
                }
                nb_desc++;

                cmd_buf[nb_desc++] = iova;

                nb_segs--;
                tx_pkt = m_next;
        } while (nb_segs);

        return nb_desc;
}

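/*
 * Common Tx burst body: for each packet, check the PKO flow-control status
 * word, build the command and issue it with an LMTST operation. Return the
 * number of packets actually queued for transmission.
 */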
static __rte_always_inline uint16_t
__octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                     uint16_t nb_pkts, uint64_t *cmd_buf,
                     const uint16_t flags)
{
        struct octeontx_txq *txq = tx_queue;
        octeontx_dq_t *dq = &txq->dq;
        uint16_t count = 0, nb_desc;

        rte_io_wmb();

        while (count < nb_pkts) {
                if (unlikely(*((volatile int64_t *)dq->fc_status_va) < 0))
                        break;

                if (flags & OCCTX_TX_MULTI_SEG_F) {
                        nb_desc = __octeontx_xmit_mseg_prepare(tx_pkts[count],
                                                               cmd_buf, flags);
                } else {
                        nb_desc = __octeontx_xmit_prepare(tx_pkts[count],
                                                          cmd_buf, flags);
                }

                octeontx_reg_lmtst(dq->lmtline_va, dq->ioreg_va, cmd_buf,
                                   nb_desc);

                count++;
        }

        return count;
}

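/*
 * A minimal sketch (hypothetical function name, assuming the driver's usual
 * pattern) of how a specialized burst function instantiates the common body
 * with compile-time constant flags and a matching command buffer:
 */
static __rte_unused uint16_t
octeontx_xmit_pkts_example(void *tx_queue, struct rte_mbuf **tx_pkts,
                           uint16_t nb_pkts)
{
        uint64_t cmd_buf[4]; /* 4 words: single-segment, no offloads */

        return __octeontx_xmit_pkts(tx_queue, tx_pkts, nb_pkts, cmd_buf,
                                    OCCTX_TX_OFFLOAD_NONE);
}
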
uint16_t
octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

#define L3L4CSUM_F   OCCTX_TX_OFFLOAD_L3_L4_CSUM_F
#define OL3OL4CSUM_F OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F
#define NOFF_F       OCCTX_TX_OFFLOAD_MBUF_NOFF_F
#define MULT_F       OCCTX_TX_MULTI_SEG_F

/* [NOFF] [OL3OL4CSUM_F] [L3L4CSUM_F] [MULTI_SEG] <cmd buf size in words> */
#define OCCTX_TX_FASTPATH_MODES                                                \
T(no_offload,                           0, 0, 0, 0,     4,                     \
                                        OCCTX_TX_OFFLOAD_NONE)                 \
T(mseg,                                 0, 0, 0, 1,     14,                    \
                                        MULT_F)                                \
T(l3l4csum,                             0, 0, 1, 0,     4,                     \
                                        L3L4CSUM_F)                            \
T(l3l4csum_mseg,                        0, 0, 1, 1,     14,                    \
                                        L3L4CSUM_F | MULT_F)                   \
T(ol3ol4csum,                           0, 1, 0, 0,     4,                     \
                                        OL3OL4CSUM_F)                          \
T(ol3ol4csum_mseg,                      0, 1, 0, 1,     14,                    \
                                        OL3OL4CSUM_F | MULT_F)                 \
T(ol3ol4csum_l3l4csum,                  0, 1, 1, 0,     4,                     \
                                        OL3OL4CSUM_F | L3L4CSUM_F)             \
T(ol3ol4csum_l3l4csum_mseg,             0, 1, 1, 1,     14,                    \
                                        OL3OL4CSUM_F | L3L4CSUM_F | MULT_F)    \
T(noff,                                 1, 0, 0, 0,     4,                     \
                                        NOFF_F)                                \
T(noff_mseg,                            1, 0, 0, 1,     14,                    \
                                        NOFF_F | MULT_F)                       \
T(noff_l3l4csum,                        1, 0, 1, 0,     4,                     \
                                        NOFF_F | L3L4CSUM_F)                   \
T(noff_l3l4csum_mseg,                   1, 0, 1, 1,     14,                    \
                                        NOFF_F | L3L4CSUM_F | MULT_F)          \
T(noff_ol3ol4csum,                      1, 1, 0, 0,     4,                     \
                                        NOFF_F | OL3OL4CSUM_F)                 \
T(noff_ol3ol4csum_mseg,                 1, 1, 0, 1,     14,                    \
                                        NOFF_F | OL3OL4CSUM_F | MULT_F)        \
T(noff_ol3ol4csum_l3l4csum,             1, 1, 1, 0,     4,                     \
                                        NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F)    \
T(noff_ol3ol4csum_l3l4csum_mseg,        1, 1, 1, 1,     14,                    \
                                        NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F |   \
                                        MULT_F)

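/*
 * Conventional expansion of the mode table (a sketch; the actual per-mode
 * function definitions live in the driver's .c file): each T() row yields
 * one specialized Tx burst function whose name encodes the enabled offloads.
 */
#define T(name, f3, f2, f1, f0, sz, flags)                                     \
uint16_t octeontx_xmit_pkts_ ## name(void *tx_queue,                           \
                                     struct rte_mbuf **tx_pkts,                \
                                     uint16_t pkts);

OCCTX_TX_FASTPATH_MODES
#undef T
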
/* RX offload macros */
#define VLAN_FLTR_F     OCCTX_RX_VLAN_FLTR_F
#define CSUM_F          OCCTX_RX_OFFLOAD_CSUM_F
#define MULT_RX_F       OCCTX_RX_MULTI_SEG_F

/* [VLAN_FLTR] [CSUM_F] [MULTI_SEG] */
#define OCCTX_RX_FASTPATH_MODES                                                \
R(no_offload,                           0, 0, 0,  OCCTX_RX_OFFLOAD_NONE)       \
R(mseg,                                 0, 0, 1,  MULT_RX_F)                   \
R(csum,                                 0, 1, 0,  CSUM_F)                      \
R(csum_mseg,                            0, 1, 1,  CSUM_F | MULT_RX_F)          \
R(vlan,                                 1, 0, 0,  VLAN_FLTR_F)                 \
R(vlan_mseg,                            1, 0, 1,  VLAN_FLTR_F | MULT_RX_F)     \
R(vlan_csum,                            1, 1, 0,  VLAN_FLTR_F | CSUM_F)        \
R(vlan_csum_mseg,                       1, 1, 1,  VLAN_FLTR_F | CSUM_F |       \
                                        MULT_RX_F)

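/*
 * Likewise for Rx (a sketch; the definitions live in the .c file): each R()
 * row yields one specialized Rx burst function.
 */
#define R(name, f2, f1, f0, flags)                                             \
uint16_t octeontx_recv_pkts_ ## name(void *rx_queue,                           \
                                     struct rte_mbuf **rx_pkts,                \
                                     uint16_t pkts);

OCCTX_RX_FASTPATH_MODES
#undef R
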
#endif /* __OCTEONTX_RXTX_H__ */