/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef __OCTEONTX_RXTX_H__
#define __OCTEONTX_RXTX_H__

#include <rte_ethdev_driver.h>

#define OFFLOAD_FLAGS                                   \
        uint16_t rx_offload_flags;                      \
        uint16_t tx_offload_flags

#define BIT(nr) (1UL << (nr))

#define OCCTX_RX_OFFLOAD_NONE           (0)
#define OCCTX_RX_MULTI_SEG_F            BIT(0)
#define OCCTX_RX_OFFLOAD_CSUM_F         BIT(1)
#define OCCTX_RX_VLAN_FLTR_F            BIT(2)

#define OCCTX_TX_OFFLOAD_NONE           (0)
#define OCCTX_TX_MULTI_SEG_F            BIT(0)
#define OCCTX_TX_OFFLOAD_L3_L4_CSUM_F   BIT(1)
#define OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F BIT(2)
#define OCCTX_TX_OFFLOAD_MBUF_NOFF_F    BIT(3)

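/* The flag bits above are stored per queue in the rx_offload_flags and
 * tx_offload_flags fields declared by OFFLOAD_FLAGS, and the same values are
 * passed as compile-time constants to the fastpath helpers below. A minimal,
 * illustrative sketch of how a TX flag set could be assembled from the ethdev
 * offload configuration (the real mapping lives in the driver's ethdev code,
 * not in this header):
 *
 *      uint16_t tx_flags = OCCTX_TX_OFFLOAD_NONE;
 *
 *      if (tx_conf_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
 *              tx_flags |= OCCTX_TX_MULTI_SEG_F;
 *      if (tx_conf_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
 *                              DEV_TX_OFFLOAD_TCP_CKSUM |
 *                              DEV_TX_OFFLOAD_UDP_CKSUM))
 *              tx_flags |= OCCTX_TX_OFFLOAD_L3_L4_CSUM_F;
 */
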
/* Packet type table */
#define PTYPE_SIZE      OCCTX_PKI_LTYPE_LAST

/* octeontx send header sub descriptor structure */
RTE_STD_C11
union octeontx_send_hdr_w0_u {
        uint64_t u;
        struct {
                uint64_t total   : 16;
                uint64_t markptr : 8;
                uint64_t l3ptr   : 8;
                uint64_t l4ptr   : 8;
                uint64_t ii      : 1;
                uint64_t shp_dis : 1;
                uint64_t ckle    : 1;
                uint64_t cklf    : 2;
                uint64_t ckl3    : 1;
                uint64_t ckl4    : 2;
                uint64_t p       : 1;
                uint64_t format  : 7;
                uint64_t tstamp  : 1;
                uint64_t tso_eom : 1;
                uint64_t df      : 1;
                uint64_t tso     : 1;
                uint64_t n2      : 1;
                uint64_t scntn1  : 3;
        };
};

RTE_STD_C11
union octeontx_send_hdr_w1_u {
        uint64_t u;
        struct {
                uint64_t tso_mss : 14;
                uint64_t shp_ra  : 2;
                uint64_t tso_sb  : 8;
                uint64_t leptr   : 8;
                uint64_t lfptr   : 8;
                uint64_t shp_chg : 9;
                uint64_t tso_fn  : 7;
                uint64_t l2len   : 8;
        };
};

struct octeontx_send_hdr_s {
        union octeontx_send_hdr_w0_u w0;
        union octeontx_send_hdr_w1_u w1;
};

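/* The two words above make up the PKO SEND_HDR_S sub-descriptor, which always
 * occupies the first two 64-bit words of the command buffer built by the
 * __octeontx_xmit_*prepare() helpers below. A minimal sketch of how the
 * checksum path views that buffer (illustrative only):
 *
 *      uint64_t cmd_buf[4] = { pkt->data_len & 0xffff, 0 };
 *      struct octeontx_send_hdr_s *hdr =
 *                      (struct octeontx_send_hdr_s *)cmd_buf;
 *
 *      hdr->w0.l3ptr = pkt->l2_len;    start of the L3 header
 *      hdr->w0.ckl3  = 1;              have PKO insert the IPv4 checksum
 */
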
static const uint32_t __rte_cache_aligned
ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
        [LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
        [LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
        [LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
        [LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
        [LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
        [LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
        [LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
        [LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
        [LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
        [LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,

        [LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
        [LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
        [LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
        [LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
        [LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
        [LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
        [LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
        [LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
        [LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
        [LC_IPV4][LE_NONE][LF_NVGRE] =
                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,

        [LC_IPV4_OPT][LE_NONE][LF_NONE] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
        [LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L3_IPV4,
        [LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
        [LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
        [LC_IPV4_OPT][LE_NONE][LF_TCP] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
        [LC_IPV4_OPT][LE_NONE][LF_UDP] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
        [LC_IPV4_OPT][LE_NONE][LF_GRE] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
        [LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
        [LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
        [LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,

        [LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
        [LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
                                RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
        [LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
        [LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
        [LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
        [LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
        [LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
        [LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
                                RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
        [LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
                                RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
        [LC_IPV6][LE_NONE][LF_NVGRE] =
                                RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_NVGRE,
        [LC_IPV6_OPT][LE_NONE][LF_NONE] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
        [LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L3_IPV4,
        [LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
        [LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
        [LC_IPV6_OPT][LE_NONE][LF_TCP] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
        [LC_IPV6_OPT][LE_NONE][LF_UDP] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
        [LC_IPV6_OPT][LE_NONE][LF_GRE] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
        [LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
        [LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
        [LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,

};

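/* The RX fastpath (octeontx_recv_pkts(), declared below, and the variants
 * generated from OCCTX_RX_FASTPATH_MODES, defined in the driver's rxtx
 * source) is expected to index this table with the three layer types parsed
 * by the PKI in order to fill mbuf->packet_type. A minimal sketch, assuming
 * lc, le and lf hold the parsed PKI layer-type codes:
 *
 *      mbuf->packet_type = ptype_table[lc][le][lf];
 */
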
static __rte_always_inline uint64_t
octeontx_pktmbuf_detach(struct rte_mbuf *m)
{
        struct rte_mempool *mp = m->pool;
        uint32_t mbuf_size, buf_len;
        struct rte_mbuf *md;
        uint16_t priv_size;
        uint16_t refcount;

        /* Update refcount of direct mbuf */
        md = rte_mbuf_from_indirect(m);
        refcount = rte_mbuf_refcnt_update(md, -1);

        priv_size = rte_pktmbuf_priv_size(mp);
        mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
        buf_len = rte_pktmbuf_data_room_size(mp);

        m->priv_size = priv_size;
        m->buf_addr = (char *)m + mbuf_size;
        m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
        m->buf_len = (uint16_t)buf_len;
        rte_pktmbuf_reset_headroom(m);
        m->data_len = 0;
        m->ol_flags = 0;
        m->next = NULL;
        m->nb_segs = 1;

        /* Now indirect mbuf is safe to free */
        rte_pktmbuf_free(m);

        if (refcount == 0) {
                rte_mbuf_refcnt_set(md, 1);
                md->data_len = 0;
                md->ol_flags = 0;
                md->next = NULL;
                md->nb_segs = 1;
                return 0;
        } else {
                return 1;
        }
}

static __rte_always_inline uint64_t
octeontx_prefree_seg(struct rte_mbuf *m)
{
        if (likely(rte_mbuf_refcnt_read(m) == 1)) {
                if (!RTE_MBUF_DIRECT(m))
                        return octeontx_pktmbuf_detach(m);

                m->next = NULL;
                m->nb_segs = 1;
                return 0;
        } else if (rte_mbuf_refcnt_update(m, -1) == 0) {
                if (!RTE_MBUF_DIRECT(m))
                        return octeontx_pktmbuf_detach(m);

                rte_mbuf_refcnt_set(m, 1);
                m->next = NULL;
                m->nb_segs = 1;
                return 0;
        }

        /* Mbuf still has a refcount greater than 1, so it must not be freed */
        return 1;
}

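/* octeontx_prefree_seg() returns 0 when the segment's buffer may be handed
 * back to its pool by the PKO hardware and 1 when it must be kept alive
 * (extra references, or an indirect mbuf whose backing buffer is still in
 * use). The transmit helpers below fold that result into the "don't free"
 * bits of the command words, following this pattern:
 *
 *      cmd_buf[0] |= octeontx_prefree_seg(m) << 58;    SEND_HDR_S[DF]
 *      cmd_buf[i] |= octeontx_prefree_seg(m) << 57;    SEND_GATHER_S[I]
 */
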
static __rte_always_inline void
octeontx_tx_checksum_offload(uint64_t *cmd_buf, const uint16_t flags,
                             struct rte_mbuf *m)
{
        struct octeontx_send_hdr_s *send_hdr =
                                (struct octeontx_send_hdr_s *)cmd_buf;
        uint64_t ol_flags = m->ol_flags;

        /* PKO Checksum L4 Algorithm Enumeration
         * 0x0 - No checksum
         * 0x1 - UDP L4 checksum
         * 0x2 - TCP L4 checksum
         * 0x3 - SCTP L4 checksum
         */
        const uint8_t csum = (!(((ol_flags ^ PKT_TX_UDP_CKSUM) >> 52) & 0x3) +
                      (!(((ol_flags ^ PKT_TX_TCP_CKSUM) >> 52) & 0x3) * 2) +
                      (!(((ol_flags ^ PKT_TX_SCTP_CKSUM) >> 52) & 0x3) * 3));

        const uint8_t is_tunnel_parsed = (!!(ol_flags & PKT_TX_TUNNEL_GTP) ||
                                      !!(ol_flags & PKT_TX_TUNNEL_VXLAN_GPE) ||
                                      !!(ol_flags & PKT_TX_TUNNEL_VXLAN) ||
                                      !!(ol_flags & PKT_TX_TUNNEL_GRE) ||
                                      !!(ol_flags & PKT_TX_TUNNEL_GENEVE) ||
                                      !!(ol_flags & PKT_TX_TUNNEL_IP) ||
                                      !!(ol_flags & PKT_TX_TUNNEL_IPIP));

        const uint8_t csum_outer = (!!(ol_flags & PKT_TX_OUTER_UDP_CKSUM) ||
                                    !!(ol_flags & PKT_TX_TUNNEL_UDP));
        const uint8_t outer_l2_len = m->outer_l2_len;
        const uint8_t l2_len = m->l2_len;

        if ((flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
            (flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F)) {
                if (is_tunnel_parsed) {
                        /* Outer L3 */
                        send_hdr->w0.l3ptr = outer_l2_len;
                        send_hdr->w0.l4ptr = outer_l2_len + m->outer_l3_len;
                        /* Set ckl3 for PKO to calculate IPV4 header checksum */
                        send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_OUTER_IPV4);

                        /* Outer L4 */
                        send_hdr->w0.ckl4 = csum_outer;

                        /* Inner L3 */
                        send_hdr->w1.leptr = send_hdr->w0.l4ptr + l2_len;
                        send_hdr->w1.lfptr = send_hdr->w1.leptr + m->l3_len;
                        /* Set ckle for PKO to calculate inner IPV4 header
                         * checksum.
                         */
                        send_hdr->w0.ckle = !!(ol_flags & PKT_TX_IPV4);

                        /* Inner L4 */
                        send_hdr->w0.cklf = csum;
                } else {
                        /* Inner L3 */
                        send_hdr->w0.l3ptr = l2_len;
                        send_hdr->w0.l4ptr = l2_len + m->l3_len;
                        /* Set ckl3 for PKO to calculate IPV4 header checksum */
                        send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_IPV4);

                        /* Inner L4 */
                        send_hdr->w0.ckl4 = csum;
                }
        } else if (flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) {
                /* Outer L3 */
                send_hdr->w0.l3ptr = outer_l2_len;
                send_hdr->w0.l4ptr = outer_l2_len + m->outer_l3_len;
                /* Set ckl3 for PKO to calculate IPV4 header checksum */
                send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_OUTER_IPV4);

                /* Outer L4 */
                send_hdr->w0.ckl4 = csum_outer;
        } else if (flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F) {
                /* Inner L3 */
                send_hdr->w0.l3ptr = l2_len;
                send_hdr->w0.l4ptr = l2_len + m->l3_len;
                /* Set ckl3 for PKO to calculate IPV4 header checksum */
                send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_IPV4);

                /* Inner L4 */
                send_hdr->w0.ckl4 = csum;
        }
}
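
/* Example of the mbuf fields a caller is expected to populate for the inner
 * L3/L4 checksum path above; a sketch for a plain Ethernet/IPv4/TCP packet,
 * not taken from the driver itself:
 *
 *      m->l2_len   = RTE_ETHER_HDR_LEN;
 *      m->l3_len   = sizeof(struct rte_ipv4_hdr);
 *      m->ol_flags = PKT_TX_IPV4 | PKT_TX_TCP_CKSUM;
 */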

static __rte_always_inline uint16_t
__octeontx_xmit_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
                        const uint16_t flag)
{
        uint16_t gaura_id, nb_desc = 0;

        /* Setup PKO_SEND_HDR_S */
        cmd_buf[nb_desc++] = tx_pkt->data_len & 0xffff;
        cmd_buf[nb_desc++] = 0x0;

        /* Enable tx checksum offload */
        if ((flag & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) ||
            (flag & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F))
                octeontx_tx_checksum_offload(cmd_buf, flag, tx_pkt);

        /* SEND_HDR[DF] bit controls if buffer is to be freed or
         * not, as SG_DESC[I] and SEND_HDR[II] are clear.
         */
        if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)
                cmd_buf[0] |= (octeontx_prefree_seg(tx_pkt) <<
                               58);

        /* Mark mempool object as "put" since it is freed by PKO */
        if (!(cmd_buf[0] & (1ULL << 58)))
                __mempool_check_cookies(tx_pkt->pool, (void **)&tx_pkt,
                                        1, 0);
        /* Get the gaura Id */
        gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
                                              tx_pkt->pool->pool_id);

        /* Setup PKO_SEND_BUFLINK_S */
        cmd_buf[nb_desc++] = PKO_SEND_BUFLINK_SUBDC |
                PKO_SEND_BUFLINK_LDTYPE(0x1ull) |
                PKO_SEND_BUFLINK_GAUAR((long)gaura_id) |
                tx_pkt->data_len;
        cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);

        return nb_desc;
}
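
/* For the single-segment case the command built above is four 64-bit words:
 * the two SEND_HDR_S words followed by the SEND_BUFLINK_S sub-descriptor
 * (its control word plus the segment's IOVA). That lines up with the
 * descriptor count of 4 used by the non-multiseg rows of
 * OCCTX_TX_FASTPATH_MODES below.
 */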

static __rte_always_inline uint16_t
__octeontx_xmit_mseg_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
                        const uint16_t flag)
{
        uint16_t nb_segs, nb_desc = 0;
        uint16_t gaura_id, len = 0;
        struct rte_mbuf *m_next = NULL;

        nb_segs = tx_pkt->nb_segs;
        /* Setup PKO_SEND_HDR_S */
        cmd_buf[nb_desc++] = tx_pkt->pkt_len & 0xffff;
        cmd_buf[nb_desc++] = 0x0;

        /* Enable tx checksum offload */
        if ((flag & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) ||
            (flag & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F))
                octeontx_tx_checksum_offload(cmd_buf, flag, tx_pkt);

        do {
                m_next = tx_pkt->next;
                /* Handle the case where the mbufs belong to different
                 * pools, e.g. due to fragmentation.
                 */
                gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
                                                      tx_pkt->pool->pool_id);

                /* Setup PKO_SEND_GATHER_S */
                cmd_buf[nb_desc] = PKO_SEND_GATHER_SUBDC                 |
                                   PKO_SEND_GATHER_LDTYPE(0x1ull)        |
                                   PKO_SEND_GATHER_GAUAR((long)gaura_id) |
                                   tx_pkt->data_len;

                /* SG_DESC[I] bit controls if buffer is to be freed or
                 * not, as SEND_HDR[DF] and SEND_HDR[II] are clear.
                 */
                if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F) {
                        cmd_buf[nb_desc] |=
                             (octeontx_prefree_seg(tx_pkt) << 57);
                }

                /* Mark mempool object as "put" since it is freed by
                 * PKO.
                 */
                if (!(cmd_buf[nb_desc] & (1ULL << 57))) {
                        tx_pkt->next = NULL;
                        __mempool_check_cookies(tx_pkt->pool,
                                                (void **)&tx_pkt, 1, 0);
                }
                nb_desc++;

                cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);

                nb_segs--;
                len += tx_pkt->data_len;
                tx_pkt = m_next;
        } while (nb_segs);

        return nb_desc;
}
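
/* Each segment adds two words after the two SEND_HDR_S words: a
 * SEND_GATHER_S sub-descriptor and the segment's IOVA, i.e. a packet with N
 * segments needs 2 + 2 * N command words. The multiseg rows of
 * OCCTX_TX_FASTPATH_MODES below budget 14 words, which appears to allow for
 * up to six segments per packet.
 */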

static __rte_always_inline uint16_t
__octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                     uint16_t nb_pkts, uint64_t *cmd_buf,
                     const uint16_t flags)
{
        struct octeontx_txq *txq = tx_queue;
        octeontx_dq_t *dq = &txq->dq;
        uint16_t count = 0, nb_desc;

        rte_cio_wmb();

        while (count < nb_pkts) {
                if (unlikely(*((volatile int64_t *)dq->fc_status_va) < 0))
                        break;

                if (flags & OCCTX_TX_MULTI_SEG_F) {
                        nb_desc = __octeontx_xmit_mseg_prepare(tx_pkts[count],
                                                               cmd_buf, flags);
                } else {
                        nb_desc = __octeontx_xmit_prepare(tx_pkts[count],
                                                          cmd_buf, flags);
                }

                octeontx_reg_lmtst(dq->lmtline_va, dq->ioreg_va, cmd_buf,
                                   nb_desc);

                count++;
        }
        return count;
}

uint16_t
octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

#define L3L4CSUM_F   OCCTX_TX_OFFLOAD_L3_L4_CSUM_F
#define OL3OL4CSUM_F OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F
#define NOFF_F       OCCTX_TX_OFFLOAD_MBUF_NOFF_F
#define MULT_F       OCCTX_TX_MULTI_SEG_F

/* [NOFF] [OL3OL4CSUM_F] [L3L4CSUM_F] [MULTI_SEG] */
#define OCCTX_TX_FASTPATH_MODES                                                \
T(no_offload,                           0, 0, 0, 0,     4,                     \
                                        OCCTX_TX_OFFLOAD_NONE)                 \
T(mseg,                                 0, 0, 0, 1,     14,                    \
                                        MULT_F)                                \
T(l3l4csum,                             0, 0, 1, 0,     4,                     \
                                        L3L4CSUM_F)                            \
T(l3l4csum_mseg,                        0, 0, 1, 1,     14,                    \
                                        L3L4CSUM_F | MULT_F)                   \
T(ol3ol4csum,                           0, 1, 0, 0,     4,                     \
                                        OL3OL4CSUM_F)                          \
T(ol3l4csum_mseg,                       0, 1, 0, 1,     14,                    \
                                        OL3OL4CSUM_F | MULT_F)                 \
T(ol3l4csum_l3l4csum,                   0, 1, 1, 0,     4,                     \
                                        OL3OL4CSUM_F | L3L4CSUM_F)             \
T(ol3l4csum_l3l4csum_mseg,              0, 1, 1, 1,     14,                    \
                                        OL3OL4CSUM_F | L3L4CSUM_F | MULT_F)    \
T(noff,                                 1, 0, 0, 0,     4,                     \
                                        NOFF_F)                                \
T(noff_mseg,                            1, 0, 0, 1,     14,                    \
                                        NOFF_F | MULT_F)                       \
T(noff_l3l4csum,                        1, 0, 1, 0,     4,                     \
                                        NOFF_F | L3L4CSUM_F)                   \
T(noff_l3l4csum_mseg,                   1, 0, 1, 1,     14,                    \
                                        NOFF_F | L3L4CSUM_F | MULT_F)          \
T(noff_ol3ol4csum,                      1, 1, 0, 0,     4,                     \
                                        NOFF_F | OL3OL4CSUM_F)                 \
T(noff_ol3ol4csum_mseg,                 1, 1, 0, 1,     14,                    \
                                        NOFF_F | OL3OL4CSUM_F | MULT_F)        \
T(noff_ol3ol4csum_l3l4csum,             1, 1, 1, 0,     4,                     \
                                        NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F)    \
T(noff_ol3ol4csum_l3l4csum_mseg,        1, 1, 1, 1,     14,                    \
                                        NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F |   \
                                        MULT_F)
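
/* The per-mode transmit functions are generated elsewhere in the driver by
 * expanding the T() entries above. A minimal sketch of the expected pattern,
 * assuming an octeontx_xmit_pkts_<name> naming scheme (illustrative only,
 * not necessarily the driver's exact expansion):
 *
 *      #define T(name, f3, f2, f1, f0, sz, flags)                       \
 *      static uint16_t                                                  \
 *      octeontx_xmit_pkts_ ## name(void *tx_queue,                      \
 *                                  struct rte_mbuf **tx_pkts,           \
 *                                  uint16_t pkts)                       \
 *      {                                                                \
 *              uint64_t cmd[(sz)];                                      \
 *                                                                       \
 *              return __octeontx_xmit_pkts(tx_queue, tx_pkts, pkts,     \
 *                                          cmd, (flags));               \
 *      }
 *
 *      OCCTX_TX_FASTPATH_MODES
 *      #undef T
 */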

/* RX offload macros */
#define VLAN_FLTR_F     OCCTX_RX_VLAN_FLTR_F
#define CSUM_F          OCCTX_RX_OFFLOAD_CSUM_F
#define MULT_RX_F       OCCTX_RX_MULTI_SEG_F

/* [VLAN_FLTR] [CSUM_F] [MULTI_SEG] */
#define OCCTX_RX_FASTPATH_MODES                                                \
R(no_offload,                           0, 0, 0,  OCCTX_RX_OFFLOAD_NONE)       \
R(mseg,                                 0, 0, 1,  MULT_RX_F)                   \
R(csum,                                 0, 1, 0,  CSUM_F)                      \
R(csum_mseg,                            0, 1, 1,  CSUM_F | MULT_RX_F)          \
R(vlan,                                 1, 0, 0,  VLAN_FLTR_F)                 \
R(vlan_mseg,                            1, 0, 1,  VLAN_FLTR_F | MULT_RX_F)     \
R(vlan_csum,                            1, 1, 0,  VLAN_FLTR_F | CSUM_F)        \
R(vlan_csum_mseg,                       1, 1, 1,  CSUM_F | VLAN_FLTR_F |       \
                                        MULT_RX_F)
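
/* The RX table is consumed in the same way: the driver expands R() to
 * declare or define one receive routine per offload combination. A minimal
 * sketch, assuming an octeontx_recv_pkts_<name> naming scheme (illustrative
 * only, not necessarily the driver's exact expansion):
 *
 *      #define R(name, f2, f1, f0, flags)                               \
 *      uint16_t                                                         \
 *      octeontx_recv_pkts_ ## name(void *rx_queue,                      \
 *                                  struct rte_mbuf **rx_pkts,           \
 *                                  uint16_t nb_pkts);
 *
 *      OCCTX_RX_FASTPATH_MODES
 *      #undef R
 */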

#endif /* __OCTEONTX_RXTX_H__ */