dpdk.git / drivers/net/octeontx/octeontx_rxtx.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef __OCTEONTX_RXTX_H__
#define __OCTEONTX_RXTX_H__

#include <rte_ethdev_driver.h>

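/* Per-queue fast-path offload flag fields. OFFLOAD_FLAGS is meant to be
 * expanded inside the Rx/Tx queue structures so the burst routines can
 * test the OCCTX_*_OFFLOAD_* bits below without touching the ethdev
 * configuration.
 */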
#define OFFLOAD_FLAGS                                   \
        uint16_t rx_offload_flags;                      \
        uint16_t tx_offload_flags

#define BIT(nr) (1UL << (nr))

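/* Offload flag bits carried in the fields above; BIT(15) is reserved in
 * both directions for the multi-segment (scatter/gather) path.
 */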
#define OCCTX_RX_OFFLOAD_NONE           (0)
#define OCCTX_RX_OFFLOAD_RSS_F          BIT(0)
#define OCCTX_RX_MULTI_SEG_F            BIT(15)

#define OCCTX_TX_OFFLOAD_NONE           (0)

#define OCCTX_TX_MULTI_SEG_F            BIT(15)

/* Packet type table */
#define PTYPE_SIZE      OCCTX_PKI_LTYPE_LAST

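/* ptype_table maps the PKI parser layer codes (LC: outer L3, LE, LF: L4
 * or tunnel) to the corresponding RTE_PTYPE_* value; combinations the
 * hardware cannot classify resolve to RTE_PTYPE_UNKNOWN.
 */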
static const uint32_t __rte_cache_aligned
ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
        [LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
        [LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
        [LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
        [LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
        [LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
        [LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
        [LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
        [LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
        [LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
        [LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,

        [LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
        [LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_ESP,
        [LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
        [LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
        [LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
        [LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
        [LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
        [LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
        [LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
        [LC_IPV4][LE_NONE][LF_NVGRE] =
                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,

        [LC_IPV4_OPT][LE_NONE][LF_NONE] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
        [LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_ESP,
        [LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
        [LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
        [LC_IPV4_OPT][LE_NONE][LF_TCP] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
        [LC_IPV4_OPT][LE_NONE][LF_UDP] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
        [LC_IPV4_OPT][LE_NONE][LF_GRE] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
        [LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
        [LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
        [LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,

        [LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
        [LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
                                RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_ESP,
        [LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
        [LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
        [LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
        [LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
        [LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
        [LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
                                RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
        [LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
                                RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
        [LC_IPV6][LE_NONE][LF_NVGRE] =
                                RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_NVGRE,

        [LC_IPV6_OPT][LE_NONE][LF_NONE] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
        [LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_ESP,
        [LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
        [LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
        [LC_IPV6_OPT][LE_NONE][LF_TCP] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
        [LC_IPV6_OPT][LE_NONE][LF_UDP] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
        [LC_IPV6_OPT][LE_NONE][LF_GRE] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
        [LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
        [LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
        [LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,
};

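/* Build the PKO command for one packet in cmd_buf[] and issue it through
 * the LMT line:
 *   - words 0-1 carry PKO_SEND_HDR_S,
 *   - multi-segment: one PKO_SEND_GATHER_S plus the buffer IOVA per segment,
 *   - single-segment: one PKO_SEND_BUFLINK_S plus the buffer IOVA.
 * Returns -ENOSPC when the PKO flow-control status reports no credit left.
 */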
static __rte_always_inline int
__octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
                        struct rte_mbuf *tx_pkt, const uint16_t flag)
{
        uint8_t sz = (4 + (!!(flag & OCCTX_TX_MULTI_SEG_F) * 10));
        /* Max size of PKO SEND desc is 112 bytes */
        uint64_t cmd_buf[sz] __rte_cache_aligned;
        uint8_t nb_segs, nb_desc = 0;
        uint16_t gaura_id, len = 0;
        struct rte_mbuf *m_next = NULL;

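        /* A negative flow-control status means PKO cannot accept more
         * commands right now; report -ENOSPC and let the caller retry.
         */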
        if (unlikely(*((volatile int64_t *)fc_status_va) < 0))
                return -ENOSPC;

        if (flag & OCCTX_TX_MULTI_SEG_F) {
                nb_segs = tx_pkt->nb_segs;
                /* Setup PKO_SEND_HDR_S */
                cmd_buf[nb_desc++] = tx_pkt->pkt_len & 0xffff;
                cmd_buf[nb_desc++] = 0x0;

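                /* Emit one PKO_SEND_GATHER_S plus the segment IOVA (two
                 * 64-bit words) for every mbuf in the chain.
                 */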
                do {
                        m_next = tx_pkt->next;
                        /* Handle the case where the mbufs in a chain come
                         * from different pools, e.g. after fragmentation.
                         */
                        gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
                                                        tx_pkt->pool->pool_id);

                        /* Setup PKO_SEND_GATHER_S */
                        cmd_buf[nb_desc] = PKO_SEND_GATHER_SUBDC           |
                                             PKO_SEND_GATHER_LDTYPE(0x1ull)  |
                                             PKO_SEND_GATHER_GAUAR((long)
                                                                   gaura_id) |
                                             tx_pkt->data_len;
                        /* Mark the mempool object as "put" since it is freed
                         * by PKO.
                         */
                        if (!(cmd_buf[nb_desc] & (1ULL << 57))) {
                                tx_pkt->next = NULL;
                                __mempool_check_cookies(tx_pkt->pool,
                                                        (void **)&tx_pkt, 1, 0);
                        }
                        nb_desc++;

                        cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);

                        nb_segs--;
                        len += tx_pkt->data_len;
                        tx_pkt = m_next;
                } while (nb_segs);
        } else {
                /* Setup PKO_SEND_HDR_S */
                cmd_buf[nb_desc++] = tx_pkt->data_len & 0xffff;
                cmd_buf[nb_desc++] = 0x0;

                /* Mark mempool object as "put" since it is freed by PKO */
                if (!(cmd_buf[0] & (1ULL << 58)))
                        __mempool_check_cookies(tx_pkt->pool, (void **)&tx_pkt,
                                                1, 0);
                /* Get the gaura id */
                gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
                                                      tx_pkt->pool->pool_id);

                /* Setup PKO_SEND_BUFLINK_S */
                cmd_buf[nb_desc++] = PKO_SEND_BUFLINK_SUBDC |
                                     PKO_SEND_BUFLINK_LDTYPE(0x1ull) |
                                     PKO_SEND_BUFLINK_GAUAR((long)gaura_id) |
                                     tx_pkt->data_len;
                cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);
        }
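        /* Flush the assembled command words to PKO through the LMT store */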
        octeontx_reg_lmtst(lmtline_va, ioreg_va, cmd_buf, nb_desc);

        return 0;
}

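/* Burst Tx/Rx entry points; the _mseg variant handles multi-segment mbufs.
 *
 * Illustrative sketch only: a single-segment Tx burst is expected to loop
 * over tx_pkts and stop on back-pressure, roughly as below (the txq field
 * names are assumptions for illustration, not the driver's actual layout):
 *
 *      for (count = 0; count < nb_pkts; count++) {
 *              res = __octeontx_xmit_pkts(txq->lmtline_va, txq->ioreg_va,
 *                                         txq->fc_status_va, tx_pkts[count],
 *                                         OCCTX_TX_OFFLOAD_NONE);
 *              if (res < 0)
 *                      break;
 *      }
 *      return count;
 */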
uint16_t
octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);

uint16_t
octeontx_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
                        uint16_t nb_pkts);

uint16_t
octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

#endif /* __OCTEONTX_RXTX_H__ */