/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef __OCTEONTX_RXTX_H__
#define __OCTEONTX_RXTX_H__

#include <rte_ethdev_driver.h>

#define OFFLOAD_FLAGS					\
	uint16_t rx_offload_flags;			\
	uint16_t tx_offload_flags

#define BIT(nr) (1UL << (nr))

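/*
 * Rx/Tx offload flags. The Tx flags are passed as compile-time constants
 * to the inline fast-path helpers below, so each supported offload
 * combination can be compiled into its own specialized burst function
 * (see OCCTX_TX_FASTPATH_MODES at the end of this file).
 */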
#define OCCTX_RX_OFFLOAD_NONE		(0)
#define OCCTX_RX_OFFLOAD_RSS_F		BIT(0)
#define OCCTX_RX_MULTI_SEG_F		BIT(15)

#define OCCTX_TX_OFFLOAD_NONE		(0)
#define OCCTX_TX_OFFLOAD_MBUF_NOFF_F	BIT(3)

#define OCCTX_TX_MULTI_SEG_F		BIT(15)
/* Packet type table */
#define PTYPE_SIZE	OCCTX_PKI_LTYPE_LAST

static const uint32_t __rte_cache_aligned
ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
	[LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
	[LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
	[LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
	[LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
	[LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
	[LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
	[LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
	[LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
	[LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
	[LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,

	[LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
	[LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_ESP,
	[LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
	[LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
	[LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
	[LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
	[LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
	[LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
	[LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
	[LC_IPV4][LE_NONE][LF_NVGRE] =
				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,

	[LC_IPV4_OPT][LE_NONE][LF_NONE] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
	[LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_ESP,
	[LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
	[LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
	[LC_IPV4_OPT][LE_NONE][LF_TCP] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
	[LC_IPV4_OPT][LE_NONE][LF_UDP] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
	[LC_IPV4_OPT][LE_NONE][LF_GRE] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
	[LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
	[LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
	[LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,

	[LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
	[LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_ESP,
	[LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
	[LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
	[LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
	[LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
	[LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
	[LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
	[LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
	[LC_IPV6][LE_NONE][LF_NVGRE] =
				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_NVGRE,

	[LC_IPV6_OPT][LE_NONE][LF_NONE] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
	[LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_ESP,
	[LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
	[LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
	[LC_IPV6_OPT][LE_NONE][LF_TCP] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
	[LC_IPV6_OPT][LE_NONE][LF_UDP] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
	[LC_IPV6_OPT][LE_NONE][LF_GRE] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
	[LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
	[LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
	[LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,
};
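
/*
 * Illustrative helper, a minimal sketch rather than part of the upstream
 * driver: the PKI parser reports per-layer types (the LC/LE/LF codes in
 * the Rx work-queue entry), and the Rx path can translate them into an
 * rte_mbuf packet_type through the lookup table above. The function name
 * and the assumption that the caller has already extracted the three
 * layer-type codes (each below OCCTX_PKI_LTYPE_LAST) are illustrative.
 */
static __rte_always_inline uint32_t
octeontx_rx_wqe_to_ptype(uint8_t lc, uint8_t le, uint8_t lf)
{
	return ptype_table[lc][le][lf];
}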
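/*
 * Detach an indirect mbuf from the direct mbuf it references, restore its
 * embedded data buffer and free it back to its own pool, along the lines
 * of rte_pktmbuf_detach(). Returns 0 when the direct mbuf is no longer
 * referenced (its refcount dropped to zero and it has been reset for
 * reuse), 1 when other references remain.
 */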
static __rte_always_inline uint64_t
octeontx_pktmbuf_detach(struct rte_mbuf *m)
{
	struct rte_mempool *mp = m->pool;
	uint32_t mbuf_size, buf_len;
	struct rte_mbuf *md;
	uint16_t priv_size;
	uint16_t refcount;

	/* Update refcount of direct mbuf */
	md = rte_mbuf_from_indirect(m);
	refcount = rte_mbuf_refcnt_update(md, -1);

	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
	buf_len = rte_pktmbuf_data_room_size(mp);

	m->priv_size = priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
	m->buf_len = (uint16_t)buf_len;
	rte_pktmbuf_reset_headroom(m);
	m->data_len = 0;
	m->ol_flags = 0;
	m->next = NULL;
	m->nb_segs = 1;

	/* Now the indirect mbuf is safe to free */
	rte_pktmbuf_free(m);

	if (refcount == 0) {
		rte_mbuf_refcnt_set(md, 1);
		md->data_len = 0;
		md->ol_flags = 0;
		md->next = NULL;
		md->nb_segs = 1;
		return 0;
	} else {
		return 1;
	}
}

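/*
 * Decide whether the buffer backing this segment may be freed by the PKO
 * hardware. Returns 0 when hardware may free it (any indirect mbuf has
 * been detached first), 1 when software still holds a reference and the
 * hardware must leave the buffer alone.
 */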
static __rte_always_inline uint64_t
octeontx_prefree_seg(struct rte_mbuf *m)
{
	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
		if (!RTE_MBUF_DIRECT(m))
			return octeontx_pktmbuf_detach(m);

		m->next = NULL;
		m->nb_segs = 1;
		return 0;
	} else if (rte_mbuf_refcnt_update(m, -1) == 0) {
		if (!RTE_MBUF_DIRECT(m))
			return octeontx_pktmbuf_detach(m);

		rte_mbuf_refcnt_set(m, 1);
		m->next = NULL;
		m->nb_segs = 1;
		return 0;
	}

	/* Mbuf still has a refcount greater than one, so it must not be freed */
	return 1;
}

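/*
 * Build the PKO command for a single-segment packet: a PKO_SEND_HDR_S
 * word pair followed by one PKO_SEND_BUFLINK_S descriptor. Returns the
 * number of 64-bit command words written to cmd_buf.
 */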
static __rte_always_inline uint16_t
__octeontx_xmit_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
			const uint16_t flag)
{
	uint16_t gaura_id, nb_desc = 0;

	/* Setup PKO_SEND_HDR_S */
	cmd_buf[nb_desc++] = tx_pkt->data_len & 0xffff;
	cmd_buf[nb_desc++] = 0x0;

	/* SEND_HDR[DF] bit controls if buffer is to be freed or
	 * not, as SG_DESC[I] and SEND_HDR[II] are clear.
	 */
	if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)
		cmd_buf[0] |= (octeontx_prefree_seg(tx_pkt) << 58);

	/* Mark mempool object as "put" since it is freed by PKO */
	if (!(cmd_buf[0] & (1ULL << 58)))
		__mempool_check_cookies(tx_pkt->pool, (void **)&tx_pkt,
					1, 0);

	/* Get the gaura Id */
	gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
					      tx_pkt->pool->pool_id);

	/* Setup PKO_SEND_BUFLINK_S */
	cmd_buf[nb_desc++] = PKO_SEND_BUFLINK_SUBDC |
		PKO_SEND_BUFLINK_LDTYPE(0x1ull) |
		PKO_SEND_BUFLINK_GAUAR((long)gaura_id) |
		tx_pkt->data_len;
	cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);

	return nb_desc;
}

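/*
 * Build the PKO command for a multi-segment packet: a PKO_SEND_HDR_S
 * word pair followed by one PKO_SEND_GATHER_S descriptor per mbuf
 * segment. Returns the number of 64-bit command words written to cmd_buf.
 */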
static __rte_always_inline uint16_t
__octeontx_xmit_mseg_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
			const uint16_t flag)
{
	uint16_t nb_segs, nb_desc = 0;
	uint16_t gaura_id, len = 0;
	struct rte_mbuf *m_next = NULL;

	nb_segs = tx_pkt->nb_segs;
	/* Setup PKO_SEND_HDR_S */
	cmd_buf[nb_desc++] = tx_pkt->pkt_len & 0xffff;
	cmd_buf[nb_desc++] = 0x0;

	do {
		m_next = tx_pkt->next;
		/* Handle the case where the mbufs of a chain belong to
		 * different pools, e.g. after fragmentation.
		 */
		gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
						      tx_pkt->pool->pool_id);

		/* Setup PKO_SEND_GATHER_S */
		cmd_buf[nb_desc] = PKO_SEND_GATHER_SUBDC		 |
				   PKO_SEND_GATHER_LDTYPE(0x1ull)	 |
				   PKO_SEND_GATHER_GAUAR((long)gaura_id) |
				   tx_pkt->data_len;

		/* SG_DESC[I] bit controls if buffer is to be freed or
		 * not, as SEND_HDR[DF] and SEND_HDR[II] are clear.
		 */
		if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F) {
			cmd_buf[nb_desc] |=
				(octeontx_prefree_seg(tx_pkt) << 57);
		}

		/* Mark mempool object as "put" since it is freed by
		 * PKO.
		 */
		if (!(cmd_buf[nb_desc] & (1ULL << 57))) {
			tx_pkt->next = NULL;
			__mempool_check_cookies(tx_pkt->pool,
						(void **)&tx_pkt, 1, 0);
		}
		nb_desc++;

		cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);

		nb_segs--;
		len += tx_pkt->data_len;
		tx_pkt = m_next;
	} while (nb_segs);

	return nb_desc;
}

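/*
 * Common Tx burst body, specialized at compile time through "flags":
 * check the PKO flow-control status, build the command buffer for each
 * packet and submit it to the descriptor queue with an LMTST operation.
 */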
static __rte_always_inline uint16_t
__octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		     uint16_t nb_pkts, uint64_t *cmd_buf,
		     const uint16_t flags)
{
	struct octeontx_txq *txq = tx_queue;
	octeontx_dq_t *dq = &txq->dq;
	uint16_t count = 0, nb_desc;

	rte_cio_wmb();

	while (count < nb_pkts) {
		if (unlikely(*((volatile int64_t *)dq->fc_status_va) < 0))
			break;

		if (flags & OCCTX_TX_MULTI_SEG_F) {
			nb_desc = __octeontx_xmit_mseg_prepare(tx_pkts[count],
							       cmd_buf, flags);
		} else {
			nb_desc = __octeontx_xmit_prepare(tx_pkts[count],
							  cmd_buf, flags);
		}

		octeontx_reg_lmtst(dq->lmtline_va, dq->ioreg_va, cmd_buf,
				   nb_desc);

		count++;
	}

	return count;
}

uint16_t
octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

#define NOFF_F       OCCTX_TX_OFFLOAD_MBUF_NOFF_F
#define MULT_F       OCCTX_TX_MULTI_SEG_F
/* [NOFF] [MULTI_SEG] */
#define OCCTX_TX_FASTPATH_MODES                                               \
T(no_offload,                           0, 0,   4,   OCCTX_TX_OFFLOAD_NONE)   \
T(mseg,                                 0, 1,   14,  MULT_F)                  \
T(noff,                                 1, 0,   4,   NOFF_F)                  \
T(noff_mseg,                            1, 1,   14,  NOFF_F | MULT_F)

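/*
 * Illustrative sketch, not necessarily the upstream expansion: the Tx
 * implementation file is expected to expand OCCTX_TX_FASTPATH_MODES with
 * a T() macro so that each offload combination above gets its own
 * specialized burst function, along these lines (the function naming and
 * exact shape here are assumptions):
 *
 *	#define T(name, f1, f0, sz, flags)				\
 *	static uint16_t							\
 *	octeontx_xmit_pkts_ ## name(void *tx_queue,			\
 *				    struct rte_mbuf **tx_pkts,		\
 *				    uint16_t pkts)			\
 *	{								\
 *		uint64_t cmd[(sz)];					\
 *									\
 *		return __octeontx_xmit_pkts(tx_queue, tx_pkts, pkts,	\
 *					    cmd, (flags));		\
 *	}
 *
 *	OCCTX_TX_FASTPATH_MODES
 *	#undef T
 */
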
#endif /* __OCTEONTX_RXTX_H__ */