net/octeontx: support flow control
[dpdk.git] drivers/net/octeontx/octeontx_rxtx.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef __OCTEONTX_RXTX_H__
#define __OCTEONTX_RXTX_H__

#include <rte_ethdev_driver.h>

#define OFFLOAD_FLAGS                                   \
        uint16_t rx_offload_flags;                      \
        uint16_t tx_offload_flags

#define BIT(nr) (1UL << (nr))

#define OCCTX_RX_OFFLOAD_NONE           (0)
#define OCCTX_RX_OFFLOAD_RSS_F          BIT(0)
#define OCCTX_RX_VLAN_FLTR_F            BIT(1)
#define OCCTX_RX_MULTI_SEG_F            BIT(15)

#define OCCTX_TX_OFFLOAD_NONE           (0)
#define OCCTX_TX_OFFLOAD_MBUF_NOFF_F    BIT(3)

#define OCCTX_TX_MULTI_SEG_F            BIT(15)

/* Packet type table */
#define PTYPE_SIZE      OCCTX_PKI_LTYPE_LAST

static const uint32_t __rte_cache_aligned
ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
        [LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
        [LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
        [LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
        [LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
        [LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
        [LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
        [LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
        [LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
        [LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
        [LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,

        [LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
        [LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
        [LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
        [LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
        [LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
        [LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
        [LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
        [LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
        [LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
        [LC_IPV4][LE_NONE][LF_NVGRE] =
                                RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,

        [LC_IPV4_OPT][LE_NONE][LF_NONE] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
        [LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L3_IPV4,
        [LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
        [LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
        [LC_IPV4_OPT][LE_NONE][LF_TCP] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
        [LC_IPV4_OPT][LE_NONE][LF_UDP] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
        [LC_IPV4_OPT][LE_NONE][LF_GRE] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
        [LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
        [LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
        [LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
                                RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,

        [LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
        [LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
                                RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
        [LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
        [LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
        [LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
        [LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
        [LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
        [LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
                                RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
        [LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
                                RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
        [LC_IPV6][LE_NONE][LF_NVGRE] =
                                RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_NVGRE,

        [LC_IPV6_OPT][LE_NONE][LF_NONE] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
        [LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L3_IPV4,
        [LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
        [LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
        [LC_IPV6_OPT][LE_NONE][LF_TCP] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
        [LC_IPV6_OPT][LE_NONE][LF_UDP] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
        [LC_IPV6_OPT][LE_NONE][LF_GRE] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
        [LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
        [LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
        [LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
                                RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,

};
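
/*
 * The table is indexed with the LC, LE and LF layer types reported by the
 * PKI parser for each received packet. A sketch of the lookup done on the
 * Rx path (the work-queue-entry field names below are illustrative
 * assumptions, not definitions from this header):
 *
 *      mbuf->packet_type =
 *              ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
 */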

static __rte_always_inline uint64_t
octeontx_pktmbuf_detach(struct rte_mbuf *m)
{
        struct rte_mempool *mp = m->pool;
        uint32_t mbuf_size, buf_len;
        struct rte_mbuf *md;
        uint16_t priv_size;
        uint16_t refcount;

        /* Update refcount of direct mbuf */
        md = rte_mbuf_from_indirect(m);
        refcount = rte_mbuf_refcnt_update(md, -1);

        priv_size = rte_pktmbuf_priv_size(mp);
        mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
        buf_len = rte_pktmbuf_data_room_size(mp);

        m->priv_size = priv_size;
        m->buf_addr = (char *)m + mbuf_size;
        m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
        m->buf_len = (uint16_t)buf_len;
        rte_pktmbuf_reset_headroom(m);
        m->data_len = 0;
        m->ol_flags = 0;
        m->next = NULL;
        m->nb_segs = 1;

        /* Now indirect mbuf is safe to free */
        rte_pktmbuf_free(m);

        if (refcount == 0) {
                rte_mbuf_refcnt_set(md, 1);
                md->data_len = 0;
                md->ol_flags = 0;
                md->next = NULL;
                md->nb_segs = 1;
                return 0;
        } else {
                return 1;
        }
}

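/*
 * Decide whether the PKO hardware may free the buffer backing this mbuf.
 * Returns 0 when the buffer can be released to its pool by PKO (last
 * reference, or the underlying direct mbuf is no longer referenced after
 * detach), and 1 when the buffer must not be freed because other references
 * to it still exist.
 */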
static __rte_always_inline uint64_t
octeontx_prefree_seg(struct rte_mbuf *m)
{
        if (likely(rte_mbuf_refcnt_read(m) == 1)) {
                if (!RTE_MBUF_DIRECT(m))
                        return octeontx_pktmbuf_detach(m);

                m->next = NULL;
                m->nb_segs = 1;
                return 0;
        } else if (rte_mbuf_refcnt_update(m, -1) == 0) {
                if (!RTE_MBUF_DIRECT(m))
                        return octeontx_pktmbuf_detach(m);

                rte_mbuf_refcnt_set(m, 1);
                m->next = NULL;
                m->nb_segs = 1;
                return 0;
        }

        /* Mbuf refcount is greater than 1, so it must not be freed here */
        return 1;
}

static __rte_always_inline uint16_t
__octeontx_xmit_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
                        const uint16_t flag)
{
        uint16_t gaura_id, nb_desc = 0;

        /* Setup PKO_SEND_HDR_S */
        cmd_buf[nb_desc++] = tx_pkt->data_len & 0xffff;
        cmd_buf[nb_desc++] = 0x0;

        /* SEND_HDR[DF] bit controls if buffer is to be freed or
         * not, as SG_DESC[I] and SEND_HDR[II] are clear.
         */
        if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)
                cmd_buf[0] |= (octeontx_prefree_seg(tx_pkt) <<
                               58);

        /* Mark mempool object as "put" since it is freed by PKO */
        if (!(cmd_buf[0] & (1ULL << 58)))
                __mempool_check_cookies(tx_pkt->pool, (void **)&tx_pkt,
                                        1, 0);
        /* Get the gaura Id */
        gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
                                              tx_pkt->pool->pool_id);

        /* Setup PKO_SEND_BUFLINK_S */
        cmd_buf[nb_desc++] = PKO_SEND_BUFLINK_SUBDC |
                PKO_SEND_BUFLINK_LDTYPE(0x1ull) |
                PKO_SEND_BUFLINK_GAUAR((long)gaura_id) |
                tx_pkt->data_len;
        cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);

        return nb_desc;
}

static __rte_always_inline uint16_t
__octeontx_xmit_mseg_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
                        const uint16_t flag)
{
        uint16_t nb_segs, nb_desc = 0;
        uint16_t gaura_id, len = 0;
        struct rte_mbuf *m_next = NULL;

        nb_segs = tx_pkt->nb_segs;
        /* Setup PKO_SEND_HDR_S */
        cmd_buf[nb_desc++] = tx_pkt->pkt_len & 0xffff;
        cmd_buf[nb_desc++] = 0x0;

        do {
                m_next = tx_pkt->next;
                /* Handle the case where mbufs in the chain belong to
                 * different pools, e.g. after fragmentation.
                 */
                gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
                                                      tx_pkt->pool->pool_id);

                /* Setup PKO_SEND_GATHER_S */
                cmd_buf[nb_desc] = PKO_SEND_GATHER_SUBDC                 |
                                   PKO_SEND_GATHER_LDTYPE(0x1ull)        |
                                   PKO_SEND_GATHER_GAUAR((long)gaura_id) |
                                   tx_pkt->data_len;

                /* SG_DESC[I] bit controls if buffer is to be freed or
                 * not, as SEND_HDR[DF] and SEND_HDR[II] are clear.
                 */
                if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F) {
                        cmd_buf[nb_desc] |=
                             (octeontx_prefree_seg(tx_pkt) << 57);
                }

                /* Mark mempool object as "put" since it is freed by
                 * PKO.
                 */
                if (!(cmd_buf[nb_desc] & (1ULL << 57))) {
                        tx_pkt->next = NULL;
                        __mempool_check_cookies(tx_pkt->pool,
                                                (void **)&tx_pkt, 1, 0);
                }
                nb_desc++;

                cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);

                nb_segs--;
                len += tx_pkt->data_len;
                tx_pkt = m_next;
        } while (nb_segs);

        return nb_desc;
}
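
/*
 * Command buffer sizing (illustrative): a single-segment packet uses 4 words
 * (2 for PKO_SEND_HDR_S plus 2 for PKO_SEND_BUFLINK_S), while a multi-segment
 * packet uses 2 + 2 * nb_segs words. This appears to match the 4 and 14 word
 * sizes listed in OCCTX_TX_FASTPATH_MODES at the end of this file, with 14
 * words covering up to 6 segments. A hypothetical helper expressing that
 * count:
 *
 *      #define OCCTX_TX_CMD_WORDS(nb_segs)     (2 + 2 * (nb_segs))
 */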

static __rte_always_inline uint16_t
__octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                     uint16_t nb_pkts, uint64_t *cmd_buf,
                     const uint16_t flags)
{
        struct octeontx_txq *txq = tx_queue;
        octeontx_dq_t *dq = &txq->dq;
        uint16_t count = 0, nb_desc;

        rte_cio_wmb();

        while (count < nb_pkts) {
                if (unlikely(*((volatile int64_t *)dq->fc_status_va) < 0))
                        break;

                if (flags & OCCTX_TX_MULTI_SEG_F) {
                        nb_desc = __octeontx_xmit_mseg_prepare(tx_pkts[count],
                                                               cmd_buf, flags);
                } else {
                        nb_desc = __octeontx_xmit_prepare(tx_pkts[count],
                                                          cmd_buf, flags);
                }

                octeontx_reg_lmtst(dq->lmtline_va, dq->ioreg_va, cmd_buf,
                                   nb_desc);

                count++;
        }
        return count;
}

uint16_t
octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

#define NOFF_F       OCCTX_TX_OFFLOAD_MBUF_NOFF_F
#define MULT_F       OCCTX_TX_MULTI_SEG_F

/* [NOFF] [MULTI_SEG] */
#define OCCTX_TX_FASTPATH_MODES                                               \
T(no_offload,                           0, 0,   4,   OCCTX_TX_OFFLOAD_NONE)   \
T(mseg,                                 0, 1,   14,  MULT_F)                  \
T(noff,                                 1, 0,   4,   NOFF_F)                  \
T(noff_mseg,                            1, 1,   14,  NOFF_F | MULT_F)
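
/*
 * One way the T() entries can be expanded into per-mode transmit functions
 * (a sketch under assumed naming conventions, not necessarily the exact
 * expansion used by the driver sources):
 *
 *      #define T(name, f1, f0, sz, flags)                              \
 *      static uint16_t                                                 \
 *      octeontx_xmit_pkts_ ## name(void *tx_queue,                     \
 *                                  struct rte_mbuf **tx_pkts,          \
 *                                  uint16_t nb_pkts)                   \
 *      {                                                               \
 *              uint64_t cmd[(sz)];                                     \
 *                                                                      \
 *              return __octeontx_xmit_pkts(tx_queue, tx_pkts, nb_pkts, \
 *                                          cmd, (flags));              \
 *      }
 *
 *      OCCTX_TX_FASTPATH_MODES
 *      #undef T
 */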

#endif /* __OCTEONTX_RXTX_H__ */