1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Cavium, Inc
5 #ifndef __OCTEONTX_RXTX_H__
6 #define __OCTEONTX_RXTX_H__
8 #include <rte_ethdev_driver.h>
/* Per-queue RX/TX offload flag fields; pasted into the rx/tx queue
 * structures via this macro.
 */
10 #define OFFLOAD_FLAGS \
11 uint16_t rx_offload_flags; \
12 uint16_t tx_offload_flags
/* Single-bit helper used by the flag definitions below. */
14 #define BIT(nr) (1UL << (nr))
/* RX offload flag bits (stored in rx_offload_flags). */
16 #define OCCTX_RX_OFFLOAD_NONE (0)
17 #define OCCTX_RX_OFFLOAD_RSS_F BIT(0)
18 #define OCCTX_RX_VLAN_FLTR_F BIT(1)
19 #define OCCTX_RX_MULTI_SEG_F BIT(15)
/* TX offload flag bits (stored in tx_offload_flags); these masks also
 * select the TX fast-path variant via OCCTX_TX_FASTPATH_MODES.
 */
21 #define OCCTX_TX_OFFLOAD_NONE (0)
22 #define OCCTX_TX_OFFLOAD_MBUF_NOFF_F BIT(3)
24 #define OCCTX_TX_MULTI_SEG_F BIT(15)
25 /* Packet type table */
26 #define PTYPE_SIZE OCCTX_PKI_LTYPE_LAST
/*
 * 3-D lookup table translating the hardware parser's layer-type codes
 * ([LC (outer L3)][LE][LF (L4/tunnel)]) into DPDK RTE_PTYPE_* flags for
 * mbuf->packet_type on RX.
 *
 * NOTE(review): the LF_IPSEC_ESP rows OR in RTE_PTYPE_L3_IPV4 (giving
 * e.g. "RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4"), and the
 * [LC_IPV6][LE_NONE][LF_NVGRE] entry uses RTE_PTYPE_L3_IPV4 where
 * RTE_PTYPE_L3_IPV6 would be expected -- both look like copy/paste
 * slips; confirm against upstream before changing.
 */
28 static const uint32_t __rte_cache_aligned
29 ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
/* No recognized outer L3 header. */
30 [LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
31 [LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
32 [LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
33 [LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
34 [LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
35 [LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
36 [LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
37 [LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
38 [LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
39 [LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
/* Outer IPv4. */
41 [LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
42 [LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
43 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
44 [LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
45 [LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
46 [LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
47 [LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
48 [LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
49 [LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
50 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
51 [LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
52 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
53 [LC_IPV4][LE_NONE][LF_NVGRE] =
54 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
/* Outer IPv4 with options. */
56 [LC_IPV4_OPT][LE_NONE][LF_NONE] =
57 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
58 [LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
59 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L3_IPV4,
60 [LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
61 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
62 [LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
63 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
64 [LC_IPV4_OPT][LE_NONE][LF_TCP] =
65 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
66 [LC_IPV4_OPT][LE_NONE][LF_UDP] =
67 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
68 [LC_IPV4_OPT][LE_NONE][LF_GRE] =
69 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
70 [LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
71 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
72 [LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
73 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
74 [LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
75 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,
/* Outer IPv6. */
77 [LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
78 [LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
79 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
80 [LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
81 [LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
82 [LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
83 [LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
84 [LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
85 [LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
86 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
87 [LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
88 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
/* NOTE(review): L3_IPV4 in an IPv6 row -- see header comment above. */
89 [LC_IPV6][LE_NONE][LF_NVGRE] =
90 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
/* Outer IPv6 with extension headers. */
91 [LC_IPV6_OPT][LE_NONE][LF_NONE] =
92 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
93 [LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
94 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L3_IPV4,
95 [LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
96 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
97 [LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
98 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
99 [LC_IPV6_OPT][LE_NONE][LF_TCP] =
100 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
101 [LC_IPV6_OPT][LE_NONE][LF_UDP] =
102 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
103 [LC_IPV6_OPT][LE_NONE][LF_GRE] =
104 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
105 [LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
106 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
107 [LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
108 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
109 [LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
110 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,
/*
 * Detach an indirect mbuf from its direct (backing) mbuf and re-attach
 * the indirect mbuf to its own embedded data buffer, so it can then be
 * freed like an ordinary direct mbuf (e.g. by the PKO hardware).
 *
 * NOTE(review): this is an elided extraction -- the declarations of
 * 'md', 'refcount' and 'priv_size', the refcount handling after the
 * decrement, and the function's return value are on source lines not
 * visible here; confirm against the full file.
 */
115 static __rte_always_inline uint64_t
116 octeontx_pktmbuf_detach(struct rte_mbuf *m)
118 struct rte_mempool *mp = m->pool;
119 uint32_t mbuf_size, buf_len;
124 /* Update refcount of direct mbuf */
125 md = rte_mbuf_from_indirect(m);
126 refcount = rte_mbuf_refcnt_update(md, -1);
128 priv_size = rte_pktmbuf_priv_size(mp);
129 mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
130 buf_len = rte_pktmbuf_data_room_size(mp);
/* Point the mbuf back at its own embedded buffer, located directly
 * after the struct rte_mbuf plus the pool's private area, and reset
 * its headroom to the default.
 */
132 m->priv_size = priv_size;
133 m->buf_addr = (char *)m + mbuf_size;
134 m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
135 m->buf_len = (uint16_t)buf_len;
136 rte_pktmbuf_reset_headroom(m);
142 /* Now indirect mbuf is safe to free */
146 rte_mbuf_refcnt_set(md, 1)
/*
 * Pre-free check for a TX segment, mirroring rte_pktmbuf_prefree_seg():
 * if the caller holds the last reference, detach indirect mbufs first;
 * a segment with remaining references must not be freed by hardware.
 *
 * NOTE(review): the return statements for the non-detach paths are on
 * elided source lines; the callers shift the result into SEND_HDR[DF] /
 * SG_DESC[I], so presumably 0 = HW may free, 1 = do not free -- confirm.
 */
157 static __rte_always_inline uint64_t
158 octeontx_prefree_seg(struct rte_mbuf *m)
160 if (likely(rte_mbuf_refcnt_read(m) == 1)) {
161 if (!RTE_MBUF_DIRECT(m))
162 return octeontx_pktmbuf_detach(m);
167 } else if (rte_mbuf_refcnt_update(m, -1) == 0) {
168 if (!RTE_MBUF_DIRECT(m))
169 return octeontx_pktmbuf_detach(m);
/* Last reference was just dropped: reset refcount to 1 so the mbuf
 * is in a free-able state.
 */
171 rte_mbuf_refcnt_set(m, 1);
177 /* Mbuf is having refcount more than 1 so need not to be freed */
/*
 * Build the PKO command-buffer words for a single-segment packet:
 * PKO_SEND_HDR_S (2 words) followed by PKO_SEND_BUFLINK_S (2 words).
 * Returns nb_desc, the number of 64-bit words written.
 *
 * NOTE(review): elided extraction -- the 'flag' parameter declaration,
 * the DF-bit shift amount, the tail of the BUFLINK OR-chain and the
 * return statement are on source lines not visible here.
 */
181 static __rte_always_inline uint16_t
182 __octeontx_xmit_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
185 uint16_t gaura_id, nb_desc = 0;
187 /* Setup PKO_SEND_HDR_S */
188 cmd_buf[nb_desc++] = tx_pkt->data_len & 0xffff;
189 cmd_buf[nb_desc++] = 0x0;
191 /* SEND_HDR[DF] bit controls if buffer is to be freed or
192 * not, as SG_DESC[I] and SEND_HDR[II] are clear.
194 if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)
195 cmd_buf[0] |= (octeontx_prefree_seg(tx_pkt) <<
198 /* Mark mempool object as "put" since it is freed by PKO */
199 if (!(cmd_buf[0] & (1ULL << 58)))
200 __mempool_check_cookies(tx_pkt->pool, (void **)&tx_pkt,
202 /* Get the gaura Id */
203 gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
204 tx_pkt->pool->pool_id);
206 /* Setup PKO_SEND_BUFLINK_S */
207 cmd_buf[nb_desc++] = PKO_SEND_BUFLINK_SUBDC |
208 PKO_SEND_BUFLINK_LDTYPE(0x1ull) |
209 PKO_SEND_BUFLINK_GAUAR((long)gaura_id) |
211 cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);
/*
 * Build the PKO command-buffer words for a multi-segment packet:
 * PKO_SEND_HDR_S followed by one PKO_SEND_GATHER_S pair per segment,
 * with the gaura (pool) id resolved per segment because segments may
 * come from different mempools.  Returns nb_desc, the number of 64-bit
 * words written.
 *
 * NOTE(review): elided extraction -- the per-segment loop header, the
 * gather length/size field, the advance to m_next and the return
 * statement are on source lines not visible here.
 */
216 static __rte_always_inline uint16_t
217 __octeontx_xmit_mseg_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
220 uint16_t nb_segs, nb_desc = 0;
221 uint16_t gaura_id, len = 0;
222 struct rte_mbuf *m_next = NULL;
224 nb_segs = tx_pkt->nb_segs;
225 /* Setup PKO_SEND_HDR_S */
226 cmd_buf[nb_desc++] = tx_pkt->pkt_len & 0xffff;
227 cmd_buf[nb_desc++] = 0x0;
230 m_next = tx_pkt->next;
231 /* To handle case where mbufs belong to diff pools, like
234 gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
235 tx_pkt->pool->pool_id);
237 /* Setup PKO_SEND_GATHER_S */
238 cmd_buf[nb_desc] = PKO_SEND_GATHER_SUBDC |
239 PKO_SEND_GATHER_LDTYPE(0x1ull) |
240 PKO_SEND_GATHER_GAUAR((long)gaura_id) |
243 /* SG_DESC[I] bit controls if buffer is to be freed or
244 * not, as SEND_HDR[DF] and SEND_HDR[II] are clear.
246 if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F) {
248 (octeontx_prefree_seg(tx_pkt) << 57);
251 /* Mark mempool object as "put" since it is freed by
254 if (!(cmd_buf[nb_desc] & (1ULL << 57))) {
256 __mempool_check_cookies(tx_pkt->pool,
257 (void **)&tx_pkt, 1, 0);
261 cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);
264 len += tx_pkt->data_len;
/*
 * Common TX burst loop shared by all fast-path variants: for each
 * packet, check PKO flow-control status, build the command buffer
 * (multi- or single-segment depending on compile-time 'flags') and
 * issue it to the descriptor queue via LMTST.  Returns the number of
 * packets sent.
 *
 * NOTE(review): elided extraction -- the early-exit on backpressure,
 * the remaining call arguments, the count increment and the return
 * statement are on source lines not visible here.
 */
271 static __rte_always_inline uint16_t
272 __octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
273 uint16_t nb_pkts, uint64_t *cmd_buf,
274 const uint16_t flags)
276 struct octeontx_txq *txq = tx_queue;
277 octeontx_dq_t *dq = &txq->dq;
278 uint16_t count = 0, nb_desc;
281 while (count < nb_pkts) {
/* Negative flow-control status means the DQ is backpressured. */
282 if (unlikely(*((volatile int64_t *)dq->fc_status_va) < 0))
285 if (flags & OCCTX_TX_MULTI_SEG_F) {
286 nb_desc = __octeontx_xmit_mseg_prepare(tx_pkts[count],
289 nb_desc = __octeontx_xmit_prepare(tx_pkts[count],
293 octeontx_reg_lmtst(dq->lmtline_va, dq->ioreg_va, cmd_buf,
302 octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
/* Short aliases for the TX offload masks used in the table below. */
304 #define NOFF_F OCCTX_TX_OFFLOAD_MBUF_NOFF_F
305 #define MULT_F OCCTX_TX_MULTI_SEG_F
306 /* [NOFF] [MULTI_SEG] */
/*
 * X-macro list of TX fast-path variants.  Each T() row is:
 *   (name suffix, NOFF bit, MULTI_SEG bit, word count, flag mask)
 * where the fourth column is presumably the command-buffer size in
 * 64-bit words (4 for single-seg, 14 for multi-seg) -- confirm against
 * the T() expansion sites.
 */
307 #define OCCTX_TX_FASTPATH_MODES \
308 T(no_offload, 0, 0, 4, OCCTX_TX_OFFLOAD_NONE) \
309 T(mseg, 0, 1, 14, MULT_F) \
310 T(noff, 1, 0, 4, NOFF_F) \
311 T(noff_mseg, 1, 1, 14, NOFF_F | MULT_F)
313 #endif /* __OCTEONTX_RXTX_H__ */