/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>

#include "rte_gro.h"
#include "gro_tcp4.h"
#include "gro_udp4.h"
#include "gro_vxlan_tcp4.h"
/* Per-GRO-type callback: allocate a reassembly table on 'socket_id',
 * sized for 'max_flow_num' flows with 'max_item_per_flow' items each.
 * Returns the table pointer, or NULL on failure.
 */
typedef void *(*gro_tbl_create_fn)(uint16_t socket_id,
		uint16_t max_flow_num,
		uint16_t max_item_per_flow);
/* Per-GRO-type callback: free a reassembly table created above. */
typedef void (*gro_tbl_destroy_fn)(void *tbl);
/* Per-GRO-type callback: number of packets currently buffered in 'tbl'. */
typedef uint32_t (*gro_tbl_pkt_count_fn)(void *tbl);
/* Table constructors, indexed by the RTE_GRO_*_INDEX of each GRO type;
 * unused slots are NULL.
 */
static gro_tbl_create_fn tbl_create_fn[RTE_GRO_TYPE_MAX_NUM] = {
		gro_tcp4_tbl_create, gro_vxlan_tcp4_tbl_create,
		gro_udp4_tbl_create, NULL};
24 static gro_tbl_destroy_fn tbl_destroy_fn[RTE_GRO_TYPE_MAX_NUM] = {
25 gro_tcp4_tbl_destroy, gro_vxlan_tcp4_tbl_destroy,
28 static gro_tbl_pkt_count_fn tbl_pkt_count_fn[RTE_GRO_TYPE_MAX_NUM] = {
29 gro_tcp4_tbl_pkt_count, gro_vxlan_tcp4_tbl_pkt_count,
30 gro_udp4_tbl_pkt_count,
/* True when 'ptype' marks a plain (non-tunneled) IPv4 packet carrying TCP. */
#define IS_IPV4_TCP_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_TCP) == RTE_PTYPE_L4_TCP))

/* True when 'ptype' marks a plain (non-tunneled) IPv4 packet carrying UDP. */
#define IS_IPV4_UDP_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP))

/* True when 'ptype' marks IPv4/UDP outer headers tunneling VXLAN with an
 * inner IPv4 (any EXT variant) TCP packet - the only tunneled combination
 * this library can merge.
 */
#define IS_IPV4_VXLAN_TCP4_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) && \
		((ptype & RTE_PTYPE_TUNNEL_VXLAN) == \
		 RTE_PTYPE_TUNNEL_VXLAN) && \
		((ptype & RTE_PTYPE_INNER_L4_TCP) == \
		 RTE_PTYPE_INNER_L4_TCP) && \
		(((ptype & RTE_PTYPE_INNER_L3_MASK) & \
		  (RTE_PTYPE_INNER_L3_IPV4 | \
		   RTE_PTYPE_INNER_L3_IPV4_EXT | \
		   RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN)) != 0))
52 * GRO context structure. It keeps the table structures, which are
53 * used to merge packets, for different GRO types. Before using
54 * rte_gro_reassemble(), applications need to create the GRO context
58 /* GRO types to perform */
60 /* reassembly tables */
61 void *tbls[RTE_GRO_TYPE_MAX_NUM];
65 rte_gro_ctx_create(const struct rte_gro_param *param)
67 struct gro_ctx *gro_ctx;
68 gro_tbl_create_fn create_tbl_fn;
69 uint64_t gro_type_flag = 0;
70 uint64_t gro_types = 0;
73 gro_ctx = rte_zmalloc_socket(__func__,
74 sizeof(struct gro_ctx),
80 for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
81 gro_type_flag = 1ULL << i;
82 if ((param->gro_types & gro_type_flag) == 0)
85 create_tbl_fn = tbl_create_fn[i];
86 if (create_tbl_fn == NULL)
89 gro_ctx->tbls[i] = create_tbl_fn(param->socket_id,
91 param->max_item_per_flow);
92 if (gro_ctx->tbls[i] == NULL) {
93 /* destroy all created tables */
94 gro_ctx->gro_types = gro_types;
95 rte_gro_ctx_destroy(gro_ctx);
98 gro_types |= gro_type_flag;
100 gro_ctx->gro_types = param->gro_types;
106 rte_gro_ctx_destroy(void *ctx)
108 gro_tbl_destroy_fn destroy_tbl_fn;
109 struct gro_ctx *gro_ctx = ctx;
110 uint64_t gro_type_flag;
113 for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
114 gro_type_flag = 1ULL << i;
115 if ((gro_ctx->gro_types & gro_type_flag) == 0)
117 destroy_tbl_fn = tbl_destroy_fn[i];
119 destroy_tbl_fn(gro_ctx->tbls[i]);
125 rte_gro_reassemble_burst(struct rte_mbuf **pkts,
127 const struct rte_gro_param *param)
129 /* allocate a reassembly table for TCP/IPv4 GRO */
130 struct gro_tcp4_tbl tcp_tbl;
131 struct gro_tcp4_flow tcp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
132 struct gro_tcp4_item tcp_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };
134 /* allocate a reassembly table for UDP/IPv4 GRO */
135 struct gro_udp4_tbl udp_tbl;
136 struct gro_udp4_flow udp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
137 struct gro_udp4_item udp_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };
139 /* Allocate a reassembly table for VXLAN TCP GRO */
140 struct gro_vxlan_tcp4_tbl vxlan_tbl;
141 struct gro_vxlan_tcp4_flow vxlan_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
142 struct gro_vxlan_tcp4_item vxlan_items[RTE_GRO_MAX_BURST_ITEM_NUM]
145 struct rte_mbuf *unprocess_pkts[nb_pkts];
148 uint16_t i, unprocess_num = 0, nb_after_gro = nb_pkts;
149 uint8_t do_tcp4_gro = 0, do_vxlan_gro = 0, do_udp4_gro = 0;
151 if (unlikely((param->gro_types & (RTE_GRO_IPV4_VXLAN_TCP_IPV4 |
153 RTE_GRO_UDP_IPV4)) == 0))
156 /* Get the maximum number of packets */
157 item_num = RTE_MIN(nb_pkts, (param->max_flow_num *
158 param->max_item_per_flow));
159 item_num = RTE_MIN(item_num, RTE_GRO_MAX_BURST_ITEM_NUM);
161 if (param->gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) {
162 for (i = 0; i < item_num; i++)
163 vxlan_flows[i].start_index = INVALID_ARRAY_INDEX;
165 vxlan_tbl.flows = vxlan_flows;
166 vxlan_tbl.items = vxlan_items;
167 vxlan_tbl.flow_num = 0;
168 vxlan_tbl.item_num = 0;
169 vxlan_tbl.max_flow_num = item_num;
170 vxlan_tbl.max_item_num = item_num;
174 if (param->gro_types & RTE_GRO_TCP_IPV4) {
175 for (i = 0; i < item_num; i++)
176 tcp_flows[i].start_index = INVALID_ARRAY_INDEX;
178 tcp_tbl.flows = tcp_flows;
179 tcp_tbl.items = tcp_items;
180 tcp_tbl.flow_num = 0;
181 tcp_tbl.item_num = 0;
182 tcp_tbl.max_flow_num = item_num;
183 tcp_tbl.max_item_num = item_num;
187 if (param->gro_types & RTE_GRO_UDP_IPV4) {
188 for (i = 0; i < item_num; i++)
189 udp_flows[i].start_index = INVALID_ARRAY_INDEX;
191 udp_tbl.flows = udp_flows;
192 udp_tbl.items = udp_items;
193 udp_tbl.flow_num = 0;
194 udp_tbl.item_num = 0;
195 udp_tbl.max_flow_num = item_num;
196 udp_tbl.max_item_num = item_num;
201 for (i = 0; i < nb_pkts; i++) {
203 * The timestamp is ignored, since all packets
204 * will be flushed from the tables.
206 if (IS_IPV4_VXLAN_TCP4_PKT(pkts[i]->packet_type) &&
208 ret = gro_vxlan_tcp4_reassemble(pkts[i],
211 /* Merge successfully */
214 unprocess_pkts[unprocess_num++] = pkts[i];
215 } else if (IS_IPV4_TCP_PKT(pkts[i]->packet_type) &&
217 ret = gro_tcp4_reassemble(pkts[i], &tcp_tbl, 0);
219 /* merge successfully */
222 unprocess_pkts[unprocess_num++] = pkts[i];
223 } else if (IS_IPV4_UDP_PKT(pkts[i]->packet_type) &&
225 ret = gro_udp4_reassemble(pkts[i], &udp_tbl, 0);
227 /* merge successfully */
230 unprocess_pkts[unprocess_num++] = pkts[i];
232 unprocess_pkts[unprocess_num++] = pkts[i];
235 if ((nb_after_gro < nb_pkts)
236 || (unprocess_num < nb_pkts)) {
238 /* Flush all packets from the tables */
240 i = gro_vxlan_tcp4_tbl_timeout_flush(&vxlan_tbl,
245 i += gro_tcp4_tbl_timeout_flush(&tcp_tbl, 0,
246 &pkts[i], nb_pkts - i);
250 i += gro_udp4_tbl_timeout_flush(&udp_tbl, 0,
251 &pkts[i], nb_pkts - i);
253 /* Copy unprocessed packets */
254 if (unprocess_num > 0) {
255 memcpy(&pkts[i], unprocess_pkts,
256 sizeof(struct rte_mbuf *) *
259 nb_after_gro = i + unprocess_num;
266 rte_gro_reassemble(struct rte_mbuf **pkts,
270 struct rte_mbuf *unprocess_pkts[nb_pkts];
271 struct gro_ctx *gro_ctx = ctx;
272 void *tcp_tbl, *udp_tbl, *vxlan_tbl;
273 uint64_t current_time;
274 uint16_t i, unprocess_num = 0;
275 uint8_t do_tcp4_gro, do_vxlan_gro, do_udp4_gro;
277 if (unlikely((gro_ctx->gro_types & (RTE_GRO_IPV4_VXLAN_TCP_IPV4 |
279 RTE_GRO_UDP_IPV4)) == 0))
282 tcp_tbl = gro_ctx->tbls[RTE_GRO_TCP_IPV4_INDEX];
283 vxlan_tbl = gro_ctx->tbls[RTE_GRO_IPV4_VXLAN_TCP_IPV4_INDEX];
284 udp_tbl = gro_ctx->tbls[RTE_GRO_UDP_IPV4_INDEX];
286 do_tcp4_gro = (gro_ctx->gro_types & RTE_GRO_TCP_IPV4) ==
288 do_vxlan_gro = (gro_ctx->gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) ==
289 RTE_GRO_IPV4_VXLAN_TCP_IPV4;
290 do_udp4_gro = (gro_ctx->gro_types & RTE_GRO_UDP_IPV4) ==
293 current_time = rte_rdtsc();
295 for (i = 0; i < nb_pkts; i++) {
296 if (IS_IPV4_VXLAN_TCP4_PKT(pkts[i]->packet_type) &&
298 if (gro_vxlan_tcp4_reassemble(pkts[i], vxlan_tbl,
300 unprocess_pkts[unprocess_num++] = pkts[i];
301 } else if (IS_IPV4_TCP_PKT(pkts[i]->packet_type) &&
303 if (gro_tcp4_reassemble(pkts[i], tcp_tbl,
305 unprocess_pkts[unprocess_num++] = pkts[i];
306 } else if (IS_IPV4_UDP_PKT(pkts[i]->packet_type) &&
308 if (gro_udp4_reassemble(pkts[i], udp_tbl,
310 unprocess_pkts[unprocess_num++] = pkts[i];
312 unprocess_pkts[unprocess_num++] = pkts[i];
314 if (unprocess_num > 0) {
315 memcpy(pkts, unprocess_pkts, sizeof(struct rte_mbuf *) *
319 return unprocess_num;
323 rte_gro_timeout_flush(void *ctx,
324 uint64_t timeout_cycles,
326 struct rte_mbuf **out,
329 struct gro_ctx *gro_ctx = ctx;
330 uint64_t flush_timestamp;
332 uint16_t left_nb_out = max_nb_out;
334 gro_types = gro_types & gro_ctx->gro_types;
335 flush_timestamp = rte_rdtsc() - timeout_cycles;
337 if (gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) {
338 num = gro_vxlan_tcp4_tbl_timeout_flush(gro_ctx->tbls[
339 RTE_GRO_IPV4_VXLAN_TCP_IPV4_INDEX],
340 flush_timestamp, out, left_nb_out);
341 left_nb_out = max_nb_out - num;
344 /* If no available space in 'out', stop flushing. */
345 if ((gro_types & RTE_GRO_TCP_IPV4) && left_nb_out > 0) {
346 num += gro_tcp4_tbl_timeout_flush(
347 gro_ctx->tbls[RTE_GRO_TCP_IPV4_INDEX],
349 &out[num], left_nb_out);
350 left_nb_out = max_nb_out - num;
353 /* If no available space in 'out', stop flushing. */
354 if ((gro_types & RTE_GRO_UDP_IPV4) && left_nb_out > 0) {
355 num += gro_udp4_tbl_timeout_flush(
356 gro_ctx->tbls[RTE_GRO_UDP_IPV4_INDEX],
358 &out[num], left_nb_out);
365 rte_gro_get_pkt_count(void *ctx)
367 struct gro_ctx *gro_ctx = ctx;
368 gro_tbl_pkt_count_fn pkt_count_fn;
369 uint64_t gro_types = gro_ctx->gro_types, flag;
370 uint64_t item_num = 0;
373 for (i = 0; i < RTE_GRO_TYPE_MAX_NUM && gro_types; i++) {
375 if ((gro_types & flag) == 0)
379 pkt_count_fn = tbl_pkt_count_fn[i];
381 item_num += pkt_count_fn(gro_ctx->tbls[i]);