/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>

#include "rte_gro.h"
#include "gro_tcp4.h"
#include "gro_vxlan_tcp4.h"

typedef void *(*gro_tbl_create_fn)(uint16_t socket_id,
		uint16_t max_flow_num,
		uint16_t max_item_per_flow);
typedef void (*gro_tbl_destroy_fn)(void *tbl);
typedef uint32_t (*gro_tbl_pkt_count_fn)(void *tbl);

/* per-GRO-type constructor, destructor and packet-count tables */
static gro_tbl_create_fn tbl_create_fn[RTE_GRO_TYPE_MAX_NUM] = {
		gro_tcp4_tbl_create, gro_vxlan_tcp4_tbl_create, NULL};
static gro_tbl_destroy_fn tbl_destroy_fn[RTE_GRO_TYPE_MAX_NUM] = {
		gro_tcp4_tbl_destroy, gro_vxlan_tcp4_tbl_destroy,
		NULL};
static gro_tbl_pkt_count_fn tbl_pkt_count_fn[RTE_GRO_TYPE_MAX_NUM] = {
		gro_tcp4_tbl_pkt_count, gro_vxlan_tcp4_tbl_pkt_count,
		NULL};

#define IS_IPV4_TCP_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_TCP) == RTE_PTYPE_L4_TCP))

#define IS_IPV4_VXLAN_TCP4_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) && \
		((ptype & RTE_PTYPE_TUNNEL_VXLAN) == \
		 RTE_PTYPE_TUNNEL_VXLAN) && \
		((ptype & RTE_PTYPE_INNER_L4_TCP) == \
		 RTE_PTYPE_INNER_L4_TCP) && \
		(((ptype & RTE_PTYPE_INNER_L3_MASK) & \
		  (RTE_PTYPE_INNER_L3_IPV4 | \
		   RTE_PTYPE_INNER_L3_IPV4_EXT | \
		   RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN)) != 0))

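/*
 * Illustrative note (not from the original source): a packet_type of
 * (RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP), as
 * typically set by the PMD or by rte_net_get_ptype(), satisfies
 * IS_IPV4_TCP_PKT().
 */
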
/*
 * GRO context structure. It keeps the table structures, which are
 * used to merge packets, for different GRO types. Before using
 * rte_gro_reassemble(), applications need to create the GRO context
 * first.
 */
struct gro_ctx {
	/* GRO types to perform */
	uint64_t gro_types;
	/* reassembly tables */
	void *tbls[RTE_GRO_TYPE_MAX_NUM];
};

void *
rte_gro_ctx_create(const struct rte_gro_param *param)
{
	struct gro_ctx *gro_ctx;
	gro_tbl_create_fn create_tbl_fn;
	uint64_t gro_type_flag = 0;
	uint64_t gro_types = 0;
	uint8_t i;

	gro_ctx = rte_zmalloc_socket(__func__,
			sizeof(struct gro_ctx),
			RTE_CACHE_LINE_SIZE,
			param->socket_id);
	if (gro_ctx == NULL)
		return NULL;

	for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
		gro_type_flag = 1ULL << i;
		if ((param->gro_types & gro_type_flag) == 0)
			continue;

		create_tbl_fn = tbl_create_fn[i];
		if (create_tbl_fn == NULL)
			continue;

		gro_ctx->tbls[i] = create_tbl_fn(param->socket_id,
				param->max_flow_num,
				param->max_item_per_flow);
		if (gro_ctx->tbls[i] == NULL) {
			/* destroy all created tables */
			gro_ctx->gro_types = gro_types;
			rte_gro_ctx_destroy(gro_ctx);
			return NULL;
		}
		gro_types |= gro_type_flag;
	}
	gro_ctx->gro_types = param->gro_types;

	return gro_ctx;
}

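/*
 * Usage sketch (illustrative, not part of the library): how an
 * application might fill rte_gro_param and obtain a context. The
 * function name and the parameter values are hypothetical examples;
 * only the rte_gro_* calls are real API.
 */
static __rte_unused void *
example_create_gro_ctx(uint16_t socket_id)
{
	struct rte_gro_param param = {
		.gro_types = RTE_GRO_TCP_IPV4 | RTE_GRO_IPV4_VXLAN_TCP_IPV4,
		.max_flow_num = 1024,
		.max_item_per_flow = 32,
		.socket_id = socket_id,
	};

	/* returns NULL on failure; pair with rte_gro_ctx_destroy() */
	return rte_gro_ctx_create(&param);
}
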
void
rte_gro_ctx_destroy(void *ctx)
{
	gro_tbl_destroy_fn destroy_tbl_fn;
	struct gro_ctx *gro_ctx = ctx;
	uint64_t gro_type_flag;
	uint8_t i;

	for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
		gro_type_flag = 1ULL << i;
		if ((gro_ctx->gro_types & gro_type_flag) == 0)
			continue;
		destroy_tbl_fn = tbl_destroy_fn[i];
		if (destroy_tbl_fn != NULL)
			destroy_tbl_fn(gro_ctx->tbls[i]);
	}
	rte_free(gro_ctx);
}

uint16_t
rte_gro_reassemble_burst(struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		const struct rte_gro_param *param)
{
	/* allocate a reassembly table for TCP/IPv4 GRO */
	struct gro_tcp4_tbl tcp_tbl;
	struct gro_tcp4_flow tcp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
	struct gro_tcp4_item tcp_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };

	/* allocate a reassembly table for VXLAN GRO */
	struct gro_vxlan_tcp4_tbl vxlan_tbl;
	struct gro_vxlan_tcp4_flow vxlan_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
	struct gro_vxlan_tcp4_item vxlan_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {
			{{0}, 0, 0} };

	struct rte_mbuf *unprocess_pkts[nb_pkts];
	uint32_t item_num;
	int32_t ret;
	uint16_t i, unprocess_num = 0, nb_after_gro = nb_pkts;
	uint8_t do_tcp4_gro = 0, do_vxlan_gro = 0;

	if (unlikely((param->gro_types & (RTE_GRO_IPV4_VXLAN_TCP_IPV4 |
					RTE_GRO_TCP_IPV4)) == 0))
		return nb_pkts;

	/* get the maximum number of packets the tables need to hold */
	item_num = RTE_MIN(nb_pkts, (param->max_flow_num *
				param->max_item_per_flow));
	item_num = RTE_MIN(item_num, RTE_GRO_MAX_BURST_ITEM_NUM);

	if (param->gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) {
		for (i = 0; i < item_num; i++)
			vxlan_flows[i].start_index = INVALID_ARRAY_INDEX;

		vxlan_tbl.flows = vxlan_flows;
		vxlan_tbl.items = vxlan_items;
		vxlan_tbl.flow_num = 0;
		vxlan_tbl.item_num = 0;
		vxlan_tbl.max_flow_num = item_num;
		vxlan_tbl.max_item_num = item_num;
		do_vxlan_gro = 1;
	}

	if (param->gro_types & RTE_GRO_TCP_IPV4) {
		for (i = 0; i < item_num; i++)
			tcp_flows[i].start_index = INVALID_ARRAY_INDEX;

		tcp_tbl.flows = tcp_flows;
		tcp_tbl.items = tcp_items;
		tcp_tbl.flow_num = 0;
		tcp_tbl.item_num = 0;
		tcp_tbl.max_flow_num = item_num;
		tcp_tbl.max_item_num = item_num;
		do_tcp4_gro = 1;
	}

	for (i = 0; i < nb_pkts; i++) {
		/*
		 * The timestamp is ignored, since all packets
		 * will be flushed from the tables.
		 */
		if (IS_IPV4_VXLAN_TCP4_PKT(pkts[i]->packet_type) &&
				do_vxlan_gro) {
			ret = gro_vxlan_tcp4_reassemble(pkts[i], &vxlan_tbl, 0);
			if (ret > 0)
				/* merged successfully */
				nb_after_gro--;
			else if (ret < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else if (IS_IPV4_TCP_PKT(pkts[i]->packet_type) &&
				do_tcp4_gro) {
			ret = gro_tcp4_reassemble(pkts[i], &tcp_tbl, 0);
			if (ret > 0)
				/* merged successfully */
				nb_after_gro--;
			else if (ret < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else
			unprocess_pkts[unprocess_num++] = pkts[i];
	}

	if (nb_after_gro < nb_pkts) {
		i = 0;
		/* flush all packets from the tables */
		if (do_vxlan_gro) {
			i = gro_vxlan_tcp4_tbl_timeout_flush(&vxlan_tbl,
					0, pkts, nb_pkts);
		}
		if (do_tcp4_gro) {
			i += gro_tcp4_tbl_timeout_flush(&tcp_tbl, 0,
					&pkts[i], nb_pkts - i);
		}
		/* copy unprocessed packets back into pkts[] */
		if (unprocess_num > 0) {
			memcpy(&pkts[i], unprocess_pkts,
					sizeof(struct rte_mbuf *) *
					unprocess_num);
		}
	}

	return nb_after_gro;
}

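/*
 * Usage sketch (illustrative, not part of the library): the
 * lightweight mode merges packets within a single burst and needs no
 * context; everything is flushed back into pkts[] before returning.
 * 'port', 'queue' and the function name are hypothetical.
 */
static __rte_unused uint16_t
example_burst_mode(uint16_t port, uint16_t queue)
{
	struct rte_mbuf *pkts[32];
	struct rte_gro_param param = {
		.gro_types = RTE_GRO_TCP_IPV4,
		.max_flow_num = 32,
		.max_item_per_flow = 32,
	};
	uint16_t nb_rx;

	nb_rx = rte_eth_rx_burst(port, queue, pkts, 32);

	/* returns the number of packets left in pkts[] after merging */
	return rte_gro_reassemble_burst(pkts, nb_rx, &param);
}
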
uint16_t
rte_gro_reassemble(struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		void *ctx)
{
	struct rte_mbuf *unprocess_pkts[nb_pkts];
	struct gro_ctx *gro_ctx = ctx;
	void *tcp_tbl, *vxlan_tbl;
	uint64_t current_time;
	uint16_t i, unprocess_num = 0;
	uint8_t do_tcp4_gro, do_vxlan_gro;

	if (unlikely((gro_ctx->gro_types & (RTE_GRO_IPV4_VXLAN_TCP_IPV4 |
					RTE_GRO_TCP_IPV4)) == 0))
		return nb_pkts;

	tcp_tbl = gro_ctx->tbls[RTE_GRO_TCP_IPV4_INDEX];
	vxlan_tbl = gro_ctx->tbls[RTE_GRO_IPV4_VXLAN_TCP_IPV4_INDEX];

	do_tcp4_gro = (gro_ctx->gro_types & RTE_GRO_TCP_IPV4) ==
		RTE_GRO_TCP_IPV4;
	do_vxlan_gro = (gro_ctx->gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) ==
		RTE_GRO_IPV4_VXLAN_TCP_IPV4;

	current_time = rte_rdtsc();

	for (i = 0; i < nb_pkts; i++) {
		if (IS_IPV4_VXLAN_TCP4_PKT(pkts[i]->packet_type) &&
				do_vxlan_gro) {
			if (gro_vxlan_tcp4_reassemble(pkts[i], vxlan_tbl,
						current_time) < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else if (IS_IPV4_TCP_PKT(pkts[i]->packet_type) &&
				do_tcp4_gro) {
			if (gro_tcp4_reassemble(pkts[i], tcp_tbl,
						current_time) < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else
			unprocess_pkts[unprocess_num++] = pkts[i];
	}
	if (unprocess_num > 0) {
		memcpy(pkts, unprocess_pkts, sizeof(struct rte_mbuf *) *
				unprocess_num);
	}

	return unprocess_num;
}

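/*
 * Usage sketch (illustrative, not part of the library): in the
 * heavyweight mode, unmergeable packets come back at the head of
 * pkts[] and can be forwarded at once; merged packets stay in the
 * context tables until flushed. All names except the rte_* calls are
 * hypothetical; 'gro_ctx' comes from rte_gro_ctx_create().
 */
static __rte_unused uint16_t
example_ctx_mode_rx(void *gro_ctx, uint16_t port, uint16_t queue)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb_rx;

	nb_rx = rte_eth_rx_burst(port, queue, pkts, 32);

	/* pkts[0 .. return value) holds the packets to forward now */
	return rte_gro_reassemble(pkts, nb_rx, gro_ctx);
}
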
uint16_t
rte_gro_timeout_flush(void *ctx,
		uint64_t timeout_cycles,
		uint64_t gro_types,
		struct rte_mbuf **out,
		uint16_t max_nb_out)
{
	struct gro_ctx *gro_ctx = ctx;
	uint64_t flush_timestamp;
	uint16_t num = 0;

	gro_types = gro_types & gro_ctx->gro_types;
	flush_timestamp = rte_rdtsc() - timeout_cycles;

	if (gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) {
		num = gro_vxlan_tcp4_tbl_timeout_flush(gro_ctx->tbls[
				RTE_GRO_IPV4_VXLAN_TCP_IPV4_INDEX],
				flush_timestamp, out, max_nb_out);
		max_nb_out -= num;
	}

	/* if there is no available space in 'out', stop flushing */
	if ((gro_types & RTE_GRO_TCP_IPV4) && max_nb_out > 0) {
		num += gro_tcp4_tbl_timeout_flush(
				gro_ctx->tbls[RTE_GRO_TCP_IPV4_INDEX],
				flush_timestamp,
				&out[num], max_nb_out);
	}

	return num;
}

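/*
 * Usage sketch (illustrative, not part of the library): converting a
 * wall-clock timeout into the TSC cycles that rte_gro_timeout_flush()
 * expects. The 100 us timeout and the function name are arbitrary
 * examples; rte_get_tsc_hz() and US_PER_S come from rte_cycles.h.
 */
static __rte_unused uint16_t
example_flush_aged(void *gro_ctx, struct rte_mbuf **out, uint16_t max_nb)
{
	/* cycles per microsecond, times the timeout in microseconds */
	uint64_t timeout_cycles = rte_get_tsc_hz() / US_PER_S * 100;

	return rte_gro_timeout_flush(gro_ctx, timeout_cycles,
			RTE_GRO_TCP_IPV4, out, max_nb);
}
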
uint64_t
rte_gro_get_pkt_count(void *ctx)
{
	struct gro_ctx *gro_ctx = ctx;
	gro_tbl_pkt_count_fn pkt_count_fn;
	uint64_t gro_types = gro_ctx->gro_types, flag;
	uint64_t item_num = 0;
	uint8_t i;

	for (i = 0; i < RTE_GRO_TYPE_MAX_NUM && gro_types; i++) {
		flag = 1ULL << i;
		if ((gro_types & flag) == 0)
			continue;

		gro_types ^= flag;
		pkt_count_fn = tbl_pkt_count_fn[i];
		if (pkt_count_fn != NULL)
			item_num += pkt_count_fn(gro_ctx->tbls[i]);
	}

	return item_num;
}
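
/*
 * Usage sketch (illustrative, not part of the library): an application
 * can poll rte_gro_get_pkt_count() to skip the flush call entirely
 * while the tables are empty. The function name is hypothetical.
 */
static __rte_unused int
example_gro_tables_empty(void *gro_ctx)
{
	return rte_gro_get_pkt_count(gro_ctx) == 0;
}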