1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
5 #include <rte_malloc.h>
7 #include <rte_cycles.h>
8 #include <rte_ethdev.h>
/*
 * Per-GRO-type function signatures. Each supported GRO type (e.g. TCP/IPv4)
 * provides these three operations, dispatched through the tables below.
 */
/* Allocate a reassembly table on @socket_id sized for
 * @max_flow_num flows with @max_item_per_flow items each;
 * returns the table, or NULL on failure. */
13 typedef void *(*gro_tbl_create_fn)(uint16_t socket_id,
14 uint16_t max_flow_num,
15 uint16_t max_item_per_flow);
/* Free a reassembly table previously returned by the create fn. */
16 typedef void (*gro_tbl_destroy_fn)(void *tbl);
/* Return the number of packets currently held in @tbl. */
17 typedef uint32_t (*gro_tbl_pkt_count_fn)(void *tbl);
/*
 * Dispatch tables indexed by GRO type (bit position in the gro_types
 * bitmask). Only TCP/IPv4 is populated here; remaining slots are NULL,
 * which callers below treat as "type not supported".
 */
19 static gro_tbl_create_fn tbl_create_fn[RTE_GRO_TYPE_MAX_NUM] = {
20 gro_tcp4_tbl_create, NULL};
21 static gro_tbl_destroy_fn tbl_destroy_fn[RTE_GRO_TYPE_MAX_NUM] = {
22 gro_tcp4_tbl_destroy, NULL};
23 static gro_tbl_pkt_count_fn tbl_pkt_count_fn[RTE_GRO_TYPE_MAX_NUM] = {
24 gro_tcp4_tbl_pkt_count, NULL};
27 * GRO context structure, which is used to merge packets. It keeps
28 * many reassembly tables of desired GRO types. Applications need to
29 * create GRO context objects before using rte_gro_reassemble to
/* Bitmask of enabled GRO types; bit i corresponds to tbls[i]. */
33 /* GRO types to perform */
/* One table per enabled GRO type, created by tbl_create_fn[i]. */
35 /* reassembly tables */
36 void *tbls[RTE_GRO_TYPE_MAX_NUM];
/*
 * Create a GRO context for the types requested in @param.
 * Allocates a zeroed context on param->socket_id, then walks every
 * possible GRO type bit and builds a reassembly table for each
 * requested, supported type. Returns the context, or NULL on failure
 * (NOTE(review): the allocation-failure/early-return lines are not
 * visible in this view — confirm against the full source).
 */
40 rte_gro_ctx_create(const struct rte_gro_param *param)
42 struct gro_ctx *gro_ctx;
43 gro_tbl_create_fn create_tbl_fn;
44 uint64_t gro_type_flag = 0;
45 uint64_t gro_types = 0;
/* Zeroed allocation: all tbls[] slots start as NULL. */
48 gro_ctx = rte_zmalloc_socket(__func__,
49 sizeof(struct gro_ctx),
55 for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
56 gro_type_flag = 1ULL << i;
/* Skip types the application did not ask for. */
57 if ((param->gro_types & gro_type_flag) == 0)
/* Skip requested types that have no implementation. */
60 create_tbl_fn = tbl_create_fn[i];
61 if (create_tbl_fn == NULL)
64 gro_ctx->tbls[i] = create_tbl_fn(param->socket_id,
66 param->max_item_per_flow);
67 if (gro_ctx->tbls[i] == NULL) {
68 /* destroy all created tables */
/* Record only the types created so far, so the destroy
 * path frees exactly those tables and nothing else. */
69 gro_ctx->gro_types = gro_types;
70 rte_gro_ctx_destroy(gro_ctx);
73 gro_types |= gro_type_flag;
75 gro_ctx->gro_types = param->gro_types;
/*
 * Destroy a GRO context: free the reassembly table of every enabled
 * GRO type via its destroy callback, then (presumably, in elided
 * lines) free the context itself — TODO confirm against full source.
 */
81 rte_gro_ctx_destroy(void *ctx)
83 gro_tbl_destroy_fn destroy_tbl_fn;
84 struct gro_ctx *gro_ctx = ctx;
85 uint64_t gro_type_flag;
90 for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
91 gro_type_flag = 1ULL << i;
/* Only types recorded in gro_types had tables created. */
92 if ((gro_ctx->gro_types & gro_type_flag) == 0)
94 destroy_tbl_fn = tbl_destroy_fn[i];
96 destroy_tbl_fn(gro_ctx->tbls[i]);
/*
 * Lightweight, stateless burst-mode GRO: merge the @nb_pkts TCP/IPv4
 * packets in @pkts using a temporary on-stack reassembly table, flush
 * everything before returning, and re-pack @pkts so merged packets
 * come first followed by unprocessed ones. Only RTE_GRO_TCP_IPV4 is
 * handled; if it is not requested, the burst is returned untouched
 * (early-return line elided from this view).
 */
102 rte_gro_reassemble_burst(struct rte_mbuf **pkts,
104 const struct rte_gro_param *param)
107 uint16_t nb_after_gro = nb_pkts;
110 /* allocate a reassembly table for TCP/IPv4 GRO */
111 struct gro_tcp4_tbl tcp_tbl;
112 struct gro_tcp4_key tcp_keys[RTE_GRO_MAX_BURST_ITEM_NUM];
113 struct gro_tcp4_item tcp_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };
/* VLA: packets that cannot be merged are staged here, then copied
 * back behind the flushed (merged) packets. */
115 struct rte_mbuf *unprocess_pkts[nb_pkts];
116 uint16_t unprocess_num = 0;
118 uint64_t current_time;
120 if ((param->gro_types & RTE_GRO_TCP_IPV4) == 0)
123 /* get the actual number of packets */
/* Table capacity: bounded by both the caller's flow/item limits and
 * the compile-time burst maximum. */
124 item_num = RTE_MIN(nb_pkts, (param->max_flow_num *
125 param->max_item_per_flow));
126 item_num = RTE_MIN(item_num, RTE_GRO_MAX_BURST_ITEM_NUM);
/* Mark all key slots empty before use. */
128 for (i = 0; i < item_num; i++)
129 tcp_keys[i].start_index = INVALID_ARRAY_INDEX;
131 tcp_tbl.keys = tcp_keys;
132 tcp_tbl.items = tcp_items;
134 tcp_tbl.item_num = 0;
135 tcp_tbl.max_key_num = item_num;
136 tcp_tbl.max_item_num = item_num;
138 current_time = rte_rdtsc();
140 for (i = 0; i < nb_pkts; i++) {
/* Attempt merge only on packets classified as IPv4 + TCP. */
141 if ((pkts[i]->packet_type & (RTE_PTYPE_L3_IPV4 |
142 RTE_PTYPE_L4_TCP)) ==
143 (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP)) {
144 ret = gro_tcp4_reassemble(pkts[i],
148 /* merge successfully */
/* Non-positive ret (branch partly elided): packet could not be
 * merged — keep it in the unprocessed list. */
151 unprocess_pkts[unprocess_num++] =
/* Non-TCP/IPv4 packets are passed through unmodified. */
155 unprocess_pkts[unprocess_num++] = pkts[i];
158 /* re-arrange GROed packets */
159 if (nb_after_gro < nb_pkts) {
/* Flush every packet held in the table to the front of pkts[],
 * then append the unprocessed packets after them. */
160 i = gro_tcp4_tbl_timeout_flush(&tcp_tbl, current_time,
162 if (unprocess_num > 0) {
163 memcpy(&pkts[i], unprocess_pkts,
164 sizeof(struct rte_mbuf *) *
/*
 * Stateful GRO: try to merge each TCP/IPv4 packet of the burst into
 * the context's persistent reassembly table. Packets that are not
 * TCP/IPv4, or that fail to merge, are compacted to the front of
 * @pkts; returns how many such unprocessed packets remain. Merged
 * packets stay in the table until rte_gro_timeout_flush.
 */
173 rte_gro_reassemble(struct rte_mbuf **pkts,
177 uint16_t i, unprocess_num = 0;
178 struct rte_mbuf *unprocess_pkts[nb_pkts];
179 struct gro_ctx *gro_ctx = ctx;
180 uint64_t current_time;
/* Context not configured for TCP/IPv4: nothing to merge
 * (early-return line elided from this view). */
182 if ((gro_ctx->gro_types & RTE_GRO_TCP_IPV4) == 0)
/* Timestamp recorded once per burst and stored with merged packets
 * for later timeout-based flushing. */
185 current_time = rte_rdtsc();
187 for (i = 0; i < nb_pkts; i++) {
188 if ((pkts[i]->packet_type & (RTE_PTYPE_L3_IPV4 |
189 RTE_PTYPE_L4_TCP)) ==
190 (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP)) {
191 if (gro_tcp4_reassemble(pkts[i],
193 [RTE_GRO_TCP_IPV4_INDEX],
/* Merge failed: hand the packet back to the caller. */
195 unprocess_pkts[unprocess_num++] = pkts[i];
/* Non-TCP/IPv4: pass through untouched. */
197 unprocess_pkts[unprocess_num++] = pkts[i];
199 if (unprocess_num > 0) {
200 memcpy(pkts, unprocess_pkts,
201 sizeof(struct rte_mbuf *) *
205 return unprocess_num;
/*
 * Flush packets that have sat in the context's reassembly tables for
 * at least @timeout_cycles TSC cycles into @out. Only types present
 * in both @gro_types and the context's enabled set are flushed;
 * currently only the TCP/IPv4 table is handled (the non-TCP fallback
 * return is elided from this view).
 */
209 rte_gro_timeout_flush(void *ctx,
210 uint64_t timeout_cycles,
212 struct rte_mbuf **out,
215 struct gro_ctx *gro_ctx = ctx;
216 uint64_t flush_timestamp;
/* Restrict the request to types this context actually has tables for. */
218 gro_types = gro_types & gro_ctx->gro_types;
/* Packets whose stored timestamp is <= this threshold have timed out. */
219 flush_timestamp = rte_rdtsc() - timeout_cycles;
221 if (gro_types & RTE_GRO_TCP_IPV4) {
222 return gro_tcp4_tbl_timeout_flush(
223 gro_ctx->tbls[RTE_GRO_TCP_IPV4_INDEX],
/*
 * Count the packets currently buffered across all of the context's
 * reassembly tables, by summing each enabled type's pkt_count
 * callback (final return line elided from this view).
 */
231 rte_gro_get_pkt_count(void *ctx)
233 struct gro_ctx *gro_ctx = ctx;
234 gro_tbl_pkt_count_fn pkt_count_fn;
235 uint64_t item_num = 0;
236 uint64_t gro_type_flag;
239 for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
240 gro_type_flag = 1ULL << i;
/* Skip types this context never enabled. */
241 if ((gro_ctx->gro_types & gro_type_flag) == 0)
/* Defensive: enabled type with no counter implementation. */
244 pkt_count_fn = tbl_pkt_count_fn[i];
245 if (pkt_count_fn == NULL)
247 item_num += pkt_count_fn(gro_ctx->tbls[i]);