lib/gro/gro_tcp4.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>

#include "gro_tcp4.h"

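/*
 * Create a TCP/IPv4 reassembly table on the given NUMA socket. Both the
 * item and the flow array hold max_flow_num * max_item_per_flow entries,
 * capped at GRO_TCP4_TBL_MAX_ITEM_NUM. Returns NULL on failure.
 */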
void *
gro_tcp4_tbl_create(uint16_t socket_id,
		uint16_t max_flow_num,
		uint16_t max_item_per_flow)
{
	struct gro_tcp4_tbl *tbl;
	size_t size;
	uint32_t entries_num, i;

	entries_num = max_flow_num * max_item_per_flow;
	entries_num = RTE_MIN(entries_num, GRO_TCP4_TBL_MAX_ITEM_NUM);

	if (entries_num == 0)
		return NULL;

	tbl = rte_zmalloc_socket(__func__,
			sizeof(struct gro_tcp4_tbl),
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl == NULL)
		return NULL;

	size = sizeof(struct gro_tcp4_item) * entries_num;
	tbl->items = rte_zmalloc_socket(__func__,
			size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl->items == NULL) {
		rte_free(tbl);
		return NULL;
	}
	tbl->max_item_num = entries_num;

	size = sizeof(struct gro_tcp4_flow) * entries_num;
	tbl->flows = rte_zmalloc_socket(__func__,
			size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl->flows == NULL) {
		rte_free(tbl->items);
		rte_free(tbl);
		return NULL;
	}
	/* INVALID_ARRAY_INDEX indicates an empty flow */
	for (i = 0; i < entries_num; i++)
		tbl->flows[i].start_index = INVALID_ARRAY_INDEX;
	tbl->max_flow_num = entries_num;

	return tbl;
}

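/* Release the reassembly table together with its item and flow arrays. */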
void
gro_tcp4_tbl_destroy(void *tbl)
{
	struct gro_tcp4_tbl *tcp_tbl = tbl;

	if (tcp_tbl) {
		rte_free(tcp_tbl->items);
		rte_free(tcp_tbl->flows);
	}
	rte_free(tcp_tbl);
}

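/* Find an unused item slot; return INVALID_ARRAY_INDEX if the table is full. */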
static inline uint32_t
find_an_empty_item(struct gro_tcp4_tbl *tbl)
{
	uint32_t i;
	uint32_t max_item_num = tbl->max_item_num;

	for (i = 0; i < max_item_num; i++)
		if (tbl->items[i].firstseg == NULL)
			return i;
	return INVALID_ARRAY_INDEX;
}

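/* Find an unused flow slot; return INVALID_ARRAY_INDEX if the table is full. */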
static inline uint32_t
find_an_empty_flow(struct gro_tcp4_tbl *tbl)
{
	uint32_t i;
	uint32_t max_flow_num = tbl->max_flow_num;

	for (i = 0; i < max_flow_num; i++)
		if (tbl->flows[i].start_index == INVALID_ARRAY_INDEX)
			return i;
	return INVALID_ARRAY_INDEX;
}

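/*
 * Store an unmerged packet as a new item. If prev_idx is valid, the new
 * item is linked into the flow's item chain right after prev_idx. Returns
 * the new item index, or INVALID_ARRAY_INDEX if no free slot is left.
 */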
static inline uint32_t
insert_new_item(struct gro_tcp4_tbl *tbl,
		struct rte_mbuf *pkt,
		uint64_t start_time,
		uint32_t prev_idx,
		uint32_t sent_seq,
		uint16_t ip_id,
		uint8_t is_atomic)
{
	uint32_t item_idx;

	item_idx = find_an_empty_item(tbl);
	if (item_idx == INVALID_ARRAY_INDEX)
		return INVALID_ARRAY_INDEX;

	tbl->items[item_idx].firstseg = pkt;
	tbl->items[item_idx].lastseg = rte_pktmbuf_lastseg(pkt);
	tbl->items[item_idx].start_time = start_time;
	tbl->items[item_idx].next_pkt_idx = INVALID_ARRAY_INDEX;
	tbl->items[item_idx].sent_seq = sent_seq;
	tbl->items[item_idx].ip_id = ip_id;
	tbl->items[item_idx].nb_merged = 1;
	tbl->items[item_idx].is_atomic = is_atomic;
	tbl->item_num++;

	/* if the previous packet exists, chain them together. */
	if (prev_idx != INVALID_ARRAY_INDEX) {
		tbl->items[item_idx].next_pkt_idx =
			tbl->items[prev_idx].next_pkt_idx;
		tbl->items[prev_idx].next_pkt_idx = item_idx;
	}

	return item_idx;
}

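/*
 * Mark an item as empty and unlink it from its flow chain. Returns the
 * index of the next item in the chain.
 */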
static inline uint32_t
delete_item(struct gro_tcp4_tbl *tbl, uint32_t item_idx,
		uint32_t prev_item_idx)
{
	uint32_t next_idx = tbl->items[item_idx].next_pkt_idx;

	/* NULL indicates an empty item */
	tbl->items[item_idx].firstseg = NULL;
	tbl->item_num--;
	if (prev_item_idx != INVALID_ARRAY_INDEX)
		tbl->items[prev_item_idx].next_pkt_idx = next_idx;

	return next_idx;
}

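/*
 * Allocate a flow slot, copy the flow key into it and let the flow start
 * at item_idx. Returns the flow index, or INVALID_ARRAY_INDEX if no free
 * flow slot is left.
 */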
static inline uint32_t
insert_new_flow(struct gro_tcp4_tbl *tbl,
		struct tcp4_flow_key *src,
		uint32_t item_idx)
{
	struct tcp4_flow_key *dst;
	uint32_t flow_idx;

	flow_idx = find_an_empty_flow(tbl);
	if (unlikely(flow_idx == INVALID_ARRAY_INDEX))
		return INVALID_ARRAY_INDEX;

	dst = &(tbl->flows[flow_idx].key);

	rte_ether_addr_copy(&(src->eth_saddr), &(dst->eth_saddr));
	rte_ether_addr_copy(&(src->eth_daddr), &(dst->eth_daddr));
	dst->ip_src_addr = src->ip_src_addr;
	dst->ip_dst_addr = src->ip_dst_addr;
	dst->recv_ack = src->recv_ack;
	dst->src_port = src->src_port;
	dst->dst_port = src->dst_port;

	tbl->flows[flow_idx].start_index = item_idx;
	tbl->flow_num++;

	return flow_idx;
}

/*
 * Update the IPv4 total_length field of the flushed packet to cover
 * the merged payload.
 */
static inline void
update_header(struct gro_tcp4_item *item)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_mbuf *pkt = item->firstseg;

	ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
			pkt->l2_len);
	ipv4_hdr->total_length = rte_cpu_to_be_16(pkt->pkt_len -
			pkt->l2_len);
}

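/*
 * Try to merge the input packet with a packet stored in the table.
 * Returns 1 if the packet is merged into an existing one, 0 if it is
 * stored in the table as a new item, and -1 if it cannot be processed
 * by GRO (e.g. unexpected TCP flags or no payload).
 */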
int32_t
gro_tcp4_reassemble(struct rte_mbuf *pkt,
		struct gro_tcp4_tbl *tbl,
		uint64_t start_time)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_tcp_hdr *tcp_hdr;
	uint32_t sent_seq;
	int32_t tcp_dl;
	uint16_t ip_id, hdr_len, frag_off;
	uint8_t is_atomic;

	struct tcp4_flow_key key;
	uint32_t cur_idx, prev_idx, item_idx;
	uint32_t i, max_flow_num, remaining_flow_num;
	int cmp;
	uint8_t find;

	/*
	 * Don't process packets whose TCP header length is greater
	 * than 60 bytes or less than 20 bytes.
	 */
	if (unlikely(INVALID_TCP_HDRLEN(pkt->l4_len)))
		return -1;

	eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	ipv4_hdr = (struct rte_ipv4_hdr *)((char *)eth_hdr + pkt->l2_len);
	tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
	hdr_len = pkt->l2_len + pkt->l3_len + pkt->l4_len;

	/*
	 * Don't process packets with the FIN, SYN, RST, PSH, URG, ECE
	 * or CWR flag set.
	 */
	if (tcp_hdr->tcp_flags != RTE_TCP_ACK_FLAG)
		return -1;
	/*
	 * Don't process packets whose payload length is less than or
	 * equal to 0.
	 */
	tcp_dl = pkt->pkt_len - hdr_len;
	if (tcp_dl <= 0)
		return -1;

	/*
	 * Save the IPv4 ID when the DF bit is 0; when the DF bit is 1,
	 * the IPv4 ID is ignored.
	 */
	frag_off = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
	is_atomic = (frag_off & RTE_IPV4_HDR_DF_FLAG) == RTE_IPV4_HDR_DF_FLAG;
	ip_id = is_atomic ? 0 : rte_be_to_cpu_16(ipv4_hdr->packet_id);
	sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);

	rte_ether_addr_copy(&(eth_hdr->src_addr), &(key.eth_saddr));
	rte_ether_addr_copy(&(eth_hdr->dst_addr), &(key.eth_daddr));
	key.ip_src_addr = ipv4_hdr->src_addr;
	key.ip_dst_addr = ipv4_hdr->dst_addr;
	key.src_port = tcp_hdr->src_port;
	key.dst_port = tcp_hdr->dst_port;
	key.recv_ack = tcp_hdr->recv_ack;

	/* Search for a matching flow. */
	max_flow_num = tbl->max_flow_num;
	remaining_flow_num = tbl->flow_num;
	find = 0;
	for (i = 0; i < max_flow_num && remaining_flow_num; i++) {
		if (tbl->flows[i].start_index != INVALID_ARRAY_INDEX) {
			if (is_same_tcp4_flow(tbl->flows[i].key, key)) {
				find = 1;
				break;
			}
			remaining_flow_num--;
		}
	}

	/*
	 * No matching flow was found. Insert a new flow and store the
	 * packet in it.
	 */
	if (find == 0) {
		item_idx = insert_new_item(tbl, pkt, start_time,
				INVALID_ARRAY_INDEX, sent_seq, ip_id,
				is_atomic);
		if (item_idx == INVALID_ARRAY_INDEX)
			return -1;
		if (insert_new_flow(tbl, &key, item_idx) ==
				INVALID_ARRAY_INDEX) {
			/*
			 * Failed to insert a new flow, so delete the
			 * stored packet.
			 */
			delete_item(tbl, item_idx, INVALID_ARRAY_INDEX);
			return -1;
		}
		return 0;
	}

	/*
	 * Check all packets in the flow and try to find a neighbor for
	 * the input packet.
	 */
	cur_idx = tbl->flows[i].start_index;
	prev_idx = cur_idx;
	do {
		cmp = check_seq_option(&(tbl->items[cur_idx]), tcp_hdr,
				sent_seq, ip_id, pkt->l4_len, tcp_dl, 0,
				is_atomic);
		if (cmp) {
			if (merge_two_tcp4_packets(&(tbl->items[cur_idx]),
						pkt, cmp, sent_seq, ip_id, 0))
				return 1;
			/*
			 * The two packets couldn't be merged, as the
			 * merged packet length would exceed the maximum.
			 * Store the packet in the flow.
			 */
			if (insert_new_item(tbl, pkt, start_time, prev_idx,
						sent_seq, ip_id, is_atomic) ==
					INVALID_ARRAY_INDEX)
				return -1;
			return 0;
		}
		prev_idx = cur_idx;
		cur_idx = tbl->items[cur_idx].next_pkt_idx;
	} while (cur_idx != INVALID_ARRAY_INDEX);

	/* No neighbor was found, so store the packet in the flow. */
	if (insert_new_item(tbl, pkt, start_time, prev_idx, sent_seq,
				ip_id, is_atomic) == INVALID_ARRAY_INDEX)
		return -1;

	return 0;
}

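/*
 * Flush packets whose start_time is not after flush_timestamp. Flushed
 * packets are written to the out array; returns the number of flushed
 * packets, at most nb_out.
 */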
uint16_t
gro_tcp4_tbl_timeout_flush(struct gro_tcp4_tbl *tbl,
		uint64_t flush_timestamp,
		struct rte_mbuf **out,
		uint16_t nb_out)
{
	uint16_t k = 0;
	uint32_t i, j;
	uint32_t max_flow_num = tbl->max_flow_num;

	for (i = 0; i < max_flow_num; i++) {
		if (unlikely(tbl->flow_num == 0))
			return k;

		j = tbl->flows[i].start_index;
		while (j != INVALID_ARRAY_INDEX) {
			if (tbl->items[j].start_time <= flush_timestamp) {
				out[k++] = tbl->items[j].firstseg;
				if (tbl->items[j].nb_merged > 1)
					update_header(&(tbl->items[j]));
				/*
				 * Delete the packet and get the next
				 * packet in the flow.
				 */
				j = delete_item(tbl, j, INVALID_ARRAY_INDEX);
				tbl->flows[i].start_index = j;
				if (j == INVALID_ARRAY_INDEX)
					tbl->flow_num--;

				if (unlikely(k == nb_out))
					return k;
			} else
				/*
				 * The remaining packets in this flow are
				 * not timed out yet. Go on to check other
				 * flows.
				 */
				break;
		}
	}
	return k;
}

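/* Return the number of packets currently stored in the table. */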
uint32_t
gro_tcp4_tbl_pkt_count(void *tbl)
{
	struct gro_tcp4_tbl *gro_tbl = tbl;

	if (gro_tbl)
		return gro_tbl->item_num;

	return 0;
}