net/bnxt: fix mark id update to mbuf
[dpdk.git] / lib / librte_node / ip4_rewrite.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2020 Marvell International Ltd.
3  */
4
5 #include <rte_debug.h>
6 #include <rte_ethdev.h>
7 #include <rte_ether.h>
8 #include <rte_graph.h>
9 #include <rte_graph_worker.h>
10 #include <rte_ip.h>
11 #include <rte_malloc.h>
12 #include <rte_mbuf.h>
13 #include <rte_tcp.h>
14 #include <rte_udp.h>
15 #include <rte_vect.h>
16
17 #include "rte_node_ip4_api.h"
18
19 #include "ip4_rewrite_priv.h"
20 #include "node_private.h"
21
/* Shared next-hop table and per-port tx-edge map; allocated lazily on first
 * call to ip4_rewrite_set_next() or rte_node_ip4_rewrite_add().
 */
static struct ip4_rewrite_node_main *ip4_rewrite_nm;
23
/*
 * Node process callback: for each mbuf, overwrite the packet head with the
 * next hop's rewrite data (the new L2 header), decrement the IPv4 TTL and
 * incrementally patch the header checksum, then steer the packet to the tx
 * node recorded for that next hop.
 *
 * Packets are handled four at a time against a speculated next edge
 * (node->ctx stores the edge used last time). While speculation holds, the
 * packets stay in place and are moved in one bulk operation at the end
 * ("home run"); on a miss, the correctly speculated prefix is copied and the
 * stragglers are enqueued individually.
 *
 * Per-mbuf metadata (next-hop index, ttl, cksum) was stashed in the mbuf
 * private area by an upstream node (see node_mbuf_priv1() usage in the
 * scalar tail); the quad path loads two mbufs' worth of it into one xmm
 * register and operates on the lanes directly — presumably u16 lanes are
 * {nh, ttl, cksum, pad} per mbuf, matching the scalar field accesses.
 */
static uint16_t
ip4_rewrite_node_process(struct rte_graph *graph, struct rte_node *node,
			 void **objs, uint16_t nb_objs)
{
	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3, **pkts;
	struct ip4_rewrite_nh_header *nh = ip4_rewrite_nm->nh;
	uint16_t next0, next1, next2, next3, next_index;
	struct rte_ipv4_hdr *ip0, *ip1, *ip2, *ip3;
	uint16_t n_left_from, held = 0, last_spec = 0;
	void *d0, *d1, *d2, *d3;
	void **to_next, **from;
	rte_xmm_t priv01;
	rte_xmm_t priv23;
	int i;

	/* Speculative next as last next */
	next_index = *(uint16_t *)node->ctx;
	rte_prefetch0(nh);

	pkts = (struct rte_mbuf **)objs;
	from = objs;
	n_left_from = nb_objs;

	/* Warm up the first quad of mbuf structs. */
	for (i = 0; i < 4 && i < n_left_from; i++)
		rte_prefetch0(pkts[i]);

	/* Get stream for the speculated next node */
	to_next = rte_node_next_stream_get(graph, node, next_index, nb_objs);
	/* Update Ethernet header of pkts */
	while (n_left_from >= 4) {
		if (likely(n_left_from > 7)) {
			/* Prefetch only next-mbuf struct and priv area.
			 * Data need not be prefetched as we only write.
			 */
			rte_prefetch0(pkts[4]);
			rte_prefetch0(pkts[5]);
			rte_prefetch0(pkts[6]);
			rte_prefetch0(pkts[7]);
		}

		mbuf0 = pkts[0];
		mbuf1 = pkts[1];
		mbuf2 = pkts[2];
		mbuf3 = pkts[3];

		pkts += 4;
		n_left_from -= 4;
		/* Gather two mbufs' private metadata per xmm register. */
		priv01.u64[0] = node_mbuf_priv1(mbuf0)->u;
		priv01.u64[1] = node_mbuf_priv1(mbuf1)->u;
		priv23.u64[0] = node_mbuf_priv1(mbuf2)->u;
		priv23.u64[1] = node_mbuf_priv1(mbuf3)->u;

		/* Increment checksum by one. */
		priv01.u32[1] += rte_cpu_to_be_16(0x0100);
		priv01.u32[3] += rte_cpu_to_be_16(0x0100);
		priv23.u32[1] += rte_cpu_to_be_16(0x0100);
		priv23.u32[3] += rte_cpu_to_be_16(0x0100);

		/* Update ttl,cksum rewrite ethernet hdr on mbuf0.
		 * The 32-bit add above lets a carry out of the cksum halfword
		 * spill into the adjacent halfword; the u16 + u16 sum below
		 * folds that carry back in (same effect as the scalar tail's
		 * "chksum += chksum >= 0xffff").
		 */
		d0 = rte_pktmbuf_mtod(mbuf0, void *);
		rte_memcpy(d0, nh[priv01.u16[0]].rewrite_data,
			   nh[priv01.u16[0]].rewrite_len);

		next0 = nh[priv01.u16[0]].tx_node;
		ip0 = (struct rte_ipv4_hdr *)((uint8_t *)d0 +
					      sizeof(struct rte_ether_hdr));
		ip0->time_to_live = priv01.u16[1] - 1;
		ip0->hdr_checksum = priv01.u16[2] + priv01.u16[3];

		/* Update ttl,cksum rewrite ethernet hdr on mbuf1 */
		d1 = rte_pktmbuf_mtod(mbuf1, void *);
		rte_memcpy(d1, nh[priv01.u16[4]].rewrite_data,
			   nh[priv01.u16[4]].rewrite_len);

		next1 = nh[priv01.u16[4]].tx_node;
		ip1 = (struct rte_ipv4_hdr *)((uint8_t *)d1 +
					      sizeof(struct rte_ether_hdr));
		ip1->time_to_live = priv01.u16[5] - 1;
		ip1->hdr_checksum = priv01.u16[6] + priv01.u16[7];

		/* Update ttl,cksum rewrite ethernet hdr on mbuf2 */
		d2 = rte_pktmbuf_mtod(mbuf2, void *);
		rte_memcpy(d2, nh[priv23.u16[0]].rewrite_data,
			   nh[priv23.u16[0]].rewrite_len);
		next2 = nh[priv23.u16[0]].tx_node;
		ip2 = (struct rte_ipv4_hdr *)((uint8_t *)d2 +
					      sizeof(struct rte_ether_hdr));
		ip2->time_to_live = priv23.u16[1] - 1;
		ip2->hdr_checksum = priv23.u16[2] + priv23.u16[3];

		/* Update ttl,cksum rewrite ethernet hdr on mbuf3 */
		d3 = rte_pktmbuf_mtod(mbuf3, void *);
		rte_memcpy(d3, nh[priv23.u16[4]].rewrite_data,
			   nh[priv23.u16[4]].rewrite_len);

		next3 = nh[priv23.u16[4]].tx_node;
		ip3 = (struct rte_ipv4_hdr *)((uint8_t *)d3 +
					      sizeof(struct rte_ether_hdr));
		ip3->time_to_live = priv23.u16[5] - 1;
		ip3->hdr_checksum = priv23.u16[6] + priv23.u16[7];

		/* Enqueue four to next node.
		 * fix_spec is 1 only when all four follow the speculation.
		 */
		rte_edge_t fix_spec =
			((next_index == next0) && (next0 == next1) &&
			 (next1 == next2) && (next2 == next3));

		if (unlikely(fix_spec == 0)) {
			/* Copy things successfully speculated till now */
			rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
			from += last_spec;
			to_next += last_spec;
			held += last_spec;
			last_spec = 0;

			/* next0 */
			if (next_index == next0) {
				to_next[0] = from[0];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, next0,
						    from[0]);
			}

			/* next1 */
			if (next_index == next1) {
				to_next[0] = from[1];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, next1,
						    from[1]);
			}

			/* next2 */
			if (next_index == next2) {
				to_next[0] = from[2];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, next2,
						    from[2]);
			}

			/* next3 */
			if (next_index == next3) {
				to_next[0] = from[3];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, next3,
						    from[3]);
			}

			from += 4;

			/* Change speculation if last two are same */
			if ((next_index != next3) && (next2 == next3)) {
				/* Put the current speculated node */
				rte_node_next_stream_put(graph, node,
							 next_index, held);
				held = 0;

				/* Get next speculated stream */
				next_index = next3;
				to_next = rte_node_next_stream_get(
					graph, node, next_index, nb_objs);
			}
		} else {
			last_spec += 4;
		}
	}

	/* Scalar tail: same rewrite/ttl/cksum update, one packet at a time. */
	while (n_left_from > 0) {
		uint16_t chksum;

		mbuf0 = pkts[0];

		pkts += 1;
		n_left_from -= 1;

		d0 = rte_pktmbuf_mtod(mbuf0, void *);
		rte_memcpy(d0, nh[node_mbuf_priv1(mbuf0)->nh].rewrite_data,
			   nh[node_mbuf_priv1(mbuf0)->nh].rewrite_len);

		next0 = nh[node_mbuf_priv1(mbuf0)->nh].tx_node;
		ip0 = (struct rte_ipv4_hdr *)((uint8_t *)d0 +
					      sizeof(struct rte_ether_hdr));
		/* Incremental checksum update for the TTL decrement; fold the
		 * end-around carry back into the low 16 bits.
		 */
		chksum = node_mbuf_priv1(mbuf0)->cksum +
			 rte_cpu_to_be_16(0x0100);
		chksum += chksum >= 0xffff;
		ip0->hdr_checksum = chksum;
		ip0->time_to_live = node_mbuf_priv1(mbuf0)->ttl - 1;

		/* XOR is a cheap inequality test on the edge ids. */
		if (unlikely(next_index ^ next0)) {
			/* Copy things successfully speculated till now */
			rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
			from += last_spec;
			to_next += last_spec;
			held += last_spec;
			last_spec = 0;

			rte_node_enqueue_x1(graph, node, next0, from[0]);
			from += 1;
		} else {
			last_spec += 1;
		}
	}

	/* !!! Home run !!!
	 * Every packet followed the speculation: hand the whole input stream
	 * to the next node without copying anything.
	 */
	if (likely(last_spec == nb_objs)) {
		rte_node_next_stream_move(graph, node, next_index);
		return nb_objs;
	}

	/* Flush the remaining speculated packets to the next stream. */
	held += last_spec;
	rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
	rte_node_next_stream_put(graph, node, next_index, held);
	/* Save the last next used */
	*(uint16_t *)node->ctx = next_index;

	return nb_objs;
}
247
/*
 * Node init callback, invoked by the graph framework when the node is
 * instantiated. No per-node state to set up; only emits a debug trace.
 *
 * @param graph Graph instance the node belongs to (unused).
 * @param node  Node instance being initialized (unused).
 * @return Always 0 (success).
 */
static int
ip4_rewrite_node_init(const struct rte_graph *graph, struct rte_node *node)
{

	RTE_SET_USED(graph);
	RTE_SET_USED(node);
	/* Message previously read "Initialized ip4_rewrite node initialized";
	 * drop the duplicated word.
	 */
	node_dbg("ip4_rewrite", "Initialized ip4_rewrite node");

	return 0;
}
258
259 int
260 ip4_rewrite_set_next(uint16_t port_id, uint16_t next_index)
261 {
262         if (ip4_rewrite_nm == NULL) {
263                 ip4_rewrite_nm = rte_zmalloc(
264                         "ip4_rewrite", sizeof(struct ip4_rewrite_node_main),
265                         RTE_CACHE_LINE_SIZE);
266                 if (ip4_rewrite_nm == NULL)
267                         return -ENOMEM;
268         }
269         ip4_rewrite_nm->next_index[port_id] = next_index;
270
271         return 0;
272 }
273
274 int
275 rte_node_ip4_rewrite_add(uint16_t next_hop, uint8_t *rewrite_data,
276                          uint8_t rewrite_len, uint16_t dst_port)
277 {
278         struct ip4_rewrite_nh_header *nh;
279
280         if (next_hop >= RTE_GRAPH_IP4_REWRITE_MAX_NH)
281                 return -EINVAL;
282
283         if (rewrite_len > RTE_GRAPH_IP4_REWRITE_MAX_LEN)
284                 return -EINVAL;
285
286         if (ip4_rewrite_nm == NULL) {
287                 ip4_rewrite_nm = rte_zmalloc(
288                         "ip4_rewrite", sizeof(struct ip4_rewrite_node_main),
289                         RTE_CACHE_LINE_SIZE);
290                 if (ip4_rewrite_nm == NULL)
291                         return -ENOMEM;
292         }
293
294         /* Check if dst port doesn't exist as edge */
295         if (!ip4_rewrite_nm->next_index[dst_port])
296                 return -EINVAL;
297
298         /* Update next hop */
299         nh = &ip4_rewrite_nm->nh[next_hop];
300
301         memcpy(nh->rewrite_data, rewrite_data, rewrite_len);
302         nh->tx_node = ip4_rewrite_nm->next_index[dst_port];
303         nh->rewrite_len = rewrite_len;
304         nh->enabled = true;
305
306         return 0;
307 }
308
/* Node registration: name, process/init callbacks and static edges. */
static struct rte_node_register ip4_rewrite_node = {
	.process = ip4_rewrite_node_process,
	.name = "ip4_rewrite",
	/* Default edge i.e '0' is pkt drop */
	.nb_edges = 1,
	.next_nodes = {
		[0] = "pkt_drop",
	},
	.init = ip4_rewrite_node_init,
};
319
320 struct rte_node_register *
321 ip4_rewrite_node_get(void)
322 {
323         return &ip4_rewrite_node;
324 }
325
/* Register the node with the graph framework at library load time. */
RTE_NODE_REGISTER(ip4_rewrite_node);