lib/librte_node/ip4_lookup.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#include <arpa/inet.h>
#include <sys/socket.h>

#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_graph.h>
#include <rte_graph_worker.h>
#include <rte_ip.h>
#include <rte_lpm.h>
#include <rte_mbuf.h>
#include <rte_tcp.h>
#include <rte_udp.h>

#include "rte_node_ip4_api.h"

#include "node_private.h"

#define IPV4_L3FWD_LPM_MAX_RULES 1024
#define IPV4_L3FWD_LPM_NUMBER_TBL8S (1 << 8)

/* IP4 Lookup global data struct */
struct ip4_lookup_node_main {
        struct rte_lpm *lpm_tbl[RTE_MAX_NUMA_NODES];
};

static struct ip4_lookup_node_main ip4_lookup_nm;

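/* Pick a vectorized lookup implementation (NEON or SSE) when the target
 * architecture provides one; otherwise fall back to the scalar version
 * defined below.
 */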
#if defined(RTE_MACHINE_CPUFLAG_NEON)
#include "ip4_lookup_neon.h"
#elif defined(RTE_ARCH_X86)
#include "ip4_lookup_sse.h"
#else

static uint16_t
ip4_lookup_node_process(struct rte_graph *graph, struct rte_node *node,
                        void **objs, uint16_t nb_objs)
{
        struct rte_ipv4_hdr *ipv4_hdr;
        void **to_next, **from;
        uint16_t last_spec = 0;
        struct rte_mbuf *mbuf;
        rte_edge_t next_index;
        struct rte_lpm *lpm;
        uint16_t held = 0;
        uint32_t drop_nh;
        int i, rc;

        /* Speculative next */
        next_index = RTE_NODE_IP4_LOOKUP_NEXT_REWRITE;
        /* Drop node */
        drop_nh = ((uint32_t)RTE_NODE_IP4_LOOKUP_NEXT_PKT_DROP) << 16;
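        /* LPM user data layout: bits 16-23 carry the next edge (node) id,
         * bits 0-15 the next hop index consumed by ip4_rewrite. drop_nh
         * therefore encodes "pkt_drop edge, next hop 0".
         */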

        /* Get socket specific LPM from ctx */
        lpm = *((struct rte_lpm **)node->ctx);
        from = objs;

        /* Get stream for the speculated next node */
        to_next = rte_node_next_stream_get(graph, node, next_index, nb_objs);
        for (i = 0; i < nb_objs; i++) {
                uint32_t next_hop;
                uint16_t next;

                mbuf = (struct rte_mbuf *)objs[i];

                /* Extract destination IP of the mbuf */
                ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
                                sizeof(struct rte_ether_hdr));
                /* Extract cksum, ttl as ipv4 hdr is in cache */
                node_mbuf_priv1(mbuf)->cksum = ipv4_hdr->hdr_checksum;
                node_mbuf_priv1(mbuf)->ttl = ipv4_hdr->time_to_live;

                rc = rte_lpm_lookup(lpm, rte_be_to_cpu_32(ipv4_hdr->dst_addr),
                                    &next_hop);
                next_hop = (rc == 0) ? next_hop : drop_nh;

                node_mbuf_priv1(mbuf)->nh = (uint16_t)next_hop;
                next_hop = next_hop >> 16;
                next = (uint16_t)next_hop;

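                /* Fast path: a packet that matches the speculated edge simply
                 * extends the contiguous run. On a mismatch, flush the run
                 * into the speculated stream and enqueue the odd packet to
                 * its own edge.
                 */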
                if (unlikely(next_index != next)) {
                        /* Copy things successfully speculated till now */
                        rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
                        from += last_spec;
                        to_next += last_spec;
                        held += last_spec;
                        last_spec = 0;

                        rte_node_enqueue_x1(graph, node, next, from[0]);
                        from += 1;
                } else {
                        last_spec += 1;
                }
        }

        /* !!! Home run !!! All packets took the speculated edge. */
        if (likely(last_spec == nb_objs)) {
                rte_node_next_stream_move(graph, node, next_index);
                return nb_objs;
        }
        held += last_spec;
        rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
        rte_node_next_stream_put(graph, node, next_index, held);

        return nb_objs;
}

#endif

int
rte_node_ip4_route_add(uint32_t ip, uint8_t depth, uint16_t next_hop,
                       enum rte_node_ip4_lookup_next next_node)
{
        char abuf[INET6_ADDRSTRLEN];
        struct in_addr in;
        uint8_t socket;
        uint32_t val;
        int ret;

        in.s_addr = htonl(ip);
        inet_ntop(AF_INET, &in, abuf, sizeof(abuf));
        /* Embed the next node id into the 24-bit next hop value */
        val = ((next_node << 16) | next_hop) & ((1ull << 24) - 1);
        node_dbg("ip4_lookup", "LPM: Adding route %s / %d nh (0x%x)", abuf,
                 depth, val);

        for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
                if (!ip4_lookup_nm.lpm_tbl[socket])
                        continue;

                ret = rte_lpm_add(ip4_lookup_nm.lpm_tbl[socket],
                                  ip, depth, val);
                if (ret < 0) {
                        node_err("ip4_lookup",
                                 "Unable to add entry %s / %d nh (%x) to LPM table on sock %d, rc=%d\n",
                                 abuf, depth, val, socket, ret);
                        return ret;
                }
        }

        return 0;
}
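
/* Illustrative example (not from the upstream file): steer the 10.0.0.0/24
 * prefix to next hop index 5 on the rewrite edge. The prefix is passed in
 * host byte order, matching the rte_lpm_add() call above.
 *
 *      rte_node_ip4_route_add(RTE_IPV4(10, 0, 0, 0), 24, 5,
 *                             RTE_NODE_IP4_LOOKUP_NEXT_REWRITE);
 */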

static int
setup_lpm(struct ip4_lookup_node_main *nm, int socket)
{
        struct rte_lpm_config config_ipv4;
        char s[RTE_LPM_NAMESIZE];

        /* One LPM table per socket */
        if (nm->lpm_tbl[socket])
                return 0;

        /* create the LPM table */
        config_ipv4.max_rules = IPV4_L3FWD_LPM_MAX_RULES;
        config_ipv4.number_tbl8s = IPV4_L3FWD_LPM_NUMBER_TBL8S;
        config_ipv4.flags = 0;
        snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socket);
        nm->lpm_tbl[socket] = rte_lpm_create(s, socket, &config_ipv4);
        if (nm->lpm_tbl[socket] == NULL)
                return -rte_errno;

        return 0;
}

static int
ip4_lookup_node_init(const struct rte_graph *graph, struct rte_node *node)
{
        struct rte_lpm **lpm_p = (struct rte_lpm **)&node->ctx;
        uint16_t socket, lcore_id;
        static uint8_t init_once;
        int rc;

        RTE_SET_USED(graph);
        RTE_SET_USED(node);

        if (!init_once) {
                /* Setup LPM tables for all sockets */
                RTE_LCORE_FOREACH(lcore_id)
                {
                        socket = rte_lcore_to_socket_id(lcore_id);
                        rc = setup_lpm(&ip4_lookup_nm, socket);
                        if (rc) {
                                node_err("ip4_lookup",
                                         "Failed to setup lpm tbl for sock %u, rc=%d",
                                         socket, rc);
                                return rc;
                        }
                }
                init_once = 1;
        }
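        /* Cache the socket-local LPM pointer in the node context so the
         * fast-path lookup above can fetch it without touching global data.
         */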
        *lpm_p = ip4_lookup_nm.lpm_tbl[graph->socket];
        node_dbg("ip4_lookup", "Initialized ip4_lookup node");

        return 0;
}

static struct rte_node_register ip4_lookup_node = {
        .process = ip4_lookup_node_process,
        .name = "ip4_lookup",

        .init = ip4_lookup_node_init,

        .nb_edges = RTE_NODE_IP4_LOOKUP_NEXT_MAX,
        .next_nodes = {
                [RTE_NODE_IP4_LOOKUP_NEXT_REWRITE] = "ip4_rewrite",
                [RTE_NODE_IP4_LOOKUP_NEXT_PKT_DROP] = "pkt_drop",
        },
};

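/* Constructor-time registration: makes the "ip4_lookup" node available to
 * applications that list it in the node patterns passed to
 * rte_graph_create().
 */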
RTE_NODE_REGISTER(ip4_lookup_node);