8835aab9ddc402d5e81af0ad36d75002cd15d45d
[dpdk.git] / lib / librte_node / ip4_lookup.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2020 Marvell International Ltd.
3  */
4
5 #include <arpa/inet.h>
6 #include <sys/socket.h>
7
8 #include <rte_debug.h>
9 #include <rte_ethdev.h>
10 #include <rte_ether.h>
11 #include <rte_graph.h>
12 #include <rte_graph_worker.h>
13 #include <rte_ip.h>
14 #include <rte_lpm.h>
15 #include <rte_mbuf.h>
16 #include <rte_tcp.h>
17 #include <rte_udp.h>
18 #include <rte_vect.h>
19
20 #include "rte_node_ip4_api.h"
21
22 #include "node_private.h"
23
/* LPM sizing: max number of installed routes and number of tbl8 groups
 * (each group covers one /24 expansion) per table.
 */
#define IPV4_L3FWD_LPM_MAX_RULES 1024
#define IPV4_L3FWD_LPM_NUMBER_TBL8S (1 << 8)

/* IP4 Lookup global data struct */
struct ip4_lookup_node_main {
	struct rte_lpm *lpm_tbl[RTE_MAX_NUMA_NODES]; /* One LPM table per NUMA socket */
};

/* Process-wide lookup state shared by every ip4_lookup node instance. */
static struct ip4_lookup_node_main ip4_lookup_nm;
33
34 #if defined(__ARM_NEON)
35 #include "ip4_lookup_neon.h"
36 #elif defined(RTE_ARCH_X86)
37 #include "ip4_lookup_sse.h"
38 #endif
39
/*
 * Scalar (one packet at a time) process callback of the ip4_lookup node.
 *
 * For each mbuf: read the IPv4 header (assumed to sit right after the
 * Ethernet header), stash checksum/TTL in the mbuf private area for the
 * rewrite node, look up the destination in the per-socket LPM table and
 * steer the packet to the edge encoded in bits [23:16] of the next hop
 * (bits [15:0] carry the application next-hop id).
 *
 * Packets are speculatively assumed to go to the REWRITE edge; a run of
 * correctly speculated mbufs is moved in bulk, and only mispredicted
 * packets pay the cost of an individual enqueue.
 *
 * Returns the number of objects processed (always nb_objs).
 */
static uint16_t
ip4_lookup_node_process_scalar(struct rte_graph *graph, struct rte_node *node,
			void **objs, uint16_t nb_objs)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	void **to_next, **from;
	uint16_t last_spec = 0;	/* length of current correctly-speculated run */
	struct rte_mbuf *mbuf;
	rte_edge_t next_index;
	struct rte_lpm *lpm;
	uint16_t held = 0;	/* objects already committed to the next stream */
	uint32_t drop_nh;
	int i, rc;

	/* Speculative next */
	next_index = RTE_NODE_IP4_LOOKUP_NEXT_REWRITE;
	/* Drop node: pre-encode the drop edge in next-hop format (edge in
	 * bits [23:16]) so lookup misses need no special-casing below.
	 */
	drop_nh = ((uint32_t)RTE_NODE_IP4_LOOKUP_NEXT_PKT_DROP) << 16;

	/* Get socket specific LPM from ctx (stored by the node init cb) */
	lpm = *((struct rte_lpm **)node->ctx);
	from = objs;

	/* Get stream for the speculated next node */
	to_next = rte_node_next_stream_get(graph, node, next_index, nb_objs);
	for (i = 0; i < nb_objs; i++) {
		uint32_t next_hop;
		uint16_t next;

		mbuf = (struct rte_mbuf *)objs[i];

		/* Extract DIP of mbuf0 */
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));
		/* Extract cksum, ttl as ipv4 hdr is in cache */
		node_mbuf_priv1(mbuf)->cksum = ipv4_hdr->hdr_checksum;
		node_mbuf_priv1(mbuf)->ttl = ipv4_hdr->time_to_live;

		rc = rte_lpm_lookup(lpm, rte_be_to_cpu_32(ipv4_hdr->dst_addr),
				    &next_hop);
		/* On miss, substitute the pre-built drop next hop */
		next_hop = (rc == 0) ? next_hop : drop_nh;

		/* Low 16 bits: next-hop id consumed by ip4_rewrite */
		node_mbuf_priv1(mbuf)->nh = (uint16_t)next_hop;
		/* High bits: edge (next node) to send this packet to */
		next_hop = next_hop >> 16;
		next = (uint16_t)next_hop;

		if (unlikely(next_index != next)) {
			/* Copy things successfully speculated till now */
			rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
			from += last_spec;
			to_next += last_spec;
			held += last_spec;
			last_spec = 0;

			/* Enqueue the mispredicted packet on its own */
			rte_node_enqueue_x1(graph, node, next, from[0]);
			from += 1;
		} else {
			last_spec += 1;
		}
	}

	/* !!! Home run !!! Every packet went to the speculated edge: hand the
	 * whole input stream over without copying a single pointer.
	 */
	if (likely(last_spec == nb_objs)) {
		rte_node_next_stream_move(graph, node, next_index);
		return nb_objs;
	}
	/* Flush the trailing speculated run and commit the stream */
	held += last_spec;
	rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
	rte_node_next_stream_put(graph, node, next_index, held);

	return nb_objs;
}
112
113 int
114 rte_node_ip4_route_add(uint32_t ip, uint8_t depth, uint16_t next_hop,
115                        enum rte_node_ip4_lookup_next next_node)
116 {
117         char abuf[INET6_ADDRSTRLEN];
118         struct in_addr in;
119         uint8_t socket;
120         uint32_t val;
121         int ret;
122
123         in.s_addr = htonl(ip);
124         inet_ntop(AF_INET, &in, abuf, sizeof(abuf));
125         /* Embedded next node id into 24 bit next hop */
126         val = ((next_node << 16) | next_hop) & ((1ull << 24) - 1);
127         node_dbg("ip4_lookup", "LPM: Adding route %s / %d nh (0x%x)", abuf,
128                  depth, val);
129
130         for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
131                 if (!ip4_lookup_nm.lpm_tbl[socket])
132                         continue;
133
134                 ret = rte_lpm_add(ip4_lookup_nm.lpm_tbl[socket],
135                                   ip, depth, val);
136                 if (ret < 0) {
137                         node_err("ip4_lookup",
138                                  "Unable to add entry %s / %d nh (%x) to LPM table on sock %d, rc=%d\n",
139                                  abuf, depth, val, socket, ret);
140                         return ret;
141                 }
142         }
143
144         return 0;
145 }
146
147 static int
148 setup_lpm(struct ip4_lookup_node_main *nm, int socket)
149 {
150         struct rte_lpm_config config_ipv4;
151         char s[RTE_LPM_NAMESIZE];
152
153         /* One LPM table per socket */
154         if (nm->lpm_tbl[socket])
155                 return 0;
156
157         /* create the LPM table */
158         config_ipv4.max_rules = IPV4_L3FWD_LPM_MAX_RULES;
159         config_ipv4.number_tbl8s = IPV4_L3FWD_LPM_NUMBER_TBL8S;
160         config_ipv4.flags = 0;
161         snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socket);
162         nm->lpm_tbl[socket] = rte_lpm_create(s, socket, &config_ipv4);
163         if (nm->lpm_tbl[socket] == NULL)
164                 return -rte_errno;
165
166         return 0;
167 }
168
169 static int
170 ip4_lookup_node_init(const struct rte_graph *graph, struct rte_node *node)
171 {
172         struct rte_lpm **lpm_p = (struct rte_lpm **)&node->ctx;
173         uint16_t socket, lcore_id;
174         static uint8_t init_once;
175         int rc;
176
177         RTE_SET_USED(graph);
178         RTE_SET_USED(node);
179
180         if (!init_once) {
181                 /* Setup LPM tables for all sockets */
182                 RTE_LCORE_FOREACH(lcore_id)
183                 {
184                         socket = rte_lcore_to_socket_id(lcore_id);
185                         rc = setup_lpm(&ip4_lookup_nm, socket);
186                         if (rc) {
187                                 node_err("ip4_lookup",
188                                          "Failed to setup lpm tbl for sock %u, rc=%d",
189                                          socket, rc);
190                                 return rc;
191                         }
192                 }
193                 init_once = 1;
194         }
195         *lpm_p = ip4_lookup_nm.lpm_tbl[graph->socket];
196
197 #if defined(__ARM_NEON) || defined(RTE_ARCH_X86)
198         if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
199                 node->process = ip4_lookup_node_process_vec;
200 #endif
201
202         node_dbg("ip4_lookup", "Initialized ip4_lookup node");
203
204         return 0;
205 }
206
/* ip4_lookup node registration: scalar process callback by default (the
 * init callback may swap in the vector variant), with one edge to the
 * rewrite node and one to the drop node.
 */
static struct rte_node_register ip4_lookup_node = {
	.process = ip4_lookup_node_process_scalar,
	.name = "ip4_lookup",

	.init = ip4_lookup_node_init,

	.nb_edges = RTE_NODE_IP4_LOOKUP_NEXT_MAX,
	.next_nodes = {
		[RTE_NODE_IP4_LOOKUP_NEXT_REWRITE] = "ip4_rewrite",
		[RTE_NODE_IP4_LOOKUP_NEXT_PKT_DROP] = "pkt_drop",
	},
};

RTE_NODE_REGISTER(ip4_lookup_node);