examples/tep_term: remove redundant info get
dpdk.git: examples/tep_termination/vxlan_setup.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <getopt.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include <sys/param.h>
#include <unistd.h>

#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>

#include "main.h"
#include "rte_vhost.h"
#include "vxlan.h"
#include "vxlan_setup.h"

#define IPV4_HEADER_LEN 20
#define UDP_HEADER_LEN  8
#define VXLAN_HEADER_LEN 8

#define IP_DEFTTL  64   /* from RFC 1340. */

#define IP_DN_FRAGMENT_FLAG 0x0040

/* Used to compare MAC addresses. */
#define MAC_ADDR_CMP 0xFFFFFFFFFFFFULL

/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512

/* Default inner VLAN ID */
#define INNER_VLAN_ID 100

/* VXLAN device */
struct vxlan_conf vxdev;

struct rte_ipv4_hdr app_ip_hdr[VXLAN_N_PORTS];
struct rte_ether_hdr app_l2_hdr[VXLAN_N_PORTS];

/* Local VTEP IP address */
uint8_t vxlan_multicast_ips[2][4] = { {239, 1, 1, 1 }, {239, 1, 2, 1 } };

/* Remote VTEP IP address */
uint8_t vxlan_overlay_ips[2][4] = { {192, 168, 10, 1}, {192, 168, 30, 1} };

/* Remote VTEP MAC address */
uint8_t peer_mac[6] = {0x00, 0x11, 0x01, 0x00, 0x00, 0x01};

/* VXLAN RX filter type */
uint8_t tep_filter_type[] = {RTE_TUNNEL_FILTER_IMAC_TENID,
			RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
			RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,};

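/*
 * Note: the TX offloads requested below (L3/L4 checksum insertion, TCP and
 * VXLAN tunnel TSO, outer IPv4 checksum, multi-segment mbufs) are the ones
 * the encapsulation path relies on when handing work to the NIC.
 */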
/* Options for configuring ethernet port */
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.split_hdr_size = 0,
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
			     DEV_TX_OFFLOAD_UDP_CKSUM |
			     DEV_TX_OFFLOAD_TCP_CKSUM |
			     DEV_TX_OFFLOAD_SCTP_CKSUM |
			     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
			     DEV_TX_OFFLOAD_TCP_TSO |
			     DEV_TX_OFFLOAD_MULTI_SEGS |
			     DEV_TX_OFFLOAD_VXLAN_TNL_TSO),
	},
};

/**
 * One or two devices belonging to the same tenant ID can be
 * assigned to a VM.
 */
const uint16_t tenant_id_conf[] = {
	1000, 1000, 1001, 1001, 1002, 1002, 1003, 1003,
	1004, 1004, 1005, 1005, 1006, 1006, 1007, 1007,
	1008, 1008, 1009, 1009, 1010, 1010, 1011, 1011,
	1012, 1012, 1013, 1013, 1014, 1014, 1015, 1015,
	1016, 1016, 1017, 1017, 1018, 1018, 1019, 1019,
	1020, 1020, 1021, 1021, 1022, 1022, 1023, 1023,
	1024, 1024, 1025, 1025, 1026, 1026, 1027, 1027,
	1028, 1028, 1029, 1029, 1030, 1030, 1031, 1031,
};

/**
 * Initialises a given port using global settings, with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
int
vxlan_port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
	int retval;
	uint16_t q;
	struct rte_eth_dev_info dev_info;
	uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
	uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
	uint16_t tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
	struct rte_eth_udp_tunnel tunnel_udp;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_txconf *txconf;
	struct vxlan_conf *pconf = &vxdev;
	struct rte_eth_conf local_port_conf = port_conf;

	pconf->dst_port = udp_port;

	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0)
		rte_exit(EXIT_FAILURE,
			"Error during getting device (port %u) info: %s\n",
			port, strerror(-retval));

	if (dev_info.max_rx_queues > MAX_QUEUES) {
		rte_exit(EXIT_FAILURE,
			"please define MAX_QUEUES no less than %u in %s\n",
			dev_info.max_rx_queues, __FILE__);
	}

	rxconf = &dev_info.default_rxconf;
	txconf = &dev_info.default_txconf;

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	rx_rings = nb_devices;
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		local_port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	/* Configure ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings,
				       &local_port_conf);
	if (retval != 0)
		return retval;

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
			&tx_ring_size);
	if (retval != 0)
		return retval;

	/* Setup the queues. */
	rxconf->offloads = local_port_conf.rxmode.offloads;
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
						rte_eth_dev_socket_id(port),
						rxconf,
						mbuf_pool);
		if (retval < 0)
			return retval;
	}
	txconf->offloads = local_port_conf.txmode.offloads;
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
						rte_eth_dev_socket_id(port),
						txconf);
		if (retval < 0)
			return retval;
	}

	/* Start the device. */
	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	/* Configure UDP port for UDP tunneling */
	tunnel_udp.udp_port = udp_port;
	tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
	retval = rte_eth_dev_udp_tunnel_port_add(port, &tunnel_udp);
	if (retval < 0)
		return retval;
	retval = rte_eth_macaddr_get(port, &ports_eth_addr[port]);
	if (retval < 0)
		return retval;

	RTE_LOG(INFO, PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			port,
			ports_eth_addr[port].addr_bytes[0],
			ports_eth_addr[port].addr_bytes[1],
			ports_eth_addr[port].addr_bytes[2],
			ports_eth_addr[port].addr_bytes[3],
			ports_eth_addr[port].addr_bytes[4],
			ports_eth_addr[port].addr_bytes[5]);

	if (tso_segsz != 0) {
		if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0)
			RTE_LOG(WARNING, PORT,
				"hardware TSO offload is not supported\n");
	}
	return 0;
}

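/**
 * Decapsulate a received packet (strip the outer headers) when
 * decapsulation is enabled on the command line (rx_decap).
 */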
static int
vxlan_rx_process(struct rte_mbuf *pkt)
{
	int ret = 0;

	if (rx_decap)
		ret = decapsulation(pkt);

	return ret;
}

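/**
 * Encapsulate a packet for the given TX queue when encapsulation is
 * enabled on the command line (tx_encap).
 */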
static void
vxlan_tx_process(uint8_t queue_id, struct rte_mbuf *pkt)
{
	if (tx_encap)
		encapsulation(pkt, queue_id);

	return;
}

/*
 * This function learns the MAC address of the device and sets the
 * initial L2 and L3 header info.
 */
int
vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	int i, ret;
	struct rte_ether_hdr *pkt_hdr;
	uint64_t portid = vdev->vid;
	struct rte_ipv4_hdr *ip;

	struct rte_eth_tunnel_filter_conf tunnel_filter_conf;

	if (unlikely(portid >= VXLAN_N_PORTS)) {
		RTE_LOG(INFO, VHOST_DATA,
			"(%d) WARNING: Not configuring device, "
			"as we already have %d ports for VXLAN.\n",
			vdev->vid, VXLAN_N_PORTS);
		return -1;
	}

	/* Learn MAC address of guest device from packet */
	pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	if (rte_is_same_ether_addr(&(pkt_hdr->s_addr), &vdev->mac_address)) {
		RTE_LOG(INFO, VHOST_DATA,
			"(%d) WARNING: This device is using an existing"
			" MAC address and has not been registered.\n",
			vdev->vid);
		return -1;
	}

	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
		vdev->mac_address.addr_bytes[i] =
			vxdev.port[portid].vport_mac.addr_bytes[i] =
			pkt_hdr->s_addr.addr_bytes[i];
		vxdev.port[portid].peer_mac.addr_bytes[i] = peer_mac[i];
	}

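	/* Add a tunnel (cloud) filter so that incoming VXLAN packets matching
	 * this device's inner MAC and tenant ID (VNI) are steered to its
	 * RX queue.
	 */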
	memset(&tunnel_filter_conf, 0,
		sizeof(struct rte_eth_tunnel_filter_conf));

	rte_ether_addr_copy(&ports_eth_addr[0], &tunnel_filter_conf.outer_mac);
	tunnel_filter_conf.filter_type = tep_filter_type[filter_idx];

	/* inner MAC */
	rte_ether_addr_copy(&vdev->mac_address, &tunnel_filter_conf.inner_mac);

	tunnel_filter_conf.queue_id = vdev->rx_q;
	tunnel_filter_conf.tenant_id = tenant_id_conf[vdev->rx_q];

	if (tep_filter_type[filter_idx] == RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID)
		tunnel_filter_conf.inner_vlan = INNER_VLAN_ID;

	tunnel_filter_conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;

	ret = rte_eth_dev_filter_ctrl(ports[0],
		RTE_ETH_FILTER_TUNNEL,
		RTE_ETH_FILTER_ADD,
		&tunnel_filter_conf);
	if (ret) {
		RTE_LOG(ERR, VHOST_DATA,
			"%d Failed to add device MAC address to cloud filter\n",
			vdev->rx_q);
		return -1;
	}

	/* Print out inner MAC and VNI info. */
	RTE_LOG(INFO, VHOST_DATA,
		"(%d) MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VNI %d registered\n",
		vdev->rx_q,
		vdev->mac_address.addr_bytes[0],
		vdev->mac_address.addr_bytes[1],
		vdev->mac_address.addr_bytes[2],
		vdev->mac_address.addr_bytes[3],
		vdev->mac_address.addr_bytes[4],
		vdev->mac_address.addr_bytes[5],
		tenant_id_conf[vdev->rx_q]);

	vxdev.port[portid].vport_id = portid;

	for (i = 0; i < 4; i++) {
		/* Local VTEP IP */
		vxdev.port_ip |= vxlan_multicast_ips[portid][i] << (8 * i);
		/* Remote VTEP IP */
		vxdev.port[portid].peer_ip |=
			vxlan_overlay_ips[portid][i] << (8 * i);
	}

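	/* Pre-build the outer Ethernet and IPv4 headers that the
	 * encapsulation path will prepend to packets from this device.
	 */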
	vxdev.out_key = tenant_id_conf[vdev->rx_q];
	rte_ether_addr_copy(&vxdev.port[portid].peer_mac,
			&app_l2_hdr[portid].d_addr);
	rte_ether_addr_copy(&ports_eth_addr[0],
			&app_l2_hdr[portid].s_addr);
	app_l2_hdr[portid].ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);

	ip = &app_ip_hdr[portid];
	ip->version_ihl = RTE_IPV4_VHL_DEF;
	ip->type_of_service = 0;
	ip->total_length = 0;
	ip->packet_id = 0;
	ip->fragment_offset = IP_DN_FRAGMENT_FLAG;
	ip->time_to_live = IP_DEFTTL;
	ip->next_proto_id = IPPROTO_UDP;
	ip->hdr_checksum = 0;
	ip->src_addr = vxdev.port_ip;
	ip->dst_addr = vxdev.port[portid].peer_ip;

	/* Set device as ready for RX. */
	vdev->ready = DEVICE_RX;

	return 0;
}

/**
 * Removes the cloud filter and drains any packets remaining in the RX
 * queue before disabling RX on the device.
 */
void
vxlan_unlink(struct vhost_dev *vdev)
{
	unsigned i = 0, rx_count;
	int ret;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_eth_tunnel_filter_conf tunnel_filter_conf;

	if (vdev->ready == DEVICE_RX) {
		memset(&tunnel_filter_conf, 0,
			sizeof(struct rte_eth_tunnel_filter_conf));

		rte_ether_addr_copy(&ports_eth_addr[0],
				&tunnel_filter_conf.outer_mac);
		rte_ether_addr_copy(&vdev->mac_address,
				&tunnel_filter_conf.inner_mac);
		tunnel_filter_conf.tenant_id = tenant_id_conf[vdev->rx_q];
		tunnel_filter_conf.filter_type = tep_filter_type[filter_idx];

		if (tep_filter_type[filter_idx] ==
			RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID)
			tunnel_filter_conf.inner_vlan = INNER_VLAN_ID;

		tunnel_filter_conf.queue_id = vdev->rx_q;
		tunnel_filter_conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;

		ret = rte_eth_dev_filter_ctrl(ports[0],
				RTE_ETH_FILTER_TUNNEL,
				RTE_ETH_FILTER_DELETE,
				&tunnel_filter_conf);
		if (ret) {
			RTE_LOG(ERR, VHOST_DATA,
				"%d Failed to remove device MAC address from cloud filter\n",
				vdev->rx_q);
			return;
		}
		for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
			vdev->mac_address.addr_bytes[i] = 0;

		/* Clear out the receive buffers */
		rx_count = rte_eth_rx_burst(ports[0],
				(uint16_t)vdev->rx_q,
				pkts_burst, MAX_PKT_BURST);

		while (rx_count) {
			for (i = 0; i < rx_count; i++)
				rte_pktmbuf_free(pkts_burst[i]);

			rx_count = rte_eth_rx_burst(ports[0],
					(uint16_t)vdev->rx_q,
					pkts_burst, MAX_PKT_BURST);
		}
		vdev->ready = DEVICE_MAC_LEARNING;
	}
}

/* Transmit packets after encapsulating */
int
vxlan_tx_pkts(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int ret = 0;
	uint16_t i;

	for (i = 0; i < nb_pkts; i++)
		vxlan_tx_process(queue_id, tx_pkts[i]);

	ret = rte_eth_tx_burst(port_id, queue_id, tx_pkts, nb_pkts);

	return ret;
}

/* Check for decapsulation and pass packets directly to VIRTIO device */
int
vxlan_rx_pkts(int vid, struct rte_mbuf **pkts_burst, uint32_t rx_count)
{
	uint32_t i = 0;
	uint32_t count = 0;
	int ret;
	struct rte_mbuf *pkts_valid[rx_count];

	for (i = 0; i < rx_count; i++) {
		if (enable_stats) {
			rte_atomic64_add(
				&dev_statistics[vid].rx_bad_ip_csum,
				(pkts_burst[i]->ol_flags & PKT_RX_IP_CKSUM_BAD)
				!= 0);
			rte_atomic64_add(
				&dev_statistics[vid].rx_bad_ip_csum,
				(pkts_burst[i]->ol_flags & PKT_RX_L4_CKSUM_BAD)
				!= 0);
		}
		ret = vxlan_rx_process(pkts_burst[i]);
		if (unlikely(ret < 0))
			continue;

		pkts_valid[count] = pkts_burst[i];
		count++;
	}

	ret = rte_vhost_enqueue_burst(vid, VIRTIO_RXQ, pkts_valid, count);
	return ret;
}
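
/*
 * Illustrative sketch only (not part of this file): main.c is expected to
 * drive these helpers roughly as below; mbuf_pool, vid, pkts, rx_q and tx_q
 * are placeholder names.
 *
 *	if (vxlan_port_init(ports[0], mbuf_pool) != 0)
 *		rte_exit(EXIT_FAILURE, "Port init failed\n");
 *
 *	... physical port -> guest: decapsulate, then enqueue to the vhost device
 *	rx_count = rte_eth_rx_burst(ports[0], rx_q, pkts, MAX_PKT_BURST);
 *	vxlan_rx_pkts(vid, pkts, rx_count);
 *
 *	... guest -> physical port: dequeue from vhost, encapsulate, transmit
 *	nb_tx = rte_vhost_dequeue_burst(vid, VIRTIO_TXQ, mbuf_pool,
 *			pkts, MAX_PKT_BURST);
 *	vxlan_tx_pkts(ports[0], tx_q, pkts, nb_tx);
 */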