/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2021 Intel Corporation
 */

#ifndef __L3_FWD_H__
#define __L3_FWD_H__

#include <stdbool.h>
#include <rte_ethdev.h>
#include <rte_vect.h>

#define DO_RFC_1812_CHECKS

#define RTE_LOGTYPE_L3FWD RTE_LOGTYPE_USER1

#if !defined(NO_HASH_MULTI_LOOKUP) && defined(__ARM_NEON)
#define NO_HASH_MULTI_LOOKUP 1
#endif

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 1024

#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
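
/*
 * Usage sketch (illustrative only; cur_tsc, prev_tsc, i and portid are
 * placeholder names): the main loops typically convert BURST_TX_DRAIN_US
 * into TSC cycles once, then flush any partially filled TX buffers when
 * that interval has elapsed:
 *
 *	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
 *			US_PER_S * BURST_TX_DRAIN_US;
 *
 *	cur_tsc = rte_rdtsc();
 *	if (unlikely(cur_tsc - prev_tsc > drain_tsc)) {
 *		for (i = 0; i < qconf->n_tx_port; ++i) {
 *			portid = qconf->tx_port_id[i];
 *			if (qconf->tx_mbufs[portid].len == 0)
 *				continue;
 *			send_burst(qconf, qconf->tx_mbufs[portid].len, portid);
 *			qconf->tx_mbufs[portid].len = 0;
 *		}
 *		prev_tsc = cur_tsc;
 *	}
 */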

#define MEMPOOL_CACHE_SIZE 256
#define MAX_RX_QUEUE_PER_LCORE 16

/*
 * Try to avoid TX buffering if we have at least MAX_TX_BURST packets to send.
 */
#define MAX_TX_BURST (MAX_PKT_BURST / 2)

/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET 3
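
/*
 * Usage sketch (illustrative only; pkts_burst, nb_rx, portid and
 * l3fwd_simple_forward are placeholder names): the software lookup paths
 * prefetch packet data PREFETCH_OFFSET mbufs ahead of the one currently
 * being processed, then handle the tail for which nothing is left to
 * prefetch:
 *
 *	for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++)
 *		rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j], void *));
 *
 *	for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
 *		rte_prefetch0(rte_pktmbuf_mtod(
 *				pkts_burst[j + PREFETCH_OFFSET], void *));
 *		l3fwd_simple_forward(pkts_burst[j], portid, qconf);
 *	}
 *
 *	for (; j < nb_rx; j++)
 *		l3fwd_simple_forward(pkts_burst[j], portid, qconf);
 */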

/* Used to mark destination port as 'invalid'. */
#define BAD_PORT ((uint16_t)-1)

/* replace first 12B of the ethernet header. */

/* Hash parameters. */
#ifdef RTE_ARCH_64
/* default to 4 million hash entries (approx) */
#define L3FWD_HASH_ENTRIES (1024*1024*4)
#else
/* 32-bit has less address-space for hugepage memory, limit to 1M entries */
#define L3FWD_HASH_ENTRIES (1024*1024*1)
#endif
#define HASH_ENTRY_NUMBER_DEFAULT 4

struct mbuf_table {
	uint16_t len;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

struct lcore_rx_queue {
	uint16_t port_id;
	uint8_t queue_id;
} __rte_cache_aligned;

struct lcore_conf {
	uint16_t n_rx_queue;
	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t n_tx_port;
	uint16_t tx_port_id[RTE_MAX_ETHPORTS];
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
	struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
	void *ipv4_lookup_struct;
	void *ipv6_lookup_struct;
} __rte_cache_aligned;
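
/*
 * Usage sketch (illustrative only): every worker lcore indexes lcore_conf
 * with its own lcore id at the start of its main loop and then touches only
 * that per-lcore entry, which is why no locking is needed around it:
 *
 *	unsigned int lcore_id = rte_lcore_id();
 *	struct lcore_conf *qconf = &lcore_conf[lcore_id];
 *
 *	if (qconf->n_rx_queue == 0)
 *		return 0;	// no RX queue mapped to this lcore
 */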

extern volatile bool force_quit;

/* ethernet addresses of ports */
extern uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
extern struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

/* mask of enabled ports */
extern uint32_t enabled_port_mask;

/* Used only in exact match mode. */
extern int ipv6; /**< ipv6 is false by default. */
extern uint32_t hash_entry_number;

extern xmm_t val_eth[RTE_MAX_ETHPORTS];
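
/*
 * Note (illustrative; portid is a placeholder name): val_eth[] holds, per
 * port, the pre-built 12-byte destination + source MAC pair that the
 * forwarding path writes over the front of the Ethernet header in one step.
 * It is typically filled in at initialisation, roughly as:
 *
 *	*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
 *	rte_ether_addr_copy(&ports_eth_addr[portid],
 *			(struct rte_ether_addr *)(val_eth + portid) + 1);
 */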

extern struct lcore_conf lcore_conf[RTE_MAX_LCORE];

/* Send burst of packets on an output interface */
static inline int
send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
{
	uint16_t queueid = qconf->tx_queue_id[port];
	struct rte_mbuf **m_table =
		(struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
	uint16_t ret = rte_eth_tx_burst(port, queueid, m_table, n);

	/* free any mbufs the driver did not accept */
	if (unlikely(ret < n)) {
		do {
			rte_pktmbuf_free(m_table[ret]);
		} while (++ret < n);
	}
	return 0;
}

/* Enqueue a single packet, and send burst if queue is filled */
static inline int
send_single_packet(struct lcore_conf *qconf,
		   struct rte_mbuf *m, uint16_t port)
{
	uint16_t len = qconf->tx_mbufs[port].len;

	qconf->tx_mbufs[port].m_table[len++] = m;

	/* enough pkts to be sent */
	if (unlikely(len == MAX_PKT_BURST)) {
		send_burst(qconf, MAX_PKT_BURST, port);
		len = 0;
	}

	qconf->tx_mbufs[port].len = len;
	return 0;
}
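
/*
 * Usage sketch (illustrative only; m and dst_port are placeholder names):
 * once the route lookup has chosen an output port and the Ethernet header
 * has been rewritten, the forwarding path simply queues the mbuf here and
 * lets send_single_packet()/send_burst() handle the batching:
 *
 *	send_single_packet(qconf, m, dst_port);
 */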

#ifdef DO_RFC_1812_CHECKS
static inline int
is_valid_ipv4_pkt(struct rte_ipv4_hdr *pkt, uint32_t link_len)
{
	/* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
	/* 1. The packet length reported by the Link Layer must be large
	 * enough to hold the minimum length legal IP datagram (20 bytes).
	 */
	if (link_len < sizeof(struct rte_ipv4_hdr))
		return -1;

	/* 2. The IP checksum must be correct. */
	/* this is checked in H/W */

	/* 3. The IP version number must be 4. If the version number is not 4
	 * then the packet may be another version of IP, such as IPng or ST-II.
	 */
	if (((pkt->version_ihl) >> 4) != 4)
		return -3;

	/* 4. The IP header length field must be large enough to hold the
	 * minimum length legal IP datagram (20 bytes = 5 words).
	 */
	if ((pkt->version_ihl & 0xf) < 5)
		return -4;

	/* 5. The IP total length field must be large enough to hold the IP
	 * datagram header, whose length is specified in the IP header length field.
	 */
	if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
		return -5;

	return 0;
}
#endif /* DO_RFC_1812_CHECKS */
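
/*
 * Usage sketch (illustrative only; m and ipv4_hdr are placeholder names):
 * the lookup paths run these checks on the IPv4 header that follows the
 * Ethernet header and drop packets that fail them:
 *
 *	ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
 *			sizeof(struct rte_ether_hdr));
 *	if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {
 *		rte_pktmbuf_free(m);
 *		return;
 *	}
 */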

int init_mem(uint16_t portid, unsigned int nb_mbuf);

/* Function prototypes for LPM, EM or FIB functionality. */
void setup_lpm(const int socketid);
void setup_hash(const int socketid);
void setup_fib(const int socketid);

int em_check_ptype(int portid);
int lpm_check_ptype(int portid);

uint16_t em_cb_parse_ptype(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
		uint16_t nb_pkts, uint16_t max_pkts, void *user_param);
uint16_t lpm_cb_parse_ptype(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
		uint16_t nb_pkts, uint16_t max_pkts, void *user_param);

int em_main_loop(__rte_unused void *dummy);
int lpm_main_loop(__rte_unused void *dummy);
int fib_main_loop(__rte_unused void *dummy);

int lpm_event_main_loop_tx_d(__rte_unused void *dummy);
int lpm_event_main_loop_tx_d_burst(__rte_unused void *dummy);
int lpm_event_main_loop_tx_q(__rte_unused void *dummy);
int lpm_event_main_loop_tx_q_burst(__rte_unused void *dummy);

int em_event_main_loop_tx_d(__rte_unused void *dummy);
int em_event_main_loop_tx_d_burst(__rte_unused void *dummy);
int em_event_main_loop_tx_q(__rte_unused void *dummy);
int em_event_main_loop_tx_q_burst(__rte_unused void *dummy);

int fib_event_main_loop_tx_d(__rte_unused void *dummy);
int fib_event_main_loop_tx_d_burst(__rte_unused void *dummy);
int fib_event_main_loop_tx_q(__rte_unused void *dummy);
int fib_event_main_loop_tx_q_burst(__rte_unused void *dummy);

/* Return ipv4/ipv6 fwd lookup struct for LPM, EM or FIB. */
void *em_get_ipv4_l3fwd_lookup_struct(const int socketid);
void *em_get_ipv6_l3fwd_lookup_struct(const int socketid);
void *lpm_get_ipv4_l3fwd_lookup_struct(const int socketid);
void *lpm_get_ipv6_l3fwd_lookup_struct(const int socketid);
void *fib_get_ipv4_l3fwd_lookup_struct(const int socketid);
void *fib_get_ipv6_l3fwd_lookup_struct(const int socketid);

#endif /* __L3_FWD_H__ */