/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */
4 #ifndef _IPSEC_SECGW_H_
5 #define _IPSEC_SECGW_H_
/* Upper bound on RX queues polled by a single lcore. */
#define MAX_RX_QUEUE_PER_LCORE 16

/* Packets fetched/processed per burst. */
#define MAX_PKT_BURST 32

/* Log type used by this application's RTE_LOG() calls. */
#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
/*
 * Compose eight bytes (a..h) into a uint64_t so that byte 'a' occupies
 * the lowest-addressed byte of the in-memory representation regardless
 * of host endianness.  Two variants are selected at preprocessing time;
 * the original listing lost the #else/#endif, leaving a duplicate
 * definition — restored here.
 */
#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
	(((uint64_t)((a) & 0xff) << 56) | \
	((uint64_t)((b) & 0xff) << 48) | \
	((uint64_t)((c) & 0xff) << 40) | \
	((uint64_t)((d) & 0xff) << 32) | \
	((uint64_t)((e) & 0xff) << 24) | \
	((uint64_t)((f) & 0xff) << 16) | \
	((uint64_t)((g) & 0xff) << 8) | \
	((uint64_t)(h) & 0xff))
#else
#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
	(((uint64_t)((h) & 0xff) << 56) | \
	((uint64_t)((g) & 0xff) << 48) | \
	((uint64_t)((f) & 0xff) << 40) | \
	((uint64_t)((e) & 0xff) << 32) | \
	((uint64_t)((d) & 0xff) << 24) | \
	((uint64_t)((c) & 0xff) << 16) | \
	((uint64_t)((b) & 0xff) << 8) | \
	((uint64_t)(a) & 0xff))
#endif
/*
 * Split the host-order IPv4 address 'ip' into its four bytes:
 * *a = most significant byte ... *d = least significant byte.
 * Restored the dropped "} while (0)" terminator and parenthesized all
 * macro arguments to avoid operator-precedence surprises at expansion.
 */
#define uint32_t_to_char(ip, a, b, c, d) do {\
		*(a) = (uint8_t)((ip) >> 24 & 0xff);\
		*(b) = (uint8_t)((ip) >> 16 & 0xff);\
		*(c) = (uint8_t)((ip) >> 8 & 0xff);\
		*(d) = (uint8_t)((ip) & 0xff);\
	} while (0)
/* Pack a 6-byte MAC address into the low six bytes of a uint64_t. */
#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))

/* UDP port for NAT-T / ESP-in-UDP encapsulation (RFC 3948). */
#define IPSEC_NAT_T_PORT 4500

/* Packet type marking an ESP payload tunneled inside UDP. */
#define MBUF_PTYPE_TUNNEL_ESP_IN_UDP (RTE_PTYPE_TUNNEL_ESP | RTE_PTYPE_L4_UDP)
52 const uint8_t *data[MAX_PKT_BURST * 2];
53 struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
54 void *saptr[MAX_PKT_BURST * 2];
55 uint32_t res[MAX_PKT_BURST * 2];
59 struct ipsec_traffic {
60 struct traffic_type ipsec;
61 struct traffic_type ip4;
62 struct traffic_type ip6;
/* Fields optimized for devices without burst */
struct traffic_type_nb {
	/* NOTE(review): interior members were lost in extraction and are
	 * reconstructed from upstream ipsec-secgw (single-packet variant
	 * of struct traffic_type) — confirm against the original source. */
	const uint8_t *data;
	struct rte_mbuf *pkt;
	uint32_t res;
	uint32_t num;
};

/* Non-burst counterpart of struct ipsec_traffic. */
struct ipsec_traffic_nb {
	struct traffic_type_nb ipsec;
	struct traffic_type_nb ip4;
	struct traffic_type_nb ip6;
};
79 /* port/source ethernet addr and destination ethernet addr */
/* NOTE(review): this region of the listing is incomplete — the
 * struct ethaddr_info definition, the members of the two stats structs
 * below, the scalar counters of ipsec_core_statistics, and the
 * anonymous inbound/outbound wrapper structs were lost in extraction.
 * Recover the full upstream ipsec-secgw.h before editing. */
84 struct ipsec_spd_stats {
90 struct ipsec_sa_stats {
/* Per-lcore statistics; the rx/tx/rx_call/tx_call/dropped/burst_rx
 * counters referenced by the inline helpers further down presumably
 * live in this struct — confirm against upstream. */
95 struct ipsec_core_statistics {
/* First spd4/spd6/sad group — one direction (inbound or outbound;
 * the wrapping sub-struct line is missing, so which one is unknown). */
104 struct ipsec_spd_stats spd4;
105 struct ipsec_spd_stats spd6;
106 struct ipsec_sa_stats sad;
/* Second spd4/spd6/sad group — the opposite direction. */
110 struct ipsec_spd_stats spd4;
111 struct ipsec_spd_stats spd6;
112 struct ipsec_sa_stats sad;
122 } __rte_cache_aligned;
124 extern struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];
126 extern struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS];
128 /* Port mask to identify the unprotected ports */
129 extern uint32_t unprotected_port_mask;
131 /* Index of SA in single mode */
132 extern uint32_t single_sa_idx;
134 extern volatile bool force_quit;
136 extern uint32_t nb_bufs_in_pool;
138 extern bool per_port_pool;
140 extern uint32_t mtu_size;
141 extern uint32_t frag_tbl_sz;
143 static inline uint8_t
144 is_unprotected_port(uint16_t port_id)
146 return unprotected_port_mask & (1 << port_id);
150 core_stats_update_rx(int n)
152 int lcore_id = rte_lcore_id();
153 core_statistics[lcore_id].rx += n;
154 core_statistics[lcore_id].rx_call++;
155 if (n == MAX_PKT_BURST)
156 core_statistics[lcore_id].burst_rx += n;
160 core_stats_update_tx(int n)
162 int lcore_id = rte_lcore_id();
163 core_statistics[lcore_id].tx += n;
164 core_statistics[lcore_id].tx_call++;
168 core_stats_update_drop(int n)
170 int lcore_id = rte_lcore_id();
171 core_statistics[lcore_id].dropped += n;
/* helper routine to free bulk of packets */
static inline void
free_pkts(struct rte_mbuf *mb[], uint32_t n)
{
	uint32_t i;

	/* Free each mbuf, then record all 'n' as dropped. */
	for (i = 0; i != n; i++)
		rte_pktmbuf_free(mb[i]);

	core_stats_update_drop(n);
}
186 #endif /* _IPSEC_SECGW_H_ */