/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */
4 #ifndef _IPSEC_SECGW_H_
5 #define _IPSEC_SECGW_H_
/* Max number of RX queues a single lcore may poll. */
#define MAX_RX_QUEUE_PER_LCORE 16

/* Scalar burst size and (larger) vector-mode burst size. */
#define MAX_PKT_BURST 32
#define MAX_PKT_BURST_VEC 256

/*
 * Capacity of the per-traffic-class packet arrays: twice the larger of
 * the two burst sizes, so a full burst always fits on top of a
 * partially filled array.
 */
#define MAX_PKTS \
	((MAX_PKT_BURST_VEC > MAX_PKT_BURST ? \
	  MAX_PKT_BURST_VEC : MAX_PKT_BURST) * 2)

/* Application log type, mapped onto DPDK's USER1 logtype. */
#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
/* Big-endian host: byte 'a' becomes the most significant byte. */
#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
	(((uint64_t)((a) & 0xff) << 56) | \
	((uint64_t)((b) & 0xff) << 48) | \
	((uint64_t)((c) & 0xff) << 40) | \
	((uint64_t)((d) & 0xff) << 32) | \
	((uint64_t)((e) & 0xff) << 24) | \
	((uint64_t)((f) & 0xff) << 16) | \
	((uint64_t)((g) & 0xff) << 8) | \
	((uint64_t)(h) & 0xff))
#else
/* Little-endian host: byte 'a' lands in the least significant byte. */
#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
	(((uint64_t)((h) & 0xff) << 56) | \
	((uint64_t)((g) & 0xff) << 48) | \
	((uint64_t)((f) & 0xff) << 40) | \
	((uint64_t)((e) & 0xff) << 32) | \
	((uint64_t)((d) & 0xff) << 24) | \
	((uint64_t)((c) & 0xff) << 16) | \
	((uint64_t)((b) & 0xff) << 8) | \
	((uint64_t)(a) & 0xff))
#endif
/* Split a 32-bit value into four bytes, most significant byte first. */
#define uint32_t_to_char(ip, a, b, c, d) do {\
		*a = (uint8_t)(ip >> 24 & 0xff);\
		*b = (uint8_t)(ip >> 16 & 0xff);\
		*c = (uint8_t)(ip >> 8 & 0xff);\
		*d = (uint8_t)(ip & 0xff);\
	} while (0)
/* Pack six Ethernet-address bytes into a uint64_t (byte placement follows
 * host endianness via __BYTES_TO_UINT64; last two bytes are zero). */
#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))

/* UDP port used for IPsec NAT-Traversal (UDP-encapsulated ESP). */
#define IPSEC_NAT_T_PORT 4500
/* mbuf packet-type marking for ESP-in-UDP tunnel packets. */
#define MBUF_PTYPE_TUNNEL_ESP_IN_UDP (RTE_PTYPE_TUNNEL_ESP | RTE_PTYPE_L4_UDP)
58 struct rte_mbuf *pkts[MAX_PKTS];
59 const uint8_t *data[MAX_PKTS];
60 void *saptr[MAX_PKTS];
61 uint32_t res[MAX_PKTS];
62 } __rte_cache_aligned;
64 struct ipsec_traffic {
65 struct traffic_type ipsec;
66 struct traffic_type ip4;
67 struct traffic_type ip6;
/* Fields optimized for devices without burst */
struct traffic_type_nb {
	/* Single-packet analogue of struct traffic_type; body reconstructed
	 * (it was truncated from this chunk) — confirm against upstream. */
	const uint8_t *data;
	struct rte_mbuf *pkt;
	uint32_t res;
	uint32_t num;
};
78 struct ipsec_traffic_nb {
79 struct traffic_type_nb ipsec;
80 struct traffic_type_nb ip4;
81 struct traffic_type_nb ip6;
/* port/source ethernet addr and destination ethernet addr */
struct ethaddr_info {
	/* Definition reconstructed — it was truncated from this chunk but is
	 * required by the ethaddr_tbl extern below. uint64_t matches the
	 * packed form produced by ETHADDR(); confirm against upstream. */
	uint64_t src, dst;
};
/* Security Policy Database verdict counters. Body reconstructed (truncated
 * from this chunk); the three standard SPD actions — confirm upstream. */
struct ipsec_spd_stats {
	uint64_t protect;
	uint64_t bypass;
	uint64_t discard;
};
/* Security Association lookup counters. Body reconstructed (truncated from
 * this chunk) — NOTE(review): confirm the exact field set upstream. */
struct ipsec_sa_stats {
	uint64_t miss;
};
100 struct ipsec_core_statistics {
109 struct ipsec_spd_stats spd4;
110 struct ipsec_spd_stats spd6;
111 struct ipsec_sa_stats sad;
115 struct ipsec_spd_stats spd4;
116 struct ipsec_spd_stats spd6;
117 struct ipsec_sa_stats sad;
127 } __rte_cache_aligned;
/* Per-lcore packet counters, indexed by rte_lcore_id(). */
extern struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];

/* Per-port Ethernet address info (see comment above struct ethaddr_info). */
extern struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS];

/* Port mask to identify the unprotected ports */
extern uint32_t unprotected_port_mask;

/* Index of SA in single mode */
extern uint32_t single_sa_idx;

/* NOTE(review): presumably set asynchronously (signal handler) to stop the
 * workers — volatile supports that reading; confirm against main(). */
extern volatile bool force_quit;

/* Mbuf count for the packet pool(s) — name-derived, confirm at init site. */
extern uint32_t nb_bufs_in_pool;

/* True when each port gets its own mbuf pool — name-derived, confirm. */
extern bool per_port_pool;

/* Interface MTU and fragmentation-table size knobs. */
extern uint32_t mtu_size;
extern uint32_t frag_tbl_sz;
148 static inline uint8_t
149 is_unprotected_port(uint16_t port_id)
151 return unprotected_port_mask & (1 << port_id);
155 core_stats_update_rx(int n)
157 int lcore_id = rte_lcore_id();
158 core_statistics[lcore_id].rx += n;
159 core_statistics[lcore_id].rx_call++;
160 if (n == MAX_PKT_BURST)
161 core_statistics[lcore_id].burst_rx += n;
165 core_stats_update_tx(int n)
167 int lcore_id = rte_lcore_id();
168 core_statistics[lcore_id].tx += n;
169 core_statistics[lcore_id].tx_call++;
173 core_stats_update_drop(int n)
175 int lcore_id = rte_lcore_id();
176 core_statistics[lcore_id].dropped += n;
/* helper routine to free bulk of packets: frees the first n mbufs of mb[]
 * and counts them as drops on the calling lcore. (Restored the signature
 * line, loop-index declaration, and braces truncated from this chunk.) */
static inline void
free_pkts(struct rte_mbuf *mb[], uint32_t n)
{
	uint32_t i;

	for (i = 0; i != n; i++)
		rte_pktmbuf_free(mb[i]);

	core_stats_update_drop(n);
}
191 #endif /* _IPSEC_SECGW_H_ */