/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */
#ifndef _IPSEC_SECGW_H_
#define _IPSEC_SECGW_H_
/* Stats accounting interval; 0 compiles out the per-core statistics code
 * guarded by "#if (STATS_INTERVAL > 0)" below. */
#define STATS_INTERVAL 0
/* Maximum number of packets handled per RX/TX burst. */
#define MAX_PKT_BURST 32
/* Application log type, mapped onto DPDK's USER1 log type. */
#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
/*
 * Assemble eight byte arguments into a uint64_t so that 'a' occupies the
 * lowest memory address regardless of host endianness: on big-endian hosts
 * 'a' goes into the most-significant byte, on little-endian hosts into the
 * least-significant one.  The two variants must be mutually exclusive —
 * without the #else/#endif the second #define unconditionally redefines
 * the first and the conditional is never closed.
 */
#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
	(((uint64_t)((a) & 0xff) << 56) | \
	((uint64_t)((b) & 0xff) << 48) | \
	((uint64_t)((c) & 0xff) << 40) | \
	((uint64_t)((d) & 0xff) << 32) | \
	((uint64_t)((e) & 0xff) << 24) | \
	((uint64_t)((f) & 0xff) << 16) | \
	((uint64_t)((g) & 0xff) << 8) | \
	((uint64_t)(h) & 0xff))
#else
#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
	(((uint64_t)((h) & 0xff) << 56) | \
	((uint64_t)((g) & 0xff) << 48) | \
	((uint64_t)((f) & 0xff) << 40) | \
	((uint64_t)((e) & 0xff) << 32) | \
	((uint64_t)((d) & 0xff) << 24) | \
	((uint64_t)((c) & 0xff) << 16) | \
	((uint64_t)((b) & 0xff) << 8) | \
	((uint64_t)(a) & 0xff))
#endif
/*
 * Split a host-order uint32_t IPv4 address into four bytes, most
 * significant first, storing through the a..d output pointers.
 * All arguments are parenthesized so expressions may be passed, and
 * the body is closed with the canonical "} while (0)" so the macro
 * behaves as a single statement.
 */
#define uint32_t_to_char(ip, a, b, c, d) do {\
		*(a) = (uint8_t)(((ip) >> 24) & 0xff);\
		*(b) = (uint8_t)(((ip) >> 16) & 0xff);\
		*(c) = (uint8_t)(((ip) >> 8) & 0xff);\
		*(d) = (uint8_t)((ip) & 0xff);\
	} while (0)
48 #define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))
51 const uint8_t *data[MAX_PKT_BURST * 2];
52 struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
53 void *saptr[MAX_PKT_BURST * 2];
54 uint32_t res[MAX_PKT_BURST * 2];
58 struct ipsec_traffic {
59 struct traffic_type ipsec;
60 struct traffic_type ip4;
61 struct traffic_type ip6;
/* Fields optimized for devices without burst */
/*
 * NOTE(review): the interior of traffic_type_nb and both closers were
 * missing from this copy; restored as the single-packet counterpart of
 * struct traffic_type above — verify against the rest of the project.
 */
struct traffic_type_nb {
	const uint8_t *data;   /* packet data pointer */
	struct rte_mbuf *pkt;  /* packet buffer */
	uint32_t res;          /* result code */
	uint32_t num;          /* number of valid entries (0 or 1) */
};

/* Single-packet classification, one slot per traffic class. */
struct ipsec_traffic_nb {
	struct traffic_type_nb ipsec;
	struct traffic_type_nb ip4;
	struct traffic_type_nb ip6;
};
/* port/source ethernet addr and destination ethernet addr */
/* NOTE(review): struct body was missing from this copy; src/dst as packed
 * uint64_t matches the ETHADDR() producer above — verify against callers. */
struct ethaddr_info {
	uint64_t src, dst;
};

#if (STATS_INTERVAL > 0)
/* Per-lcore packet counters; cache-aligned to avoid false sharing.
 * Field set restored from the accesses in core_stats_update_*() below. */
struct ipsec_core_statistics {
	uint64_t tx;       /* packets transmitted */
	uint64_t rx;       /* packets received */
	uint64_t rx_call;  /* RX update calls */
	uint64_t tx_call;  /* TX update calls */
	uint64_t dropped;  /* packets dropped/freed */
	uint64_t burst_rx; /* packets received in full MAX_PKT_BURST bursts */
} __rte_cache_aligned;

/* Tentative definition shared by every translation unit including this
 * header; all copies merge into one object at link time. */
struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];
#endif /* STATS_INTERVAL */
/* Per-port source/destination Ethernet address table. */
extern struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS];
/* Port mask to identify the unprotected ports */
extern uint32_t unprotected_port_mask;
/* Index of SA in single mode */
extern uint32_t single_sa_idx;
/* Main-loop exit flag — presumably set asynchronously on shutdown
 * request (hence volatile); confirm against the definition site. */
extern volatile bool force_quit;
106 static inline uint8_t
107 is_unprotected_port(uint16_t port_id)
109 return unprotected_port_mask & (1 << port_id);
/*
 * Account n received packets for the current lcore; also counts full
 * MAX_PKT_BURST-sized bursts separately.  Compiles to a no-op when
 * STATS_INTERVAL is 0.
 */
static inline void
core_stats_update_rx(int n)
{
#if (STATS_INTERVAL > 0)
	int lcore_id = rte_lcore_id();

	core_statistics[lcore_id].rx += n;
	core_statistics[lcore_id].rx_call++;
	if (n == MAX_PKT_BURST)
		core_statistics[lcore_id].burst_rx += n;
#else
	(void)n; /* silence unused-parameter warnings */
#endif /* STATS_INTERVAL */
}
/*
 * Account n transmitted packets for the current lcore.  Compiles to a
 * no-op when STATS_INTERVAL is 0.
 */
static inline void
core_stats_update_tx(int n)
{
#if (STATS_INTERVAL > 0)
	int lcore_id = rte_lcore_id();

	core_statistics[lcore_id].tx += n;
	core_statistics[lcore_id].tx_call++;
#else
	(void)n; /* silence unused-parameter warnings */
#endif /* STATS_INTERVAL */
}
/*
 * Account n dropped packets for the current lcore.  Compiles to a
 * no-op when STATS_INTERVAL is 0.
 */
static inline void
core_stats_update_drop(int n)
{
#if (STATS_INTERVAL > 0)
	int lcore_id = rte_lcore_id();

	core_statistics[lcore_id].dropped += n;
#else
	(void)n; /* silence unused-parameter warnings */
#endif /* STATS_INTERVAL */
}
/* helper routine to free bulk of packets */
/*
 * Free the first n mbufs of mb[] and record them as dropped in the
 * per-core statistics (a no-op when stats are compiled out).
 */
static inline void
free_pkts(struct rte_mbuf *mb[], uint32_t n)
{
	uint32_t i;

	for (i = 0; i != n; i++)
		rte_pktmbuf_free(mb[i]);

	core_stats_update_drop(n);
}
161 #endif /* _IPSEC_SECGW_H_ */