1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (C) 2020 Marvell International Ltd.
5 #include <rte_cryptodev.h>
6 #include <rte_ethdev.h>
7 #include <rte_eventdev.h>
8 #include <rte_malloc.h>
9 #include <rte_memzone.h>
10 #include <rte_security.h>
11 #include <rte_security_driver.h>
13 #include "otx2_ethdev.h"
14 #include "otx2_ethdev_sec.h"
15 #include "otx2_ipsec_fp.h"
/* Maximum inline IPsec packet length; used below to program lenm1_max. */
17 #define ETH_SEC_MAX_PKT_LEN 1450

/*
 * Bit-field view of the SSO tag constant programmed via ipsec_cfg0.tag_const,
 * so inbound IPsec events carry the event type (and, presumably, the port —
 * a port field is assigned in eth_sec_ipsec_cfg() but its declaration is not
 * visible in this view; the enclosing union providing .u32 is also outside
 * this view — TODO confirm full layout).
 */
19 struct eth_sec_tag_const {
23 	uint32_t rsvd_11_0 : 12;
25 	uint32_t event_type : 4;
26 	uint32_t rsvd_31_24 : 8;
/*
 * Symmetric crypto algorithms usable with the inline IPsec security
 * capabilities below: AES-GCM (AEAD), AES-CBC (cipher) and HMAC-SHA1 (auth).
 * NOTE(review): the per-algorithm key/IV/digest size sub-structures are not
 * visible in this view — confirm against the full file.
 */
32 static struct rte_cryptodev_capabilities otx2_eth_sec_crypto_caps[] = {
34 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
36 			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
38 				.algo = RTE_CRYPTO_AEAD_AES_GCM,
64 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
66 			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
68 				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
84 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
86 			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
88 				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
103 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
/*
 * rte_security capabilities advertised by this PMD: inline-protocol ESP
 * tunnel-mode IPsec for both ingress and egress, terminated by an
 * ACTION_TYPE_NONE sentinel entry. Returned by
 * otx2_eth_sec_capabilities_get().
 */
106 static const struct rte_security_capability otx2_eth_sec_capabilities[] = {
107 	{ /* IPsec Inline Protocol ESP Tunnel Ingress */
108 		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
109 		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
111 			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
112 			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
113 			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
116 		.crypto_capabilities = otx2_eth_sec_crypto_caps,
117 		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
119 	{ /* IPsec Inline Protocol ESP Tunnel Egress */
120 		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
121 		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
123 			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
124 			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
125 			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
128 		.crypto_capabilities = otx2_eth_sec_crypto_caps,
129 		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
132 		.action = RTE_SECURITY_ACTION_TYPE_NONE
/*
 * Format the per-port inbound SA database memzone name into @name
 * (at most @size bytes). Shared by init, fini and eth_sec_ipsec_cfg()
 * so all three agree on the same lookup key.
 */
137 in_sa_mz_name_get(char *name, int size, uint16_t port)
139 	snprintf(name, size, "otx2_ipsec_in_sadb_%u", port);
/* rte_security .session_get_size op: bytes needed per security session. */
143 otx2_eth_sec_session_get_size(void *device __rte_unused)
145 	return sizeof(struct otx2_sec_session);
/* rte_security .capabilities_get op: advertise the static table above. */
148 static const struct rte_security_capability *
149 otx2_eth_sec_capabilities_get(void *device __rte_unused)
151 	return otx2_eth_sec_capabilities;
/*
 * rte_security ops table installed on the ctx in otx2_eth_sec_ctx_create().
 * Only the ops visible here are wired up in this view; other members may be
 * set in lines elided from this chunk — TODO confirm.
 */
154 static struct rte_security_ops otx2_eth_sec_ops = {
155 	.session_get_size = otx2_eth_sec_session_get_size,
156 	.capabilities_get = otx2_eth_sec_capabilities_get
/*
 * Allocate and attach a security context to @eth_dev, pointing its ops at
 * otx2_eth_sec_ops. Freed by otx2_eth_sec_ctx_destroy().
 * NOTE(review): the rte_malloc() failure branch and the return statement are
 * not visible in this view — confirm the NULL check exists in the full file.
 */
160 otx2_eth_sec_ctx_create(struct rte_eth_dev *eth_dev)
162 	struct rte_security_ctx *ctx;
164 	ctx = rte_malloc("otx2_eth_sec_ctx",
165 			 sizeof(struct rte_security_ctx), 0);
171 	ctx->device = eth_dev;
172 	ctx->ops = &otx2_eth_sec_ops;
175 	eth_dev->security_ctx = ctx;
/* Release the security context allocated by otx2_eth_sec_ctx_create().
 * rte_free(NULL) is a no-op, so this is safe if create never ran.
 */
181 otx2_eth_sec_ctx_destroy(struct rte_eth_dev *eth_dev)
183 	rte_free(eth_dev->security_ctx);
/*
 * Program the NIX LF inline-IPsec config via mbox: inbound SA base (the
 * memzone reserved in otx2_eth_sec_init()), SSO tag type @tt, the tag
 * constant (event type + port), SA entry size, max packet length and the
 * SA index width/ceiling derived from ipsec_in_max_spi.
 * Returns the otx2_mbox_process() result (0 on success, negative on error
 * per mbox convention — confirm against otx2_mbox docs).
 * NOTE(review): the mz == NULL lookup-failure branch is not visible in this
 * view — confirm it exists before mz->iova is dereferenced.
 */
187 eth_sec_ipsec_cfg(struct rte_eth_dev *eth_dev, uint8_t tt)
189 	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
190 	uint16_t port = eth_dev->data->port_id;
191 	struct nix_inline_ipsec_lf_cfg *req;
192 	struct otx2_mbox *mbox = dev->mbox;
193 	struct eth_sec_tag_const tag_const;
194 	char name[RTE_MEMZONE_NAMESIZE];
195 	const struct rte_memzone *mz;
197 	in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
198 	mz = rte_memzone_lookup(name);
202 	req = otx2_mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
204 	req->sa_base_addr = mz->iova;
206 	req->ipsec_cfg0.tt = tt;
	/* Tag constant: inbound IPsec events are delivered as ETHDEV events
	 * tagged with this port.
	 */
209 	tag_const.event_type = RTE_EVENT_TYPE_ETHDEV;
210 	tag_const.port = port;
211 	req->ipsec_cfg0.tag_const = tag_const.u32;
	/* SA table is an array of power-of-2-sized entries; hardware takes
	 * log2 of the entry size.
	 */
213 	req->ipsec_cfg0.sa_pow2_size =
214 			rte_log2_u32(sizeof(struct otx2_ipsec_fp_in_sa));
215 	req->ipsec_cfg0.lenm1_max = ETH_SEC_MAX_PKT_LEN - 1;
	/* SPI is used directly as SA index, so width/ceiling come from the
	 * configured max inbound SPI.
	 */
217 	req->ipsec_cfg1.sa_idx_w = rte_log2_u32(dev->ipsec_in_max_spi);
218 	req->ipsec_cfg1.sa_idx_max = dev->ipsec_in_max_spi - 1;
220 	return otx2_mbox_process(mbox);
/*
 * Device-init hook for inline IPsec: no-op unless RX or TX security offload
 * is enabled; otherwise reserve an IOVA-contiguous, zeroed memzone holding
 * ipsec_in_max_spi inbound SA entries and program the NIX LF inline-IPsec
 * config (ordered tag type). On failure the error path (partially elided
 * from this view) logs and calls otx2_eth_sec_fini() to release the memzone.
 * NOTE(review): declarations of nb_sa/mz_sz/ret and the goto labels are not
 * visible in this chunk.
 */
224 otx2_eth_sec_init(struct rte_eth_dev *eth_dev)
226 	const size_t sa_width = sizeof(struct otx2_ipsec_fp_in_sa);
227 	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
228 	uint16_t port = eth_dev->data->port_id;
229 	char name[RTE_MEMZONE_NAMESIZE];
230 	const struct rte_memzone *mz;
	/* Hardware requires the SA entry size to be a power of two in
	 * [32, 512] (it is programmed as log2 in eth_sec_ipsec_cfg()).
	 */
234 	RTE_BUILD_BUG_ON(sa_width < 32 || sa_width > 512 ||
235 			 !RTE_IS_POWER_OF_2(sa_width));
237 	if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
238 	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
241 	nb_sa = dev->ipsec_in_max_spi;
242 	mz_sz = nb_sa * sa_width;
243 	in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
	/* IOVA_CONTIG: hardware indexes the SA table by physical offset,
	 * so the whole DB must be physically contiguous.
	 */
244 	mz = rte_memzone_reserve_aligned(name, mz_sz, rte_socket_id(),
245 					 RTE_MEMZONE_IOVA_CONTIG, OTX2_ALIGN);
248 		otx2_err("Could not allocate inbound SA DB");
	/* Start with all SA entries invalid. */
252 	memset(mz->addr, 0, mz_sz);
254 	ret = eth_sec_ipsec_cfg(eth_dev, SSO_TT_ORDERED);
256 		otx2_err("Could not configure inline IPsec");
263 	otx2_err("Could not configure device for security");
264 	otx2_eth_sec_fini(eth_dev);
/*
 * Device-teardown counterpart of otx2_eth_sec_init(): if security offload
 * was enabled, free the inbound SA DB memzone looked up by its per-port
 * name. rte_memzone_free(NULL) tolerates a failed lookup — TODO confirm
 * against rte_memzone documentation.
 */
269 otx2_eth_sec_fini(struct rte_eth_dev *eth_dev)
271 	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
272 	uint16_t port = eth_dev->data->port_id;
273 	char name[RTE_MEMZONE_NAMESIZE];
275 	if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
276 	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
279 	in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
280 	rte_memzone_free(rte_memzone_lookup(name));