1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (C) 2020 Marvell International Ltd.
5 #include <rte_cryptodev.h>
6 #include <rte_ethdev.h>
7 #include <rte_eventdev.h>
8 #include <rte_malloc.h>
9 #include <rte_memzone.h>
10 #include <rte_security.h>
11 #include <rte_security_driver.h>
13 #include "otx2_cryptodev_qp.h"
14 #include "otx2_ethdev.h"
15 #include "otx2_ethdev_sec.h"
16 #include "otx2_ipsec_fp.h"
17 #include "otx2_sec_idev.h"
/* Maximum packet length the inline IPsec fast path is programmed for;
 * used below to set ipsec_cfg0.lenm1_max (length-minus-one field).
 */
19 #define ETH_SEC_MAX_PKT_LEN 1450

/* Layout of the 32-bit tag constant handed to hardware for inline IPsec
 * event delivery (written into ipsec_cfg0.tag_const in eth_sec_ipsec_cfg()).
 * NOTE(review): interior members are not visible in this excerpt — the code
 * later assigns tag_const.port and reads tag_const.u32, so this is
 * presumably a union of a u32 with the bitfield below; confirm against the
 * full source.
 */
21 struct eth_sec_tag_const {
25 uint32_t rsvd_11_0 : 12;
27 uint32_t event_type : 4;	/* set to RTE_EVENT_TYPE_ETHDEV below */
28 uint32_t rsvd_31_24 : 8;
/* Crypto algorithms supported by the inline IPsec engine; referenced from
 * both the ingress and egress entries of otx2_eth_sec_capabilities[].
 * Entries: AES-GCM (AEAD), AES-CBC (cipher) and SHA1-HMAC (auth) — the
 * per-algorithm size/range details are elided in this excerpt.
 */
34 static struct rte_cryptodev_capabilities otx2_eth_sec_crypto_caps[] = {
36 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
38 .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
40 .algo = RTE_CRYPTO_AEAD_AES_GCM,
66 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
68 .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
70 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
86 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
88 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
90 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
105 RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
/* Security capabilities reported via otx2_eth_sec_capabilities_get():
 * inline-protocol ESP tunnel for both directions, sharing the crypto
 * capability table above, terminated by an ACTION_TYPE_NONE entry.
 */
108 static const struct rte_security_capability otx2_eth_sec_capabilities[] = {
109 { /* IPsec Inline Protocol ESP Tunnel Ingress */
110 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
111 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
113 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
114 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
115 .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
118 .crypto_capabilities = otx2_eth_sec_crypto_caps,
119 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
121 { /* IPsec Inline Protocol ESP Tunnel Egress */
122 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
123 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
125 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
126 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
127 .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
130 .crypto_capabilities = otx2_eth_sec_crypto_caps,
131 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
134 .action = RTE_SECURITY_ACTION_TYPE_NONE	/* end-of-list sentinel */
/* Build the per-port memzone name for the inbound SA database; used by
 * init (reserve), cfg (lookup) and fini (free) so all three agree on the
 * same name for a given port.
 */
139 in_sa_mz_name_get(char *name, int size, uint16_t port)
141 snprintf(name, size, "otx2_ipsec_in_sadb_%u", port);
/* rte_security op: report the driver's private session size so the
 * application can size its session mempool.
 */
145 otx2_eth_sec_session_get_size(void *device __rte_unused)
147 return sizeof(struct otx2_sec_session);
/* rte_security op: expose the static capability table defined above. */
150 static const struct rte_security_capability *
151 otx2_eth_sec_capabilities_get(void *device __rte_unused)
153 return otx2_eth_sec_capabilities;
/* rte_security ops table installed on the security context in
 * otx2_eth_sec_ctx_create(). Only the ops visible in this excerpt are
 * listed; session create/destroy ops may be elided here.
 */
156 static struct rte_security_ops otx2_eth_sec_ops = {
157 .session_get_size = otx2_eth_sec_session_get_size,
158 .capabilities_get = otx2_eth_sec_capabilities_get
/* Allocate and attach an rte_security context to the ethdev.
 * Also initializes the per-port security-idev configuration.
 * NOTE(review): the rte_malloc NULL check and the handling of the
 * otx2_sec_idev_cfg_init() return value are elided in this excerpt —
 * confirm both error paths (and that ctx is freed on cfg_init failure)
 * against the full source.
 */
162 otx2_eth_sec_ctx_create(struct rte_eth_dev *eth_dev)
164 struct rte_security_ctx *ctx;
167 ctx = rte_malloc("otx2_eth_sec_ctx",
168 sizeof(struct rte_security_ctx), 0);
172 ret = otx2_sec_idev_cfg_init(eth_dev->data->port_id);
180 ctx->device = eth_dev;
181 ctx->ops = &otx2_eth_sec_ops;
184 eth_dev->security_ctx = ctx;
/* Release the security context allocated in otx2_eth_sec_ctx_create().
 * rte_free(NULL) is a no-op, so this is safe even if create failed.
 */
190 otx2_eth_sec_ctx_destroy(struct rte_eth_dev *eth_dev)
192 rte_free(eth_dev->security_ctx);
/* Program the NIX LF for inline inbound IPsec via the AF mailbox.
 *
 * @param eth_dev  Port whose LF is being configured.
 * @param tt       SSO tag type to stamp on delivered events
 *                 (caller passes SSO_TT_ORDERED).
 *
 * Points the hardware at the inbound SA database memzone reserved in
 * otx2_eth_sec_init(), sets the event tag constant (event type + port id),
 * the per-SA size (log2), the max packet length, and the SA index
 * width/range derived from ipsec_in_max_spi.
 *
 * @return result of otx2_mbox_process() (0 on success).
 * NOTE(review): the NULL check on the memzone lookup is elided in this
 * excerpt — confirm against the full source.
 */
196 eth_sec_ipsec_cfg(struct rte_eth_dev *eth_dev, uint8_t tt)
198 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
199 uint16_t port = eth_dev->data->port_id;
200 struct nix_inline_ipsec_lf_cfg *req;
201 struct otx2_mbox *mbox = dev->mbox;
202 struct eth_sec_tag_const tag_const;
203 char name[RTE_MEMZONE_NAMESIZE];
204 const struct rte_memzone *mz;
206 in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
207 mz = rte_memzone_lookup(name);
211 req = otx2_mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
213 req->sa_base_addr = mz->iova;
215 req->ipsec_cfg0.tt = tt;
/* Encode event type and port into the 32-bit tag constant. */
218 tag_const.event_type = RTE_EVENT_TYPE_ETHDEV;
219 tag_const.port = port;
220 req->ipsec_cfg0.tag_const = tag_const.u32;
/* Hardware takes the SA size as a power-of-two exponent. */
222 req->ipsec_cfg0.sa_pow2_size =
223 rte_log2_u32(sizeof(struct otx2_ipsec_fp_in_sa));
224 req->ipsec_cfg0.lenm1_max = ETH_SEC_MAX_PKT_LEN - 1;
/* SA index width/range sized to the configured max inbound SPI. */
226 req->ipsec_cfg1.sa_idx_w = rte_log2_u32(dev->ipsec_in_max_spi);
227 req->ipsec_cfg1.sa_idx_max = dev->ipsec_in_max_spi - 1;
229 return otx2_mbox_process(mbox);
/* Set up inline IPsec for a port: reserve a physically contiguous,
 * zeroed inbound SA database memzone (one otx2_ipsec_fp_in_sa slot per
 * possible SPI) and program the NIX LF through eth_sec_ipsec_cfg().
 *
 * No-op when neither RX nor TX security offload is enabled.
 * NOTE(review): the early-return, memzone NULL check and the error
 * goto/cleanup paths (which call otx2_eth_sec_fini()) are elided in this
 * excerpt — confirm against the full source.
 */
233 otx2_eth_sec_init(struct rte_eth_dev *eth_dev)
235 const size_t sa_width = sizeof(struct otx2_ipsec_fp_in_sa);
236 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
237 uint16_t port = eth_dev->data->port_id;
238 char name[RTE_MEMZONE_NAMESIZE];
239 const struct rte_memzone *mz;
/* Hardware constraint: SA entry must be a power of two in [32, 512]. */
243 RTE_BUILD_BUG_ON(sa_width < 32 || sa_width > 512 ||
244 !RTE_IS_POWER_OF_2(sa_width));
246 if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
247 !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
250 nb_sa = dev->ipsec_in_max_spi;
251 mz_sz = nb_sa * sa_width;
252 in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
/* IOVA-contiguous so hardware can index the SA table by base + offset. */
253 mz = rte_memzone_reserve_aligned(name, mz_sz, rte_socket_id(),
254 RTE_MEMZONE_IOVA_CONTIG, OTX2_ALIGN);
257 otx2_err("Could not allocate inbound SA DB");
/* Invalid (all-zero) SAs until sessions are installed. */
261 memset(mz->addr, 0, mz_sz);
263 ret = eth_sec_ipsec_cfg(eth_dev, SSO_TT_ORDERED);
265 otx2_err("Could not configure inline IPsec");
272 otx2_err("Could not configure device for security");
273 otx2_eth_sec_fini(eth_dev);
/* Tear down inline IPsec state for a port: free the inbound SA database
 * memzone reserved in otx2_eth_sec_init(). Mirrors init's guard — does
 * nothing when security offload was never enabled (in which case the
 * lookup returns NULL and rte_memzone_free(NULL) is harmless).
 */
278 otx2_eth_sec_fini(struct rte_eth_dev *eth_dev)
280 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
281 uint16_t port = eth_dev->data->port_id;
282 char name[RTE_MEMZONE_NAMESIZE];
284 if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
285 !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
288 in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
289 rte_memzone_free(rte_memzone_lookup(name));