1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (C) 2020 Marvell International Ltd.
5 #include <rte_cryptodev.h>
7 #include <rte_ethdev.h>
8 #include <rte_eventdev.h>
10 #include <rte_malloc.h>
11 #include <rte_memzone.h>
12 #include <rte_security.h>
13 #include <rte_security_driver.h>
16 #include "otx2_common.h"
17 #include "otx2_cryptodev_qp.h"
18 #include "otx2_ethdev.h"
19 #include "otx2_ethdev_sec.h"
20 #include "otx2_ipsec_fp.h"
21 #include "otx2_sec_idev.h"
22 #include "otx2_security.h"
24 #define ERR_STR_SZ 256
/*
 * Bit layout of the 32-bit tag constant programmed into the NIX inline
 * IPsec LF config (used by eth_sec_ipsec_cfg()). Only some fields are
 * visible in this view; the port field and the u32 accessor used at
 * L345-L347 are in elided lines — TODO confirm full layout upstream.
 */
26 struct eth_sec_tag_const {
30 uint32_t rsvd_11_0 : 12;
/* Event type nibble (set to RTE_EVENT_TYPE_ETHDEV in eth_sec_ipsec_cfg) */
32 uint32_t event_type : 4;
33 uint32_t rsvd_31_24 : 8;
/*
 * Symmetric crypto algorithms supported for inline IPsec offload:
 * AES-GCM (AEAD), AES-CBC (cipher) and SHA1-HMAC (auth).
 * Key/digest/IV size ranges are in lines elided from this view.
 */
39 static struct rte_cryptodev_capabilities otx2_eth_sec_crypto_caps[] = {
/* AES-GCM AEAD */
41 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
43 .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
45 .algo = RTE_CRYPTO_AEAD_AES_GCM,
/* AES-CBC cipher */
71 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
73 .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
75 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
/* SHA1-HMAC auth */
91 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
93 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
95 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
110 RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
/*
 * Security capabilities exposed via otx2_eth_sec_capabilities_get():
 * inline-protocol ESP tunnel in both directions, both referencing
 * otx2_eth_sec_crypto_caps, terminated by an ACTION_TYPE_NONE entry.
 */
113 static const struct rte_security_capability otx2_eth_sec_capabilities[] = {
114 { /* IPsec Inline Protocol ESP Tunnel Ingress */
115 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
116 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
118 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
119 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
120 .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
123 .crypto_capabilities = otx2_eth_sec_crypto_caps,
124 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
126 { /* IPsec Inline Protocol ESP Tunnel Egress */
127 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
128 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
130 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
131 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
132 .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
135 .crypto_capabilities = otx2_eth_sec_crypto_caps,
136 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
/* List terminator */
139 .action = RTE_SECURITY_ACTION_TYPE_NONE
/*
 * Free this port's inbound SA pointer table inside the shared fastpath
 * lookup memzone. The memzone holds a per-port table of uint64_t*
 * entries starting at offset OTX2_NIX_SA_TBL_START; silently returns
 * if the memzone or the port's entry does not exist.
 * NOTE(review): the entry is presumably reset to NULL after the free
 * in an elided line — confirm against the full source.
 */
144 lookup_mem_sa_tbl_clear(struct rte_eth_dev *eth_dev)
146 static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM;
147 uint16_t port = eth_dev->data->port_id;
148 const struct rte_memzone *mz;
152 mz = rte_memzone_lookup(name);
/* Per-port table of SA pointers lives at a fixed offset in the memzone */
158 sa_tbl = (uint64_t **)RTE_PTR_ADD(mem, OTX2_NIX_SA_TBL_START);
159 if (sa_tbl[port] == NULL)
162 rte_free(sa_tbl[port]);
/*
 * Record 'sa' at index 'spi' in this port's fastpath SA lookup table,
 * allocating the per-port table (ipsec_in_max_spi entries) on first
 * use. On failure a message is written into the caller-supplied
 * err_str buffer (ERR_STR_SZ bytes) and a non-zero value is returned
 * (return statements are in elided lines).
 */
167 lookup_mem_sa_index_update(struct rte_eth_dev *eth_dev, int spi, void *sa,
170 static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM;
171 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
172 uint16_t port = eth_dev->data->port_id;
173 const struct rte_memzone *mz;
177 mz = rte_memzone_lookup(name);
179 snprintf(err_str, ERR_STR_SZ,
180 "Could not find fastpath lookup table");
186 sa_tbl = (uint64_t **)RTE_PTR_ADD(mem, OTX2_NIX_SA_TBL_START);
/* Lazily allocate the per-port table, sized by the max supported SPI */
188 if (sa_tbl[port] == NULL) {
189 sa_tbl[port] = rte_malloc(NULL, dev->ipsec_in_max_spi *
190 sizeof(uint64_t), 0);
/* SPI is used directly as the table index (SPI-to-SA direct mapping) */
193 sa_tbl[port][spi] = (uint64_t)sa;
/* Compose the per-port inbound SA DB memzone name into 'name'. */
199 in_sa_mz_name_get(char *name, int size, uint16_t port)
201 snprintf(name, size, "otx2_ipsec_in_sadb_%u", port);
/*
 * Return a pointer to the inbound SA entry 'sa_index' for 'port' by
 * indexing into the port's "otx2_ipsec_in_sadb_<port>" memzone.
 * Logs an error (and presumably returns NULL — line elided) when the
 * memzone is missing. No bounds check on sa_index here; callers
 * validate the SPI against ipsec_in_max_spi.
 */
204 static struct otx2_ipsec_fp_in_sa *
205 in_sa_get(uint16_t port, int sa_index)
207 char name[RTE_MEMZONE_NAMESIZE];
208 struct otx2_ipsec_fp_in_sa *sa;
209 const struct rte_memzone *mz;
211 in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
212 mz = rte_memzone_lookup(name);
214 otx2_err("Could not get the memzone reserved for IN SA DB");
220 return sa + sa_index;
/*
 * Precompute per-session length constants used by the datapath to size
 * the post-encap packet: 'partial_len' accumulates the fixed overhead
 * (outer IP header, ESP/AH header, optional UDP encap header, IV and
 * ICV lengths) and 'roundup_len'/'roundup_byte' capture the padding
 * granularity of the chosen cipher.
 */
224 ipsec_sa_const_set(struct rte_security_ipsec_xform *ipsec,
225 struct rte_crypto_sym_xform *xform,
226 struct otx2_sec_session_ipsec_ip *sess)
228 struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
/* Start from the outer IPv4 header size */
230 sess->partial_len = sizeof(struct rte_ipv4_hdr);
232 if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
233 sess->partial_len += sizeof(struct rte_esp_hdr);
234 sess->roundup_len = sizeof(struct rte_esp_tail);
235 } else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH) {
236 sess->partial_len += OTX2_SEC_AH_HDR_LEN;
/* UDP encapsulation (NAT-T) adds a UDP header to every packet */
241 if (ipsec->options.udp_encap)
242 sess->partial_len += sizeof(struct rte_udp_hdr);
/* AEAD (AES-GCM) carries both IV and MAC overhead */
244 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
245 if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
246 sess->partial_len += OTX2_SEC_AES_GCM_IV_LEN;
247 sess->partial_len += OTX2_SEC_AES_GCM_MAC_LEN;
248 sess->roundup_byte = OTX2_SEC_AES_GCM_ROUNDUP_BYTE_LEN;
/*
 * Non-AEAD: xform chain order depends on direction — egress is
 * cipher-then-auth, ingress is auth-then-cipher.
 */
253 if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
254 cipher_xform = xform;
255 auth_xform = xform->next;
256 } else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
258 cipher_xform = xform->next;
262 if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
263 sess->partial_len += OTX2_SEC_AES_CBC_IV_LEN;
264 sess->roundup_byte = OTX2_SEC_AES_CBC_ROUNDUP_BYTE_LEN;
269 if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
270 sess->partial_len += OTX2_SEC_SHA1_HMAC_LEN;
/*
 * Compute the HMAC ipad/opad for 'auth_key' by submitting a
 * WRITE_HMAC_IPAD_OPAD instruction to the CPT engine via 'qp', then
 * copy the 48-byte result into 'hmac_key'. Synchronous: busy-polls
 * the completion code with a 5-second timeout. DMA-visible state
 * (result word + key buffer) lives in a heap-allocated, CPT-aligned
 * 'struct inst_data' whose definition is in elided lines.
 */
278 hmac_init(struct otx2_ipsec_fp_sa_ctl *ctl, struct otx2_cpt_qp *qp,
279 const uint8_t *auth_key, int len, uint8_t *hmac_key)
282 struct otx2_cpt_res cpt_res;
286 volatile struct otx2_cpt_res *res;
287 uint64_t timeout, lmt_status;
288 struct otx2_cpt_inst_s inst;
292 memset(&inst, 0, sizeof(struct otx2_cpt_inst_s));
/* Zeroed, CPT-aligned scratch area the engine reads from / writes to */
294 md = rte_zmalloc(NULL, sizeof(struct inst_data), OTX2_CPT_RES_ALIGN);
298 memcpy(md->buffer, auth_key, len);
/* The engine needs IOVA addresses, not virtual pointers */
300 md_iova = rte_malloc_virt2iova(md);
301 if (md_iova == RTE_BAD_IOVA) {
306 inst.res_addr = md_iova + offsetof(struct inst_data, cpt_res);
307 inst.opcode = OTX2_CPT_OP_WRITE_HMAC_IPAD_OPAD;
/* param2 carries the auth algorithm selected in the SA control word */
308 inst.param2 = ctl->auth_type;
310 inst.dptr = md_iova + offsetof(struct inst_data, buffer);
311 inst.rptr = inst.dptr;
312 inst.egrp = OTX2_CPT_EGRP_INLINE_IPSEC;
/* 0xff marks "not yet completed"; the engine overwrites it when done */
314 md->cpt_res.compcode = 0;
315 md->cpt_res.uc_compcode = 0xff;
317 timeout = rte_get_timer_cycles() + 5 * rte_get_timer_hz();
/* Retry LMTST submission until the store is accepted */
322 otx2_lmt_mov(qp->lmtline, &inst, 2);
323 lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
324 } while (lmt_status == 0);
326 res = (volatile struct otx2_cpt_res *)&md->cpt_res;
328 /* Wait until instruction completes or times out */
329 while (res->uc_compcode == 0xff) {
330 if (rte_get_timer_cycles() > timeout)
334 if (res->u16[0] != OTX2_SEC_COMP_GOOD) {
339 /* Retrieve the ipad and opad from rptr */
340 memcpy(hmac_key, md->buffer, 48);
/*
 * Create an outbound (egress) inline IPsec session: populate the
 * otx2_ipsec_fp_out_sa with keys/tunnel headers, precompute the CPT
 * instruction word 7, bind a CPT queue pair for the TX datapath, set
 * the SA control word and derive the HMAC ipad/opad. The SA object
 * itself is obtained in elided lines; on error the acquired QP is
 * released before returning.
 */
350 eth_sec_ipsec_out_sess_create(struct rte_eth_dev *eth_dev,
351 struct rte_security_ipsec_xform *ipsec,
352 struct rte_crypto_sym_xform *crypto_xform,
353 struct rte_security_session *sec_sess)
355 struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
356 struct otx2_sec_session_ipsec_ip *sess;
357 uint16_t port = eth_dev->data->port_id;
358 int cipher_key_len, auth_key_len, ret;
359 const uint8_t *cipher_key, *auth_key;
360 struct otx2_ipsec_fp_sa_ctl *ctl;
361 struct otx2_ipsec_fp_out_sa *sa;
362 struct otx2_sec_session *priv;
363 struct otx2_cpt_inst_s inst;
364 struct otx2_cpt_qp *qp;
366 priv = get_sec_session_private_data(sec_sess);
367 priv->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
368 sess = &priv->ipsec.ip;
/* Refuse to overwrite an SA that is already in use */
373 otx2_err("SA already registered");
377 memset(sess, 0, sizeof(struct otx2_sec_session_ipsec_ip));
/* Precompute datapath length constants for this SA */
381 ret = ipsec_sa_const_set(ipsec, crypto_xform, sess);
/* For AEAD, the 4-byte salt becomes the SA nonce */
385 if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
386 memcpy(sa->nonce, &ipsec->salt, 4);
388 if (ipsec->options.udp_encap == 1) {
393 if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
394 /* Start ip id from 1 */
/* Copy outer tunnel endpoint addresses for IPv4 tunnel mode */
397 if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
398 memcpy(&sa->ip_src, &ipsec->tunnel.ipv4.src_ip,
399 sizeof(struct in_addr));
400 memcpy(&sa->ip_dst, &ipsec->tunnel.ipv4.dst_ip,
401 sizeof(struct in_addr));
/* Egress chains cipher first, auth second */
409 cipher_xform = crypto_xform;
410 auth_xform = crypto_xform->next;
416 if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
417 cipher_key = crypto_xform->aead.key.data;
418 cipher_key_len = crypto_xform->aead.key.length;
420 cipher_key = cipher_xform->cipher.key.data;
421 cipher_key_len = cipher_xform->cipher.key.length;
422 auth_key = auth_xform->auth.key.data;
423 auth_key_len = auth_xform->auth.key.length;
426 if (cipher_key_len != 0)
427 memcpy(sa->cipher_key, cipher_key, cipher_key_len);
431 /* Determine word 7 of CPT instruction */
433 inst.egrp = OTX2_CPT_EGRP_INLINE_IPSEC;
434 inst.cptr = rte_mempool_virt2iova(sa);
435 sess->inst_w7 = inst.u64[7];
437 /* Get CPT QP to be used for this SA */
438 ret = otx2_sec_idev_tx_cpt_qp_get(port, &qp);
/* Cache QP registers in the session for fast TX-path submission */
444 sess->cpt_lmtline = qp->lmtline;
445 sess->cpt_nq_reg = qp->lf_nq_reg;
447 /* Populate control word */
448 ret = ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl);
/* Derive ipad/opad on the CPT engine when an auth key is present */
452 if (auth_key_len && auth_key) {
453 ret = hmac_init(ctl, qp, auth_key, auth_key_len, sa->hmac_key);
/* Error path: release the CPT QP acquired above */
463 otx2_sec_idev_tx_cpt_qp_put(sess->qp);
/*
 * Create an inbound (ingress) inline IPsec session: validate the SPI,
 * fill the SA-DB entry for this port/SPI, register the SA pointer in
 * the fastpath lookup table, set the SA control word, derive HMAC
 * ipad/opad and optionally allocate the anti-replay window. The whole
 * update runs under dev->ipsec_tbl_lock; errors are reported once at
 * the common exit via err_str and the SA entry is wiped on failure.
 */
468 eth_sec_ipsec_in_sess_create(struct rte_eth_dev *eth_dev,
469 struct rte_security_ipsec_xform *ipsec,
470 struct rte_crypto_sym_xform *crypto_xform,
471 struct rte_security_session *sec_sess)
473 struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
474 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
475 struct otx2_sec_session_ipsec_ip *sess;
476 uint16_t port = eth_dev->data->port_id;
477 int cipher_key_len, auth_key_len, ret;
478 const uint8_t *cipher_key, *auth_key;
479 struct otx2_ipsec_fp_sa_ctl *ctl;
480 struct otx2_ipsec_fp_in_sa *sa;
481 struct otx2_sec_session *priv;
482 char err_str[ERR_STR_SZ];
483 struct otx2_cpt_qp *qp;
485 memset(err_str, 0, ERR_STR_SZ);
/* SPI doubles as the SA-DB index, so it must be below the table size */
487 if (ipsec->spi >= dev->ipsec_in_max_spi) {
488 otx2_err("SPI exceeds max supported");
492 sa = in_sa_get(port, ipsec->spi);
498 priv = get_sec_session_private_data(sec_sess);
499 priv->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
500 sess = &priv->ipsec.ip;
/* Serialize all SA-table mutations for this device */
502 rte_spinlock_lock(&dev->ipsec_tbl_lock);
505 snprintf(err_str, ERR_STR_SZ, "SA already registered");
510 memset(sa, 0, sizeof(struct otx2_ipsec_fp_in_sa));
/* Ingress chains auth first, cipher second */
512 auth_xform = crypto_xform;
513 cipher_xform = crypto_xform->next;
519 if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
520 if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
521 memcpy(sa->nonce, &ipsec->salt, 4);
522 cipher_key = crypto_xform->aead.key.data;
523 cipher_key_len = crypto_xform->aead.key.length;
525 cipher_key = cipher_xform->cipher.key.data;
526 cipher_key_len = cipher_xform->cipher.key.length;
527 auth_key = auth_xform->auth.key.data;
528 auth_key_len = auth_xform->auth.key.length;
531 if (cipher_key_len != 0) {
532 memcpy(sa->cipher_key, cipher_key, cipher_key_len);
534 snprintf(err_str, ERR_STR_SZ, "Invalid cipher key len");
/* Userdata is returned to the app with packets matching this SA */
541 sa->userdata = priv->userdata;
543 sa->replay_win_sz = ipsec->replay_win_sz;
/* Publish the SA pointer in the fastpath SPI lookup table */
545 if (lookup_mem_sa_index_update(eth_dev, ipsec->spi, sa, err_str)) {
550 ret = ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl);
552 snprintf(err_str, ERR_STR_SZ,
553 "Could not set SA CTL word (err: %d)", ret);
557 if (auth_key_len && auth_key) {
558 /* Get a queue pair for HMAC init */
559 ret = otx2_sec_idev_tx_cpt_qp_get(port, &qp);
561 snprintf(err_str, ERR_STR_SZ, "Could not get CPT QP");
/* QP is only borrowed for the synchronous ipad/opad derivation */
565 ret = hmac_init(ctl, qp, auth_key, auth_key_len, sa->hmac_key);
566 otx2_sec_idev_tx_cpt_qp_put(qp);
568 snprintf(err_str, ERR_STR_SZ, "Could not put CPT QP");
/* Optional anti-replay window */
573 if (sa->replay_win_sz) {
574 if (sa->replay_win_sz > OTX2_IPSEC_MAX_REPLAY_WIN_SZ) {
575 snprintf(err_str, ERR_STR_SZ,
576 "Replay window size is not supported");
580 sa->replay = rte_zmalloc(NULL, sizeof(struct otx2_ipsec_replay),
582 if (sa->replay == NULL) {
583 snprintf(err_str, ERR_STR_SZ,
584 "Could not allocate memory");
589 rte_spinlock_init(&sa->replay->lock);
591 * Set window bottom to 1, base and top to size of
594 sa->replay->winb = 1;
595 sa->replay->wint = sa->replay_win_sz;
596 sa->replay->base = sa->replay_win_sz;
/* Success path: drop the table lock */
604 rte_spinlock_unlock(&dev->ipsec_tbl_lock);
/* Error path: wipe the partially-filled SA entry, then report */
608 memset(sa, 0, sizeof(struct otx2_ipsec_fp_in_sa));
611 rte_spinlock_unlock(&dev->ipsec_tbl_lock);
613 otx2_err("%s", err_str);
/*
 * Verify the IPsec + crypto xform combination, then dispatch to the
 * ingress or egress session-create routine based on SA direction.
 */
619 eth_sec_ipsec_sess_create(struct rte_eth_dev *eth_dev,
620 struct rte_security_ipsec_xform *ipsec,
621 struct rte_crypto_sym_xform *crypto_xform,
622 struct rte_security_session *sess)
626 ret = ipsec_fp_xform_verify(ipsec, crypto_xform);
630 if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
631 return eth_sec_ipsec_in_sess_create(eth_dev, ipsec,
634 return eth_sec_ipsec_out_sess_create(eth_dev, ipsec,
/*
 * rte_security .session_create callback. Accepts only inline-protocol
 * IPsec sessions; allocates the private session object from 'mempool',
 * stores the application userdata and delegates to
 * eth_sec_ipsec_sess_create(). On failure the private data is returned
 * to the mempool and detached from the session.
 */
639 otx2_eth_sec_session_create(void *device,
640 struct rte_security_session_conf *conf,
641 struct rte_security_session *sess,
642 struct rte_mempool *mempool)
644 struct otx2_sec_session *priv;
/* Only inline-protocol offload is supported by this driver */
647 if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
650 if (rte_mempool_get(mempool, (void **)&priv)) {
651 otx2_err("Could not allocate security session private data");
655 set_sec_session_private_data(sess, priv);
658 * Save userdata provided by the application. For ingress packets, this
659 * could be used to identify the SA.
661 priv->userdata = conf->userdata;
663 if (conf->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
664 ret = eth_sec_ipsec_sess_create(device, &conf->ipsec,
/* Error path: give the private object back and unlink it */
676 rte_mempool_put(mempool, priv);
677 set_sec_session_private_data(sess, NULL);
/*
 * Release the anti-replay window memory attached to an inbound SA, if
 * one was allocated. NOTE(review): rte_free(NULL) is a no-op, so the
 * 'sa->replay' guard is redundant; kept as-is since surrounding lines
 * are elided from this view.
 */
682 otx2_eth_sec_free_anti_replay(struct otx2_ipsec_fp_in_sa *sa)
685 if (sa->replay_win_sz && sa->replay)
686 rte_free(sa->replay);
/*
 * rte_security .session_destroy callback. For ingress sessions: under
 * ipsec_tbl_lock, free the anti-replay window and clear the SA table
 * entry. For all sessions: release the CPT QP (egress datapath QP) if
 * one was bound, then return the private object to its mempool.
 */
691 otx2_eth_sec_session_destroy(void *device,
692 struct rte_security_session *sess)
694 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(device);
695 struct otx2_sec_session_ipsec_ip *sess_ip;
696 struct otx2_ipsec_fp_in_sa *sa;
697 struct otx2_sec_session *priv;
698 struct rte_mempool *sess_mp;
701 priv = get_sec_session_private_data(sess);
705 sess_ip = &priv->ipsec.ip;
707 if (priv->ipsec.dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
708 rte_spinlock_lock(&dev->ipsec_tbl_lock);
711 /* Release the anti replay window */
712 otx2_eth_sec_free_anti_replay(sa)
714 /* Clear SA table entry */
720 rte_spinlock_unlock(&dev->ipsec_tbl_lock);
723 /* Release CPT LF used for this session */
724 if (sess_ip->qp != NULL) {
725 ret = otx2_sec_idev_tx_cpt_qp_put(sess_ip->qp);
/* Recover the owning mempool from the object itself */
730 sess_mp = rte_mempool_from_obj(priv);
732 set_sec_session_private_data(sess, NULL);
733 rte_mempool_put(sess_mp, priv);
/* rte_security .session_get_size callback: size of the private data. */
739 otx2_eth_sec_session_get_size(void *device __rte_unused)
741 return sizeof(struct otx2_sec_session);
/* rte_security .capabilities_get callback: static capability table. */
744 static const struct rte_security_capability *
745 otx2_eth_sec_capabilities_get(void *device __rte_unused)
747 return otx2_eth_sec_capabilities;
/* rte_security ops vtable installed into the security context. */
750 static struct rte_security_ops otx2_eth_sec_ops = {
751 .session_create = otx2_eth_sec_session_create,
752 .session_destroy = otx2_eth_sec_session_destroy,
753 .session_get_size = otx2_eth_sec_session_get_size,
754 .capabilities_get = otx2_eth_sec_capabilities_get
/*
 * Allocate and attach the rte_security context for this ethdev:
 * initialize the shared security-idev config for the port, wire up
 * the ops table and fast mdata/udata flags.
 */
758 otx2_eth_sec_ctx_create(struct rte_eth_dev *eth_dev)
760 struct rte_security_ctx *ctx;
763 ctx = rte_malloc("otx2_eth_sec_ctx",
764 sizeof(struct rte_security_ctx), 0);
768 ret = otx2_sec_idev_cfg_init(eth_dev->data->port_id);
/* Populate the security context */
776 ctx->device = eth_dev;
777 ctx->ops = &otx2_eth_sec_ops;
780 (RTE_SEC_CTX_F_FAST_SET_MDATA | RTE_SEC_CTX_F_FAST_GET_UDATA);
782 eth_dev->security_ctx = ctx;
/* Free the security context allocated by otx2_eth_sec_ctx_create(). */
788 otx2_eth_sec_ctx_destroy(struct rte_eth_dev *eth_dev)
790 rte_free(eth_dev->security_ctx);
/*
 * Program the NIX inline IPsec LF configuration over the mailbox:
 * inbound SA DB base IOVA, SSO tag type 'tt', event tag constant,
 * SA entry size, max frame length and SPI index width/limit.
 */
794 eth_sec_ipsec_cfg(struct rte_eth_dev *eth_dev, uint8_t tt)
796 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
797 uint16_t port = eth_dev->data->port_id;
798 struct nix_inline_ipsec_lf_cfg *req;
799 struct otx2_mbox *mbox = dev->mbox;
800 struct eth_sec_tag_const tag_const;
801 char name[RTE_MEMZONE_NAMESIZE];
802 const struct rte_memzone *mz;
/* The SA DB memzone must already exist (reserved in otx2_eth_sec_init) */
804 in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
805 mz = rte_memzone_lookup(name);
809 req = otx2_mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
/* Hardware indexes the SA DB by IOVA */
811 req->sa_base_addr = mz->iova;
813 req->ipsec_cfg0.tt = tt;
/* Tag constant encodes the event type and port for SSO delivery */
816 tag_const.event_type = RTE_EVENT_TYPE_ETHDEV;
817 tag_const.port = port;
818 req->ipsec_cfg0.tag_const = tag_const.u32;
/* HW requires the SA size as a power-of-two exponent */
820 req->ipsec_cfg0.sa_pow2_size =
821 rte_log2_u32(sizeof(struct otx2_ipsec_fp_in_sa));
822 req->ipsec_cfg0.lenm1_max = NIX_MAX_FRS - 1;
824 req->ipsec_cfg1.sa_idx_w = rte_log2_u32(dev->ipsec_in_max_spi);
825 req->ipsec_cfg1.sa_idx_max = dev->ipsec_in_max_spi - 1;
827 return otx2_mbox_process(mbox);
/*
 * Re-sync the inline IPsec SSO tag type with the tag type currently
 * programmed in RQ:0 — reads the RQ context via an AQ mailbox request,
 * then replays eth_sec_ipsec_cfg() with the retrieved sso_tt.
 */
831 otx2_eth_sec_update_tag_type(struct rte_eth_dev *eth_dev)
833 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
834 struct otx2_mbox *mbox = dev->mbox;
835 struct nix_aq_enq_rsp *rsp;
836 struct nix_aq_enq_req *aq;
839 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
840 aq->qidx = 0; /* Read RQ:0 context */
841 aq->ctype = NIX_AQ_CTYPE_RQ;
842 aq->op = NIX_AQ_INSTOP_READ;
844 ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
846 otx2_err("Could not read RQ context");
850 /* Update tag type */
851 ret = eth_sec_ipsec_cfg(eth_dev, rsp->rq.sso_tt);
853 otx2_err("Could not update sec eth tag type");
/*
 * Per-port inline IPsec init, called when RX or TX security offload is
 * enabled: register the security dynfield, reserve the IOVA-contiguous
 * inbound SA DB memzone (ipsec_in_max_spi entries), program the NIX
 * inline IPsec LF config and initialize the SA table lock. On failure
 * otx2_eth_sec_fini() undoes whatever was set up.
 */
859 otx2_eth_sec_init(struct rte_eth_dev *eth_dev)
861 const size_t sa_width = sizeof(struct otx2_ipsec_fp_in_sa);
862 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
863 uint16_t port = eth_dev->data->port_id;
864 char name[RTE_MEMZONE_NAMESIZE];
865 const struct rte_memzone *mz;
/* HW constraint: SA entry size must be a power of two in [32, 512] */
869 RTE_BUILD_BUG_ON(sa_width < 32 || sa_width > 512 ||
870 !RTE_IS_POWER_OF_2(sa_width));
/* Nothing to do when neither direction has security offload enabled */
872 if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) &&
873 !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
876 if (rte_security_dynfield_register() < 0)
879 nb_sa = dev->ipsec_in_max_spi;
880 mz_sz = nb_sa * sa_width;
881 in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
/* SA DB must be IOVA-contiguous: HW indexes it from sa_base_addr */
882 mz = rte_memzone_reserve_aligned(name, mz_sz, rte_socket_id(),
883 RTE_MEMZONE_IOVA_CONTIG, OTX2_ALIGN);
886 otx2_err("Could not allocate inbound SA DB");
890 memset(mz->addr, 0, mz_sz);
892 ret = eth_sec_ipsec_cfg(eth_dev, SSO_TT_ORDERED);
894 otx2_err("Could not configure inline IPsec");
898 rte_spinlock_init(&dev->ipsec_tbl_lock);
/* Error path: tear down partial setup */
903 otx2_err("Could not configure device for security");
904 otx2_eth_sec_fini(eth_dev);
/*
 * Per-port inline IPsec teardown: free the fastpath SA lookup table
 * and the inbound SA DB memzone. No-op when security offload was not
 * enabled on this port (mirrors the otx2_eth_sec_init() guard).
 */
909 otx2_eth_sec_fini(struct rte_eth_dev *eth_dev)
911 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
912 uint16_t port = eth_dev->data->port_id;
913 char name[RTE_MEMZONE_NAMESIZE];
915 if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) &&
916 !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
919 lookup_mem_sa_tbl_clear(eth_dev);
921 in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
/* rte_memzone_free() tolerates NULL when the lookup finds nothing */
922 rte_memzone_free(rte_memzone_lookup(name));