X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Focteontx2%2Fotx2_ethdev_sec.c;h=c2a36883cbf2eae682adb5a886b84046c0e084b0;hb=ad276d5c7eaa349b94a29db996c60bedd8c1d8e4;hp=d0b2dbad306643fc75a8fe4901430b52b3b354f7;hpb=bb810fd64d2028797cf1db6a21743df554167708;p=dpdk.git diff --git a/drivers/net/octeontx2/otx2_ethdev_sec.c b/drivers/net/octeontx2/otx2_ethdev_sec.c index d0b2dbad30..c2a36883cb 100644 --- a/drivers/net/octeontx2/otx2_ethdev_sec.c +++ b/drivers/net/octeontx2/otx2_ethdev_sec.c @@ -3,18 +3,25 @@ */ #include +#include #include #include +#include #include #include #include #include +#include +#include "otx2_common.h" +#include "otx2_cryptodev_qp.h" #include "otx2_ethdev.h" #include "otx2_ethdev_sec.h" #include "otx2_ipsec_fp.h" +#include "otx2_sec_idev.h" +#include "otx2_security.h" -#define ETH_SEC_MAX_PKT_LEN 1450 +#define ERR_STR_SZ 256 struct eth_sec_tag_const { RTE_STD_C11 @@ -133,18 +140,628 @@ static const struct rte_security_capability otx2_eth_sec_capabilities[] = { } }; +static void +lookup_mem_sa_tbl_clear(struct rte_eth_dev *eth_dev) +{ + static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM; + uint16_t port = eth_dev->data->port_id; + const struct rte_memzone *mz; + uint64_t **sa_tbl; + uint8_t *mem; + + mz = rte_memzone_lookup(name); + if (mz == NULL) + return; + + mem = mz->addr; + + sa_tbl = (uint64_t **)RTE_PTR_ADD(mem, OTX2_NIX_SA_TBL_START); + if (sa_tbl[port] == NULL) + return; + + rte_free(sa_tbl[port]); + sa_tbl[port] = NULL; +} + +static int +lookup_mem_sa_index_update(struct rte_eth_dev *eth_dev, int spi, void *sa, + char *err_str) +{ + static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM; + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint16_t port = eth_dev->data->port_id; + const struct rte_memzone *mz; + uint64_t **sa_tbl; + uint8_t *mem; + + mz = rte_memzone_lookup(name); + if (mz == NULL) { + snprintf(err_str, ERR_STR_SZ, + "Could not find fastpath lookup table"); + return -EINVAL; + } + + mem = mz->addr; + + sa_tbl = (uint64_t **)RTE_PTR_ADD(mem, OTX2_NIX_SA_TBL_START); + + if (sa_tbl[port] == NULL) { + sa_tbl[port] = rte_malloc(NULL, dev->ipsec_in_max_spi * + sizeof(uint64_t), 0); + } + + sa_tbl[port][spi] = (uint64_t)sa; + + return 0; +} + static inline void in_sa_mz_name_get(char *name, int size, uint16_t port) { snprintf(name, size, "otx2_ipsec_in_sadb_%u", port); } +static struct otx2_ipsec_fp_in_sa * +in_sa_get(uint16_t port, int sa_index) +{ + char name[RTE_MEMZONE_NAMESIZE]; + struct otx2_ipsec_fp_in_sa *sa; + const struct rte_memzone *mz; + + in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port); + mz = rte_memzone_lookup(name); + if (mz == NULL) { + otx2_err("Could not get the memzone reserved for IN SA DB"); + return NULL; + } + + sa = mz->addr; + + return sa + sa_index; +} + +static int +ipsec_sa_const_set(struct rte_security_ipsec_xform *ipsec, + struct rte_crypto_sym_xform *xform, + struct otx2_sec_session_ipsec_ip *sess) +{ + struct rte_crypto_sym_xform *cipher_xform, *auth_xform; + + sess->partial_len = sizeof(struct rte_ipv4_hdr); + + if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) { + sess->partial_len += sizeof(struct rte_esp_hdr); + sess->roundup_len = sizeof(struct rte_esp_tail); + } else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH) { + sess->partial_len += OTX2_SEC_AH_HDR_LEN; + } else { + return -EINVAL; + } + + if (ipsec->options.udp_encap) + sess->partial_len += sizeof(struct rte_udp_hdr); + + if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { + if (xform->aead.algo == 
RTE_CRYPTO_AEAD_AES_GCM) { + sess->partial_len += OTX2_SEC_AES_GCM_IV_LEN; + sess->partial_len += OTX2_SEC_AES_GCM_MAC_LEN; + sess->roundup_byte = OTX2_SEC_AES_GCM_ROUNDUP_BYTE_LEN; + } + return 0; + } + + if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { + cipher_xform = xform; + auth_xform = xform->next; + } else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { + auth_xform = xform; + cipher_xform = xform->next; + } else { + return -EINVAL; + } + if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) { + sess->partial_len += OTX2_SEC_AES_CBC_IV_LEN; + sess->roundup_byte = OTX2_SEC_AES_CBC_ROUNDUP_BYTE_LEN; + } else { + return -EINVAL; + } + + if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC) + sess->partial_len += OTX2_SEC_SHA1_HMAC_LEN; + else + return -EINVAL; + + return 0; +} + +static int +hmac_init(struct otx2_ipsec_fp_sa_ctl *ctl, struct otx2_cpt_qp *qp, + const uint8_t *auth_key, int len, uint8_t *hmac_key) +{ + struct inst_data { + struct otx2_cpt_res cpt_res; + uint8_t buffer[64]; + } *md; + + volatile struct otx2_cpt_res *res; + uint64_t timeout, lmt_status; + struct otx2_cpt_inst_s inst; + rte_iova_t md_iova; + int ret; + + memset(&inst, 0, sizeof(struct otx2_cpt_inst_s)); + + md = rte_zmalloc(NULL, sizeof(struct inst_data), OTX2_CPT_RES_ALIGN); + if (md == NULL) + return -ENOMEM; + + memcpy(md->buffer, auth_key, len); + + md_iova = rte_malloc_virt2iova(md); + if (md_iova == RTE_BAD_IOVA) { + ret = -EINVAL; + goto free_md; + } + + inst.res_addr = md_iova + offsetof(struct inst_data, cpt_res); + inst.opcode = OTX2_CPT_OP_WRITE_HMAC_IPAD_OPAD; + inst.param2 = ctl->auth_type; + inst.dlen = len; + inst.dptr = md_iova + offsetof(struct inst_data, buffer); + inst.rptr = inst.dptr; + inst.egrp = OTX2_CPT_EGRP_INLINE_IPSEC; + + md->cpt_res.compcode = 0; + md->cpt_res.uc_compcode = 0xff; + + timeout = rte_get_timer_cycles() + 5 * rte_get_timer_hz(); + + rte_io_wmb(); + + do { + otx2_lmt_mov(qp->lmtline, &inst, 2); + lmt_status = otx2_lmt_submit(qp->lf_nq_reg); + } while (lmt_status == 0); + + res = (volatile struct otx2_cpt_res *)&md->cpt_res; + + /* Wait until instruction completes or times out */ + while (res->uc_compcode == 0xff) { + if (rte_get_timer_cycles() > timeout) + break; + } + + if (res->u16[0] != OTX2_SEC_COMP_GOOD) { + ret = -EIO; + goto free_md; + } + + /* Retrieve the ipad and opad from rptr */ + memcpy(hmac_key, md->buffer, 48); + + ret = 0; + +free_md: + rte_free(md); + return ret; +} + +static int +eth_sec_ipsec_out_sess_create(struct rte_eth_dev *eth_dev, + struct rte_security_ipsec_xform *ipsec, + struct rte_crypto_sym_xform *crypto_xform, + struct rte_security_session *sec_sess) +{ + struct rte_crypto_sym_xform *auth_xform, *cipher_xform; + struct otx2_sec_session_ipsec_ip *sess; + uint16_t port = eth_dev->data->port_id; + int cipher_key_len, auth_key_len, ret; + const uint8_t *cipher_key, *auth_key; + struct otx2_ipsec_fp_sa_ctl *ctl; + struct otx2_ipsec_fp_out_sa *sa; + struct otx2_sec_session *priv; + struct otx2_cpt_inst_s inst; + struct otx2_cpt_qp *qp; + + priv = get_sec_session_private_data(sec_sess); + priv->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_EGRESS; + sess = &priv->ipsec.ip; + + sa = &sess->out_sa; + ctl = &sa->ctl; + if (ctl->valid) { + otx2_err("SA already registered"); + return -EINVAL; + } + + memset(sess, 0, sizeof(struct otx2_sec_session_ipsec_ip)); + + sess->seq = 1; + + ret = ipsec_sa_const_set(ipsec, crypto_xform, sess); + if (ret < 0) + return ret; + + if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) + 
memcpy(sa->nonce, &ipsec->salt, 4); + + if (ipsec->options.udp_encap == 1) { + sa->udp_src = 4500; + sa->udp_dst = 4500; + } + + if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) { + /* Start ip id from 1 */ + sess->ip_id = 1; + + if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) { + memcpy(&sa->ip_src, &ipsec->tunnel.ipv4.src_ip, + sizeof(struct in_addr)); + memcpy(&sa->ip_dst, &ipsec->tunnel.ipv4.dst_ip, + sizeof(struct in_addr)); + } else { + return -EINVAL; + } + } else { + return -EINVAL; + } + + cipher_xform = crypto_xform; + auth_xform = crypto_xform->next; + + cipher_key_len = 0; + auth_key_len = 0; + auth_key = NULL; + + if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { + cipher_key = crypto_xform->aead.key.data; + cipher_key_len = crypto_xform->aead.key.length; + } else { + cipher_key = cipher_xform->cipher.key.data; + cipher_key_len = cipher_xform->cipher.key.length; + auth_key = auth_xform->auth.key.data; + auth_key_len = auth_xform->auth.key.length; + } + + if (cipher_key_len != 0) + memcpy(sa->cipher_key, cipher_key, cipher_key_len); + else + return -EINVAL; + + /* Determine word 7 of CPT instruction */ + inst.u64[7] = 0; + inst.egrp = OTX2_CPT_EGRP_INLINE_IPSEC; + inst.cptr = rte_mempool_virt2iova(sa); + sess->inst_w7 = inst.u64[7]; + + /* Get CPT QP to be used for this SA */ + ret = otx2_sec_idev_tx_cpt_qp_get(port, &qp); + if (ret) + return ret; + + sess->qp = qp; + + sess->cpt_lmtline = qp->lmtline; + sess->cpt_nq_reg = qp->lf_nq_reg; + + /* Populate control word */ + ret = ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl); + if (ret) + goto cpt_put; + + if (auth_key_len && auth_key) { + ret = hmac_init(ctl, qp, auth_key, auth_key_len, sa->hmac_key); + if (ret) + goto cpt_put; + } + + rte_io_wmb(); + ctl->valid = 1; + + return 0; +cpt_put: + otx2_sec_idev_tx_cpt_qp_put(sess->qp); + return ret; +} + +static int +eth_sec_ipsec_in_sess_create(struct rte_eth_dev *eth_dev, + struct rte_security_ipsec_xform *ipsec, + struct rte_crypto_sym_xform *crypto_xform, + struct rte_security_session *sec_sess) +{ + struct rte_crypto_sym_xform *auth_xform, *cipher_xform; + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_sec_session_ipsec_ip *sess; + uint16_t port = eth_dev->data->port_id; + int cipher_key_len, auth_key_len, ret; + const uint8_t *cipher_key, *auth_key; + struct otx2_ipsec_fp_sa_ctl *ctl; + struct otx2_ipsec_fp_in_sa *sa; + struct otx2_sec_session *priv; + char err_str[ERR_STR_SZ]; + struct otx2_cpt_qp *qp; + + memset(err_str, 0, ERR_STR_SZ); + + if (ipsec->spi >= dev->ipsec_in_max_spi) { + otx2_err("SPI exceeds max supported"); + return -EINVAL; + } + + sa = in_sa_get(port, ipsec->spi); + if (sa == NULL) + return -ENOMEM; + + ctl = &sa->ctl; + + priv = get_sec_session_private_data(sec_sess); + priv->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_INGRESS; + sess = &priv->ipsec.ip; + + rte_spinlock_lock(&dev->ipsec_tbl_lock); + + if (ctl->valid) { + snprintf(err_str, ERR_STR_SZ, "SA already registered"); + ret = -EEXIST; + goto tbl_unlock; + } + + memset(sa, 0, sizeof(struct otx2_ipsec_fp_in_sa)); + + auth_xform = crypto_xform; + cipher_xform = crypto_xform->next; + + cipher_key_len = 0; + auth_key_len = 0; + auth_key = NULL; + + if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { + if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) + memcpy(sa->nonce, &ipsec->salt, 4); + cipher_key = crypto_xform->aead.key.data; + cipher_key_len = crypto_xform->aead.key.length; + } else { + cipher_key = cipher_xform->cipher.key.data; + cipher_key_len = 
cipher_xform->cipher.key.length; + auth_key = auth_xform->auth.key.data; + auth_key_len = auth_xform->auth.key.length; + } + + if (cipher_key_len != 0) { + memcpy(sa->cipher_key, cipher_key, cipher_key_len); + } else { + snprintf(err_str, ERR_STR_SZ, "Invalid cipher key len"); + ret = -EINVAL; + goto sa_clear; + } + + sess->in_sa = sa; + + sa->userdata = priv->userdata; + + sa->replay_win_sz = ipsec->replay_win_sz; + + if (lookup_mem_sa_index_update(eth_dev, ipsec->spi, sa, err_str)) { + ret = -EINVAL; + goto sa_clear; + } + + ret = ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl); + if (ret) { + snprintf(err_str, ERR_STR_SZ, + "Could not set SA CTL word (err: %d)", ret); + goto sa_clear; + } + + if (auth_key_len && auth_key) { + /* Get a queue pair for HMAC init */ + ret = otx2_sec_idev_tx_cpt_qp_get(port, &qp); + if (ret) { + snprintf(err_str, ERR_STR_SZ, "Could not get CPT QP"); + goto sa_clear; + } + + ret = hmac_init(ctl, qp, auth_key, auth_key_len, sa->hmac_key); + otx2_sec_idev_tx_cpt_qp_put(qp); + if (ret) { + snprintf(err_str, ERR_STR_SZ, "Could not put CPT QP"); + goto sa_clear; + } + } + + if (sa->replay_win_sz) { + if (sa->replay_win_sz > OTX2_IPSEC_MAX_REPLAY_WIN_SZ) { + snprintf(err_str, ERR_STR_SZ, + "Replay window size is not supported"); + ret = -ENOTSUP; + goto sa_clear; + } + sa->replay = rte_zmalloc(NULL, sizeof(struct otx2_ipsec_replay), + 0); + if (sa->replay == NULL) { + snprintf(err_str, ERR_STR_SZ, + "Could not allocate memory"); + ret = -ENOMEM; + goto sa_clear; + } + + rte_spinlock_init(&sa->replay->lock); + /* + * Set window bottom to 1, base and top to size of + * window + */ + sa->replay->winb = 1; + sa->replay->wint = sa->replay_win_sz; + sa->replay->base = sa->replay_win_sz; + sa->esn_low = 0; + sa->esn_hi = 0; + } + + rte_io_wmb(); + ctl->valid = 1; + + rte_spinlock_unlock(&dev->ipsec_tbl_lock); + return 0; + +sa_clear: + memset(sa, 0, sizeof(struct otx2_ipsec_fp_in_sa)); + +tbl_unlock: + rte_spinlock_unlock(&dev->ipsec_tbl_lock); + + otx2_err("%s", err_str); + + return ret; +} + +static int +eth_sec_ipsec_sess_create(struct rte_eth_dev *eth_dev, + struct rte_security_ipsec_xform *ipsec, + struct rte_crypto_sym_xform *crypto_xform, + struct rte_security_session *sess) +{ + int ret; + + ret = ipsec_fp_xform_verify(ipsec, crypto_xform); + if (ret) + return ret; + + if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) + return eth_sec_ipsec_in_sess_create(eth_dev, ipsec, + crypto_xform, sess); + else + return eth_sec_ipsec_out_sess_create(eth_dev, ipsec, + crypto_xform, sess); +} + +static int +otx2_eth_sec_session_create(void *device, + struct rte_security_session_conf *conf, + struct rte_security_session *sess, + struct rte_mempool *mempool) +{ + struct otx2_sec_session *priv; + int ret; + + if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) + return -ENOTSUP; + + if (rte_mempool_get(mempool, (void **)&priv)) { + otx2_err("Could not allocate security session private data"); + return -ENOMEM; + } + + set_sec_session_private_data(sess, priv); + + /* + * Save userdata provided by the application. For ingress packets, this + * could be used to identify the SA. 
+ */ + priv->userdata = conf->userdata; + + if (conf->protocol == RTE_SECURITY_PROTOCOL_IPSEC) + ret = eth_sec_ipsec_sess_create(device, &conf->ipsec, + conf->crypto_xform, + sess); + else + ret = -ENOTSUP; + + if (ret) + goto mempool_put; + + return 0; + +mempool_put: + rte_mempool_put(mempool, priv); + set_sec_session_private_data(sess, NULL); + return ret; +} + +static void +otx2_eth_sec_free_anti_replay(struct otx2_ipsec_fp_in_sa *sa) +{ + if (sa != NULL) { + if (sa->replay_win_sz && sa->replay) + rte_free(sa->replay); + } +} + +static int +otx2_eth_sec_session_destroy(void *device, + struct rte_security_session *sess) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(device); + struct otx2_sec_session_ipsec_ip *sess_ip; + struct otx2_ipsec_fp_in_sa *sa; + struct otx2_sec_session *priv; + struct rte_mempool *sess_mp; + int ret; + + priv = get_sec_session_private_data(sess); + if (priv == NULL) + return -EINVAL; + + sess_ip = &priv->ipsec.ip; + + if (priv->ipsec.dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { + rte_spinlock_lock(&dev->ipsec_tbl_lock); + sa = sess_ip->in_sa; + + /* Release the anti replay window */ + otx2_eth_sec_free_anti_replay(sa); + + /* Clear SA table entry */ + if (sa != NULL) { + sa->ctl.valid = 0; + rte_io_wmb(); + } + + rte_spinlock_unlock(&dev->ipsec_tbl_lock); + } + + /* Release CPT LF used for this session */ + if (sess_ip->qp != NULL) { + ret = otx2_sec_idev_tx_cpt_qp_put(sess_ip->qp); + if (ret) + return ret; + } + + sess_mp = rte_mempool_from_obj(priv); + + set_sec_session_private_data(sess, NULL); + rte_mempool_put(sess_mp, priv); + + return 0; +} + static unsigned int otx2_eth_sec_session_get_size(void *device __rte_unused) { return sizeof(struct otx2_sec_session); } +static int +otx2_eth_sec_set_pkt_mdata(void *device __rte_unused, + struct rte_security_session *session, + struct rte_mbuf *m, void *params __rte_unused) +{ + /* Set security session as the pkt metadata */ + *rte_security_dynfield(m) = (rte_security_dynfield_t)session; + + return 0; +} + +static int +otx2_eth_sec_get_userdata(void *device __rte_unused, uint64_t md, + void **userdata) +{ + /* Retrieve userdata */ + *userdata = (void *)md; + + return 0; +} + static const struct rte_security_capability * otx2_eth_sec_capabilities_get(void *device __rte_unused) { @@ -152,7 +769,11 @@ otx2_eth_sec_capabilities_get(void *device __rte_unused) } static struct rte_security_ops otx2_eth_sec_ops = { + .session_create = otx2_eth_sec_session_create, + .session_destroy = otx2_eth_sec_session_destroy, .session_get_size = otx2_eth_sec_session_get_size, + .set_pkt_metadata = otx2_eth_sec_set_pkt_mdata, + .get_userdata = otx2_eth_sec_get_userdata, .capabilities_get = otx2_eth_sec_capabilities_get }; @@ -160,12 +781,19 @@ int otx2_eth_sec_ctx_create(struct rte_eth_dev *eth_dev) { struct rte_security_ctx *ctx; + int ret; ctx = rte_malloc("otx2_eth_sec_ctx", sizeof(struct rte_security_ctx), 0); if (ctx == NULL) return -ENOMEM; + ret = otx2_sec_idev_cfg_init(eth_dev->data->port_id); + if (ret) { + rte_free(ctx); + return ret; + } + /* Populate ctx */ ctx->device = eth_dev; @@ -212,7 +840,7 @@ eth_sec_ipsec_cfg(struct rte_eth_dev *eth_dev, uint8_t tt) req->ipsec_cfg0.sa_pow2_size = rte_log2_u32(sizeof(struct otx2_ipsec_fp_in_sa)); - req->ipsec_cfg0.lenm1_max = ETH_SEC_MAX_PKT_LEN - 1; + req->ipsec_cfg0.lenm1_max = NIX_MAX_FRS - 1; req->ipsec_cfg1.sa_idx_w = rte_log2_u32(dev->ipsec_in_max_spi); req->ipsec_cfg1.sa_idx_max = dev->ipsec_in_max_spi - 1; @@ -220,6 +848,34 @@ eth_sec_ipsec_cfg(struct rte_eth_dev *eth_dev, 
uint8_t tt) return otx2_mbox_process(mbox); } +int +otx2_eth_sec_update_tag_type(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct nix_aq_enq_rsp *rsp; + struct nix_aq_enq_req *aq; + int ret; + + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + aq->qidx = 0; /* Read RQ:0 context */ + aq->ctype = NIX_AQ_CTYPE_RQ; + aq->op = NIX_AQ_INSTOP_READ; + + ret = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (ret < 0) { + otx2_err("Could not read RQ context"); + return ret; + } + + /* Update tag type */ + ret = eth_sec_ipsec_cfg(eth_dev, rsp->rq.sso_tt); + if (ret < 0) + otx2_err("Could not update sec eth tag type"); + + return ret; +} + int otx2_eth_sec_init(struct rte_eth_dev *eth_dev) { @@ -238,6 +894,9 @@ otx2_eth_sec_init(struct rte_eth_dev *eth_dev) !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)) return 0; + if (rte_security_dynfield_register() < 0) + return -rte_errno; + nb_sa = dev->ipsec_in_max_spi; mz_sz = nb_sa * sa_width; in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port); @@ -257,6 +916,8 @@ otx2_eth_sec_init(struct rte_eth_dev *eth_dev) goto sec_fini; } + rte_spinlock_init(&dev->ipsec_tbl_lock); + return 0; sec_fini: @@ -276,6 +937,8 @@ otx2_eth_sec_fini(struct rte_eth_dev *eth_dev) !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)) return; + lookup_mem_sa_tbl_clear(eth_dev); + in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port); rte_memzone_free(rte_memzone_lookup(name)); }
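
Note (not part of the patch above): the new .set_pkt_metadata op (otx2_eth_sec_set_pkt_mdata) stores the security session in the mbuf's security dynfield so the Tx path can find the outbound SA. Below is a minimal, hypothetical application-side sketch of how that op gets exercised; the helper name mark_pkt_for_inline_ipsec is made up, "sess" is assumed to have been created earlier via rte_security_session_create() against this port's security context, and the Tx flag name (PKT_TX_SEC_OFFLOAD) follows the DPDK releases this patch targets.

/*
 * Hypothetical usage sketch (not part of this patch): tag an egress mbuf
 * with the inline IPsec session so the PMD's set_pkt_metadata callback
 * records it in the mbuf's security dynfield.
 */
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_security.h>

static void
mark_pkt_for_inline_ipsec(uint16_t port_id, struct rte_mbuf *m,
			  struct rte_security_session *sess)
{
	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);

	/* PMD callback stores 'sess' in the mbuf (security dynfield). */
	rte_security_set_pkt_metadata(ctx, sess, m, NULL);

	/* Request inline IPsec processing on transmit. */
	m->ol_flags |= PKT_TX_SEC_OFFLOAD;
}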