/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */

#include <rte_cryptodev.h>
#include <rte_esp.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_ip.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_security.h>
#include <rte_security_driver.h>
#include <rte_udp.h>

#include "otx2_common.h"
#include "otx2_cryptodev_qp.h"
#include "otx2_ethdev.h"
#include "otx2_ethdev_sec.h"
#include "otx2_ipsec_fp.h"
#include "otx2_sec_idev.h"
#include "otx2_security.h"

struct eth_sec_tag_const {
	RTE_STD_C11
	union {
		struct {
			uint32_t rsvd_11_0  : 12;
			uint32_t port       : 8;
			uint32_t event_type : 4;
			uint32_t rsvd_31_24 : 8;
		};
		uint32_t u32;
	};
};
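
/*
 * Constant part of the SSO tag for inline IPsec RX events: with the
 * usual LSB-first bitfield layout, the originating ethdev port sits in
 * bits 12-19 and RTE_EVENT_TYPE_ETHDEV in bits 20-23, so the event path
 * can attribute a decrypted packet to its port without touching the SA
 * (see eth_sec_ipsec_cfg() below).
 */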

static struct rte_cryptodev_capabilities otx2_eth_sec_crypto_caps[] = {
	{	/* AES GCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
				.block_size = 16,
				.key_size = {.min = 16, .max = 32,
					     .increment = 8},
				.digest_size = {.min = 16, .max = 16},
				.aad_size = {.min = 8, .max = 12,
					     .increment = 4},
				.iv_size = {.min = 12, .max = 12}
			}, }
		}, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				.block_size = 16,
				.key_size = {.min = 16, .max = 32,
					     .increment = 8},
				.iv_size = {.min = 16, .max = 16}
			}, }
		}, }
	},
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				.block_size = 64,
				.key_size = {.min = 20, .max = 64,
					     .increment = 1},
				.digest_size = {.min = 12, .max = 12}
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

static const struct rte_security_capability otx2_eth_sec_capabilities[] = {
	{ /* IPsec Inline Protocol ESP Tunnel Ingress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = otx2_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{ /* IPsec Inline Protocol ESP Tunnel Egress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = otx2_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{
		.action = RTE_SECURITY_ACTION_TYPE_NONE
	}
};

static void
lookup_mem_sa_tbl_clear(struct rte_eth_dev *eth_dev)
{
	static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM;
	uint16_t port = eth_dev->data->port_id;
	const struct rte_memzone *mz;
	uint64_t **sa_tbl;
	uint8_t *mem;

	mz = rte_memzone_lookup(name);
	if (mz == NULL)
		return;

	mem = mz->addr;

	sa_tbl = (uint64_t **)RTE_PTR_ADD(mem, OTX2_NIX_SA_TBL_START);
	if (sa_tbl[port] == NULL)
		return;

	rte_free(sa_tbl[port]);
	sa_tbl[port] = NULL;
}

static int
lookup_mem_sa_index_update(struct rte_eth_dev *eth_dev, int spi, void *sa)
{
	static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM;
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint16_t port = eth_dev->data->port_id;
	const struct rte_memzone *mz;
	uint64_t **sa_tbl;
	uint8_t *mem;

	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		otx2_err("Could not find fastpath lookup table");
		return -EINVAL;
	}

	mem = mz->addr;

	sa_tbl = (uint64_t **)RTE_PTR_ADD(mem, OTX2_NIX_SA_TBL_START);

	if (sa_tbl[port] == NULL) {
		sa_tbl[port] = rte_malloc(NULL, dev->ipsec_in_max_spi *
					  sizeof(uint64_t), 0);
		if (sa_tbl[port] == NULL)
			return -ENOMEM;
	}

	sa_tbl[port][spi] = (uint64_t)sa;

	return 0;
}
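
/*
 * Consumer-side sketch (assuming the otx2 RX fast path): once published,
 * an SPI from a received packet resolves to its SA with two dependent
 * loads, roughly
 *
 *	sa = (void *)sa_tbl[port][spi];
 *
 * which is why the per-port tables hang off the shared fastpath lookup
 * memzone rather than the device private area.
 */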

static inline void
in_sa_mz_name_get(char *name, int size, uint16_t port)
{
	snprintf(name, size, "otx2_ipsec_in_sadb_%u", port);
}

static struct otx2_ipsec_fp_in_sa *
in_sa_get(uint16_t port, int sa_index)
{
	char name[RTE_MEMZONE_NAMESIZE];
	struct otx2_ipsec_fp_in_sa *sa;
	const struct rte_memzone *mz;

	in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		otx2_err("Could not get the memzone reserved for IN SA DB");
		return NULL;
	}

	sa = mz->addr;

	return sa + sa_index;
}
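
/*
 * The inbound SA DB is a flat, IOVA-contiguous array in the memzone
 * reserved by otx2_eth_sec_init(), indexed directly by SPI; callers are
 * expected to range-check the SPI against ipsec_in_max_spi first.
 */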

static int
ipsec_sa_const_set(struct rte_security_ipsec_xform *ipsec,
		   struct rte_crypto_sym_xform *xform,
		   struct otx2_sec_session_ipsec_ip *sess)
{
	struct rte_crypto_sym_xform *cipher_xform, *auth_xform;

	sess->partial_len = sizeof(struct rte_ipv4_hdr);

	if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
		sess->partial_len += sizeof(struct rte_esp_hdr);
		sess->roundup_len = sizeof(struct rte_esp_tail);
	} else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH) {
		sess->partial_len += OTX2_SEC_AH_HDR_LEN;
	} else {
		return -EINVAL;
	}

	if (ipsec->options.udp_encap)
		sess->partial_len += sizeof(struct rte_udp_hdr);

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
			sess->partial_len += OTX2_SEC_AES_GCM_IV_LEN;
			sess->partial_len += OTX2_SEC_AES_GCM_MAC_LEN;
			sess->roundup_byte = OTX2_SEC_AES_GCM_ROUNDUP_BYTE_LEN;
		}
		return 0;
	}

	if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		cipher_xform = xform;
		auth_xform = xform->next;
	} else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		auth_xform = xform;
		cipher_xform = xform->next;
	} else {
		return -EINVAL;
	}

	if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
		sess->partial_len += OTX2_SEC_AES_CBC_IV_LEN;
		sess->roundup_byte = OTX2_SEC_AES_CBC_ROUNDUP_BYTE_LEN;
	} else {
		return -EINVAL;
	}

	if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
		sess->partial_len += OTX2_SEC_SHA1_HMAC_LEN;
	else
		return -EINVAL;

	return 0;
}
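
/*
 * Worked example (a sketch; the exact values live behind the OTX2_SEC_*
 * constants, assumed here to be the usual 8-byte GCM IV and 16-byte
 * ICV): for an ESP tunnel SA over IPv4 with AES-GCM,
 *
 *	partial_len = 20 (outer IPv4) + 8 (ESP) + 8 (IV) + 16 (ICV) = 52
 *
 * and the payload is then padded to roundup_byte alignment plus the
 * 2-byte ESP tail, letting the TX path size the output packet up front.
 */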

static int
hmac_init(struct otx2_ipsec_fp_sa_ctl *ctl, struct otx2_cpt_qp *qp,
	  const uint8_t *auth_key, int len, uint8_t *hmac_key)
{
	struct inst_data {
		struct otx2_cpt_res cpt_res;
		uint8_t buffer[64];
	} *md;

	volatile struct otx2_cpt_res *res;
	uint64_t timeout, lmt_status;
	struct otx2_cpt_inst_s inst;
	rte_iova_t md_iova;
	int ret;

	memset(&inst, 0, sizeof(struct otx2_cpt_inst_s));

	md = rte_zmalloc(NULL, sizeof(struct inst_data), OTX2_CPT_RES_ALIGN);
	if (md == NULL)
		return -ENOMEM;

	memcpy(md->buffer, auth_key, len);

	md_iova = rte_malloc_virt2iova(md);
	if (md_iova == RTE_BAD_IOVA) {
		ret = -EINVAL;
		goto free_md;
	}

	inst.res_addr = md_iova + offsetof(struct inst_data, cpt_res);
	inst.opcode = OTX2_CPT_OP_WRITE_HMAC_IPAD_OPAD;
	inst.param2 = ctl->auth_type;
	inst.dlen = len;
	inst.dptr = md_iova + offsetof(struct inst_data, buffer);
	inst.rptr = inst.dptr;
	inst.egrp = OTX2_CPT_EGRP_INLINE_IPSEC;

	md->cpt_res.compcode = 0;
	md->cpt_res.uc_compcode = 0xff;

	timeout = rte_get_timer_cycles() + 5 * rte_get_timer_hz();

	do {
		otx2_lmt_mov(qp->lmtline, &inst, 2);
		lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
	} while (lmt_status == 0);

	res = (volatile struct otx2_cpt_res *)&md->cpt_res;

	/* Wait until instruction completes or times out */
	while (res->uc_compcode == 0xff) {
		if (rte_get_timer_cycles() > timeout)
			break;
	}

	if (res->u16[0] != OTX2_SEC_COMP_GOOD) {
		ret = -EIO;
		goto free_md;
	}

	/* Retrieve the ipad and opad from rptr */
	memcpy(hmac_key, md->buffer, 48);

	ret = 0;
free_md:
	rte_free(md);
	return ret;
}
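
/*
 * The 48 bytes copied back above are the precomputed HMAC ipad and opad
 * states rather than the raw key (presumably two padded SHA1 states of
 * 24 bytes each; the exact layout is a hardware detail assumed here),
 * so the inline engine never has to hash the key per packet.
 */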

static int
eth_sec_ipsec_out_sess_create(struct rte_eth_dev *eth_dev,
			      struct rte_security_ipsec_xform *ipsec,
			      struct rte_crypto_sym_xform *crypto_xform,
			      struct rte_security_session *sec_sess)
{
	struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
	struct otx2_sec_session_ipsec_ip *sess;
	uint16_t port = eth_dev->data->port_id;
	int cipher_key_len, auth_key_len, ret;
	const uint8_t *cipher_key, *auth_key;
	struct otx2_ipsec_fp_sa_ctl *ctl;
	struct otx2_ipsec_fp_out_sa *sa;
	struct otx2_sec_session *priv;
	struct otx2_cpt_inst_s inst;
	struct otx2_cpt_qp *qp;

	priv = get_sec_session_private_data(sec_sess);
	priv->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	sess = &priv->ipsec.ip;

	sa = &sess->out_sa;
	ctl = &sa->ctl;
	if (ctl->valid) {
		otx2_err("SA already registered");
		return -EINVAL;
	}

	memset(sess, 0, sizeof(struct otx2_sec_session_ipsec_ip));

	sess->seq = 1;

	ret = ipsec_sa_const_set(ipsec, crypto_xform, sess);
	if (ret < 0)
		return ret;

	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
		memcpy(sa->nonce, &ipsec->salt, 4);

	if (ipsec->options.udp_encap == 1) {
		sa->udp_src = 4500;
		sa->udp_dst = 4500;
	}

	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		/* Start ip id from 1 */
		sess->ip_id = 1;

		if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			memcpy(&sa->ip_src, &ipsec->tunnel.ipv4.src_ip,
			       sizeof(struct in_addr));
			memcpy(&sa->ip_dst, &ipsec->tunnel.ipv4.dst_ip,
			       sizeof(struct in_addr));
		} else {
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}

	cipher_xform = crypto_xform;
	auth_xform = crypto_xform->next;

	cipher_key_len = 0;
	auth_key_len = 0;
	auth_key = NULL;

	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		cipher_key = crypto_xform->aead.key.data;
		cipher_key_len = crypto_xform->aead.key.length;
	} else {
		cipher_key = cipher_xform->cipher.key.data;
		cipher_key_len = cipher_xform->cipher.key.length;
		auth_key = auth_xform->auth.key.data;
		auth_key_len = auth_xform->auth.key.length;
	}

	if (cipher_key_len != 0)
		memcpy(sa->cipher_key, cipher_key, cipher_key_len);
	else
		return -EINVAL;

	/* Determine word 7 of CPT instruction */
	inst.u64[7] = 0;
	inst.egrp = OTX2_CPT_EGRP_INLINE_IPSEC;
	inst.cptr = rte_mempool_virt2iova(sa);
	sess->inst_w7 = inst.u64[7];

	/* Get CPT QP to be used for this SA */
	ret = otx2_sec_idev_tx_cpt_qp_get(port, &qp);
	if (ret)
		return ret;

	sess->qp = qp;

	sess->cpt_lmtline = qp->lmtline;
	sess->cpt_nq_reg = qp->lf_nq_reg;

	/* Populate control word */
	ret = ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl);
	if (ret)
		goto cpt_put;

	if (auth_key_len && auth_key) {
		ret = hmac_init(ctl, qp, auth_key, auth_key_len, sa->hmac_key);
		if (ret)
			goto cpt_put;
	}

	return 0;
cpt_put:
	otx2_sec_idev_tx_cpt_qp_put(sess->qp);
	return ret;
}
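
/*
 * The lmtline/nq_reg/inst_w7 values cached on the session above are
 * what the TX fast path uses to build and submit the per-packet CPT
 * instruction without revisiting this SA; word 7 presumably carries the
 * engine group and the SA pointer (cptr) set just before it was saved.
 */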

static int
eth_sec_ipsec_in_sess_create(struct rte_eth_dev *eth_dev,
			     struct rte_security_ipsec_xform *ipsec,
			     struct rte_crypto_sym_xform *crypto_xform,
			     struct rte_security_session *sec_sess)
{
	struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_sec_session_ipsec_ip *sess;
	uint16_t port = eth_dev->data->port_id;
	int cipher_key_len, auth_key_len, ret;
	const uint8_t *cipher_key, *auth_key;
	struct otx2_ipsec_fp_sa_ctl *ctl;
	struct otx2_ipsec_fp_in_sa *sa;
	struct otx2_sec_session *priv;
	struct otx2_cpt_qp *qp;

	if (ipsec->spi >= dev->ipsec_in_max_spi) {
		otx2_err("SPI exceeds max supported");
		return -EINVAL;
	}

	sa = in_sa_get(port, ipsec->spi);
	if (sa == NULL)
		return -EINVAL;
	ctl = &sa->ctl;

	priv = get_sec_session_private_data(sec_sess);
	priv->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
	sess = &priv->ipsec.ip;

	if (ctl->valid) {
		otx2_err("SA already registered");
		return -EINVAL;
	}

	memset(sa, 0, sizeof(struct otx2_ipsec_fp_in_sa));

	auth_xform = crypto_xform;
	cipher_xform = crypto_xform->next;

	cipher_key_len = 0;
	auth_key_len = 0;
	auth_key = NULL;

	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
			memcpy(sa->nonce, &ipsec->salt, 4);
		cipher_key = crypto_xform->aead.key.data;
		cipher_key_len = crypto_xform->aead.key.length;
	} else {
		cipher_key = cipher_xform->cipher.key.data;
		cipher_key_len = cipher_xform->cipher.key.length;
		auth_key = auth_xform->auth.key.data;
		auth_key_len = auth_xform->auth.key.length;
	}

	if (cipher_key_len != 0)
		memcpy(sa->cipher_key, cipher_key, cipher_key_len);
	else
		return -EINVAL;

	sess->in_sa = sa;

	sa->userdata = priv->userdata;

	sa->replay_win_sz = ipsec->replay_win_sz;

	if (lookup_mem_sa_index_update(eth_dev, ipsec->spi, sa))
		return -EINVAL;

	ret = ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl);
	if (ret)
		return ret;

	if (auth_key_len && auth_key) {
		/* Get a queue pair for HMAC init */
		ret = otx2_sec_idev_tx_cpt_qp_get(port, &qp);
		if (ret)
			return ret;
		ret = hmac_init(ctl, qp, auth_key, auth_key_len, sa->hmac_key);
		otx2_sec_idev_tx_cpt_qp_put(qp);
		if (ret)
			return ret;
	}

	if (sa->replay_win_sz) {
		if (sa->replay_win_sz > OTX2_IPSEC_MAX_REPLAY_WIN_SZ) {
			otx2_err("Replay window size is not supported");
			return -ENOTSUP;
		}
		sa->replay = rte_zmalloc(NULL, sizeof(struct otx2_ipsec_replay),
					 0);
		if (sa->replay == NULL)
			return -ENOMEM;

		rte_spinlock_init(&sa->replay->lock);
		/*
		 * Set window bottom to 1, base and top to size of
		 * the window
		 */
		sa->replay->winb = 1;
		sa->replay->wint = sa->replay_win_sz;
		sa->replay->base = sa->replay_win_sz;
	}

	return 0;
}

static int
eth_sec_ipsec_sess_create(struct rte_eth_dev *eth_dev,
			  struct rte_security_ipsec_xform *ipsec,
			  struct rte_crypto_sym_xform *crypto_xform,
			  struct rte_security_session *sess)
{
	int ret;

	ret = ipsec_fp_xform_verify(ipsec, crypto_xform);
	if (ret)
		return ret;

	if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		return eth_sec_ipsec_in_sess_create(eth_dev, ipsec,
						    crypto_xform, sess);
	else
		return eth_sec_ipsec_out_sess_create(eth_dev, ipsec,
						     crypto_xform, sess);
}

static int
otx2_eth_sec_session_create(void *device,
			    struct rte_security_session_conf *conf,
			    struct rte_security_session *sess,
			    struct rte_mempool *mempool)
{
	struct otx2_sec_session *priv;
	int ret;

	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
		return -ENOTSUP;

	if (rte_mempool_get(mempool, (void **)&priv)) {
		otx2_err("Could not allocate security session private data");
		return -ENOMEM;
	}

	set_sec_session_private_data(sess, priv);

	/*
	 * Save userdata provided by the application. For ingress packets, this
	 * could be used to identify the SA.
	 */
	priv->userdata = conf->userdata;

	if (conf->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
		ret = eth_sec_ipsec_sess_create(device, &conf->ipsec,
						conf->crypto_xform, sess);
	else
		ret = -ENOTSUP;

	if (ret)
		goto mempool_put;

	return 0;

mempool_put:
	rte_mempool_put(mempool, priv);
	set_sec_session_private_data(sess, NULL);
	return ret;
}

static void
otx2_eth_sec_free_anti_replay(struct otx2_ipsec_fp_in_sa *sa)
{
	if (sa != NULL) {
		if (sa->replay_win_sz && sa->replay)
			rte_free(sa->replay);
	}
}

static int
otx2_eth_sec_session_destroy(void *device __rte_unused,
			     struct rte_security_session *sess)
{
	struct otx2_sec_session_ipsec_ip *sess_ip;
	struct otx2_sec_session *priv;
	struct rte_mempool *sess_mp;
	int ret;

	priv = get_sec_session_private_data(sess);
	if (priv == NULL)
		return -EINVAL;

	sess_ip = &priv->ipsec.ip;

	/* Release the anti replay window */
	if (priv->ipsec.dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		otx2_eth_sec_free_anti_replay(sess_ip->in_sa);

	/* Release CPT LF used for this session */
	if (sess_ip->qp != NULL) {
		ret = otx2_sec_idev_tx_cpt_qp_put(sess_ip->qp);
		if (ret)
			return ret;
	}

	sess_mp = rte_mempool_from_obj(priv);

	set_sec_session_private_data(sess, NULL);
	rte_mempool_put(sess_mp, priv);

	return 0;
}

static unsigned int
otx2_eth_sec_session_get_size(void *device __rte_unused)
{
	return sizeof(struct otx2_sec_session);
}

static int
otx2_eth_sec_set_pkt_mdata(void *device __rte_unused,
			   struct rte_security_session *session,
			   struct rte_mbuf *m, void *params __rte_unused)
{
	/* Set security session as the pkt metadata */
	m->udata64 = (uint64_t)session;

	return 0;
}

static int
otx2_eth_sec_get_userdata(void *device __rte_unused, uint64_t md,
			  void **userdata)
{
	/* Retrieve userdata */
	*userdata = (void *)md;

	return 0;
}

static const struct rte_security_capability *
otx2_eth_sec_capabilities_get(void *device __rte_unused)
{
	return otx2_eth_sec_capabilities;
}

static struct rte_security_ops otx2_eth_sec_ops = {
	.session_create		= otx2_eth_sec_session_create,
	.session_destroy	= otx2_eth_sec_session_destroy,
	.session_get_size	= otx2_eth_sec_session_get_size,
	.set_pkt_metadata	= otx2_eth_sec_set_pkt_mdata,
	.get_userdata		= otx2_eth_sec_get_userdata,
	.capabilities_get	= otx2_eth_sec_capabilities_get
};
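
/*
 * Application-side sketch (illustrative, not part of this file): an
 * inline-protocol session is created against this ethdev's security
 * context, roughly
 *
 *	void *ctx = rte_eth_dev_get_sec_ctx(port_id);
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = ipsec_xform,
 *		.crypto_xform = &aead_xform,
 *		.userdata = app_sa_handle,
 *	};
 *	sess = rte_security_session_create(ctx, &conf, sess_mp);
 *
 * ipsec_xform, aead_xform, app_sa_handle and sess_mp are application
 * names assumed for the example; the call dispatches to
 * otx2_eth_sec_session_create() above.
 */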

int
otx2_eth_sec_ctx_create(struct rte_eth_dev *eth_dev)
{
	struct rte_security_ctx *ctx;
	int ret;

	ctx = rte_malloc("otx2_eth_sec_ctx",
			 sizeof(struct rte_security_ctx), 0);
	if (ctx == NULL)
		return -ENOMEM;

	ret = otx2_sec_idev_cfg_init(eth_dev->data->port_id);
	if (ret) {
		rte_free(ctx);
		return ret;
	}

	/* Populate ctx */
	ctx->device = eth_dev;
	ctx->ops = &otx2_eth_sec_ops;
	ctx->sess_cnt = 0;

	eth_dev->security_ctx = ctx;

	return 0;
}

void
otx2_eth_sec_ctx_destroy(struct rte_eth_dev *eth_dev)
{
	rte_free(eth_dev->security_ctx);
}

static int
eth_sec_ipsec_cfg(struct rte_eth_dev *eth_dev, uint8_t tt)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint16_t port = eth_dev->data->port_id;
	struct nix_inline_ipsec_lf_cfg *req;
	struct otx2_mbox *mbox = dev->mbox;
	struct eth_sec_tag_const tag_const;
	char name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
	mz = rte_memzone_lookup(name);
	if (mz == NULL)
		return -EINVAL;

	req = otx2_mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
	req->enable = 1;
	req->sa_base_addr = mz->iova;

	req->ipsec_cfg0.tt = tt;

	tag_const.u32 = 0;
	tag_const.event_type = RTE_EVENT_TYPE_ETHDEV;
	tag_const.port = port;
	req->ipsec_cfg0.tag_const = tag_const.u32;

	req->ipsec_cfg0.sa_pow2_size =
			rte_log2_u32(sizeof(struct otx2_ipsec_fp_in_sa));
	req->ipsec_cfg0.lenm1_max = NIX_MAX_FRS - 1;

	req->ipsec_cfg1.sa_idx_w = rte_log2_u32(dev->ipsec_in_max_spi);
	req->ipsec_cfg1.sa_idx_max = dev->ipsec_in_max_spi - 1;

	return otx2_mbox_process(mbox);
}
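
/*
 * Sizing sketch (numbers assumed for illustration): with
 * dev->ipsec_in_max_spi = 128, the mailbox programs sa_idx_w =
 * rte_log2_u32(128) = 7 and sa_idx_max = 127, and the SA for SPI n is
 * presumably fetched at sa_base_addr + (n << sa_pow2_size). This is why
 * otx2_eth_sec_init() below insists the SA entry size is a power of two.
 */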

int
otx2_eth_sec_update_tag_type(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_aq_enq_rsp *rsp;
	struct nix_aq_enq_req *aq;
	int ret;

	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = 0; /* Read RQ:0 context */
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_READ;

	ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (ret < 0) {
		otx2_err("Could not read RQ context");
		return ret;
	}

	/* Update tag type */
	ret = eth_sec_ipsec_cfg(eth_dev, rsp->rq.sso_tt);
	if (ret < 0)
		otx2_err("Could not update sec eth tag type");

	return ret;
}

int
otx2_eth_sec_init(struct rte_eth_dev *eth_dev)
{
	const size_t sa_width = sizeof(struct otx2_ipsec_fp_in_sa);
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint16_t port = eth_dev->data->port_id;
	char name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int mz_sz, ret;
	uint16_t nb_sa;

	RTE_BUILD_BUG_ON(sa_width < 32 || sa_width > 512 ||
			 !RTE_IS_POWER_OF_2(sa_width));

	if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
		return 0;

	nb_sa = dev->ipsec_in_max_spi;
	mz_sz = nb_sa * sa_width;
	in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
	mz = rte_memzone_reserve_aligned(name, mz_sz, rte_socket_id(),
					 RTE_MEMZONE_IOVA_CONTIG, OTX2_ALIGN);

	if (mz == NULL) {
		otx2_err("Could not allocate inbound SA DB");
		return -ENOMEM;
	}

	memset(mz->addr, 0, mz_sz);

	ret = eth_sec_ipsec_cfg(eth_dev, SSO_TT_ORDERED);
	if (ret < 0) {
		otx2_err("Could not configure inline IPsec");
		goto sec_fini;
	}

	return 0;

sec_fini:
	otx2_err("Could not configure device for security");
	otx2_eth_sec_fini(eth_dev);
	return ret;
}

void
otx2_eth_sec_fini(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint16_t port = eth_dev->data->port_id;
	char name[RTE_MEMZONE_NAMESIZE];

	if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
		return;

	lookup_mem_sa_tbl_clear(eth_dev);

	in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
	rte_memzone_free(rte_memzone_lookup(name));
}