net/cnxk: add SoC specific PTP timestamp read
[dpdk.git] / drivers/net/cnxk/cn10k_ethdev_sec.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_cryptodev.h>
#include <rte_eventdev.h>
#include <rte_security.h>
#include <rte_security_driver.h>

#include <cn10k_ethdev.h>
#include <cnxk_security.h>

static struct rte_cryptodev_capabilities cn10k_eth_sec_crypto_caps[] = {
        {       /* AES GCM */
                .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
                {.sym = {
                        .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
                        {.aead = {
                                .algo = RTE_CRYPTO_AEAD_AES_GCM,
                                .block_size = 16,
                                .key_size = {
                                        .min = 16,
                                        .max = 32,
                                        .increment = 8
                                },
                                .digest_size = {
                                        .min = 16,
                                        .max = 16,
                                        .increment = 0
                                },
                                .aad_size = {
                                        .min = 8,
                                        .max = 12,
                                        .increment = 4
                                },
                                .iv_size = {
                                        .min = 12,
                                        .max = 12,
                                        .increment = 0
                                }
                        }, }
                }, }
        },
        {       /* AES CBC */
                .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
                {.sym = {
                        .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
                        {.cipher = {
                                .algo = RTE_CRYPTO_CIPHER_AES_CBC,
                                .block_size = 16,
                                .key_size = {
                                        .min = 16,
                                        .max = 32,
                                        .increment = 8
                                },
                                .iv_size = {
                                        .min = 16,
                                        .max = 16,
                                        .increment = 0
                                }
                        }, }
                }, }
        },
        {       /* SHA1 HMAC */
                .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
                {.sym = {
                        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
                        {.auth = {
                                .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
                                .block_size = 64,
                                .key_size = {
                                        .min = 20,
                                        .max = 64,
                                        .increment = 1
                                },
                                .digest_size = {
                                        .min = 12,
                                        .max = 12,
                                        .increment = 0
                                },
                        }, }
                }, }
        },
        RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

static const struct rte_security_capability cn10k_eth_sec_capabilities[] = {
        {       /* IPsec Inline Protocol ESP Tunnel Ingress */
                .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
                .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
                .ipsec = {
                        .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
                        .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
                        .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
                        .options = { 0 }
                },
                .crypto_capabilities = cn10k_eth_sec_crypto_caps,
                .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
        },
        {       /* IPsec Inline Protocol ESP Tunnel Egress */
                .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
                .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
                .ipsec = {
                        .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
                        .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
                        .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
                        .options = { 0 }
                },
                .crypto_capabilities = cn10k_eth_sec_crypto_caps,
                .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
        },
        {       /* IPsec Inline Protocol ESP Transport Egress */
                .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
                .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
                .ipsec = {
                        .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
                        .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
                        .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
                        .options = { 0 }
                },
                .crypto_capabilities = cn10k_eth_sec_crypto_caps,
                .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
        },
        {       /* IPsec Inline Protocol ESP Transport Ingress */
                .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
                .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
                .ipsec = {
                        .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
                        .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
                        .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
                        .options = { 0 }
                },
                .crypto_capabilities = cn10k_eth_sec_crypto_caps,
                .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
        },
        {
                .action = RTE_SECURITY_ACTION_TYPE_NONE
        }
};

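/* Free an mbuf chain by returning each segment directly to its NPA aura,
 * bypassing the mempool cache.
 */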
static inline void
cnxk_pktmbuf_free_no_cache(struct rte_mbuf *mbuf)
{
        struct rte_mbuf *next;

        if (!mbuf)
                return;
        do {
                next = mbuf->next;
                roc_npa_aura_op_free(mbuf->pool->pool_id, 1, (rte_iova_t)mbuf);
                mbuf = next;
        } while (mbuf != NULL);
}

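/* SSO work callback for inline IPsec error events: frees inbound packets
 * flagged bad by the inline device and, for outbound errors reported by CPT,
 * raises an RTE_ETH_EVENT_IPSEC notification before freeing the packet.
 */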
void
cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args)
{
        struct rte_eth_event_ipsec_desc desc;
        struct cn10k_sec_sess_priv sess_priv;
        struct cn10k_outb_priv_data *priv;
        struct roc_ot_ipsec_outb_sa *sa;
        struct cpt_cn10k_res_s *res;
        struct rte_eth_dev *eth_dev;
        struct cnxk_eth_dev *dev;
        static uint64_t warn_cnt;
        uint16_t dlen_adj, rlen;
        struct rte_mbuf *mbuf;
        uintptr_t sa_base;
        uintptr_t nixtx;
        uint8_t port;

        RTE_SET_USED(args);

        switch ((gw[0] >> 28) & 0xF) {
        case RTE_EVENT_TYPE_ETHDEV:
                /* Event from inbound inline dev due to IPSEC packet bad L4 */
                mbuf = (struct rte_mbuf *)(gw[1] - sizeof(struct rte_mbuf));
                plt_nix_dbg("Received mbuf %p from inline dev inbound", mbuf);
                cnxk_pktmbuf_free_no_cache(mbuf);
                return;
        case RTE_EVENT_TYPE_CPU:
                /* Check for subtype */
                if (((gw[0] >> 20) & 0xFF) == CNXK_ETHDEV_SEC_OUTB_EV_SUB) {
                        /* Event from outbound inline error */
                        mbuf = (struct rte_mbuf *)gw[1];
                        break;
                }
                /* Fall through */
        default:
                plt_err("Unknown event gw[0] = 0x%016lx, gw[1] = 0x%016lx",
                        gw[0], gw[1]);
                return;
        }

        /* Get ethdev port from tag */
        port = gw[0] & 0xFF;
        eth_dev = &rte_eth_devices[port];
        dev = cnxk_eth_pmd_priv(eth_dev);

        sess_priv.u64 = *rte_security_dynfield(mbuf);
        /* Calculate dlen adj */
        dlen_adj = mbuf->pkt_len - mbuf->l2_len;
        rlen = (dlen_adj + sess_priv.roundup_len) +
               (sess_priv.roundup_byte - 1);
        rlen &= ~(uint64_t)(sess_priv.roundup_byte - 1);
        rlen += sess_priv.partial_len;
        dlen_adj = rlen - dlen_adj;

        /* Find the res area residing on next cacheline after end of data */
        nixtx = rte_pktmbuf_mtod(mbuf, uintptr_t) + mbuf->pkt_len + dlen_adj;
        nixtx += BIT_ULL(7);
        nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);
        res = (struct cpt_cn10k_res_s *)nixtx;

        plt_nix_dbg("Outbound error, mbuf %p, sa_index %u, compcode %x uc %x",
                    mbuf, sess_priv.sa_idx, res->compcode, res->uc_compcode);

        sess_priv.u64 = *rte_security_dynfield(mbuf);

        sa_base = dev->outb.sa_base;
        sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
        priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(sa);

        memset(&desc, 0, sizeof(desc));

        switch (res->uc_compcode) {
        case ROC_IE_OT_UCC_ERR_SA_OVERFLOW:
                desc.subtype = RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW;
                break;
        case ROC_IE_OT_UCC_ERR_PKT_IP:
                warn_cnt++;
                if (warn_cnt % 10000 == 0)
                        plt_warn("Outbound error, bad ip pkt, mbuf %p,"
                                 " sa_index %u (total warnings %" PRIu64 ")",
                                 mbuf, sess_priv.sa_idx, warn_cnt);
                desc.subtype = RTE_ETH_EVENT_IPSEC_UNKNOWN;
                break;
        default:
                warn_cnt++;
                if (warn_cnt % 10000 == 0)
                        plt_warn("Outbound error, mbuf %p, sa_index %u,"
                                 " compcode %x uc %x,"
                                 " (total warnings %" PRIu64 ")",
                                 mbuf, sess_priv.sa_idx, res->compcode,
                                 res->uc_compcode, warn_cnt);
                desc.subtype = RTE_ETH_EVENT_IPSEC_UNKNOWN;
                break;
        }

        desc.metadata = (uint64_t)priv->userdata;
        rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_IPSEC, &desc);
        cnxk_pktmbuf_free_no_cache(mbuf);
}

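/* Debug helper: parse a comma-separated byte string and program it as a
 * fixed IV in the outbound SA (IV sourced from SA instead of per packet).
 */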
static void
outb_dbg_iv_update(struct roc_ot_ipsec_outb_sa *outb_sa, const char *__iv_str)
{
        uint8_t *iv_dbg = outb_sa->iv.iv_dbg;
        char *iv_str = strdup(__iv_str);
        char *iv_b = NULL, len = 16;
        char *save;
        int i;

        if (!iv_str)
                return;

        if (outb_sa->w2.s.enc_type == ROC_IE_OT_SA_ENC_AES_GCM ||
            outb_sa->w2.s.enc_type == ROC_IE_OT_SA_ENC_AES_CTR ||
            outb_sa->w2.s.enc_type == ROC_IE_OT_SA_ENC_AES_CCM ||
            outb_sa->w2.s.auth_type == ROC_IE_OT_SA_AUTH_AES_GMAC) {
                memset(outb_sa->iv.s.iv_dbg1, 0, sizeof(outb_sa->iv.s.iv_dbg1));
                memset(outb_sa->iv.s.iv_dbg2, 0, sizeof(outb_sa->iv.s.iv_dbg2));

                iv_dbg = outb_sa->iv.s.iv_dbg1;
                for (i = 0; i < 4; i++) {
                        iv_b = strtok_r(i ? NULL : iv_str, ",", &save);
                        if (!iv_b)
                                break;
                        iv_dbg[i] = strtoul(iv_b, NULL, 0);
                }
                *(uint32_t *)iv_dbg = rte_be_to_cpu_32(*(uint32_t *)iv_dbg);

                iv_dbg = outb_sa->iv.s.iv_dbg2;
                for (i = 0; i < 4; i++) {
                        iv_b = strtok_r(NULL, ",", &save);
                        if (!iv_b)
                                break;
                        iv_dbg[i] = strtoul(iv_b, NULL, 0);
                }
                *(uint32_t *)iv_dbg = rte_be_to_cpu_32(*(uint32_t *)iv_dbg);

        } else {
                iv_dbg = outb_sa->iv.iv_dbg;
                memset(iv_dbg, 0, sizeof(outb_sa->iv.iv_dbg));

                for (i = 0; i < len; i++) {
                        iv_b = strtok_r(i ? NULL : iv_str, ",", &save);
                        if (!iv_b)
                                break;
                        iv_dbg[i] = strtoul(iv_b, NULL, 0);
                }
                *(uint64_t *)iv_dbg = rte_be_to_cpu_64(*(uint64_t *)iv_dbg);
                *(uint64_t *)&iv_dbg[8] =
                        rte_be_to_cpu_64(*(uint64_t *)&iv_dbg[8]);
        }

        /* Update source of IV */
        outb_sa->w2.s.iv_src = ROC_IE_OT_SA_IV_SRC_FROM_SA;
        free(iv_str);
}

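/* rte_security session create op: set up an inline inbound or outbound
 * IPsec SA from the session conf and sync it to the hardware context cache.
 */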
static int
cn10k_eth_sec_session_create(void *device,
                             struct rte_security_session_conf *conf,
                             struct rte_security_session *sess,
                             struct rte_mempool *mempool)
{
        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct rte_security_ipsec_xform *ipsec;
        struct cn10k_sec_sess_priv sess_priv;
        struct rte_crypto_sym_xform *crypto;
        struct cnxk_eth_sec_sess *eth_sec;
        struct roc_nix *nix = &dev->nix;
        bool inbound, inl_dev;
        rte_spinlock_t *lock;
        char tbuf[128] = {0};
        int rc = 0;

        if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
                return -ENOTSUP;

        if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
                return -ENOTSUP;

        if (rte_security_dynfield_register() < 0)
                return -ENOTSUP;

        ipsec = &conf->ipsec;
        crypto = conf->crypto_xform;
        inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
        inl_dev = !!dev->inb.inl_dev;

        /* Search if a session already exists */
        if (cnxk_eth_sec_sess_get_by_spi(dev, ipsec->spi, inbound)) {
                plt_err("%s SA with SPI %u already in use",
                        inbound ? "Inbound" : "Outbound", ipsec->spi);
                return -EEXIST;
        }

        if (rte_mempool_get(mempool, (void **)&eth_sec)) {
                plt_err("Could not allocate security session private data");
                return -ENOMEM;
        }

        memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
        sess_priv.u64 = 0;

        lock = inbound ? &dev->inb.lock : &dev->outb.lock;
        rte_spinlock_lock(lock);

        /* Acquire lock on inline dev for inbound */
        if (inbound && inl_dev)
                roc_nix_inl_dev_lock();

        if (inbound) {
                struct roc_ot_ipsec_inb_sa *inb_sa, *inb_sa_dptr;
                struct cn10k_inb_priv_data *inb_priv;
                uint32_t spi_mask;
                uintptr_t sa;

                PLT_STATIC_ASSERT(sizeof(struct cn10k_inb_priv_data) <
                                  ROC_NIX_INL_OT_IPSEC_INB_SW_RSVD);

                spi_mask = roc_nix_inl_inb_spi_range(nix, inl_dev, NULL, NULL);

                /* Get Inbound SA from NIX_RX_IPSEC_SA_BASE */
                sa = roc_nix_inl_inb_sa_get(nix, inl_dev, ipsec->spi);
                if (!sa && dev->inb.inl_dev) {
                        snprintf(tbuf, sizeof(tbuf),
                                 "Failed to create ingress sa, inline dev "
                                 "not found or spi not in range");
                        rc = -ENOTSUP;
                        goto mempool_put;
                } else if (!sa) {
                        snprintf(tbuf, sizeof(tbuf),
                                 "Failed to create ingress sa");
                        rc = -EFAULT;
                        goto mempool_put;
                }

                inb_sa = (struct roc_ot_ipsec_inb_sa *)sa;

                /* Check if SA is already in use */
                if (inb_sa->w2.s.valid) {
                        snprintf(tbuf, sizeof(tbuf),
                                 "Inbound SA with SPI %u already in use",
                                 ipsec->spi);
                        rc = -EBUSY;
                        goto mempool_put;
                }

                inb_sa_dptr = (struct roc_ot_ipsec_inb_sa *)dev->inb.sa_dptr;
                memset(inb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_inb_sa));

                /* Fill inbound sa params */
                rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto,
                                               true);
                if (rc) {
                        snprintf(tbuf, sizeof(tbuf),
                                 "Failed to init inbound sa, rc=%d", rc);
                        goto mempool_put;
                }

                inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);
                /* Back pointer to get eth_sec */
                inb_priv->eth_sec = eth_sec;
                /* Save userdata in inb private area */
                inb_priv->userdata = conf->userdata;

                /* Save SA index/SPI in cookie for now */
                inb_sa_dptr->w1.s.cookie =
                        rte_cpu_to_be_32(ipsec->spi & spi_mask);

                /* Prepare session priv */
                sess_priv.inb_sa = 1;
                sess_priv.sa_idx = ipsec->spi & spi_mask;

                /* Pointer from eth_sec -> inb_sa */
                eth_sec->sa = inb_sa;
                eth_sec->sess = sess;
                eth_sec->sa_idx = ipsec->spi & spi_mask;
                eth_sec->spi = ipsec->spi;
                eth_sec->inl_dev = !!dev->inb.inl_dev;
                eth_sec->inb = true;

                TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
                dev->inb.nb_sess++;
                /* Sync session in context cache */
                rc = roc_nix_inl_ctx_write(&dev->nix, inb_sa_dptr, eth_sec->sa,
                                           eth_sec->inb,
                                           sizeof(struct roc_ot_ipsec_inb_sa));
                if (rc)
                        goto mempool_put;
        } else {
                struct roc_ot_ipsec_outb_sa *outb_sa, *outb_sa_dptr;
                struct cn10k_outb_priv_data *outb_priv;
                struct cnxk_ipsec_outb_rlens *rlens;
                uint64_t sa_base = dev->outb.sa_base;
                const char *iv_str;
                uint32_t sa_idx;

                PLT_STATIC_ASSERT(sizeof(struct cn10k_outb_priv_data) <
                                  ROC_NIX_INL_OT_IPSEC_OUTB_SW_RSVD);

                /* Alloc an sa index */
                rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx);
                if (rc)
                        goto mempool_put;

                outb_sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sa_idx);
                outb_priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(outb_sa);
                rlens = &outb_priv->rlens;

                outb_sa_dptr = (struct roc_ot_ipsec_outb_sa *)dev->outb.sa_dptr;
                memset(outb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_outb_sa));

                /* Fill outbound sa params */
                rc = cnxk_ot_ipsec_outb_sa_fill(outb_sa_dptr, ipsec, crypto);
                if (rc) {
                        snprintf(tbuf, sizeof(tbuf),
                                 "Failed to init outbound sa, rc=%d", rc);
                        rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
                        goto mempool_put;
                }

                iv_str = getenv("CN10K_ETH_SEC_IV_OVR");
                if (iv_str)
                        outb_dbg_iv_update(outb_sa_dptr, iv_str);

                /* Save userdata */
                outb_priv->userdata = conf->userdata;
                outb_priv->sa_idx = sa_idx;
                outb_priv->eth_sec = eth_sec;

                /* Save rlen info */
                cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);

                /* Prepare session priv */
                sess_priv.sa_idx = outb_priv->sa_idx;
                sess_priv.roundup_byte = rlens->roundup_byte;
                sess_priv.roundup_len = rlens->roundup_len;
                sess_priv.partial_len = rlens->partial_len;
                sess_priv.mode = outb_sa_dptr->w2.s.ipsec_mode;
                sess_priv.outer_ip_ver = outb_sa_dptr->w2.s.outer_ip_ver;

                /* Pointer from eth_sec -> outb_sa */
                eth_sec->sa = outb_sa;
                eth_sec->sess = sess;
                eth_sec->sa_idx = sa_idx;
                eth_sec->spi = ipsec->spi;

                TAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);
                dev->outb.nb_sess++;
                /* Sync session in context cache */
                rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa_dptr, eth_sec->sa,
                                           eth_sec->inb,
                                           sizeof(struct roc_ot_ipsec_outb_sa));
                if (rc)
                        goto mempool_put;
        }
        if (inbound && inl_dev)
                roc_nix_inl_dev_unlock();
        rte_spinlock_unlock(lock);

        plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u inl_dev=%u",
                    inbound ? "inbound" : "outbound", eth_sec->spi,
                    eth_sec->sa_idx, eth_sec->inl_dev);
        /*
         * Update fast path info in priv area.
         */
        set_sec_session_private_data(sess, (void *)sess_priv.u64);

        return 0;
mempool_put:
        if (inbound && inl_dev)
                roc_nix_inl_dev_unlock();
        rte_spinlock_unlock(lock);

        rte_mempool_put(mempool, eth_sec);
        if (rc)
                plt_err("%s", tbuf);
        return rc;
}

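/* rte_security session destroy op: disable the SA in hardware and release
 * the session resources.
 */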
static int
cn10k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
{
        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct cnxk_eth_sec_sess *eth_sec;
        struct rte_mempool *mp;
        rte_spinlock_t *lock;
        void *sa_dptr;

        eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
        if (!eth_sec)
                return -ENOENT;

        lock = eth_sec->inb ? &dev->inb.lock : &dev->outb.lock;
        rte_spinlock_lock(lock);

        if (eth_sec->inl_dev)
                roc_nix_inl_dev_lock();

        if (eth_sec->inb) {
                /* Disable SA */
                sa_dptr = dev->inb.sa_dptr;
                roc_ot_ipsec_inb_sa_init(sa_dptr, true);

                roc_nix_inl_ctx_write(&dev->nix, sa_dptr, eth_sec->sa,
                                      eth_sec->inb,
                                      sizeof(struct roc_ot_ipsec_inb_sa));
                TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
                dev->inb.nb_sess--;
        } else {
                /* Disable SA */
                sa_dptr = dev->outb.sa_dptr;
                roc_ot_ipsec_outb_sa_init(sa_dptr);

                roc_nix_inl_ctx_write(&dev->nix, sa_dptr, eth_sec->sa,
                                      eth_sec->inb,
                                      sizeof(struct roc_ot_ipsec_outb_sa));
                /* Release Outbound SA index */
                cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
                TAILQ_REMOVE(&dev->outb.list, eth_sec, entry);
                dev->outb.nb_sess--;
        }
        if (eth_sec->inl_dev)
                roc_nix_inl_dev_unlock();

        rte_spinlock_unlock(lock);

        plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u, inl_dev=%u",
                    eth_sec->inb ? "inbound" : "outbound", eth_sec->spi,
                    eth_sec->sa_idx, eth_sec->inl_dev);

        /* Put eth_sec object back to pool */
        mp = rte_mempool_from_obj(eth_sec);
        set_sec_session_private_data(sess, NULL);
        rte_mempool_put(mp, eth_sec);
        return 0;
}

static const struct rte_security_capability *
cn10k_eth_sec_capabilities_get(void *device __rte_unused)
{
        return cn10k_eth_sec_capabilities;
}

void
cn10k_eth_sec_ops_override(void)
{
        static int init_once;

        if (init_once)
                return;
        init_once = 1;

        /* Update platform specific ops */
        cnxk_eth_sec_ops.session_create = cn10k_eth_sec_session_create;
        cnxk_eth_sec_ops.session_destroy = cn10k_eth_sec_session_destroy;
        cnxk_eth_sec_ops.capabilities_get = cn10k_eth_sec_capabilities_get;
}