1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
5 #include <rte_cryptodev.h>
6 #include <rte_eventdev.h>
7 #include <rte_security.h>
8 #include <rte_security_driver.h>
9 #include <rte_pmd_cnxk.h>
11 #include <cn10k_ethdev.h>
12 #include <cnxk_security.h>
/*
 * Crypto capabilities advertised for cn10k inline IPsec: AES-GCM (AEAD),
 * AES-CBC and 3DES-CBC ciphers, and SHA1-HMAC authentication.
 * NOTE(review): this view of the file omits the interior lines of each
 * entry (struct nesting, key/IV/digest size ranges) — do not infer
 * supported sizes from this fragment.
 */
15 static struct rte_cryptodev_capabilities cn10k_eth_sec_crypto_caps[] = {
/* AES-GCM AEAD */
17 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
19 .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
21 .algo = RTE_CRYPTO_AEAD_AES_GCM,
/* AES-CBC cipher */
47 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
49 .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
51 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
/* 3DES-CBC cipher */
67 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
69 .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
71 .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
/* SHA1-HMAC auth */
87 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
89 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
91 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
/* Mandatory terminator for the rte_cryptodev capability list */
106 RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
/*
 * Security capabilities table returned by .capabilities_get: inline
 * protocol ESP in all four tunnel/transport x ingress/egress
 * combinations, each referencing the shared crypto capability table.
 * The list is terminated by an ACTION_TYPE_NONE entry.
 */
109 static const struct rte_security_capability cn10k_eth_sec_capabilities[] = {
110 { /* IPsec Inline Protocol ESP Tunnel Ingress */
111 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
112 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
114 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
115 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
116 .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
119 .crypto_capabilities = cn10k_eth_sec_crypto_caps,
120 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
122 { /* IPsec Inline Protocol ESP Tunnel Egress */
123 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
124 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
126 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
127 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
128 .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
131 .crypto_capabilities = cn10k_eth_sec_crypto_caps,
132 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
134 { /* IPsec Inline Protocol ESP Transport Egress */
135 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
136 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
138 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
139 .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
140 .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
143 .crypto_capabilities = cn10k_eth_sec_crypto_caps,
144 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
146 { /* IPsec Inline Protocol ESP Transport Ingress */
147 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
148 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
150 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
151 .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
152 .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
155 .crypto_capabilities = cn10k_eth_sec_crypto_caps,
156 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
/* Terminator entry */
159 .action = RTE_SECURITY_ACTION_TYPE_NONE
/*
 * Free an mbuf chain directly to its NPA aura via roc_npa_aura_op_free(),
 * bypassing the mempool per-lcore cache — used from event/error callback
 * context where the calling thread may not own a valid lcore-local cache.
 * NOTE(review): the loop body that saves mbuf->next and advances 'mbuf'
 * is not visible in this fragment; only the per-buffer free and the
 * do/while terminator are shown.
 */
164 cnxk_pktmbuf_free_no_cache(struct rte_mbuf *mbuf)
166 struct rte_mbuf *next;
172 roc_npa_aura_op_free(mbuf->pool->pool_id, 1, (rte_iova_t)mbuf);
174 } while (mbuf != NULL);
/*
 * SSO work callback for inline IPsec events.
 *
 * Demultiplexes on the event type encoded in bits [31:28] of gw[0]:
 *  - RTE_EVENT_TYPE_ETHDEV: bad-L4 inbound packet from the inline
 *    device; the mbuf is recovered from gw[1] and freed uncached.
 *  - RTE_EVENT_TYPE_CPU with subtype CNXK_ETHDEV_SEC_OUTB_EV_SUB
 *    (bits [27:20]): outbound inline processing error; the CPT result
 *    word is located past the packet data, decoded, and reported to
 *    the application via an RTE_ETH_EVENT_IPSEC callback.
 *  - soft_exp_event bit 0 set: SA soft time expiry; the ethdev port id
 *    is carried in soft_exp_event >> 8 and 'args' is the outbound SA.
 *
 * NOTE(review): this view omits several lines (break statements,
 * warn_cnt increments, and the local declarations of nixtx, sa_base
 * and port) — consult the full source before editing.
 */
178 cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args, uint32_t soft_exp_event)
180 struct rte_eth_event_ipsec_desc desc;
181 struct cn10k_sec_sess_priv sess_priv;
182 struct cn10k_outb_priv_data *priv;
183 struct roc_ot_ipsec_outb_sa *sa;
184 struct cpt_cn10k_res_s *res;
185 struct rte_eth_dev *eth_dev;
186 struct cnxk_eth_dev *dev;
/* Static: warning rate-limit counter shared across all invocations */
187 static uint64_t warn_cnt;
188 uint16_t dlen_adj, rlen;
189 struct rte_mbuf *mbuf;
196 switch ((gw[0] >> 28) & 0xF) {
197 case RTE_EVENT_TYPE_ETHDEV:
198 /* Event from inbound inline dev due to IPSEC packet bad L4 */
199 mbuf = (struct rte_mbuf *)(gw[1] - sizeof(struct rte_mbuf));
200 plt_nix_dbg("Received mbuf %p from inline dev inbound", mbuf);
201 cnxk_pktmbuf_free_no_cache(mbuf);
203 case RTE_EVENT_TYPE_CPU:
204 /* Check for subtype */
205 if (((gw[0] >> 20) & 0xFF) == CNXK_ETHDEV_SEC_OUTB_EV_SUB) {
206 /* Event from outbound inline error */
207 mbuf = (struct rte_mbuf *)gw[1];
/* Soft time-expiry notification path: args carries the expiring SA */
212 if (soft_exp_event & 0x1) {
213 sa = (struct roc_ot_ipsec_outb_sa *)args;
214 priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(sa);
215 desc.metadata = (uint64_t)priv->userdata;
216 desc.subtype = RTE_ETH_EVENT_IPSEC_SA_TIME_EXPIRY;
217 eth_dev = &rte_eth_devices[soft_exp_event >> 8];
218 rte_eth_dev_callback_process(eth_dev,
219 RTE_ETH_EVENT_IPSEC, &desc);
221 plt_err("Unknown event gw[0] = 0x%016lx, gw[1] = 0x%016lx",
227 /* Get ethdev port from tag */
229 eth_dev = &rte_eth_devices[port];
230 dev = cnxk_eth_pmd_priv(eth_dev);
232 sess_priv.u64 = *rte_security_dynfield(mbuf);
233 /* Calculate dlen adj */
234 dlen_adj = mbuf->pkt_len - mbuf->l2_len;
235 rlen = (dlen_adj + sess_priv.roundup_len) +
236 (sess_priv.roundup_byte - 1);
237 rlen &= ~(uint64_t)(sess_priv.roundup_byte - 1);
238 rlen += sess_priv.partial_len;
239 dlen_adj = rlen - dlen_adj;
241 /* Find the res area residing on next cacheline after end of data */
242 nixtx = rte_pktmbuf_mtod(mbuf, uintptr_t) + mbuf->pkt_len + dlen_adj;
/* Align up to the 128B cacheline holding the CPT result word */
244 nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);
245 res = (struct cpt_cn10k_res_s *)nixtx;
247 plt_nix_dbg("Outbound error, mbuf %p, sa_index %u, compcode %x uc %x",
248 mbuf, sess_priv.sa_idx, res->compcode, res->uc_compcode);
250 sess_priv.u64 = *rte_security_dynfield(mbuf);
252 sa_base = dev->outb.sa_base;
253 sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
254 priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(sa);
256 memset(&desc, 0, sizeof(desc));
/* Map CPT microcode completion code to an rte_eth IPsec event subtype */
258 switch (res->uc_compcode) {
259 case ROC_IE_OT_UCC_ERR_SA_OVERFLOW:
260 desc.subtype = RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW;
262 case ROC_IE_OT_UCC_ERR_PKT_IP:
/* Rate-limit: warn once per 10000 occurrences */
264 if (warn_cnt % 10000 == 0)
265 plt_warn("Outbound error, bad ip pkt, mbuf %p,"
266 " sa_index %u (total warnings %" PRIu64 ")",
267 mbuf, sess_priv.sa_idx, warn_cnt);
268 desc.subtype = RTE_ETH_EVENT_IPSEC_UNKNOWN;
272 if (warn_cnt % 10000 == 0)
273 plt_warn("Outbound error, mbuf %p, sa_index %u,"
274 " compcode %x uc %x,"
275 " (total warnings %" PRIu64 ")",
276 mbuf, sess_priv.sa_idx, res->compcode,
277 res->uc_compcode, warn_cnt);
278 desc.subtype = RTE_ETH_EVENT_IPSEC_UNKNOWN;
/* Deliver the event to the application, then reclaim the mbuf */
282 desc.metadata = (uint64_t)priv->userdata;
283 rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_IPSEC, &desc);
284 cnxk_pktmbuf_free_no_cache(mbuf);
/*
 * Debug-only helper: override the outbound SA's IV with a fixed value
 * parsed from a comma-separated byte string (sourced from the
 * CN10K_ETH_SEC_IV_OVR environment variable by the caller).
 * For AES-GCM/CTR/CCM encryption (or AES-GMAC auth) the IV is parsed
 * into two 4-byte debug words (iv_dbg1/iv_dbg2), each byte-swapped from
 * big-endian; otherwise 16 bytes are parsed into iv.iv_dbg and swapped
 * as two 64-bit words. Finally w2.s.iv_src is set to "IV from SA" so
 * hardware uses this fixed IV.
 * NOTE(review): iv_str comes from strdup(); the matching free() is not
 * visible in this fragment — confirm it exists to avoid a leak. Also
 * note 'len' is declared as char on the pointer declaration line.
 */
288 outb_dbg_iv_update(struct roc_ot_ipsec_outb_sa *outb_sa, const char *__iv_str)
290 uint8_t *iv_dbg = outb_sa->iv.iv_dbg;
291 char *iv_str = strdup(__iv_str);
292 char *iv_b = NULL, len = 16;
299 if (outb_sa->w2.s.enc_type == ROC_IE_OT_SA_ENC_AES_GCM ||
300 outb_sa->w2.s.enc_type == ROC_IE_OT_SA_ENC_AES_CTR ||
301 outb_sa->w2.s.enc_type == ROC_IE_OT_SA_ENC_AES_CCM ||
302 outb_sa->w2.s.auth_type == ROC_IE_OT_SA_AUTH_AES_GMAC) {
303 memset(outb_sa->iv.s.iv_dbg1, 0, sizeof(outb_sa->iv.s.iv_dbg1));
304 memset(outb_sa->iv.s.iv_dbg2, 0, sizeof(outb_sa->iv.s.iv_dbg2));
/* First 4 comma-separated bytes -> iv_dbg1 */
306 iv_dbg = outb_sa->iv.s.iv_dbg1;
307 for (i = 0; i < 4; i++) {
308 iv_b = strtok_r(i ? NULL : iv_str, ",", &save);
311 iv_dbg[i] = strtoul(iv_b, NULL, 0);
313 *(uint32_t *)iv_dbg = rte_be_to_cpu_32(*(uint32_t *)iv_dbg);
/* Next 4 bytes -> iv_dbg2 (continues the same tokenizer state) */
315 iv_dbg = outb_sa->iv.s.iv_dbg2;
316 for (i = 0; i < 4; i++) {
317 iv_b = strtok_r(NULL, ",", &save);
320 iv_dbg[i] = strtoul(iv_b, NULL, 0);
322 *(uint32_t *)iv_dbg = rte_be_to_cpu_32(*(uint32_t *)iv_dbg);
/* Non-AEAD path: parse a full 16-byte IV */
325 iv_dbg = outb_sa->iv.iv_dbg;
326 memset(iv_dbg, 0, sizeof(outb_sa->iv.iv_dbg));
328 for (i = 0; i < len; i++) {
329 iv_b = strtok_r(i ? NULL : iv_str, ",", &save);
332 iv_dbg[i] = strtoul(iv_b, NULL, 0);
334 *(uint64_t *)iv_dbg = rte_be_to_cpu_64(*(uint64_t *)iv_dbg);
335 *(uint64_t *)&iv_dbg[8] =
336 rte_be_to_cpu_64(*(uint64_t *)&iv_dbg[8]);
339 /* Update source of IV */
340 outb_sa->w2.s.iv_src = ROC_IE_OT_SA_IV_SRC_FROM_SA;
/*
 * Fill miscellaneous outbound SA parameters: when the xform configures
 * a soft byte or packet lifetime limit, point the SA's error-control at
 * the per-port soft-expiry ring (so hardware reports soft expiry
 * events) and derive a 9-bit ctx_id from the SA context pointer.
 * NOTE(review): the trailing parameter of the signature (sa_idx) and
 * the early-return error paths are only partially visible here.
 */
345 cn10k_eth_sec_outb_sa_misc_fill(struct roc_nix *roc_nix,
346 struct roc_ot_ipsec_outb_sa *sa, void *sa_cptr,
347 struct rte_security_ipsec_xform *ipsec_xfrm,
350 uint64_t *ring_base, ring_addr;
/* Bitwise OR is intentional: any nonzero soft limit enables ring mode */
352 if (ipsec_xfrm->life.bytes_soft_limit |
353 ipsec_xfrm->life.packets_soft_limit) {
354 ring_base = roc_nix_inl_outb_ring_base_get(roc_nix);
355 if (ring_base == NULL)
/* Select the ring slot this SA index maps to */
358 ring_addr = ring_base[sa_idx >>
359 ROC_NIX_SOFT_EXP_ERR_RING_MAX_ENTRY_LOG2];
360 sa->ctx.err_ctl.s.mode = ROC_IE_OT_ERR_CTL_MODE_RING;
361 sa->ctx.err_ctl.s.address = ring_addr >> 3;
362 sa->w0.s.ctx_id = ((uintptr_t)sa_cptr >> 51) & 0x1ff;
/*
 * rte_security session_create() op for cn10k inline IPsec.
 *
 * Validates the conf (inline-protocol IPsec only), registers the
 * security dynfield (and the IP-reassembly dynfields when requested),
 * rejects duplicate SPIs, allocates the session object from 'mempool',
 * then programs either the inbound or the outbound SA. Programming goes
 * through a scratch "dptr" SA copy which is flushed into the in-place
 * hardware SA via roc_nix_inl_ctx_write(). The per-direction list lock
 * (plus the inline-dev lock for inbound on an inline device) is held
 * around SA setup. On success, fast-path data is packed into
 * cn10k_sec_sess_priv and stored as the session private data.
 *
 * NOTE(review): this view omits many lines (returns, gotos, the error
 * label, and several local declarations such as rc/sa/spi_mask/sa_idx/
 * iv_str) — consult the full source before modifying control flow.
 */
369 cn10k_eth_sec_session_create(void *device,
370 struct rte_security_session_conf *conf,
371 struct rte_security_session *sess,
372 struct rte_mempool *mempool)
374 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
375 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
376 struct rte_security_ipsec_xform *ipsec;
377 struct cn10k_sec_sess_priv sess_priv;
378 struct rte_crypto_sym_xform *crypto;
379 struct cnxk_eth_sec_sess *eth_sec;
380 struct roc_nix *nix = &dev->nix;
381 bool inbound, inl_dev;
382 rte_spinlock_t *lock;
383 char tbuf[128] = {0};
/* Only inline-protocol IPsec sessions are supported by this driver */
386 if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
389 if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
392 if (rte_security_dynfield_register() < 0)
/* Lazily register reassembly dynfield/dynflag on first use */
395 if (conf->ipsec.options.ip_reassembly_en &&
396 dev->reass_dynfield_off < 0) {
397 if (rte_eth_ip_reassembly_dynfield_register(&dev->reass_dynfield_off,
398 &dev->reass_dynflag_bit) < 0)
402 ipsec = &conf->ipsec;
403 crypto = conf->crypto_xform;
404 inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
405 inl_dev = !!dev->inb.inl_dev;
407 /* Search if a session already exits */
408 if (cnxk_eth_sec_sess_get_by_spi(dev, ipsec->spi, inbound)) {
409 plt_err("%s SA with SPI %u already in use",
410 inbound ? "Inbound" : "Outbound", ipsec->spi);
/* NOTE(review): 'ð_sec' below is a text-extraction mojibake of
 * '&eth_sec' (the HTML entity for '&eth' rendered as U+00F0); the
 * actual code token is '&eth_sec'.
 */
414 if (rte_mempool_get(mempool, (void **)ð_sec)) {
415 plt_err("Could not allocate security session private data");
419 memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
/* Serialize SA programming against other create/destroy on this dir */
422 lock = inbound ? &dev->inb.lock : &dev->outb.lock;
423 rte_spinlock_lock(lock);
425 /* Acquire lock on inline dev for inbound */
426 if (inbound && inl_dev)
427 roc_nix_inl_dev_lock();
/* ---- Inbound SA setup (enclosing if (inbound) not visible) ---- */
430 struct roc_ot_ipsec_inb_sa *inb_sa, *inb_sa_dptr;
431 struct cn10k_inb_priv_data *inb_priv;
/* Private data must fit in the SA's software-reserved area */
435 PLT_STATIC_ASSERT(sizeof(struct cn10k_inb_priv_data) <
436 ROC_NIX_INL_OT_IPSEC_INB_SW_RSVD);
438 spi_mask = roc_nix_inl_inb_spi_range(nix, inl_dev, NULL, NULL);
440 /* Get Inbound SA from NIX_RX_IPSEC_SA_BASE */
441 sa = roc_nix_inl_inb_sa_get(nix, inl_dev, ipsec->spi);
442 if (!sa && dev->inb.inl_dev) {
443 snprintf(tbuf, sizeof(tbuf),
444 "Failed to create ingress sa, inline dev "
445 "not found or spi not in range");
449 snprintf(tbuf, sizeof(tbuf),
450 "Failed to create ingress sa");
455 inb_sa = (struct roc_ot_ipsec_inb_sa *)sa;
457 /* Check if SA is already in use */
458 if (inb_sa->w2.s.valid) {
459 snprintf(tbuf, sizeof(tbuf),
460 "Inbound SA with SPI %u already in use",
/* Build the new SA image in the scratch dptr, not in place */
466 inb_sa_dptr = (struct roc_ot_ipsec_inb_sa *)dev->inb.sa_dptr;
467 memset(inb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_inb_sa));
469 /* Fill inbound sa params */
470 rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto,
473 snprintf(tbuf, sizeof(tbuf),
474 "Failed to init inbound sa, rc=%d", rc);
478 inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);
479 /* Back pointer to get eth_sec */
480 inb_priv->eth_sec = eth_sec;
481 /* Save userdata in inb private area */
482 inb_priv->userdata = conf->userdata;
484 /* Save SA index/SPI in cookie for now */
485 inb_sa_dptr->w1.s.cookie =
486 rte_cpu_to_be_32(ipsec->spi & spi_mask);
488 /* Prepare session priv */
489 sess_priv.inb_sa = 1;
490 sess_priv.sa_idx = ipsec->spi & spi_mask;
492 /* Pointer from eth_sec -> inb_sa */
493 eth_sec->sa = inb_sa;
494 eth_sec->sess = sess;
495 eth_sec->sa_idx = ipsec->spi & spi_mask;
496 eth_sec->spi = ipsec->spi;
497 eth_sec->inl_dev = !!dev->inb.inl_dev;
500 TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
502 /* Sync session in context cache */
503 rc = roc_nix_inl_ctx_write(&dev->nix, inb_sa_dptr, eth_sec->sa,
505 sizeof(struct roc_ot_ipsec_inb_sa));
/* Propagate reassembly dynfield offsets into the SA private area */
509 if (conf->ipsec.options.ip_reassembly_en) {
510 inb_priv->reass_dynfield_off = dev->reass_dynfield_off;
511 inb_priv->reass_dynflag_bit = dev->reass_dynflag_bit;
/* ---- Outbound SA setup (else branch; header not visible) ---- */
515 struct roc_ot_ipsec_outb_sa *outb_sa, *outb_sa_dptr;
516 struct cn10k_outb_priv_data *outb_priv;
517 struct cnxk_ipsec_outb_rlens *rlens;
518 uint64_t sa_base = dev->outb.sa_base;
522 PLT_STATIC_ASSERT(sizeof(struct cn10k_outb_priv_data) <
523 ROC_NIX_INL_OT_IPSEC_OUTB_SW_RSVD);
525 /* Alloc an sa index */
526 rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx, ipsec->spi);
530 outb_sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sa_idx);
531 outb_priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(outb_sa);
532 rlens = &outb_priv->rlens;
534 outb_sa_dptr = (struct roc_ot_ipsec_outb_sa *)dev->outb.sa_dptr;
535 memset(outb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_outb_sa));
537 /* Fill outbound sa params */
538 rc = cnxk_ot_ipsec_outb_sa_fill(outb_sa_dptr, ipsec, crypto);
540 snprintf(tbuf, sizeof(tbuf),
541 "Failed to init outbound sa, rc=%d", rc);
/* Return the SA index on failure; OR keeps the original rc nonzero */
542 rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
/* Debug hook: env var may override the IV (see outb_dbg_iv_update) */
546 iv_str = getenv("CN10K_ETH_SEC_IV_OVR");
548 outb_dbg_iv_update(outb_sa_dptr, iv_str);
550 /* Fill outbound sa misc params */
551 rc = cn10k_eth_sec_outb_sa_misc_fill(&dev->nix, outb_sa_dptr,
552 outb_sa, ipsec, sa_idx);
554 snprintf(tbuf, sizeof(tbuf),
555 "Failed to init outb sa misc params, rc=%d",
557 rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
/* Save userdata and back-pointers in the SA software area */
562 outb_priv->userdata = conf->userdata;
563 outb_priv->sa_idx = sa_idx;
564 outb_priv->eth_sec = eth_sec;
/* Compute roundup/partial lengths used by the fast path */
567 cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);
569 /* Prepare session priv */
570 sess_priv.sa_idx = outb_priv->sa_idx;
571 sess_priv.roundup_byte = rlens->roundup_byte;
572 sess_priv.roundup_len = rlens->roundup_len;
573 sess_priv.partial_len = rlens->partial_len;
574 sess_priv.mode = outb_sa_dptr->w2.s.ipsec_mode;
575 sess_priv.outer_ip_ver = outb_sa_dptr->w2.s.outer_ip_ver;
576 /* Propagate inner checksum enable from SA to fast path */
577 sess_priv.chksum = (!ipsec->options.ip_csum_enable << 1 |
578 !ipsec->options.l4_csum_enable);
580 /* Pointer from eth_sec -> outb_sa */
581 eth_sec->sa = outb_sa;
582 eth_sec->sess = sess;
583 eth_sec->sa_idx = sa_idx;
584 eth_sec->spi = ipsec->spi;
586 TAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);
588 /* Sync session in context cache */
589 rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa_dptr, eth_sec->sa,
591 sizeof(struct roc_ot_ipsec_outb_sa));
/* Success path: drop locks before reporting */
595 if (inbound && inl_dev)
596 roc_nix_inl_dev_unlock();
597 rte_spinlock_unlock(lock);
599 plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u inl_dev=%u",
600 inbound ? "inbound" : "outbound", eth_sec->spi,
601 eth_sec->sa_idx, eth_sec->inl_dev);
603 * Update fast path info in priv area.
605 set_sec_session_private_data(sess, (void *)sess_priv.u64);
/* Error path (label not visible): drop locks, free session object */
609 if (inbound && inl_dev)
610 roc_nix_inl_dev_unlock();
611 rte_spinlock_unlock(lock);
613 rte_mempool_put(mempool, eth_sec);
/*
 * rte_security session_destroy() op: look up the session, and under the
 * per-direction lock re-initialize the hardware SA context (inbound or
 * outbound) through a scratch dptr flushed via roc_nix_inl_ctx_write().
 * Releases the outbound SA index where applicable, unlinks the session
 * from the per-direction list, clears the session private data and
 * returns the eth_sec object to its originating mempool.
 * NOTE(review): the NULL check on eth_sec, the if (eth_sec->inb) branch
 * header and the sa_dptr declaration are not visible in this view.
 */
620 cn10k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
622 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
623 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
624 struct cnxk_eth_sec_sess *eth_sec;
625 struct rte_mempool *mp;
626 rte_spinlock_t *lock;
629 eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
633 lock = eth_sec->inb ? &dev->inb.lock : &dev->outb.lock;
634 rte_spinlock_lock(lock);
/* Inline-dev-backed sessions also need the inline dev lock */
636 if (eth_sec->inl_dev)
637 roc_nix_inl_dev_lock();
/* Inbound teardown: reset SA to a pristine inbound image */
641 sa_dptr = dev->inb.sa_dptr;
642 roc_ot_ipsec_inb_sa_init(sa_dptr, true);
644 roc_nix_inl_ctx_write(&dev->nix, sa_dptr, eth_sec->sa,
646 sizeof(struct roc_ot_ipsec_inb_sa));
647 TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
/* Outbound teardown: reset SA and release the SA index */
651 sa_dptr = dev->outb.sa_dptr;
652 roc_ot_ipsec_outb_sa_init(sa_dptr);
654 roc_nix_inl_ctx_write(&dev->nix, sa_dptr, eth_sec->sa,
656 sizeof(struct roc_ot_ipsec_outb_sa));
657 /* Release Outbound SA index */
658 cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
659 TAILQ_REMOVE(&dev->outb.list, eth_sec, entry);
662 if (eth_sec->inl_dev)
663 roc_nix_inl_dev_unlock();
665 rte_spinlock_unlock(lock);
667 plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u, inl_dev=%u",
668 eth_sec->inb ? "inbound" : "outbound", eth_sec->spi,
669 eth_sec->sa_idx, eth_sec->inl_dev);
671 /* Put eth_sec object back to pool */
672 mp = rte_mempool_from_obj(eth_sec);
673 set_sec_session_private_data(sess, NULL);
674 rte_mempool_put(mp, eth_sec);
678 static const struct rte_security_capability *
679 cn10k_eth_sec_capabilities_get(void *device __rte_unused)
681 return cn10k_eth_sec_capabilities;
/*
 * rte_security session_update() op: re-program an existing session's SA
 * from a new conf. Only inline-protocol IPsec is accepted. The SPI is
 * updated on the session, then the inbound or outbound SA image is
 * rebuilt in the scratch dptr and flushed to hardware with
 * roc_nix_inl_ctx_write().
 * NOTE(review): local declarations (rc, inbound), error returns and any
 * locking around the SA rewrite are not visible in this fragment —
 * confirm against the full source.
 */
685 cn10k_eth_sec_session_update(void *device, struct rte_security_session *sess,
686 struct rte_security_session_conf *conf)
688 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
689 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
690 struct roc_ot_ipsec_inb_sa *inb_sa_dptr;
691 struct rte_security_ipsec_xform *ipsec;
692 struct rte_crypto_sym_xform *crypto;
693 struct cnxk_eth_sec_sess *eth_sec;
697 if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
698 conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
701 ipsec = &conf->ipsec;
702 crypto = conf->crypto_xform;
703 inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
705 eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
709 eth_sec->spi = conf->ipsec.spi;
/* Inbound: rebuild SA image in scratch dptr, then flush to HW */
712 inb_sa_dptr = (struct roc_ot_ipsec_inb_sa *)dev->inb.sa_dptr;
713 memset(inb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_inb_sa));
715 rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto,
720 rc = roc_nix_inl_ctx_write(&dev->nix, inb_sa_dptr, eth_sec->sa,
722 sizeof(struct roc_ot_ipsec_inb_sa));
/* Outbound: same pattern with the outbound scratch dptr */
726 struct roc_ot_ipsec_outb_sa *outb_sa_dptr;
728 outb_sa_dptr = (struct roc_ot_ipsec_outb_sa *)dev->outb.sa_dptr;
729 memset(outb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_outb_sa));
731 rc = cnxk_ot_ipsec_outb_sa_fill(outb_sa_dptr, ipsec, crypto);
734 rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa_dptr, eth_sec->sa,
736 sizeof(struct roc_ot_ipsec_outb_sa));
/*
 * PMD-specific public API: read back a session's hardware SA context.
 * Flushes the SA from hardware (ROC_NIX_INL_SA_OP_FLUSH) so the
 * in-memory copy is current, then copies 'len' bytes into 'data'.
 * NOTE(review): 'len' is caller-provided and no bound check against the
 * SA size is visible in this fragment; the lookup-failure and sync-
 * failure returns are also not visible.
 */
745 rte_pmd_cnxk_hw_sa_read(void *device, struct rte_security_session *sess,
746 void *data, uint32_t len)
748 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
749 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
750 struct cnxk_eth_sec_sess *eth_sec;
753 eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
757 rc = roc_nix_inl_sa_sync(&dev->nix, eth_sec->sa, eth_sec->inb,
758 ROC_NIX_INL_SA_OP_FLUSH);
762 memcpy(data, eth_sec->sa, len);
/*
 * PMD-specific public API: overwrite a session's hardware SA context
 * with a caller-supplied image ('data') via roc_nix_inl_ctx_write().
 * NOTE(review): the trailing arguments of the ctx_write call and the
 * return handling are not visible in this fragment.
 */
768 rte_pmd_cnxk_hw_sa_write(void *device, struct rte_security_session *sess,
769 void *data, uint32_t len)
771 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
772 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
773 struct cnxk_eth_sec_sess *eth_sec;
776 eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
779 rc = roc_nix_inl_ctx_write(&dev->nix, data, eth_sec->sa, eth_sec->inb,
788 cn10k_eth_sec_ops_override(void)
790 static int init_once;
796 /* Update platform specific ops */
797 cnxk_eth_sec_ops.session_create = cn10k_eth_sec_session_create;
798 cnxk_eth_sec_ops.session_destroy = cn10k_eth_sec_session_destroy;
799 cnxk_eth_sec_ops.capabilities_get = cn10k_eth_sec_capabilities_get;
800 cnxk_eth_sec_ops.session_update = cn10k_eth_sec_session_update;