1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
5 #include <rte_cryptodev.h>
6 #include <rte_eventdev.h>
7 #include <rte_security.h>
8 #include <rte_security_driver.h>
10 #include <cn10k_ethdev.h>
11 #include <cnxk_security.h>
/* Symmetric crypto capabilities advertised for cn10k inline IPsec
 * security sessions: AES-GCM (AEAD), AES-CBC (cipher) and SHA1-HMAC
 * (auth). Key/digest/IV size fields are on lines not visible in this
 * view. Terminated by the cryptodev end-of-capabilities sentinel.
 */
13 static struct rte_cryptodev_capabilities cn10k_eth_sec_crypto_caps[] = {
15 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
17 .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
19 .algo = RTE_CRYPTO_AEAD_AES_GCM,
45 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
47 .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
49 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
65 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
67 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
69 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
84 RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
/* Security capabilities reported via rte_security for cn10k:
 * four inline-protocol IPsec ESP entries covering the full
 * {tunnel, transport} x {ingress, egress} matrix, each referencing
 * the shared crypto capability table above. Egress entries request
 * metadata in the mbuf (RTE_SECURITY_TX_OLOAD_NEED_MDATA). The list
 * is terminated by an entry with action RTE_SECURITY_ACTION_TYPE_NONE.
 */
87 static const struct rte_security_capability cn10k_eth_sec_capabilities[] = {
88 { /* IPsec Inline Protocol ESP Tunnel Ingress */
89 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
90 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
92 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
93 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
94 .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
97 .crypto_capabilities = cn10k_eth_sec_crypto_caps,
98 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
100 { /* IPsec Inline Protocol ESP Tunnel Egress */
101 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
102 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
104 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
105 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
106 .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
109 .crypto_capabilities = cn10k_eth_sec_crypto_caps,
110 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
112 { /* IPsec Inline Protocol ESP Transport Egress */
113 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
114 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
116 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
117 .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
118 .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
121 .crypto_capabilities = cn10k_eth_sec_crypto_caps,
122 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
124 { /* IPsec Inline Protocol ESP Transport Ingress */
125 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
126 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
128 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
129 .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
130 .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
133 .crypto_capabilities = cn10k_eth_sec_crypto_caps,
134 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
/* End-of-list marker: no action type. */
137 .action = RTE_SECURITY_ACTION_TYPE_NONE
/* Free an mbuf (chain) straight back to its NPA aura via
 * roc_npa_aura_op_free(), bypassing the mempool per-lcore cache.
 * The do/while walks segments until mbuf is NULL; the loop head and
 * the 'next' advance are on lines not visible here — presumably
 * next = mbuf->next before each free (TODO confirm against full file).
 */
142 cnxk_pktmbuf_free_no_cache(struct rte_mbuf *mbuf)
144 struct rte_mbuf *next;
150 roc_npa_aura_op_free(mbuf->pool->pool_id, 1, (rte_iova_t)mbuf);
152 } while (mbuf != NULL);
/* SSO work callback for inline IPsec error events.
 *
 * gw[0]/gw[1] form the event "get work" pair. The event type lives in
 * bits [31:28] of gw[0]:
 *  - RTE_EVENT_TYPE_ETHDEV: inbound inline-dev event (bad L4 packet);
 *    the mbuf is recovered from gw[1] and freed uncached.
 *  - RTE_EVENT_TYPE_CPU with sub-event CNXK_ETHDEV_SEC_OUTB_EV_SUB
 *    (bits [27:20]): outbound inline error; gw[1] is the mbuf.
 * For outbound errors the CPT result area after the packet data is
 * located, a rate-limited warning is logged, an RTE_ETH_EVENT_IPSEC
 * callback is raised toward the application, and the mbuf is freed.
 * Not all lines of this function are visible in this view (several
 * break/return statements and declarations are elided).
 */
156 cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args)
158 struct rte_eth_event_ipsec_desc desc;
159 struct cn10k_sec_sess_priv sess_priv;
160 struct cn10k_outb_priv_data *priv;
161 struct roc_ot_ipsec_outb_sa *sa;
162 struct cpt_cn10k_res_s *res;
163 struct rte_eth_dev *eth_dev;
164 struct cnxk_eth_dev *dev;
165 static uint64_t warn_cnt;
166 uint16_t dlen_adj, rlen;
167 struct rte_mbuf *mbuf;
/* Dispatch on event type encoded in gw[0] bits [31:28]. */
174 switch ((gw[0] >> 28) & 0xF) {
175 case RTE_EVENT_TYPE_ETHDEV:
176 /* Event from inbound inline dev due to IPSEC packet bad L4 */
177 mbuf = (struct rte_mbuf *)(gw[1] - sizeof(struct rte_mbuf));
178 plt_nix_dbg("Received mbuf %p from inline dev inbound", mbuf);
179 cnxk_pktmbuf_free_no_cache(mbuf);
181 case RTE_EVENT_TYPE_CPU:
182 /* Check for subtype */
183 if (((gw[0] >> 20) & 0xFF) == CNXK_ETHDEV_SEC_OUTB_EV_SUB) {
184 /* Event from outbound inline error */
185 mbuf = (struct rte_mbuf *)gw[1];
190 plt_err("Unknown event gw[0] = 0x%016lx, gw[1] = 0x%016lx",
195 /* Get ethdev port from tag */
197 eth_dev = &rte_eth_devices[port];
198 dev = cnxk_eth_pmd_priv(eth_dev);
/* Session private data was stashed in the security dynfield at Tx. */
200 sess_priv.u64 = *rte_security_dynfield(mbuf);
201 /* Calculate dlen adj */
202 dlen_adj = mbuf->pkt_len - mbuf->l2_len;
/* Round the payload up per the SA's roundup_byte granularity and add
 * the fixed partial_len overhead, mirroring the Tx-path length math.
 */
203 rlen = (dlen_adj + sess_priv.roundup_len) +
204 (sess_priv.roundup_byte - 1);
205 rlen &= ~(uint64_t)(sess_priv.roundup_byte - 1);
206 rlen += sess_priv.partial_len;
207 dlen_adj = rlen - dlen_adj;
209 /* Find the res area residing on next cacheline after end of data */
210 nixtx = rte_pktmbuf_mtod(mbuf, uintptr_t) + mbuf->pkt_len + dlen_adj;
/* Align down to a 128B (BIT_ULL(7)) boundary to reach the CPT result. */
212 nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);
213 res = (struct cpt_cn10k_res_s *)nixtx;
215 plt_nix_dbg("Outbound error, mbuf %p, sa_index %u, compcode %x uc %x",
216 mbuf, sess_priv.sa_idx, res->compcode, res->uc_compcode);
218 sess_priv.u64 = *rte_security_dynfield(mbuf);
/* Look up the outbound SA and its software-reserved private area. */
220 sa_base = dev->outb.sa_base;
221 sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
222 priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(sa);
224 memset(&desc, 0, sizeof(desc));
/* Map the CPT microcode completion code to an rte_eth IPsec event
 * subtype; warnings are rate-limited to one per 10000 occurrences.
 */
226 switch (res->uc_compcode) {
227 case ROC_IE_OT_UCC_ERR_SA_OVERFLOW:
228 desc.subtype = RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW;
230 case ROC_IE_OT_UCC_ERR_PKT_IP:
232 if (warn_cnt % 10000 == 0)
233 plt_warn("Outbound error, bad ip pkt, mbuf %p,"
234 " sa_index %u (total warnings %" PRIu64 ")",
235 mbuf, sess_priv.sa_idx, warn_cnt);
236 desc.subtype = RTE_ETH_EVENT_IPSEC_UNKNOWN;
240 if (warn_cnt % 10000 == 0)
241 plt_warn("Outbound error, mbuf %p, sa_index %u,"
242 " compcode %x uc %x,"
243 " (total warnings %" PRIu64 ")",
244 mbuf, sess_priv.sa_idx, res->compcode,
245 res->uc_compcode, warn_cnt);
246 desc.subtype = RTE_ETH_EVENT_IPSEC_UNKNOWN;
/* Hand the application the userdata it registered for this SA, then
 * notify via the ethdev event callback and release the failed mbuf.
 */
250 desc.metadata = (uint64_t)priv->userdata;
251 rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_IPSEC, &desc);
252 cnxk_pktmbuf_free_no_cache(mbuf);
/* Debug helper: override the outbound SA's IV with values parsed from a
 * comma-separated string (sourced from the CN10K_ETH_SEC_IV_OVR env var
 * by the caller). For AES-GCM/CTR/CCM encryption or AES-GMAC auth the
 * IV is written as two 4-byte debug words (iv_dbg1/iv_dbg2), each
 * byte-swapped to big-endian; otherwise 16 raw bytes are parsed into
 * iv.iv_dbg and swapped as two 64-bit words. Finally the SA is switched
 * to take its IV from the SA itself (ROC_IE_OT_SA_IV_SRC_FROM_SA).
 * NOTE(review): iv_str comes from strdup(); the free() and the NULL
 * checks on strdup/strtok_r results are on lines not visible in this
 * view — confirm against the full file.
 */
256 outb_dbg_iv_update(struct roc_ot_ipsec_outb_sa *outb_sa, const char *__iv_str)
258 uint8_t *iv_dbg = outb_sa->iv.iv_dbg;
259 char *iv_str = strdup(__iv_str);
260 char *iv_b = NULL, len = 16;
/* AEAD/CTR-style algorithms use the split iv_dbg1/iv_dbg2 layout. */
267 if (outb_sa->w2.s.enc_type == ROC_IE_OT_SA_ENC_AES_GCM ||
268 outb_sa->w2.s.enc_type == ROC_IE_OT_SA_ENC_AES_CTR ||
269 outb_sa->w2.s.enc_type == ROC_IE_OT_SA_ENC_AES_CCM ||
270 outb_sa->w2.s.auth_type == ROC_IE_OT_SA_AUTH_AES_GMAC) {
271 memset(outb_sa->iv.s.iv_dbg1, 0, sizeof(outb_sa->iv.s.iv_dbg1));
272 memset(outb_sa->iv.s.iv_dbg2, 0, sizeof(outb_sa->iv.s.iv_dbg2));
/* First 4 comma-separated byte values -> iv_dbg1 (big-endian word). */
274 iv_dbg = outb_sa->iv.s.iv_dbg1;
275 for (i = 0; i < 4; i++) {
276 iv_b = strtok_r(i ? NULL : iv_str, ",", &save);
279 iv_dbg[i] = strtoul(iv_b, NULL, 0);
281 *(uint32_t *)iv_dbg = rte_be_to_cpu_32(*(uint32_t *)iv_dbg);
/* Next 4 byte values -> iv_dbg2 (big-endian word). */
283 iv_dbg = outb_sa->iv.s.iv_dbg2;
284 for (i = 0; i < 4; i++) {
285 iv_b = strtok_r(NULL, ",", &save);
288 iv_dbg[i] = strtoul(iv_b, NULL, 0);
290 *(uint32_t *)iv_dbg = rte_be_to_cpu_32(*(uint32_t *)iv_dbg);
/* Non-AEAD path: parse len (16) raw bytes into the flat IV area. */
293 iv_dbg = outb_sa->iv.iv_dbg;
294 memset(iv_dbg, 0, sizeof(outb_sa->iv.iv_dbg));
296 for (i = 0; i < len; i++) {
297 iv_b = strtok_r(i ? NULL : iv_str, ",", &save);
300 iv_dbg[i] = strtoul(iv_b, NULL, 0);
302 *(uint64_t *)iv_dbg = rte_be_to_cpu_64(*(uint64_t *)iv_dbg);
303 *(uint64_t *)&iv_dbg[8] =
304 rte_be_to_cpu_64(*(uint64_t *)&iv_dbg[8]);
307 /* Update source of IV */
308 outb_sa->w2.s.iv_src = ROC_IE_OT_SA_IV_SRC_FROM_SA;
/* rte_security session_create op for cn10k inline IPsec.
 *
 * Validates that the request is INLINE_PROTOCOL + IPSEC, registers the
 * security dynfield, rejects duplicate SPIs, allocates an eth_sec
 * object from the given mempool, then fills and syncs either an
 * inbound or outbound SA through a scratch "dptr" copy written to the
 * hardware context cache via roc_nix_inl_ctx_write(). On success the
 * fast-path session data (sess_priv.u64) is stored in the session
 * private area. Several lines (error gotos, returns, closing braces)
 * are not visible in this view.
 */
313 cn10k_eth_sec_session_create(void *device,
314 struct rte_security_session_conf *conf,
315 struct rte_security_session *sess,
316 struct rte_mempool *mempool)
318 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
319 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
320 struct rte_security_ipsec_xform *ipsec;
321 struct cn10k_sec_sess_priv sess_priv;
322 struct rte_crypto_sym_xform *crypto;
323 struct cnxk_eth_sec_sess *eth_sec;
324 struct roc_nix *nix = &dev->nix;
325 bool inbound, inl_dev;
326 rte_spinlock_t *lock;
327 char tbuf[128] = {0};
/* Only inline-protocol IPsec sessions are supported by this driver. */
330 if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
333 if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
/* Fast path needs the security dynfield in the mbuf. */
336 if (rte_security_dynfield_register() < 0)
339 ipsec = &conf->ipsec;
340 crypto = conf->crypto_xform;
341 inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
342 inl_dev = !!dev->inb.inl_dev;
344 /* Search if a session already exits */
345 if (cnxk_eth_sec_sess_get_by_spi(dev, ipsec->spi, inbound)) {
346 plt_err("%s SA with SPI %u already in use",
347 inbound ? "Inbound" : "Outbound", ipsec->spi);
/* NOTE(review): "ð_sec" below looks like mojibake for "&eth_sec" —
 * verify encoding against the original file.
 */
351 if (rte_mempool_get(mempool, (void **)ð_sec)) {
352 plt_err("Could not allocate security session private data");
356 memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
/* Serialize SA setup per direction. */
359 lock = inbound ? &dev->inb.lock : &dev->outb.lock;
360 rte_spinlock_lock(lock);
362 /* Acquire lock on inline dev for inbound */
363 if (inbound && inl_dev)
364 roc_nix_inl_dev_lock();
367 struct roc_ot_ipsec_inb_sa *inb_sa, *inb_sa_dptr;
368 struct cn10k_inb_priv_data *inb_priv;
/* Private data must fit in the SA's software-reserved area. */
372 PLT_STATIC_ASSERT(sizeof(struct cn10k_inb_priv_data) <
373 ROC_NIX_INL_OT_IPSEC_INB_SW_RSVD);
375 spi_mask = roc_nix_inl_inb_spi_range(nix, inl_dev, NULL, NULL);
377 /* Get Inbound SA from NIX_RX_IPSEC_SA_BASE */
378 sa = roc_nix_inl_inb_sa_get(nix, inl_dev, ipsec->spi);
379 if (!sa && dev->inb.inl_dev) {
380 snprintf(tbuf, sizeof(tbuf),
381 "Failed to create ingress sa, inline dev "
382 "not found or spi not in range");
386 snprintf(tbuf, sizeof(tbuf),
387 "Failed to create ingress sa");
392 inb_sa = (struct roc_ot_ipsec_inb_sa *)sa;
394 /* Check if SA is already in use */
395 if (inb_sa->w2.s.valid) {
396 snprintf(tbuf, sizeof(tbuf),
397 "Inbound SA with SPI %u already in use",
/* Build the SA in the scratch dptr buffer, then sync it to hardware. */
403 inb_sa_dptr = (struct roc_ot_ipsec_inb_sa *)dev->inb.sa_dptr;
404 memset(inb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_inb_sa));
406 /* Fill inbound sa params */
407 rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto,
410 snprintf(tbuf, sizeof(tbuf),
411 "Failed to init inbound sa, rc=%d", rc);
415 inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);
416 /* Back pointer to get eth_sec */
417 inb_priv->eth_sec = eth_sec;
418 /* Save userdata in inb private area */
419 inb_priv->userdata = conf->userdata;
421 /* Save SA index/SPI in cookie for now */
422 inb_sa_dptr->w1.s.cookie =
423 rte_cpu_to_be_32(ipsec->spi & spi_mask);
425 /* Prepare session priv */
426 sess_priv.inb_sa = 1;
427 sess_priv.sa_idx = ipsec->spi & spi_mask;
429 /* Pointer from eth_sec -> inb_sa */
430 eth_sec->sa = inb_sa;
431 eth_sec->sess = sess;
432 eth_sec->sa_idx = ipsec->spi & spi_mask;
433 eth_sec->spi = ipsec->spi;
434 eth_sec->inl_dev = !!dev->inb.inl_dev;
437 TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
439 /* Sync session in context cache */
440 rc = roc_nix_inl_ctx_write(&dev->nix, inb_sa_dptr, eth_sec->sa,
442 sizeof(struct roc_ot_ipsec_inb_sa));
/* Outbound (egress) SA setup path. */
446 struct roc_ot_ipsec_outb_sa *outb_sa, *outb_sa_dptr;
447 struct cn10k_outb_priv_data *outb_priv;
448 struct cnxk_ipsec_outb_rlens *rlens;
449 uint64_t sa_base = dev->outb.sa_base;
453 PLT_STATIC_ASSERT(sizeof(struct cn10k_outb_priv_data) <
454 ROC_NIX_INL_OT_IPSEC_OUTB_SW_RSVD);
456 /* Alloc an sa index */
457 rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx);
461 outb_sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sa_idx);
462 outb_priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(outb_sa);
463 rlens = &outb_priv->rlens;
465 outb_sa_dptr = (struct roc_ot_ipsec_outb_sa *)dev->outb.sa_dptr;
466 memset(outb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_outb_sa));
468 /* Fill outbound sa params */
469 rc = cnxk_ot_ipsec_outb_sa_fill(outb_sa_dptr, ipsec, crypto);
471 snprintf(tbuf, sizeof(tbuf),
472 "Failed to init outbound sa, rc=%d", rc);
/* On fill failure, also release the SA index before bailing out. */
473 rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
/* Optional debug-only IV override from the environment. */
477 iv_str = getenv("CN10K_ETH_SEC_IV_OVR");
479 outb_dbg_iv_update(outb_sa_dptr, iv_str);
482 outb_priv->userdata = conf->userdata;
483 outb_priv->sa_idx = sa_idx;
484 outb_priv->eth_sec = eth_sec;
/* Precompute roundup lengths used by Tx fast path and error callback. */
487 cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);
489 /* Prepare session priv */
490 sess_priv.sa_idx = outb_priv->sa_idx;
491 sess_priv.roundup_byte = rlens->roundup_byte;
492 sess_priv.roundup_len = rlens->roundup_len;
493 sess_priv.partial_len = rlens->partial_len;
494 sess_priv.mode = outb_sa_dptr->w2.s.ipsec_mode;
495 sess_priv.outer_ip_ver = outb_sa_dptr->w2.s.outer_ip_ver;
497 /* Pointer from eth_sec -> outb_sa */
498 eth_sec->sa = outb_sa;
499 eth_sec->sess = sess;
500 eth_sec->sa_idx = sa_idx;
501 eth_sec->spi = ipsec->spi;
503 TAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);
505 /* Sync session in context cache */
506 rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa_dptr, eth_sec->sa,
508 sizeof(struct roc_ot_ipsec_outb_sa));
/* Common success epilogue: drop locks, log, publish fast-path data. */
512 if (inbound && inl_dev)
513 roc_nix_inl_dev_unlock();
514 rte_spinlock_unlock(lock);
516 plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u inl_dev=%u",
517 inbound ? "inbound" : "outbound", eth_sec->spi,
518 eth_sec->sa_idx, eth_sec->inl_dev);
520 * Update fast path info in priv area.
522 set_sec_session_private_data(sess, (void *)sess_priv.u64);
/* Error path: unlock and return the eth_sec object to its mempool. */
526 if (inbound && inl_dev)
527 roc_nix_inl_dev_unlock();
528 rte_spinlock_unlock(lock);
530 rte_mempool_put(mempool, eth_sec);
/* rte_security session_destroy op for cn10k inline IPsec.
 *
 * Looks up the driver session object, takes the per-direction lock
 * (and the inline-dev lock if the SA lives there), resets the hardware
 * SA by writing a freshly initialized SA image through the context
 * cache, unlinks the session from the per-direction list, releases the
 * outbound SA index when applicable, and finally returns the eth_sec
 * object to the mempool it was allocated from. Some lines (the inb/outb
 * branch heads, early return, closing braces) are not visible here.
 */
537 cn10k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
539 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
540 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
541 struct cnxk_eth_sec_sess *eth_sec;
542 struct rte_mempool *mp;
543 rte_spinlock_t *lock;
546 eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
550 lock = eth_sec->inb ? &dev->inb.lock : &dev->outb.lock;
551 rte_spinlock_lock(lock);
553 if (eth_sec->inl_dev)
554 roc_nix_inl_dev_lock();
/* Inbound teardown: write a default-initialized inbound SA image. */
558 sa_dptr = dev->inb.sa_dptr;
559 roc_ot_ipsec_inb_sa_init(sa_dptr, true);
561 roc_nix_inl_ctx_write(&dev->nix, sa_dptr, eth_sec->sa,
563 sizeof(struct roc_ot_ipsec_inb_sa));
564 TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
/* Outbound teardown: reset the SA and free its index. */
568 sa_dptr = dev->outb.sa_dptr;
569 roc_ot_ipsec_outb_sa_init(sa_dptr);
571 roc_nix_inl_ctx_write(&dev->nix, sa_dptr, eth_sec->sa,
573 sizeof(struct roc_ot_ipsec_outb_sa));
574 /* Release Outbound SA index */
575 cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
576 TAILQ_REMOVE(&dev->outb.list, eth_sec, entry);
579 if (eth_sec->inl_dev)
580 roc_nix_inl_dev_unlock();
582 rte_spinlock_unlock(lock);
584 plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u, inl_dev=%u",
585 eth_sec->inb ? "inbound" : "outbound", eth_sec->spi,
586 eth_sec->sa_idx, eth_sec->inl_dev);
588 /* Put eth_sec object back to pool */
589 mp = rte_mempool_from_obj(eth_sec);
590 set_sec_session_private_data(sess, NULL);
591 rte_mempool_put(mp, eth_sec);
/* rte_security capabilities_get op: report the static cn10k inline
 * IPsec capability table defined above. Device argument is unused.
 */
595 static const struct rte_security_capability *
596 cn10k_eth_sec_capabilities_get(void *device __rte_unused)
598 return cn10k_eth_sec_capabilities;
602 cn10k_eth_sec_ops_override(void)
604 static int init_once;
610 /* Update platform specific ops */
611 cnxk_eth_sec_ops.session_create = cn10k_eth_sec_session_create;
612 cnxk_eth_sec_ops.session_destroy = cn10k_eth_sec_session_destroy;
613 cnxk_eth_sec_ops.capabilities_get = cn10k_eth_sec_capabilities_get;