1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (C) 2020 Marvell International Ltd.
5 #include <rte_cryptodev.h>
7 #include <rte_ethdev.h>
8 #include <rte_eventdev.h>
10 #include <rte_malloc.h>
11 #include <rte_memzone.h>
12 #include <rte_security.h>
13 #include <rte_security_driver.h>
16 #include "otx2_common.h"
17 #include "otx2_cryptodev_qp.h"
18 #include "otx2_ethdev.h"
19 #include "otx2_ethdev_sec.h"
20 #include "otx2_ipsec_fp.h"
21 #include "otx2_sec_idev.h"
23 #define ETH_SEC_MAX_PKT_LEN 1450
26 #define AES_GCM_IV_LEN 8
27 #define AES_GCM_MAC_LEN 16
28 #define AES_CBC_IV_LEN 16
29 #define SHA1_HMAC_LEN 12
31 #define AES_GCM_ROUNDUP_BYTE_LEN 4
32 #define AES_CBC_ROUNDUP_BYTE_LEN 16
/*
 * Layout of the 32-bit event tag constant programmed into the NIX
 * inline-IPsec LF config (see eth_sec_ipsec_cfg()). Accessed both as
 * bitfields and, presumably, as a raw u32 (tag_const.u32 is used later)
 * — the union wrapper and remaining fields (e.g. 'port') are not
 * visible here; TODO confirm full layout against the header/upstream.
 */
struct eth_sec_tag_const {
/* Bits 11:0 reserved */
uint32_t rsvd_11_0 : 12;
/* RTE event type (set to RTE_EVENT_TYPE_ETHDEV by eth_sec_ipsec_cfg) */
uint32_t event_type : 4;
/* Bits 31:24 reserved */
uint32_t rsvd_31_24 : 8;
/*
 * Crypto capabilities advertised for inline IPsec offload:
 * AES-GCM (AEAD), AES-CBC (cipher) and SHA1-HMAC (auth).
 * Key/digest/IV size limits live on elided lines; confirm against the
 * hardware datasheet before relying on them.
 */
static struct rte_cryptodev_capabilities otx2_eth_sec_crypto_caps[] = {
/* AES-GCM AEAD */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
.algo = RTE_CRYPTO_AEAD_AES_GCM,
/* AES-CBC cipher */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
.algo = RTE_CRYPTO_CIPHER_AES_CBC,
/* SHA1-HMAC auth */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
/* Mandatory list terminator */
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
/*
 * Security capabilities returned by otx2_eth_sec_capabilities_get():
 * inline-protocol ESP tunnel mode, one entry per direction, both
 * pointing at the crypto capability list above. Terminated by an
 * ACTION_TYPE_NONE sentinel entry.
 */
static const struct rte_security_capability otx2_eth_sec_capabilities[] = {
{ /* IPsec Inline Protocol ESP Tunnel Ingress */
.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
.crypto_capabilities = otx2_eth_sec_crypto_caps,
.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
{ /* IPsec Inline Protocol ESP Tunnel Egress */
.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
.crypto_capabilities = otx2_eth_sec_crypto_caps,
.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
/* End-of-list sentinel */
.action = RTE_SECURITY_ACTION_TYPE_NONE
/*
 * Tear down this port's inbound SA pointer table inside the shared
 * fastpath lookup memzone: look the memzone up by its well-known name,
 * locate the per-port table array at OTX2_NIX_SA_TBL_START, and free
 * the port's SPI-indexed array (allocated by
 * lookup_mem_sa_index_update()). No-op if the table was never set up.
 */
lookup_mem_sa_tbl_clear(struct rte_eth_dev *eth_dev)
static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM;
uint16_t port = eth_dev->data->port_id;
const struct rte_memzone *mz;
mz = rte_memzone_lookup(name);
/* Array of per-port SA-table pointers, indexed by port id */
sa_tbl = (uint64_t **)RTE_PTR_ADD(mem, OTX2_NIX_SA_TBL_START);
if (sa_tbl[port] == NULL)
rte_free(sa_tbl[port]);
/*
 * Publish inbound SA pointer 'sa' for the given SPI in this port's
 * entry of the shared fastpath lookup table, so the Rx fastpath can
 * translate SPI -> SA without a mailbox round trip. The per-port
 * array (ipsec_in_max_spi entries) is lazily allocated on first use.
 * NOTE(review): caller passes spi already validated against
 * dev->ipsec_in_max_spi (see eth_sec_ipsec_in_sess_create); the
 * rte_malloc failure check appears to be on elided lines — confirm.
 */
lookup_mem_sa_index_update(struct rte_eth_dev *eth_dev, int spi, void *sa)
static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM;
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
uint16_t port = eth_dev->data->port_id;
const struct rte_memzone *mz;
mz = rte_memzone_lookup(name);
otx2_err("Could not find fastpath lookup table");
sa_tbl = (uint64_t **)RTE_PTR_ADD(mem, OTX2_NIX_SA_TBL_START);
/* Lazily allocate the SPI-indexed SA pointer array for this port */
if (sa_tbl[port] == NULL) {
sa_tbl[port] = rte_malloc(NULL, dev->ipsec_in_max_spi *
sizeof(uint64_t), 0);
sa_tbl[port][spi] = (uint64_t)sa;
/*
 * Build the per-port inbound SA DB memzone name into 'name'.
 * snprintf() guarantees NUL-termination (for size > 0) and silently
 * truncates if 'size' is too small; callers pass RTE_MEMZONE_NAMESIZE
 * which comfortably fits "otx2_ipsec_in_sadb_<port>".
 */
static void
in_sa_mz_name_get(char *name, int size, uint16_t port)
{
	snprintf(name, size, "otx2_ipsec_in_sadb_%u", port);
}
/*
 * Return a pointer to entry 'sa_index' of this port's inbound SA DB.
 * The DB is a contiguous array of struct otx2_ipsec_fp_in_sa inside
 * the memzone reserved by otx2_eth_sec_init(); callers index it by
 * SPI. Logs and (presumably, on an elided line) returns NULL if the
 * memzone does not exist.
 */
static struct otx2_ipsec_fp_in_sa *
in_sa_get(uint16_t port, int sa_index)
char name[RTE_MEMZONE_NAMESIZE];
struct otx2_ipsec_fp_in_sa *sa;
const struct rte_memzone *mz;
in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
mz = rte_memzone_lookup(name);
otx2_err("Could not get the memzone reserved for IN SA DB");
return sa + sa_index;
/*
 * Pre-compute the per-packet IPsec overhead constants for a session:
 * - partial_len: fixed bytes added to each packet (outer IPv4 header,
 *   ESP/AH header, optional UDP encap header, IV and ICV/MAC);
 * - roundup_len / roundup_byte: padding granularity of the cipher.
 * These are later used by the Tx path to size offloaded packets.
 * NOTE(review): only an IPv4 outer header is accounted for here
 * (sizeof(struct rte_ipv4_hdr)); IPv6 tunnel sizing is not visible —
 * confirm whether it is handled on elided lines.
 */
ipsec_sa_const_set(struct rte_security_ipsec_xform *ipsec,
struct rte_crypto_sym_xform *xform,
struct otx2_sec_session_ipsec_ip *sess)
struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
sess->partial_len = sizeof(struct rte_ipv4_hdr);
if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
sess->partial_len += sizeof(struct rte_esp_hdr);
sess->roundup_len = sizeof(struct rte_esp_tail);
} else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH) {
sess->partial_len += AH_HDR_LEN;
/* NAT-T: UDP encapsulation adds a UDP header per packet */
if (ipsec->options.udp_encap)
sess->partial_len += sizeof(struct rte_udp_hdr);
if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
sess->partial_len += AES_GCM_IV_LEN;
sess->partial_len += AES_GCM_MAC_LEN;
sess->roundup_byte = AES_GCM_ROUNDUP_BYTE_LEN;
/* Non-AEAD: xform chain order depends on direction
 * (egress: cipher then auth; ingress: auth then cipher).
 */
if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
cipher_xform = xform;
auth_xform = xform->next;
} else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
cipher_xform = xform->next;
if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
sess->partial_len += AES_CBC_IV_LEN;
sess->roundup_byte = AES_CBC_ROUNDUP_BYTE_LEN;
/* SHA1-HMAC ICV is truncated to 12 bytes for IPsec */
if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
sess->partial_len += SHA1_HMAC_LEN;
/*
 * Use the CPT engine to derive the HMAC ipad/opad from 'auth_key' and
 * store the 48-byte result into 'hmac_key' (as required by the SA
 * format). Synchronous: submits one WRITE_HMAC_IPAD_OPAD instruction
 * through the given queue pair's LMT line and busy-polls the result
 * word until completion or a 5-second timeout.
 */
hmac_init(struct otx2_ipsec_fp_sa_ctl *ctl, struct otx2_cpt_qp *qp,
const uint8_t *auth_key, int len, uint8_t *hmac_key)
struct otx2_cpt_res cpt_res;
/* volatile: hardware writes the completion code asynchronously */
volatile struct otx2_cpt_res *res;
uint64_t timeout, lmt_status;
struct otx2_cpt_inst_s inst;
memset(&inst, 0, sizeof(struct otx2_cpt_inst_s));
/* DMA-able scratch: result area + input key buffer, CPT-aligned */
md = rte_zmalloc(NULL, sizeof(struct inst_data), OTX2_CPT_RES_ALIGN);
memcpy(md->buffer, auth_key, len);
md_iova = rte_malloc_virt2iova(md);
if (md_iova == RTE_BAD_IOVA) {
/* Result and data pointers are IOVAs into the scratch buffer */
inst.res_addr = md_iova + offsetof(struct inst_data, cpt_res);
inst.opcode = OTX2_CPT_OP_WRITE_HMAC_IPAD_OPAD;
inst.param2 = ctl->auth_type;
inst.dptr = md_iova + offsetof(struct inst_data, buffer);
/* In-place operation: output written back over the input buffer */
inst.rptr = inst.dptr;
inst.egrp = OTX2_CPT_EGRP_INLINE_IPSEC;
/* 0xff marks "not yet completed"; hardware overwrites on done */
md->cpt_res.compcode = 0;
md->cpt_res.uc_compcode = 0xff;
timeout = rte_get_timer_cycles() + 5 * rte_get_timer_hz();
/* Retry the LMT submit until the store is accepted */
otx2_lmt_mov(qp->lmtline, &inst, 2);
lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
} while (lmt_status == 0);
res = (volatile struct otx2_cpt_res *)&md->cpt_res;
/* Wait until instruction completes or times out */
while (res->uc_compcode == 0xff) {
if (rte_get_timer_cycles() > timeout)
if (res->u16[0] != OTX2_SEC_COMP_GOOD) {
/* Retrieve the ipad and opad from rptr */
memcpy(hmac_key, md->buffer, 48);
/*
 * Create an outbound (egress) inline-IPsec session:
 *  - compute per-packet length constants (ipsec_sa_const_set),
 *  - populate the outbound SA (salt/nonce, tunnel endpoints, cipher
 *    key, control word),
 *  - derive HMAC ipad/opad via the CPT engine when an auth key is
 *    present,
 *  - cache the CPT queue-pair LMT line / doorbell and instruction
 *    word 7 in the session for the Tx fastpath.
 * Returns 0 on success, negative errno otherwise (error paths are
 * partially elided here).
 */
eth_sec_ipsec_out_sess_create(struct rte_eth_dev *eth_dev,
struct rte_security_ipsec_xform *ipsec,
struct rte_crypto_sym_xform *crypto_xform,
struct rte_security_session *sec_sess)
struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
struct otx2_sec_session_ipsec_ip *sess;
uint16_t port = eth_dev->data->port_id;
int cipher_key_len, auth_key_len, ret;
const uint8_t *cipher_key, *auth_key;
struct otx2_ipsec_fp_sa_ctl *ctl;
struct otx2_ipsec_fp_out_sa *sa;
struct otx2_sec_session *priv;
struct otx2_cpt_inst_s inst;
struct otx2_cpt_qp *qp;
priv = get_sec_session_private_data(sec_sess);
sess = &priv->ipsec.ip;
/* Refuse to reprogram a session that already has an SA */
otx2_err("SA already registered");
memset(sess, 0, sizeof(struct otx2_sec_session_ipsec_ip));
ret = ipsec_sa_const_set(ipsec, crypto_xform, sess);
/* For AEAD the 4-byte salt doubles as the nonce */
if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
memcpy(sa->nonce, &ipsec->salt, 4);
if (ipsec->options.udp_encap == 1) {
if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
/* Start ip id from 1 */
if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
memcpy(&sa->ip_src, &ipsec->tunnel.ipv4.src_ip,
sizeof(struct in_addr));
memcpy(&sa->ip_dst, &ipsec->tunnel.ipv4.dst_ip,
sizeof(struct in_addr));
/* Egress chain order: cipher xform first, then auth */
cipher_xform = crypto_xform;
auth_xform = crypto_xform->next;
if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
cipher_key = crypto_xform->aead.key.data;
cipher_key_len = crypto_xform->aead.key.length;
cipher_key = cipher_xform->cipher.key.data;
cipher_key_len = cipher_xform->cipher.key.length;
auth_key = auth_xform->auth.key.data;
auth_key_len = auth_xform->auth.key.length;
if (cipher_key_len != 0)
memcpy(sa->cipher_key, cipher_key, cipher_key_len);
/* Determine word 7 of CPT instruction */
inst.egrp = OTX2_CPT_EGRP_INLINE_IPSEC;
inst.cptr = rte_mempool_virt2iova(sa);
sess->inst_w7 = inst.u64[7];
/* Get CPT QP to be used for this SA */
ret = otx2_sec_idev_tx_cpt_qp_get(port, &qp);
sess->cpt_lmtline = qp->lmtline;
sess->cpt_nq_reg = qp->lf_nq_reg;
/* Populate control word */
ret = ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl);
if (auth_key_len && auth_key) {
ret = hmac_init(ctl, qp, auth_key, auth_key_len, sa->hmac_key);
/* Error path: release the QP acquired above */
otx2_sec_idev_tx_cpt_qp_put(sess->qp);
/*
 * Create an inbound (ingress) inline-IPsec session. The SA lives in
 * the port's SPI-indexed inbound SA DB memzone (in_sa_get), so the
 * hardware and the Rx fastpath can locate it directly by SPI; the
 * fastpath SPI->SA lookup table is updated as well. An auth key, if
 * present, is expanded to ipad/opad through a temporarily borrowed
 * CPT queue pair. Returns 0 on success, negative errno otherwise.
 */
eth_sec_ipsec_in_sess_create(struct rte_eth_dev *eth_dev,
struct rte_security_ipsec_xform *ipsec,
struct rte_crypto_sym_xform *crypto_xform,
struct rte_security_session *sec_sess)
struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
struct otx2_sec_session_ipsec_ip *sess;
uint16_t port = eth_dev->data->port_id;
int cipher_key_len, auth_key_len, ret;
const uint8_t *cipher_key, *auth_key;
struct otx2_ipsec_fp_sa_ctl *ctl;
struct otx2_ipsec_fp_in_sa *sa;
struct otx2_sec_session *priv;
struct otx2_cpt_qp *qp;
/* SPI doubles as the SA DB index, so it must fit the table */
if (ipsec->spi >= dev->ipsec_in_max_spi) {
otx2_err("SPI exceeds max supported");
sa = in_sa_get(port, ipsec->spi);
priv = get_sec_session_private_data(sec_sess);
sess = &priv->ipsec.ip;
otx2_err("SA already registered");
memset(sa, 0, sizeof(struct otx2_ipsec_fp_in_sa));
/* Ingress chain order: auth xform first, then cipher */
auth_xform = crypto_xform;
cipher_xform = crypto_xform->next;
if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
memcpy(sa->nonce, &ipsec->salt, 4);
cipher_key = crypto_xform->aead.key.data;
cipher_key_len = crypto_xform->aead.key.length;
cipher_key = cipher_xform->cipher.key.data;
cipher_key_len = cipher_xform->cipher.key.length;
auth_key = auth_xform->auth.key.data;
auth_key_len = auth_xform->auth.key.length;
if (cipher_key_len != 0)
memcpy(sa->cipher_key, cipher_key, cipher_key_len);
/* Stored in the SA so Rx can hand userdata back to the app */
sa->userdata = priv->userdata;
if (lookup_mem_sa_index_update(eth_dev, ipsec->spi, sa))
ret = ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl);
if (auth_key_len && auth_key) {
/* Get a queue pair for HMAC init */
ret = otx2_sec_idev_tx_cpt_qp_get(port, &qp);
ret = hmac_init(ctl, qp, auth_key, auth_key_len, sa->hmac_key);
/* QP was only needed for the one-shot HMAC derivation */
otx2_sec_idev_tx_cpt_qp_put(qp);
/*
 * Validate the IPsec + crypto transform combination and dispatch to
 * the direction-specific session constructor (ingress vs egress).
 */
eth_sec_ipsec_sess_create(struct rte_eth_dev *eth_dev,
struct rte_security_ipsec_xform *ipsec,
struct rte_crypto_sym_xform *crypto_xform,
struct rte_security_session *sess)
ret = ipsec_fp_xform_verify(ipsec, crypto_xform);
if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
return eth_sec_ipsec_in_sess_create(eth_dev, ipsec,
return eth_sec_ipsec_out_sess_create(eth_dev, ipsec,
/*
 * rte_security .session_create callback. Only inline-protocol IPsec
 * is supported. Allocates the private session object from the
 * caller-supplied mempool, stashes the application userdata, and
 * delegates to eth_sec_ipsec_sess_create(); on failure the private
 * data is returned to the mempool and unlinked from the session.
 */
otx2_eth_sec_session_create(void *device,
struct rte_security_session_conf *conf,
struct rte_security_session *sess,
struct rte_mempool *mempool)
struct otx2_sec_session *priv;
if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
if (rte_mempool_get(mempool, (void **)&priv)) {
otx2_err("Could not allocate security session private data");
set_sec_session_private_data(sess, priv);
/*
 * Save userdata provided by the application. For ingress packets, this
 * could be used to identify the SA.
 */
priv->userdata = conf->userdata;
if (conf->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
ret = eth_sec_ipsec_sess_create(device, &conf->ipsec,
/* Failure cleanup: release private data and detach it */
rte_mempool_put(mempool, priv);
set_sec_session_private_data(sess, NULL);
/*
 * rte_security .session_destroy callback. Returns the session's CPT
 * queue pair (if one was bound for egress) and puts the private data
 * back into its originating mempool, located via
 * rte_mempool_from_obj().
 */
otx2_eth_sec_session_destroy(void *device __rte_unused,
struct rte_security_session *sess)
struct otx2_sec_session_ipsec_ip *sess_ip;
struct otx2_sec_session *priv;
struct rte_mempool *sess_mp;
priv = get_sec_session_private_data(sess);
sess_ip = &priv->ipsec.ip;
/* Release CPT LF used for this session */
if (sess_ip->qp != NULL) {
ret = otx2_sec_idev_tx_cpt_qp_put(sess_ip->qp);
sess_mp = rte_mempool_from_obj(priv);
set_sec_session_private_data(sess, NULL);
rte_mempool_put(sess_mp, priv);
/*
 * rte_security .session_get_size callback: size the application must
 * provision per session-private object in its mempool.
 */
otx2_eth_sec_session_get_size(void *device __rte_unused)
return sizeof(struct otx2_sec_session);
/*
 * rte_security .set_pkt_metadata callback: stash the session pointer
 * in the mbuf's udata64 so the Tx fastpath can find the SA for this
 * packet (matches the RTE_SECURITY_TX_OLOAD_NEED_MDATA capability
 * flag advertised above).
 */
otx2_eth_sec_set_pkt_mdata(void *device __rte_unused,
struct rte_security_session *session,
struct rte_mbuf *m, void *params __rte_unused)
/* Set security session as the pkt metadata */
m->udata64 = (uint64_t)session;
/*
 * rte_security .get_userdata callback: the 64-bit metadata delivered
 * with an ingress packet is the userdata pointer stored in the SA at
 * session-create time; hand it back as-is.
 */
otx2_eth_sec_get_userdata(void *device __rte_unused, uint64_t md,
/* Retrieve userdata */
*userdata = (void *)md;
/* rte_security .capabilities_get callback: static capability table. */
static const struct rte_security_capability *
otx2_eth_sec_capabilities_get(void *device __rte_unused)
return otx2_eth_sec_capabilities;
/* rte_security operations exported through the ethdev security context */
static struct rte_security_ops otx2_eth_sec_ops = {
.session_create = otx2_eth_sec_session_create,
.session_destroy = otx2_eth_sec_session_destroy,
.session_get_size = otx2_eth_sec_session_get_size,
.set_pkt_metadata = otx2_eth_sec_set_pkt_mdata,
.get_userdata = otx2_eth_sec_get_userdata,
.capabilities_get = otx2_eth_sec_capabilities_get
/*
 * Allocate and attach the rte_security context for this ethdev, after
 * initializing the shared security-idev configuration for the port.
 * The context is freed by otx2_eth_sec_ctx_destroy().
 */
otx2_eth_sec_ctx_create(struct rte_eth_dev *eth_dev)
struct rte_security_ctx *ctx;
ctx = rte_malloc("otx2_eth_sec_ctx",
sizeof(struct rte_security_ctx), 0);
ret = otx2_sec_idev_cfg_init(eth_dev->data->port_id);
/* Populate ctx */
ctx->device = eth_dev;
ctx->ops = &otx2_eth_sec_ops;
eth_dev->security_ctx = ctx;
/* Free the security context allocated by otx2_eth_sec_ctx_create().
 * NOTE(review): security_ctx is not reset to NULL here — confirm no
 * later use of the stale pointer.
 */
otx2_eth_sec_ctx_destroy(struct rte_eth_dev *eth_dev)
rte_free(eth_dev->security_ctx);
/*
 * Program the NIX LF inline-IPsec configuration through the AF
 * mailbox: SA DB base IOVA, SA entry size (power of two), maximum
 * packet length, SPI index width/limit, the scheduling tag type 'tt',
 * and the event tag constant (ETHDEV event type + port id) used to
 * deliver inbound IPsec events. Returns the mailbox status.
 */
eth_sec_ipsec_cfg(struct rte_eth_dev *eth_dev, uint8_t tt)
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
uint16_t port = eth_dev->data->port_id;
struct nix_inline_ipsec_lf_cfg *req;
struct otx2_mbox *mbox = dev->mbox;
struct eth_sec_tag_const tag_const;
char name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz;
/* The inbound SA DB memzone must already exist (otx2_eth_sec_init) */
in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
mz = rte_memzone_lookup(name);
req = otx2_mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
req->sa_base_addr = mz->iova;
req->ipsec_cfg0.tt = tt;
/* Tag constant: event type + originating port id */
tag_const.event_type = RTE_EVENT_TYPE_ETHDEV;
tag_const.port = port;
req->ipsec_cfg0.tag_const = tag_const.u32;
/* Hardware strides the SA DB by a power-of-two entry size */
req->ipsec_cfg0.sa_pow2_size =
rte_log2_u32(sizeof(struct otx2_ipsec_fp_in_sa));
req->ipsec_cfg0.lenm1_max = ETH_SEC_MAX_PKT_LEN - 1;
req->ipsec_cfg1.sa_idx_w = rte_log2_u32(dev->ipsec_in_max_spi);
req->ipsec_cfg1.sa_idx_max = dev->ipsec_in_max_spi - 1;
return otx2_mbox_process(mbox);
/*
 * Re-sync the inline-IPsec scheduling tag type with the one currently
 * configured on RQ:0 — reads the RQ context via an AQ READ mailbox
 * request, then reprograms the IPsec LF config with rsp->rq.sso_tt.
 * Called presumably after the eventdev Rx adapter changes the RQ's
 * tag type; confirm against callers.
 */
otx2_eth_sec_update_tag_type(struct rte_eth_dev *eth_dev)
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
struct otx2_mbox *mbox = dev->mbox;
struct nix_aq_enq_rsp *rsp;
struct nix_aq_enq_req *aq;
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
aq->qidx = 0; /* Read RQ:0 context */
aq->ctype = NIX_AQ_CTYPE_RQ;
aq->op = NIX_AQ_INSTOP_READ;
ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
otx2_err("Could not read RQ context");
/* Update tag type */
ret = eth_sec_ipsec_cfg(eth_dev, rsp->rq.sso_tt);
otx2_err("Could not update sec eth tag type");
/*
 * Per-port security init, run when Rx or Tx security offload is
 * enabled: reserve the IOVA-contiguous inbound SA DB memzone
 * (ipsec_in_max_spi entries of otx2_ipsec_fp_in_sa), zero it, and
 * push the inline-IPsec LF configuration with an ORDERED tag type.
 * On failure the partially-initialized state is torn down via
 * otx2_eth_sec_fini().
 */
otx2_eth_sec_init(struct rte_eth_dev *eth_dev)
const size_t sa_width = sizeof(struct otx2_ipsec_fp_in_sa);
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
uint16_t port = eth_dev->data->port_id;
char name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz;
/* Hardware constrains the SA stride: power of two in [32, 512] */
RTE_BUILD_BUG_ON(sa_width < 32 || sa_width > 512 ||
!RTE_IS_POWER_OF_2(sa_width));
/* Nothing to do unless a security offload is enabled */
if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
!(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
nb_sa = dev->ipsec_in_max_spi;
mz_sz = nb_sa * sa_width;
in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
/* IOVA_CONTIG: hardware indexes the DB by physical address */
mz = rte_memzone_reserve_aligned(name, mz_sz, rte_socket_id(),
RTE_MEMZONE_IOVA_CONTIG, OTX2_ALIGN);
otx2_err("Could not allocate inbound SA DB");
memset(mz->addr, 0, mz_sz);
ret = eth_sec_ipsec_cfg(eth_dev, SSO_TT_ORDERED);
otx2_err("Could not configure inline IPsec");
otx2_err("Could not configure device for security");
otx2_eth_sec_fini(eth_dev);
/*
 * Per-port security teardown (mirror of otx2_eth_sec_init): clear the
 * fastpath SPI->SA lookup table and free the inbound SA DB memzone.
 * No-op if no security offload was enabled.
 */
otx2_eth_sec_fini(struct rte_eth_dev *eth_dev)
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
uint16_t port = eth_dev->data->port_id;
char name[RTE_MEMZONE_NAMESIZE];
if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
!(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
lookup_mem_sa_tbl_clear(eth_dev);
in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
/* rte_memzone_free(NULL) is a safe no-op if lookup fails */
rte_memzone_free(rte_memzone_lookup(name));