1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (C) 2020 Marvell International Ltd.
5 #include <rte_cryptodev.h>
6 #include <rte_ethdev.h>
7 #include <rte_eventdev.h>
8 #include <rte_malloc.h>
9 #include <rte_memzone.h>
10 #include <rte_security.h>
11 #include <rte_security_driver.h>
13 #include "otx2_common.h"
14 #include "otx2_cryptodev_qp.h"
15 #include "otx2_ethdev.h"
16 #include "otx2_ethdev_sec.h"
17 #include "otx2_ipsec_fp.h"
18 #include "otx2_sec_idev.h"
/* Maximum packet length handled by inline IPsec; fed (minus one) into
 * ipsec_cfg0.lenm1_max in eth_sec_ipsec_cfg().
 */
20 #define ETH_SEC_MAX_PKT_LEN 1450
/*
 * Layout of the event tag constant programmed into ipsec_cfg0.tag_const
 * (see eth_sec_ipsec_cfg(), which also writes .event_type, .port and
 * reads .u32).
 * NOTE(review): this excerpt is missing lines — presumably an enclosing
 * union with a u32 view, a port bit-field and the closing brace; confirm
 * against the full source.
 */
22 struct eth_sec_tag_const {
26 uint32_t rsvd_11_0 : 12;
28 uint32_t event_type : 4;
29 uint32_t rsvd_31_24 : 8;
/*
 * Crypto algorithms advertised for inline IPsec on this device:
 * AES-GCM (AEAD), AES-CBC (cipher) and SHA1-HMAC (auth).
 * Referenced by both entries of otx2_eth_sec_capabilities[] below.
 * NOTE(review): nested initializer lines (key/digest/IV size ranges,
 * closing braces) are missing from this excerpt.
 */
35 static struct rte_cryptodev_capabilities otx2_eth_sec_crypto_caps[] = {
/* AEAD: AES-GCM */
37 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
39 .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
41 .algo = RTE_CRYPTO_AEAD_AES_GCM,
/* Cipher: AES-CBC */
67 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
69 .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
71 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
/* Auth: SHA1-HMAC */
87 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
89 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
91 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
/* Mandatory list terminator for rte_cryptodev capability arrays */
106 RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
/*
 * Security capabilities returned by otx2_eth_sec_capabilities_get():
 * inline-protocol ESP tunnel mode, one entry per direction, both backed
 * by otx2_eth_sec_crypto_caps, plus the ACTION_TYPE_NONE terminator.
 * NOTE(review): some initializer/brace lines are missing from this
 * excerpt.
 */
109 static const struct rte_security_capability otx2_eth_sec_capabilities[] = {
110 { /* IPsec Inline Protocol ESP Tunnel Ingress */
111 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
112 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
114 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
115 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
116 .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
119 .crypto_capabilities = otx2_eth_sec_crypto_caps,
120 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
122 { /* IPsec Inline Protocol ESP Tunnel Egress */
123 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
124 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
126 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
127 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
128 .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
131 .crypto_capabilities = otx2_eth_sec_crypto_caps,
132 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
/* End-of-list marker: action NONE */
135 .action = RTE_SECURITY_ACTION_TYPE_NONE
/*
 * Free this port's SPI->SA pointer table stored inside the NIX fastpath
 * lookup memzone. Invoked from otx2_eth_sec_fini().
 * NOTE(review): interior lines (memzone NULL check, 'mem'/'sa_tbl'
 * declarations, early returns, table-slot reset, closing brace) are
 * missing from this excerpt.
 */
140 lookup_mem_sa_tbl_clear(struct rte_eth_dev *eth_dev)
142 static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM;
143 uint16_t port = eth_dev->data->port_id;
144 const struct rte_memzone *mz;
148 mz = rte_memzone_lookup(name);
/* Per-port SA tables live at fixed offset OTX2_NIX_SA_TBL_START
 * within the shared lookup memzone.
 */
154 sa_tbl = (uint64_t **)RTE_PTR_ADD(mem, OTX2_NIX_SA_TBL_START);
155 if (sa_tbl[port] == NULL)
158 rte_free(sa_tbl[port]);
/*
 * Record SA pointer 'sa' at index 'spi' in this port's SPI->SA table in
 * the fastpath lookup memzone, allocating the table on first use
 * (sized by dev->ipsec_in_max_spi entries).
 * NOTE(review): interior lines (memzone NULL handling after the lookup,
 * rte_malloc failure check, return statements, braces) are missing from
 * this excerpt.
 */
163 lookup_mem_sa_index_update(struct rte_eth_dev *eth_dev, int spi, void *sa)
165 static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM;
166 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
167 uint16_t port = eth_dev->data->port_id;
168 const struct rte_memzone *mz;
172 mz = rte_memzone_lookup(name);
174 otx2_err("Could not find fastpath lookup table");
180 sa_tbl = (uint64_t **)RTE_PTR_ADD(mem, OTX2_NIX_SA_TBL_START);
/* Lazily allocate the per-port table on first SA install */
182 if (sa_tbl[port] == NULL) {
183 sa_tbl[port] = rte_malloc(NULL, dev->ipsec_in_max_spi *
184 sizeof(uint64_t), 0);
187 sa_tbl[port][spi] = (uint64_t)sa;
/* Compose the per-port inbound SA DB memzone name into 'name'
 * (buffer of 'size' bytes); shared by reserve/lookup/free call sites.
 */
193 in_sa_mz_name_get(char *name, int size, uint16_t port)
195 snprintf(name, size, "otx2_ipsec_in_sadb_%u", port);
/*
 * Return pointer to the inbound SA entry 'sa_index' in this port's
 * inbound SA DB memzone (reserved in otx2_eth_sec_init()).
 * NOTE(review): the line assigning 'sa' from mz->addr and the NULL-mz
 * return path are missing from this excerpt.
 */
198 static struct otx2_ipsec_fp_in_sa *
199 in_sa_get(uint16_t port, int sa_index)
201 char name[RTE_MEMZONE_NAMESIZE];
202 struct otx2_ipsec_fp_in_sa *sa;
203 const struct rte_memzone *mz;
205 in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
206 mz = rte_memzone_lookup(name);
208 otx2_err("Could not get the memzone reserved for IN SA DB");
/* SA DB is a flat array of otx2_ipsec_fp_in_sa; index directly */
214 return sa + sa_index;
/*
 * Derive the HMAC ipad/opad for 'auth_key' by submitting a
 * WRITE_HMAC_IPAD_OPAD instruction to a CPT queue pair and busy-waiting
 * for completion (5 second timeout). The 48-byte result is copied into
 * 'hmac_key'.
 * NOTE(review): many interior lines (struct inst_data definition/md
 * declaration, zmalloc failure check, error/cleanup paths, rte_free of
 * 'md', return statements) are missing from this excerpt.
 */
218 hmac_init(struct otx2_ipsec_fp_sa_ctl *ctl, struct otx2_cpt_qp *qp,
219 const uint8_t *auth_key, int len, uint8_t *hmac_key)
222 struct otx2_cpt_res cpt_res;
/* 'res' is volatile: hardware writes the completion code in-place */
226 volatile struct otx2_cpt_res *res;
227 uint64_t timeout, lmt_status;
228 struct otx2_cpt_inst_s inst;
232 memset(&inst, 0, sizeof(struct otx2_cpt_inst_s));
/* DMA-able scratch holding the key input and the result area */
234 md = rte_zmalloc(NULL, sizeof(struct inst_data), OTX2_CPT_RES_ALIGN);
238 memcpy(md->buffer, auth_key, len);
240 md_iova = rte_malloc_virt2iova(md);
241 if (md_iova == RTE_BAD_IOVA) {
/* Build the CPT instruction: result and data pointers are IOVAs
 * into 'md'; in-place operation (rptr == dptr).
 */
246 inst.res_addr = md_iova + offsetof(struct inst_data, cpt_res);
247 inst.opcode = OTX2_CPT_OP_WRITE_HMAC_IPAD_OPAD;
248 inst.param2 = ctl->auth_type;
250 inst.dptr = md_iova + offsetof(struct inst_data, buffer);
251 inst.rptr = inst.dptr;
252 inst.egrp = OTX2_CPT_EGRP_INLINE_IPSEC;
/* Pre-set uc_compcode to a sentinel (0xff); hardware overwrites it
 * on completion, which the poll loop below watches for.
 */
254 md->cpt_res.compcode = 0;
255 md->cpt_res.uc_compcode = 0xff;
257 timeout = rte_get_timer_cycles() + 5 * rte_get_timer_hz();
/* Retry LMTST submission until the store is accepted */
262 otx2_lmt_mov(qp->lmtline, &inst, 2);
263 lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
264 } while (lmt_status == 0);
266 res = (volatile struct otx2_cpt_res *)&md->cpt_res;
268 /* Wait until instruction completes or times out */
269 while (res->uc_compcode == 0xff) {
270 if (rte_get_timer_cycles() > timeout)
274 if (res->u16[0] != OTX2_SEC_COMP_GOOD) {
279 /* Retrieve the ipad and opad from rptr */
280 memcpy(hmac_key, md->buffer, 48);
/*
 * Create an outbound (egress) inline IPsec session: populate the
 * otx2_ipsec_fp_out_sa (nonce/salt, tunnel endpoints, cipher key),
 * acquire a TX CPT queue pair, program the SA control word and derive
 * the HMAC key. Fails if the session is already registered.
 * NOTE(review): interior lines ('sa'/'ctl' assignment, UDP-encap port
 * setup, xform-order validation, error labels/returns) are missing
 * from this excerpt.
 */
290 eth_sec_ipsec_out_sess_create(struct rte_eth_dev *eth_dev,
291 struct rte_security_ipsec_xform *ipsec,
292 struct rte_crypto_sym_xform *crypto_xform,
293 struct rte_security_session *sec_sess)
295 struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
296 struct otx2_sec_session_ipsec_ip *sess;
297 uint16_t port = eth_dev->data->port_id;
298 int cipher_key_len, auth_key_len, ret;
299 const uint8_t *cipher_key, *auth_key;
300 struct otx2_ipsec_fp_sa_ctl *ctl;
301 struct otx2_ipsec_fp_out_sa *sa;
302 struct otx2_sec_session *priv;
303 struct otx2_cpt_qp *qp;
305 priv = get_sec_session_private_data(sec_sess);
306 sess = &priv->ipsec.ip;
/* Reject double registration of the same session */
311 otx2_err("SA already registered");
315 memset(sess, 0, sizeof(struct otx2_sec_session_ipsec_ip));
/* For AEAD, the 4-byte salt doubles as the nonce */
317 if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
318 memcpy(sa->nonce, &ipsec->salt, 4);
320 if (ipsec->options.udp_encap == 1) {
/* Tunnel mode: copy outer IPv4 source/destination addresses */
325 if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
326 if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
327 memcpy(&sa->ip_src, &ipsec->tunnel.ipv4.src_ip,
328 sizeof(struct in_addr));
329 memcpy(&sa->ip_dst, &ipsec->tunnel.ipv4.dst_ip,
330 sizeof(struct in_addr));
/* Egress: cipher xform first, auth xform chained after it */
338 cipher_xform = crypto_xform;
339 auth_xform = crypto_xform->next;
/* AEAD supplies a single combined key; otherwise take separate
 * cipher and auth keys from their respective xforms.
 */
345 if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
346 cipher_key = crypto_xform->aead.key.data;
347 cipher_key_len = crypto_xform->aead.key.length;
349 cipher_key = cipher_xform->cipher.key.data;
350 cipher_key_len = cipher_xform->cipher.key.length;
351 auth_key = auth_xform->auth.key.data;
352 auth_key_len = auth_xform->auth.key.length;
355 if (cipher_key_len != 0)
356 memcpy(sa->cipher_key, cipher_key, cipher_key_len);
360 /* Get CPT QP to be used for this SA */
361 ret = otx2_sec_idev_tx_cpt_qp_get(port, &qp);
/* Cache LMT line and NQ doorbell for the fast path */
367 sess->cpt_lmtline = qp->lmtline;
368 sess->cpt_nq_reg = qp->lf_nq_reg;
370 /* Populate control word */
371 ret = ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl);
375 if (auth_key_len && auth_key) {
376 ret = hmac_init(ctl, qp, auth_key, auth_key_len, sa->hmac_key);
/* Error path: release the CPT queue pair taken above */
383 otx2_sec_idev_tx_cpt_qp_put(sess->qp);
/*
 * Create an inbound (ingress) inline IPsec session: validate the SPI
 * against the device limit, fill the otx2_ipsec_fp_in_sa entry indexed
 * by SPI in the SA DB, register it in the fastpath lookup table, and
 * derive the HMAC key via a temporarily-held CPT queue pair.
 * NOTE(review): interior lines ('ctl' assignment, sess->in_sa setup,
 * error returns, closing braces) are missing from this excerpt.
 */
388 eth_sec_ipsec_in_sess_create(struct rte_eth_dev *eth_dev,
389 struct rte_security_ipsec_xform *ipsec,
390 struct rte_crypto_sym_xform *crypto_xform,
391 struct rte_security_session *sec_sess)
393 struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
394 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
395 struct otx2_sec_session_ipsec_ip *sess;
396 uint16_t port = eth_dev->data->port_id;
397 int cipher_key_len, auth_key_len, ret;
398 const uint8_t *cipher_key, *auth_key;
399 struct otx2_ipsec_fp_sa_ctl *ctl;
400 struct otx2_ipsec_fp_in_sa *sa;
401 struct otx2_sec_session *priv;
402 struct otx2_cpt_qp *qp;
/* Inbound SA DB is indexed directly by SPI; bound-check first */
404 if (ipsec->spi >= dev->ipsec_in_max_spi) {
405 otx2_err("SPI exceeds max supported");
409 sa = in_sa_get(port, ipsec->spi);
412 priv = get_sec_session_private_data(sec_sess);
413 sess = &priv->ipsec.ip;
/* Reject double registration of the same SA slot */
416 otx2_err("SA already registered");
420 memset(sa, 0, sizeof(struct otx2_ipsec_fp_in_sa));
/* Ingress: auth xform first, cipher xform chained after it
 * (mirror image of the egress ordering).
 */
422 auth_xform = crypto_xform;
423 cipher_xform = crypto_xform->next;
429 if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
/* For GCM the 4-byte salt doubles as the nonce */
430 if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
431 memcpy(sa->nonce, &ipsec->salt, 4);
432 cipher_key = crypto_xform->aead.key.data;
433 cipher_key_len = crypto_xform->aead.key.length;
435 cipher_key = cipher_xform->cipher.key.data;
436 cipher_key_len = cipher_xform->cipher.key.length;
437 auth_key = auth_xform->auth.key.data;
438 auth_key_len = auth_xform->auth.key.length;
441 if (cipher_key_len != 0)
442 memcpy(sa->cipher_key, cipher_key, cipher_key_len);
/* Application userdata is returned on RX via get_userdata() */
448 sa->userdata = priv->userdata;
450 if (lookup_mem_sa_index_update(eth_dev, ipsec->spi, sa))
453 ret = ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl);
457 if (auth_key_len && auth_key) {
458 /* Get a queue pair for HMAC init */
459 ret = otx2_sec_idev_tx_cpt_qp_get(port, &qp);
/* QP is only needed for the one-shot HMAC derivation; release
 * it immediately afterwards.
 */
462 ret = hmac_init(ctl, qp, auth_key, auth_key_len, sa->hmac_key);
463 otx2_sec_idev_tx_cpt_qp_put(qp);
/*
 * Validate the IPsec/crypto transform combination, then dispatch to the
 * ingress or egress session-create helper based on SA direction.
 * NOTE(review): braces, the error return after verification, and the
 * trailing arguments of the two calls are missing from this excerpt.
 */
469 eth_sec_ipsec_sess_create(struct rte_eth_dev *eth_dev,
470 struct rte_security_ipsec_xform *ipsec,
471 struct rte_crypto_sym_xform *crypto_xform,
472 struct rte_security_session *sess)
476 ret = ipsec_fp_xform_verify(ipsec, crypto_xform);
480 if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
481 return eth_sec_ipsec_in_sess_create(eth_dev, ipsec,
484 return eth_sec_ipsec_out_sess_create(eth_dev, ipsec,
/*
 * rte_security .session_create callback. Accepts only inline-protocol
 * actions; allocates session private data from 'mempool', stores the
 * application's userdata, and delegates IPsec setup to
 * eth_sec_ipsec_sess_create(). On failure the private data is returned
 * to the mempool and the session pointer cleared.
 * NOTE(review): some returns/braces and the non-IPsec protocol branch
 * are missing from this excerpt.
 */
489 otx2_eth_sec_session_create(void *device,
490 struct rte_security_session_conf *conf,
491 struct rte_security_session *sess,
492 struct rte_mempool *mempool)
494 struct otx2_sec_session *priv;
497 if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
500 if (rte_mempool_get(mempool, (void **)&priv)) {
501 otx2_err("Could not allocate security session private data");
505 set_sec_session_private_data(sess, priv);
508 * Save userdata provided by the application. For ingress packets, this
509 * could be used to identify the SA.
511 priv->userdata = conf->userdata;
513 if (conf->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
514 ret = eth_sec_ipsec_sess_create(device, &conf->ipsec,
/* Failure path: give the private data back and unlink it */
526 rte_mempool_put(mempool, priv);
527 set_sec_session_private_data(sess, NULL);
/*
 * rte_security .session_destroy callback: release the CPT queue pair
 * held by the session (if any) and return the private data to its
 * originating mempool.
 * NOTE(review): NULL-priv check, error handling for the QP put, and
 * return statements are missing from this excerpt.
 */
532 otx2_eth_sec_session_destroy(void *device __rte_unused,
533 struct rte_security_session *sess)
535 struct otx2_sec_session_ipsec_ip *sess_ip;
536 struct otx2_sec_session *priv;
537 struct rte_mempool *sess_mp;
540 priv = get_sec_session_private_data(sess);
544 sess_ip = &priv->ipsec.ip;
546 /* Release CPT LF used for this session */
547 if (sess_ip->qp != NULL) {
548 ret = otx2_sec_idev_tx_cpt_qp_put(sess_ip->qp);
/* Recover the mempool from the object itself — no separate handle
 * is stored.
 */
553 sess_mp = rte_mempool_from_obj(priv);
555 set_sec_session_private_data(sess, NULL);
556 rte_mempool_put(sess_mp, priv);
/* rte_security .session_get_size callback: size of the per-session
 * private data the PMD allocates from the session mempool.
 */
562 otx2_eth_sec_session_get_size(void *device __rte_unused)
564 return sizeof(struct otx2_sec_session);
/* rte_security .set_pkt_metadata callback: stash the session pointer in
 * the mbuf's udata64 so the TX fast path can locate the SA.
 */
568 otx2_eth_sec_set_pkt_mdata(void *device __rte_unused,
569 struct rte_security_session *session,
570 struct rte_mbuf *m, void *params __rte_unused)
572 /* Set security session as the pkt metadata */
573 m->udata64 = (uint64_t)session;
/* rte_security .get_userdata callback: 'md' carries the userdata saved
 * at session create (see sa->userdata assignment in the ingress path);
 * hand it back to the application as a pointer.
 */
579 otx2_eth_sec_get_userdata(void *device __rte_unused, uint64_t md,
582 /* Retrieve userdata */
583 *userdata = (void *)md;
/* rte_security .capabilities_get callback: static capability table
 * declared above.
 */
588 static const struct rte_security_capability *
589 otx2_eth_sec_capabilities_get(void *device __rte_unused)
591 return otx2_eth_sec_capabilities;
/* rte_security ops table wired into the security context in
 * otx2_eth_sec_ctx_create().
 */
594 static struct rte_security_ops otx2_eth_sec_ops = {
595 .session_create = otx2_eth_sec_session_create,
596 .session_destroy = otx2_eth_sec_session_destroy,
597 .session_get_size = otx2_eth_sec_session_get_size,
598 .set_pkt_metadata = otx2_eth_sec_set_pkt_mdata,
599 .get_userdata = otx2_eth_sec_get_userdata,
600 .capabilities_get = otx2_eth_sec_capabilities_get
/*
 * Allocate and attach an rte_security_ctx to the ethdev, and initialise
 * the shared security device config for this port.
 * NOTE(review): allocation/ret error checks, braces and the return
 * statement are missing from this excerpt.
 */
604 otx2_eth_sec_ctx_create(struct rte_eth_dev *eth_dev)
606 struct rte_security_ctx *ctx;
609 ctx = rte_malloc("otx2_eth_sec_ctx",
610 sizeof(struct rte_security_ctx), 0);
614 ret = otx2_sec_idev_cfg_init(eth_dev->data->port_id);
/* Populate ctx */
622 ctx->device = eth_dev;
623 ctx->ops = &otx2_eth_sec_ops;
626 eth_dev->security_ctx = ctx;
/* Free the security context allocated in otx2_eth_sec_ctx_create(). */
632 otx2_eth_sec_ctx_destroy(struct rte_eth_dev *eth_dev)
634 rte_free(eth_dev->security_ctx);
/*
 * Program the NIX inline-IPsec LF configuration via mailbox: inbound
 * SA DB base address, SA entry size, max packet length, tag constant
 * (event type + port) and SA index width/limit derived from
 * dev->ipsec_in_max_spi. 'tt' selects the SSO tag type for delivered
 * events.
 * NOTE(review): memzone NULL check, tag_const zeroing and some braces
 * are missing from this excerpt.
 */
638 eth_sec_ipsec_cfg(struct rte_eth_dev *eth_dev, uint8_t tt)
640 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
641 uint16_t port = eth_dev->data->port_id;
642 struct nix_inline_ipsec_lf_cfg *req;
643 struct otx2_mbox *mbox = dev->mbox;
644 struct eth_sec_tag_const tag_const;
645 char name[RTE_MEMZONE_NAMESIZE];
646 const struct rte_memzone *mz;
648 in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
649 mz = rte_memzone_lookup(name);
653 req = otx2_mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
/* Hardware indexes the SA DB by SPI starting at this IOVA */
655 req->sa_base_addr = mz->iova;
657 req->ipsec_cfg0.tt = tt;
/* Tag constant encodes the ethdev event type and port id so inline
 * IPsec events can be demuxed by the event adapter.
 */
660 tag_const.event_type = RTE_EVENT_TYPE_ETHDEV;
661 tag_const.port = port;
662 req->ipsec_cfg0.tag_const = tag_const.u32;
/* Hardware expects SA size as a power-of-two exponent */
664 req->ipsec_cfg0.sa_pow2_size =
665 rte_log2_u32(sizeof(struct otx2_ipsec_fp_in_sa));
666 req->ipsec_cfg0.lenm1_max = ETH_SEC_MAX_PKT_LEN - 1;
668 req->ipsec_cfg1.sa_idx_w = rte_log2_u32(dev->ipsec_in_max_spi);
669 req->ipsec_cfg1.sa_idx_max = dev->ipsec_in_max_spi - 1;
671 return otx2_mbox_process(mbox);
/*
 * Per-port inline IPsec init: when RX or TX security offload is
 * enabled, reserve the IOVA-contiguous inbound SA DB memzone
 * (ipsec_in_max_spi entries), zero it, and push the inline IPsec LF
 * configuration to the kernel/AF via eth_sec_ipsec_cfg(). Calls
 * otx2_eth_sec_fini() on the error path to unwind.
 * NOTE(review): return statements, 'nb_sa'/'mz_sz'/'ret' declarations
 * and some braces are missing from this excerpt.
 */
675 otx2_eth_sec_init(struct rte_eth_dev *eth_dev)
677 const size_t sa_width = sizeof(struct otx2_ipsec_fp_in_sa);
678 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
679 uint16_t port = eth_dev->data->port_id;
680 char name[RTE_MEMZONE_NAMESIZE];
681 const struct rte_memzone *mz;
/* Hardware requires the SA entry size to be a power of two in
 * [32, 512] (it is programmed as a pow2 exponent in ipsec_cfg0).
 */
685 RTE_BUILD_BUG_ON(sa_width < 32 || sa_width > 512 ||
686 !RTE_IS_POWER_OF_2(sa_width));
/* Nothing to do unless security offload is enabled on RX or TX */
688 if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
689 !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
692 nb_sa = dev->ipsec_in_max_spi;
693 mz_sz = nb_sa * sa_width;
694 in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
/* IOVA-contiguous: hardware walks the SA DB by physical address */
695 mz = rte_memzone_reserve_aligned(name, mz_sz, rte_socket_id(),
696 RTE_MEMZONE_IOVA_CONTIG, OTX2_ALIGN);
699 otx2_err("Could not allocate inbound SA DB");
703 memset(mz->addr, 0, mz_sz);
705 ret = eth_sec_ipsec_cfg(eth_dev, SSO_TT_ORDERED);
707 otx2_err("Could not configure inline IPsec");
/* Error path: undo partial setup */
714 otx2_err("Could not configure device for security");
715 otx2_eth_sec_fini(eth_dev);
/*
 * Per-port inline IPsec teardown: mirror of otx2_eth_sec_init().
 * Skipped when neither RX nor TX security offload is enabled; otherwise
 * frees the fastpath SA lookup table and the inbound SA DB memzone.
 * NOTE(review): the early-return statement and closing brace are
 * missing from this excerpt.
 */
720 otx2_eth_sec_fini(struct rte_eth_dev *eth_dev)
722 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
723 uint16_t port = eth_dev->data->port_id;
724 char name[RTE_MEMZONE_NAMESIZE];
726 if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
727 !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
730 lookup_mem_sa_tbl_clear(eth_dev);
732 in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
/* rte_memzone_free() tolerates NULL if the lookup fails */
733 rte_memzone_free(rte_memzone_lookup(name));