1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (C) 2020 Marvell International Ltd.
5 #include <rte_cryptodev.h>
6 #include <rte_ethdev.h>
7 #include <rte_eventdev.h>
8 #include <rte_malloc.h>
9 #include <rte_memzone.h>
10 #include <rte_security.h>
11 #include <rte_security_driver.h>
13 #include "otx2_cryptodev_qp.h"
14 #include "otx2_ethdev.h"
15 #include "otx2_ethdev_sec.h"
16 #include "otx2_ipsec_fp.h"
17 #include "otx2_sec_idev.h"
19 #define ETH_SEC_MAX_PKT_LEN 1450
/* Bitfield view of the 32-bit tag constant programmed into the NIX inline
 * IPsec LF config (see eth_sec_ipsec_cfg(), which writes .event_type and
 * .port and reads the aggregate as .u32).
 *
 * NOTE(review): lines are elided from this view — the member covering bits
 * 19:12 (presumably 'port', written in eth_sec_ipsec_cfg()) and the
 * enclosing union providing the 'u32' overlay are not visible. Confirm
 * against the full file.
 */
21 struct eth_sec_tag_const {
25 uint32_t rsvd_11_0 : 12;  /* bits 11:0 — reserved */
27 uint32_t event_type : 4;  /* bits 23:20 — event type (RTE_EVENT_TYPE_ETHDEV) */
28 uint32_t rsvd_31_24 : 8;  /* bits 31:24 — reserved */
/* Symmetric crypto capabilities advertised for inline IPsec on this device:
 * AES-GCM (AEAD), AES-CBC (cipher) and SHA1-HMAC (auth). The per-algorithm
 * parameter ranges (key/digest/IV sizes, braces of the nested initializers)
 * are elided from this view of the file.
 */
34 static struct rte_cryptodev_capabilities otx2_eth_sec_crypto_caps[] = {
	/* AES-GCM AEAD */
36 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
38 .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
40 .algo = RTE_CRYPTO_AEAD_AES_GCM,
	/* AES-CBC cipher */
66 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
68 .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
70 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
	/* SHA1-HMAC auth */
86 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
88 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
90 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
	/* Terminator required by the rte_cryptodev capability API */
105 RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
/* Security capabilities returned by otx2_eth_sec_capabilities_get():
 * inline-protocol ESP tunnel mode, one entry per direction, both pointing
 * at the shared crypto capability table above. The list is closed by an
 * ACTION_TYPE_NONE entry, per rte_security convention. Some initializer
 * braces/fields are elided from this view.
 */
108 static const struct rte_security_capability otx2_eth_sec_capabilities[] = {
109 { /* IPsec Inline Protocol ESP Tunnel Ingress */
110 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
111 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
113 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
114 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
115 .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
118 .crypto_capabilities = otx2_eth_sec_crypto_caps,
119 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
121 { /* IPsec Inline Protocol ESP Tunnel Egress */
122 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
123 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
125 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
126 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
127 .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
130 .crypto_capabilities = otx2_eth_sec_crypto_caps,
131 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	/* End-of-list sentinel */
134 .action = RTE_SECURITY_ACTION_TYPE_NONE
/* Build the per-port memzone name ("otx2_ipsec_in_sadb_<port>") that holds
 * the inbound SA database. 'size' bounds the write via snprintf, so 'name'
 * is always NUL-terminated (for size > 0).
 */
139 in_sa_mz_name_get(char *name, int size, uint16_t port)
141 snprintf(name, size, "otx2_ipsec_in_sadb_%u", port);
/* Look up the inbound SA database memzone for 'port' (reserved in
 * otx2_eth_sec_init()) and return a pointer to the entry at 'sa_index'.
 * The memzone-NULL error path and the assignment of 'sa' from mz->addr
 * are elided from this view; presumably NULL is returned on lookup failure.
 */
144 static struct otx2_ipsec_fp_in_sa *
145 in_sa_get(uint16_t port, int sa_index)
147 char name[RTE_MEMZONE_NAMESIZE];
148 struct otx2_ipsec_fp_in_sa *sa;
149 const struct rte_memzone *mz;
151 in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
152 mz = rte_memzone_lookup(name);
154 otx2_err("Could not get the memzone reserved for IN SA DB");
	/* Pointer arithmetic: entries are fixed-size otx2_ipsec_fp_in_sa slots */
160 return sa + sa_index;
/* Compute the HMAC ipad/opad for 'auth_key' by submitting a
 * WRITE_HMAC_IPAD_OPAD instruction to the CPT queue pair 'qp', and copy the
 * 48-byte result into 'hmac_key' (stored in the SA for inline IPsec).
 * The auth algorithm is taken from ctl->auth_type. Synchronous: polls for
 * completion with a 5-second timeout. Several lines (the 'inst_data' struct
 * definition, alloc-failure returns, rte_free of 'md', and the final return)
 * are elided from this view.
 */
164 hmac_init(struct otx2_ipsec_fp_sa_ctl *ctl, struct otx2_cpt_qp *qp,
165 const uint8_t *auth_key, int len, uint8_t *hmac_key)
168 struct otx2_cpt_res cpt_res;
172 volatile struct otx2_cpt_res *res;
173 uint64_t timeout, lmt_status;
174 struct otx2_cpt_inst_s inst;
178 memset(&inst, 0, sizeof(struct otx2_cpt_inst_s));
	/* DMA-able scratch holding both the key buffer and the result word */
180 md = rte_zmalloc(NULL, sizeof(struct inst_data), OTX2_CPT_RES_ALIGN);
184 memcpy(md->buffer, auth_key, len);
186 md_iova = rte_malloc_virt2iova(md);
187 if (md_iova == RTE_BAD_IOVA) {
	/* Build the CPT instruction: result and data addresses are IOVAs */
192 inst.res_addr = md_iova + offsetof(struct inst_data, cpt_res);
193 inst.opcode = OTX2_CPT_OP_WRITE_HMAC_IPAD_OPAD;
194 inst.param2 = ctl->auth_type;
196 inst.dptr = md_iova + offsetof(struct inst_data, buffer);
	/* In-place operation: result is written back over the input buffer */
197 inst.rptr = inst.dptr;
198 inst.egrp = OTX2_CPT_EGRP_INLINE_IPSEC;
	/* 0xff marks "not yet completed"; hardware overwrites it on completion */
200 md->cpt_res.compcode = 0;
201 md->cpt_res.uc_compcode = 0xff;
203 timeout = rte_get_timer_cycles() + 5 * rte_get_timer_hz();
	/* Submit via LMT; retry until the store is accepted (lmt_status != 0) */
208 otx2_lmt_mov(qp->lmtline, &inst, 2);
209 lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
210 } while (lmt_status == 0);
212 res = (volatile struct otx2_cpt_res *)&md->cpt_res;
214 /* Wait until instruction completes or times out */
215 while (res->uc_compcode == 0xff) {
216 if (rte_get_timer_cycles() > timeout)
220 if (res->u16[0] != OTX2_SEC_COMP_GOOD) {
225 /* Retrieve the ipad and opad from rptr */
226 memcpy(hmac_key, md->buffer, 48);
/* Create an egress (outbound) inline-protocol IPsec session.
 *
 * Fills the outbound SA ('sa', whose assignment is elided from this view —
 * presumably it lives inside the per-session state) from the IPsec and
 * crypto transforms, binds a CPT queue pair for this port, programs the SA
 * control word, and initializes the HMAC ipad/opad via hmac_init().
 * Several lines (error returns, the "already registered" condition, the
 * udp_encap body, SA-control pointer setup, success return and cleanup
 * labels) are elided from this view.
 */
236 eth_sec_ipsec_out_sess_create(struct rte_eth_dev *eth_dev,
237 struct rte_security_ipsec_xform *ipsec,
238 struct rte_crypto_sym_xform *crypto_xform,
239 struct rte_security_session *sec_sess)
241 struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
242 struct otx2_sec_session_ipsec_ip *sess;
243 uint16_t port = eth_dev->data->port_id;
244 int cipher_key_len, auth_key_len, ret;
245 const uint8_t *cipher_key, *auth_key;
246 struct otx2_ipsec_fp_sa_ctl *ctl;
247 struct otx2_ipsec_fp_out_sa *sa;
248 struct otx2_sec_session *priv;
249 struct otx2_cpt_qp *qp;
251 priv = get_sec_session_private_data(sec_sess);
252 sess = &priv->ipsec.ip;
	/* Reject double registration (condition elided from this view) */
257 otx2_err("SA already registered");
261 memset(sess, 0, sizeof(struct otx2_sec_session_ipsec_ip));
	/* For AEAD (AES-GCM), the 4-byte salt becomes the SA nonce */
263 if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
264 memcpy(sa->nonce, &ipsec->salt, 4);
266 if (ipsec->options.udp_encap == 1) {
	/* Tunnel mode: copy outer IPv4 source/destination into the SA */
271 if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
272 if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
273 memcpy(&sa->ip_src, &ipsec->tunnel.ipv4.src_ip,
274 sizeof(struct in_addr));
275 memcpy(&sa->ip_dst, &ipsec->tunnel.ipv4.dst_ip,
276 sizeof(struct in_addr));
	/* Egress xform chain order: cipher first, then auth */
284 cipher_xform = crypto_xform;
285 auth_xform = crypto_xform->next;
	/* AEAD carries one key; otherwise pull cipher and auth keys separately */
291 if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
292 cipher_key = crypto_xform->aead.key.data;
293 cipher_key_len = crypto_xform->aead.key.length;
295 cipher_key = cipher_xform->cipher.key.data;
296 cipher_key_len = cipher_xform->cipher.key.length;
297 auth_key = auth_xform->auth.key.data;
298 auth_key_len = auth_xform->auth.key.length;
301 if (cipher_key_len != 0)
302 memcpy(sa->cipher_key, cipher_key, cipher_key_len);
306 /* Get CPT QP to be used for this SA */
307 ret = otx2_sec_idev_tx_cpt_qp_get(port, &qp);
	/* Cache the QP's LMT line and doorbell for the fast path */
313 sess->cpt_lmtline = qp->lmtline;
314 sess->cpt_nq_reg = qp->lf_nq_reg;
316 /* Populate control word */
317 ret = ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl);
321 if (auth_key_len && auth_key) {
322 ret = hmac_init(ctl, qp, auth_key, auth_key_len, sa->hmac_key);
	/* Error path: release the QP acquired above */
329 otx2_sec_idev_tx_cpt_qp_put(sess->qp);
/* Create an ingress (inbound) inline-protocol IPsec session.
 *
 * The inbound SA database is indexed directly by SPI, so the SPI must be
 * below the device's ipsec_in_max_spi. Fills the SA entry in the shared
 * memzone, stores the application 'userdata' for ingress SA identification,
 * programs the control word, and runs hmac_init() on a temporarily borrowed
 * CPT queue pair. Error returns and the closing lines are elided from this
 * view.
 */
334 eth_sec_ipsec_in_sess_create(struct rte_eth_dev *eth_dev,
335 struct rte_security_ipsec_xform *ipsec,
336 struct rte_crypto_sym_xform *crypto_xform,
337 struct rte_security_session *sec_sess)
339 struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
340 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
341 struct otx2_sec_session_ipsec_ip *sess;
342 uint16_t port = eth_dev->data->port_id;
343 int cipher_key_len, auth_key_len, ret;
344 const uint8_t *cipher_key, *auth_key;
345 struct otx2_ipsec_fp_sa_ctl *ctl;
346 struct otx2_ipsec_fp_in_sa *sa;
347 struct otx2_sec_session *priv;
348 struct otx2_cpt_qp *qp;
350 if (ipsec->spi >= dev->ipsec_in_max_spi) {
351 otx2_err("SPI exceeds max supported");
	/* SA DB entry is selected by SPI value */
355 sa = in_sa_get(port, ipsec->spi);
358 priv = get_sec_session_private_data(sec_sess);
359 sess = &priv->ipsec.ip;
	/* Reject double registration (condition elided from this view) */
362 otx2_err("SA already registered");
366 memset(sa, 0, sizeof(struct otx2_ipsec_fp_in_sa));
	/* Ingress xform chain order: auth first, then cipher */
368 auth_xform = crypto_xform;
369 cipher_xform = crypto_xform->next;
	/* AEAD carries one key; AES-GCM additionally needs the 4-byte salt */
375 if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
376 if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
377 memcpy(sa->nonce, &ipsec->salt, 4);
378 cipher_key = crypto_xform->aead.key.data;
379 cipher_key_len = crypto_xform->aead.key.length;
381 cipher_key = cipher_xform->cipher.key.data;
382 cipher_key_len = cipher_xform->cipher.key.length;
383 auth_key = auth_xform->auth.key.data;
384 auth_key_len = auth_xform->auth.key.length;
387 if (cipher_key_len != 0)
388 memcpy(sa->cipher_key, cipher_key, cipher_key_len);
	/* Application cookie delivered with ingress packets matching this SA */
394 sa->userdata = priv->userdata;
396 ret = ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl);
400 if (auth_key_len && auth_key) {
401 /* Get a queue pair for HMAC init */
402 ret = otx2_sec_idev_tx_cpt_qp_get(port, &qp);
	/* QP is only needed for this one-shot operation; release immediately */
405 ret = hmac_init(ctl, qp, auth_key, auth_key_len, sa->hmac_key);
406 otx2_sec_idev_tx_cpt_qp_put(qp);
/* Validate the IPsec/crypto transform combination, then dispatch session
 * creation to the ingress or egress handler based on SA direction.
 * The verify-failure return and trailing call arguments are elided from
 * this view.
 */
412 eth_sec_ipsec_sess_create(struct rte_eth_dev *eth_dev,
413 struct rte_security_ipsec_xform *ipsec,
414 struct rte_crypto_sym_xform *crypto_xform,
415 struct rte_security_session *sess)
419 ret = ipsec_fp_xform_verify(ipsec, crypto_xform);
423 if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
424 return eth_sec_ipsec_in_sess_create(eth_dev, ipsec,
427 return eth_sec_ipsec_out_sess_create(eth_dev, ipsec,
/* rte_security .session_create callback.
 *
 * Only inline-protocol actions are supported. Allocates per-session private
 * data from 'mempool', attaches it to 'sess', records the application
 * userdata, and delegates to eth_sec_ipsec_sess_create() for IPsec. On
 * failure the private data is returned to the pool and detached. Error
 * returns and the success path are elided from this view.
 */
432 otx2_eth_sec_session_create(void *device,
433 struct rte_security_session_conf *conf,
434 struct rte_security_session *sess,
435 struct rte_mempool *mempool)
437 struct otx2_sec_session *priv;
440 if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
443 if (rte_mempool_get(mempool, (void **)&priv)) {
444 otx2_err("Could not allocate security session private data");
448 set_sec_session_private_data(sess, priv);
451 * Save userdata provided by the application. For ingress packets, this
452 * could be used to identify the SA.
454 priv->userdata = conf->userdata;
456 if (conf->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
457 ret = eth_sec_ipsec_sess_create(device, &conf->ipsec,
	/* Failure cleanup: release private data and detach it from the session */
469 rte_mempool_put(mempool, priv);
470 set_sec_session_private_data(sess, NULL);
/* rte_security .session_destroy callback.
 *
 * Releases the CPT queue pair held by the session (if any), detaches the
 * private data from the session, and returns it to its originating mempool
 * (recovered via rte_mempool_from_obj()). NULL checks and returns are
 * elided from this view.
 */
475 otx2_eth_sec_session_destroy(void *device __rte_unused,
476 struct rte_security_session *sess)
478 struct otx2_sec_session_ipsec_ip *sess_ip;
479 struct otx2_sec_session *priv;
480 struct rte_mempool *sess_mp;
483 priv = get_sec_session_private_data(sess);
487 sess_ip = &priv->ipsec.ip;
489 /* Release CPT LF used for this session */
490 if (sess_ip->qp != NULL) {
491 ret = otx2_sec_idev_tx_cpt_qp_put(sess_ip->qp);
496 sess_mp = rte_mempool_from_obj(priv);
498 set_sec_session_private_data(sess, NULL);
499 rte_mempool_put(sess_mp, priv);
/* rte_security .session_get_size callback: size of the per-session private
 * data the PMD allocates from the application-provided mempool.
 */
505 otx2_eth_sec_session_get_size(void *device __rte_unused)
507 return sizeof(struct otx2_sec_session);
/* rte_security .capabilities_get callback: expose the static inline-IPsec
 * capability table defined above.
 */
510 static const struct rte_security_capability *
511 otx2_eth_sec_capabilities_get(void *device __rte_unused)
513 return otx2_eth_sec_capabilities;
/* rte_security operations vtable registered via the security context in
 * otx2_eth_sec_ctx_create(). Unlisted callbacks remain NULL.
 */
516 static struct rte_security_ops otx2_eth_sec_ops = {
517 .session_create = otx2_eth_sec_session_create,
518 .session_destroy = otx2_eth_sec_session_destroy,
519 .session_get_size = otx2_eth_sec_session_get_size,
520 .capabilities_get = otx2_eth_sec_capabilities_get
/* Allocate and attach an rte_security context to the ethdev, after
 * initializing the per-port security idev configuration. Allocation-failure
 * and error returns are elided from this view.
 */
524 otx2_eth_sec_ctx_create(struct rte_eth_dev *eth_dev)
526 struct rte_security_ctx *ctx;
529 ctx = rte_malloc("otx2_eth_sec_ctx",
530 sizeof(struct rte_security_ctx), 0);
534 ret = otx2_sec_idev_cfg_init(eth_dev->data->port_id);
	/* Populate the context and hand it to the ethdev layer */
542 ctx->device = eth_dev;
543 ctx->ops = &otx2_eth_sec_ops;
546 eth_dev->security_ctx = ctx;
/* Free the security context allocated in otx2_eth_sec_ctx_create().
 * rte_free(NULL) is a no-op, so no guard is needed.
 */
552 otx2_eth_sec_ctx_destroy(struct rte_eth_dev *eth_dev)
554 rte_free(eth_dev->security_ctx);
/* Program the NIX inline IPsec LF configuration via mailbox: points the
 * hardware at the inbound SA database memzone and sets SA sizing/indexing,
 * the max packet length, the scheduling tag type 'tt', and the event tag
 * constant (event type + port) used to route inbound IPsec events.
 * Memzone/alloc NULL checks are elided from this view.
 */
558 eth_sec_ipsec_cfg(struct rte_eth_dev *eth_dev, uint8_t tt)
560 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
561 uint16_t port = eth_dev->data->port_id;
562 struct nix_inline_ipsec_lf_cfg *req;
563 struct otx2_mbox *mbox = dev->mbox;
564 struct eth_sec_tag_const tag_const;
565 char name[RTE_MEMZONE_NAMESIZE];
566 const struct rte_memzone *mz;
568 in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
569 mz = rte_memzone_lookup(name);
573 req = otx2_mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
	/* Hardware accesses the SA DB by IOVA (memzone is IOVA-contiguous) */
575 req->sa_base_addr = mz->iova;
577 req->ipsec_cfg0.tt = tt;
	/* Tag constant identifies inbound IPsec events as ETHDEV-type for this port */
580 tag_const.event_type = RTE_EVENT_TYPE_ETHDEV;
581 tag_const.port = port;
582 req->ipsec_cfg0.tag_const = tag_const.u32;
	/* SA entry size must be a power of two; expressed as log2 */
584 req->ipsec_cfg0.sa_pow2_size =
585 rte_log2_u32(sizeof(struct otx2_ipsec_fp_in_sa));
586 req->ipsec_cfg0.lenm1_max = ETH_SEC_MAX_PKT_LEN - 1;
	/* SA index width and maximum derived from the configured max SPI */
588 req->ipsec_cfg1.sa_idx_w = rte_log2_u32(dev->ipsec_in_max_spi);
589 req->ipsec_cfg1.sa_idx_max = dev->ipsec_in_max_spi - 1;
591 return otx2_mbox_process(mbox);
/* Per-port security init: reserve the IOVA-contiguous inbound SA database
 * memzone (one fixed-size slot per possible SPI), zero it, and program the
 * NIX inline IPsec LF config with ordered scheduling. Skipped entirely when
 * neither RX nor TX security offload is enabled. Success/error return lines
 * and cleanup labels are elided from this view.
 */
595 otx2_eth_sec_init(struct rte_eth_dev *eth_dev)
597 const size_t sa_width = sizeof(struct otx2_ipsec_fp_in_sa);
598 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
599 uint16_t port = eth_dev->data->port_id;
600 char name[RTE_MEMZONE_NAMESIZE];
601 const struct rte_memzone *mz;
	/* Hardware constraint: SA entry size must be a power of 2 in [32, 512] */
605 RTE_BUILD_BUG_ON(sa_width < 32 || sa_width > 512 ||
606 !RTE_IS_POWER_OF_2(sa_width));
608 if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
609 !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
	/* One SA slot per SPI value, indexed directly by SPI */
612 nb_sa = dev->ipsec_in_max_spi;
613 mz_sz = nb_sa * sa_width;
614 in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
	/* IOVA_CONTIG: hardware reads the DB via a single base IOVA */
615 mz = rte_memzone_reserve_aligned(name, mz_sz, rte_socket_id(),
616 RTE_MEMZONE_IOVA_CONTIG, OTX2_ALIGN);
619 otx2_err("Could not allocate inbound SA DB");
623 memset(mz->addr, 0, mz_sz);
625 ret = eth_sec_ipsec_cfg(eth_dev, SSO_TT_ORDERED);
627 otx2_err("Could not configure inline IPsec");
	/* Error path: undo init before propagating failure */
634 otx2_err("Could not configure device for security");
635 otx2_eth_sec_fini(eth_dev);
/* Per-port security teardown: free the inbound SA database memzone
 * reserved by otx2_eth_sec_init(). Mirrors init's offload-enabled guard so
 * nothing is done when security offload was never configured.
 * rte_memzone_free() tolerates the NULL a failed lookup would produce.
 */
640 otx2_eth_sec_fini(struct rte_eth_dev *eth_dev)
642 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
643 uint16_t port = eth_dev->data->port_id;
644 char name[RTE_MEMZONE_NAMESIZE];
646 if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
647 !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
650 in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
651 rte_memzone_free(rte_memzone_lookup(name));