1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (C) 2020 Marvell International Ltd.
5 #include <rte_cryptodev.h>
7 #include <rte_ethdev.h>
9 #include <rte_malloc.h>
10 #include <rte_security.h>
11 #include <rte_security_driver.h>
14 #include "otx2_cryptodev.h"
15 #include "otx2_cryptodev_capabilities.h"
16 #include "otx2_cryptodev_hw_access.h"
17 #include "otx2_cryptodev_ops.h"
18 #include "otx2_cryptodev_sec.h"
19 #include "otx2_security.h"
/*
 * Precompute per-packet length bookkeeping for a lookaside-IPsec session:
 * lp->partial_len (fixed per-packet overhead: outer IP hdr, ESP/AH hdr,
 * optional UDP-encap hdr, IV and ICV), lp->roundup_len (ESP trailer) and
 * lp->roundup_byte (cipher block rounding). Used later to size output mbufs.
 * NOTE(review): this listing elides several lines (returns, closing braces);
 * comments below describe only what is visible.
 */
22 ipsec_lp_len_precalc(struct rte_security_ipsec_xform *ipsec,
23 struct rte_crypto_sym_xform *xform,
24 struct otx2_sec_session_ipsec_lp *lp)
26 struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
/* Tunnel mode: account for the outer IPv4/IPv6 header that is prepended. */
29 if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
30 if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
31 lp->partial_len = sizeof(struct rte_ipv4_hdr);
32 else if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV6)
33 lp->partial_len = sizeof(struct rte_ipv6_hdr);
/* Protocol overhead: ESP adds header + tail, AH adds a fixed header. */
38 if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
39 lp->partial_len += sizeof(struct rte_esp_hdr);
40 lp->roundup_len = sizeof(struct rte_esp_tail);
41 } else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH) {
42 lp->partial_len += OTX2_SEC_AH_HDR_LEN;
/* RFC 3948 style UDP encapsulation inserts a UDP header. */
47 if (ipsec->options.udp_encap)
48 lp->partial_len += sizeof(struct rte_udp_hdr);
/* AEAD (AES-GCM): IV + ICV overhead and GCM rounding; no separate auth. */
50 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
51 if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
52 lp->partial_len += OTX2_SEC_AES_GCM_IV_LEN;
53 lp->partial_len += OTX2_SEC_AES_GCM_MAC_LEN;
54 lp->roundup_byte = OTX2_SEC_AES_GCM_ROUNDUP_BYTE_LEN;
/*
 * Non-AEAD chain: xform order depends on direction —
 * egress is cipher-then-auth, ingress is auth-then-cipher.
 */
61 if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
63 auth_xform = xform->next;
64 } else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
66 cipher_xform = xform->next;
/* CBC needs an explicit IV in the packet plus block-size rounding. */
71 if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
72 lp->partial_len += OTX2_SEC_AES_CBC_IV_LEN;
73 lp->roundup_byte = OTX2_SEC_AES_CBC_ROUNDUP_BYTE_LEN;
/* HMAC ICV length depends on the hash algorithm. */
78 if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
79 lp->partial_len += OTX2_SEC_SHA1_HMAC_LEN;
80 else if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA256_HMAC)
81 lp->partial_len += OTX2_SEC_SHA2_HMAC_LEN;
/*
 * Synchronously submit a WRITE_SA instruction to the CPT queue pair so the
 * microcode installs/updates the SA context pointed to by @lp.
 * Flow visible here: grab scratch memory for the result word from the QP's
 * meta pool, build an otx2_cpt_inst_s, submit it via the LMT line, then
 * busy-poll the result until completion or DEFAULT_COMMAND_TIMEOUT elapses,
 * mapping hardware/microcode completion codes to error logs.
 * NOTE(review): listing elides lines (returns, do{ head, switch heads, break
 * statements); comments describe only visible statements.
 */
89 otx2_cpt_enq_sa_write(struct otx2_sec_session_ipsec_lp *lp,
90 struct otx2_cpt_qp *qptr, uint8_t opcode)
92 uint64_t lmt_status, time_out;
93 void *lmtline = qptr->lmtline;
94 struct otx2_cpt_inst_s inst;
95 struct otx2_cpt_res *res;
/* Result word lives in meta-pool memory; bail out if the pool is empty. */
99 if (unlikely(rte_mempool_get(qptr->meta_info.pool,
100 (void **)&mdata) < 0))
/* Hardware requires a 16-byte aligned result address; prime it NOTDONE. */
103 res = (struct otx2_cpt_res *)RTE_PTR_ALIGN(mdata, 16);
104 res->compcode = CPT_9X_COMP_E_NOTDONE;
/* Opcode word also carries the context length (in 8-byte words) in bits 8+. */
106 inst.opcode = opcode | (lp->ctx_len << 8);
/* dlen is in bytes: ctx_len is in 8-byte units, hence << 3. */
109 inst.dlen = lp->ctx_len << 3;
110 inst.dptr = rte_mempool_virt2iova(lp);
112 inst.cptr = rte_mempool_virt2iova(lp);
113 inst.egrp = OTX2_CPT_EGRP_SE;
118 inst.res_addr = rte_mempool_virt2iova(res);
123 /* Copy CPT command to LMTLINE */
124 otx2_lmt_mov(lmtline, &inst, 2);
/* Retry submission while the LMT store reports failure (status == 0). */
125 lmt_status = otx2_lmt_submit(qptr->lf_nq_reg);
126 } while (lmt_status == 0);
/* Poll deadline in timer cycles. */
128 time_out = rte_get_timer_cycles() +
129 DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
/* Busy-wait for completion; on timeout return the scratch buffer and log. */
131 while (res->compcode == CPT_9X_COMP_E_NOTDONE) {
132 if (rte_get_timer_cycles() > time_out) {
133 rte_mempool_put(qptr->meta_info.pool, mdata);
134 otx2_err("Request timed out");
/* Hardware-level completion code checks. */
140 if (unlikely(res->compcode != CPT_9X_COMP_E_GOOD)) {
143 case CPT_9X_COMP_E_INSTERR:
144 otx2_err("Request failed with instruction error");
146 case CPT_9X_COMP_E_FAULT:
147 otx2_err("Request failed with DMA fault");
149 case CPT_9X_COMP_E_HWERR:
150 otx2_err("Request failed with hardware error");
153 otx2_err("Request failed with unknown hardware "
154 "completion code : 0x%x", ret);
/* Microcode-level completion code checks. */
159 if (unlikely(res->uc_compcode != OTX2_IPSEC_PO_CC_SUCCESS)) {
160 ret = res->uc_compcode;
162 case OTX2_IPSEC_PO_CC_AUTH_UNSUPPORTED:
163 otx2_err("Invalid auth type");
165 case OTX2_IPSEC_PO_CC_ENCRYPT_UNSUPPORTED:
166 otx2_err("Invalid encrypt type");
169 otx2_err("Request failed with unknown microcode "
170 "completion code : 0x%x", ret);
/* Success/exit path visible here: return scratch buffer to the pool. */
175 rte_mempool_put(qptr->meta_info.pool, mdata);
/*
 * Cache per-session crypto parameters (IV offset/length, AAD length, digest
 * length) on the lookaside session so the datapath need not walk the xform
 * chain per packet. AEAD sessions take everything from the AEAD xform;
 * otherwise cipher and auth parameters come from their respective xforms.
 * NOTE(review): the else line between the two branches is elided in this
 * listing.
 */
180 set_session_misc_attributes(struct otx2_sec_session_ipsec_lp *sess,
181 struct rte_crypto_sym_xform *crypto_xform,
182 struct rte_crypto_sym_xform *auth_xform,
183 struct rte_crypto_sym_xform *cipher_xform)
185 if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
186 sess->iv_offset = crypto_xform->aead.iv.offset;
187 sess->iv_length = crypto_xform->aead.iv.length;
188 sess->aad_length = crypto_xform->aead.aad_length;
189 sess->mac_len = crypto_xform->aead.digest_length;
/* Non-AEAD: cipher IV and auth IV/digest tracked separately. */
191 sess->iv_offset = cipher_xform->cipher.iv.offset;
192 sess->iv_length = cipher_xform->cipher.iv.length;
193 sess->auth_iv_offset = auth_xform->auth.iv.offset;
194 sess->auth_iv_length = auth_xform->auth.iv.length;
195 sess->mac_len = auth_xform->auth.digest_length;
/* Microcode per-packet parameters: per-packet IV, no extra param2 flags. */
198 sess->ucmd_param1 = OTX2_IPSEC_PO_PER_PKT_IV;
199 sess->ucmd_param2 = 0;
/*
 * Create an egress (outbound) lookaside-IPsec session: populate the
 * otx2_ipsec_po_out_sa hardware context (SA control word, IP header
 * template for tunnel mode, keys, salt), compute the microcode context
 * length, then push the SA to hardware via otx2_cpt_enq_sa_write() on
 * queue pair 0.
 * NOTE(review): this listing elides lines (sa pointer assignment, error
 * returns, some closing braces); comments describe visible code only.
 */
203 crypto_sec_ipsec_outb_session_create(struct rte_cryptodev *crypto_dev,
204 struct rte_security_ipsec_xform *ipsec,
205 struct rte_crypto_sym_xform *crypto_xform,
206 struct rte_security_session *sec_sess)
208 struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
209 struct otx2_ipsec_po_ip_template *template = NULL;
210 const uint8_t *cipher_key, *auth_key;
211 struct otx2_sec_session_ipsec_lp *lp;
212 struct otx2_ipsec_po_sa_ctl *ctl;
213 int cipher_key_len, auth_key_len;
214 struct otx2_ipsec_po_out_sa *sa;
215 struct otx2_sec_session *sess;
216 struct otx2_cpt_inst_s inst;
217 struct rte_ipv6_hdr *ip6;
218 struct rte_ipv4_hdr *ip;
221 sess = get_sec_session_private_data(sec_sess);
222 sess->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
223 lp = &sess->ipsec.lp;
/* Reject double registration of the SA. */
228 otx2_err("SA already registered");
232 memset(sa, 0, sizeof(struct otx2_ipsec_po_out_sa));
234 /* Initialize lookaside ipsec private data */
235 lp->mode_type = OTX2_IPSEC_PO_TRANSPORT;
/* Fill the SA control word from the security + crypto xforms. */
240 ret = ipsec_po_sa_ctl_set(ipsec, crypto_xform, ctl);
/* Precompute per-packet length overheads for the datapath. */
244 ret = ipsec_lp_len_precalc(ipsec, crypto_xform, lp);
248 /* Start ip id from 1 */
/*
 * Pick the IP template and context length by algorithm; ctx_len is stored
 * in 8-byte words (>> 3) after aligning the byte size up to 8.
 * This first set sizes for the IPv4 template.
 */
251 if (ctl->enc_type == OTX2_IPSEC_PO_SA_ENC_AES_GCM) {
252 template = &sa->aes_gcm.template;
253 ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
254 aes_gcm.template) + sizeof(
255 sa->aes_gcm.template.ip4);
256 ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
257 lp->ctx_len = ctx_len >> 3;
258 } else if (ctl->auth_type ==
259 OTX2_IPSEC_PO_SA_AUTH_SHA1) {
260 template = &sa->sha1.template;
261 ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
262 sha1.template) + sizeof(
263 sa->sha1.template.ip4);
264 ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
265 lp->ctx_len = ctx_len >> 3;
266 } else if (ctl->auth_type ==
267 OTX2_IPSEC_PO_SA_AUTH_SHA2_256) {
268 template = &sa->sha2.template;
269 ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
270 sha2.template) + sizeof(
271 sa->sha2.template.ip4);
272 ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
273 lp->ctx_len = ctx_len >> 3;
/* IPv4 header template: UDP encap (port 4500) or plain ESP. */
277 ip = &template->ip4.ipv4_hdr;
278 if (ipsec->options.udp_encap) {
279 ip->next_proto_id = IPPROTO_UDP;
280 template->ip4.udp_src = rte_be_to_cpu_16(4500);
281 template->ip4.udp_dst = rte_be_to_cpu_16(4500);
283 ip->next_proto_id = IPPROTO_ESP;
/* Tunnel mode: finish the outer header template from tunnel parameters. */
286 if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
287 if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
288 lp->mode_type = OTX2_IPSEC_PO_TUNNEL_IPV4;
289 ip->version_ihl = RTE_IPV4_VHL_DEF;
290 ip->time_to_live = ipsec->tunnel.ipv4.ttl;
/* DSCP occupies the upper 6 bits of TOS, hence << 2. */
291 ip->type_of_service |= (ipsec->tunnel.ipv4.dscp << 2);
/* Don't-Fragment is bit 14 of the fragment_offset field. */
292 if (ipsec->tunnel.ipv4.df)
293 ip->fragment_offset = BIT(14);
294 memcpy(&ip->src_addr, &ipsec->tunnel.ipv4.src_ip,
295 sizeof(struct in_addr));
296 memcpy(&ip->dst_addr, &ipsec->tunnel.ipv4.dst_ip,
297 sizeof(struct in_addr));
298 } else if (ipsec->tunnel.type ==
299 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
/* IPv6 tunnel: re-derive ctx_len with the (larger) ip6 template. */
301 lp->mode_type = OTX2_IPSEC_PO_TUNNEL_IPV6;
302 if (ctl->enc_type == OTX2_IPSEC_PO_SA_ENC_AES_GCM) {
303 template = &sa->aes_gcm.template;
304 ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
305 aes_gcm.template) + sizeof(
306 sa->aes_gcm.template.ip6);
307 ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
308 lp->ctx_len = ctx_len >> 3;
309 } else if (ctl->auth_type ==
310 OTX2_IPSEC_PO_SA_AUTH_SHA1) {
311 template = &sa->sha1.template;
312 ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
313 sha1.template) + sizeof(
314 sa->sha1.template.ip6);
315 ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
316 lp->ctx_len = ctx_len >> 3;
317 } else if (ctl->auth_type ==
318 OTX2_IPSEC_PO_SA_AUTH_SHA2_256) {
319 template = &sa->sha2.template;
320 ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
321 sha2.template) + sizeof(
322 sa->sha2.template.ip6);
323 ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
324 lp->ctx_len = ctx_len >> 3;
/* IPv6 outer header template. */
329 ip6 = &template->ip6.ipv6_hdr;
330 if (ipsec->options.udp_encap) {
331 ip6->proto = IPPROTO_UDP;
332 template->ip6.udp_src = rte_be_to_cpu_16(4500);
333 template->ip6.udp_dst = rte_be_to_cpu_16(4500);
335 ip6->proto = (ipsec->proto ==
336 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
337 IPPROTO_ESP : IPPROTO_AH;
/* vtc_flow = version 6 | traffic class (DSCP) | flow label. */
339 ip6->vtc_flow = rte_cpu_to_be_32(0x60000000 |
340 ((ipsec->tunnel.ipv6.dscp <<
341 RTE_IPV6_HDR_TC_SHIFT) &
342 RTE_IPV6_HDR_TC_MASK) |
343 ((ipsec->tunnel.ipv6.flabel <<
344 RTE_IPV6_HDR_FL_SHIFT) &
345 RTE_IPV6_HDR_FL_MASK));
346 ip6->hop_limits = ipsec->tunnel.ipv6.hlimit;
347 memcpy(&ip6->src_addr, &ipsec->tunnel.ipv6.src_addr,
348 sizeof(struct in6_addr));
349 memcpy(&ip6->dst_addr, &ipsec->tunnel.ipv6.dst_addr,
350 sizeof(struct in6_addr));
/* Egress xform order: cipher first, then auth. */
354 cipher_xform = crypto_xform;
355 auth_xform = crypto_xform->next;
/* Copy keys into the SA; GCM also needs the 4-byte salt as nonce. */
360 if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
361 if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
362 memcpy(sa->iv.gcm.nonce, &ipsec->salt, 4);
363 cipher_key = crypto_xform->aead.key.data;
364 cipher_key_len = crypto_xform->aead.key.length;
366 cipher_key = cipher_xform->cipher.key.data;
367 cipher_key_len = cipher_xform->cipher.key.length;
368 auth_key = auth_xform->auth.key.data;
369 auth_key_len = auth_xform->auth.key.length;
371 if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
372 memcpy(sa->sha1.hmac_key, auth_key, auth_key_len);
373 else if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA256_HMAC)
374 memcpy(sa->sha2.hmac_key, auth_key, auth_key_len);
377 if (cipher_key_len != 0)
378 memcpy(sa->cipher_key, cipher_key, cipher_key_len);
/* Build instruction word 7 (engine group + SA IOVA) for the datapath. */
383 inst.egrp = OTX2_CPT_EGRP_SE;
384 inst.cptr = rte_mempool_virt2iova(sa);
386 lp->cpt_inst_w7 = inst.u64[7];
/* Microcode opcode word: ctx_len in bits 8+, outbound-process opcode. */
387 lp->ucmd_opcode = (lp->ctx_len << 8) |
388 (OTX2_IPSEC_PO_PROCESS_IPSEC_OUTB);
390 set_session_misc_attributes(lp, crypto_xform,
391 auth_xform, cipher_xform);
/* Flush the SA context to hardware through QP 0. */
393 return otx2_cpt_enq_sa_write(lp, crypto_dev->data->queue_pairs[0],
394 OTX2_IPSEC_PO_WRITE_IPSEC_OUTB);
/*
 * Create an ingress (inbound) lookaside-IPsec session: fill the
 * otx2_ipsec_po_in_sa context (control word, keys, salt, optional
 * anti-replay window state), compute ctx_len, and install the SA in
 * hardware via otx2_cpt_enq_sa_write() on queue pair 0.
 * NOTE(review): this listing elides lines (sa assignment, error returns,
 * some closing braces); comments describe visible code only.
 */
398 crypto_sec_ipsec_inb_session_create(struct rte_cryptodev *crypto_dev,
399 struct rte_security_ipsec_xform *ipsec,
400 struct rte_crypto_sym_xform *crypto_xform,
401 struct rte_security_session *sec_sess)
403 struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
404 const uint8_t *cipher_key, *auth_key;
405 struct otx2_sec_session_ipsec_lp *lp;
406 struct otx2_ipsec_po_sa_ctl *ctl;
407 int cipher_key_len, auth_key_len;
408 struct otx2_ipsec_po_in_sa *sa;
409 struct otx2_sec_session *sess;
410 struct otx2_cpt_inst_s inst;
413 sess = get_sec_session_private_data(sec_sess);
414 sess->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
415 lp = &sess->ipsec.lp;
/* Reject double registration of the SA. */
421 otx2_err("SA already registered");
425 memset(sa, 0, sizeof(struct otx2_ipsec_po_in_sa));
426 sa->replay_win_sz = ipsec->replay_win_sz;
428 ret = ipsec_po_sa_ctl_set(ipsec, crypto_xform, ctl);
432 lp->mode_type = OTX2_IPSEC_PO_TRANSPORT;
/* Ingress xform order: auth first, then cipher. */
434 auth_xform = crypto_xform;
435 cipher_xform = crypto_xform->next;
440 if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
441 lp->mode_type = (ipsec->tunnel.type ==
442 RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
443 OTX2_IPSEC_PO_TUNNEL_IPV4 :
444 OTX2_IPSEC_PO_TUNNEL_IPV6;
/* AEAD (AES-GCM): key + 4-byte salt; fixed inbound GCM context length. */
446 if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
447 if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
448 memcpy(sa->iv.gcm.nonce, &ipsec->salt, 4);
449 cipher_key = crypto_xform->aead.key.data;
450 cipher_key_len = crypto_xform->aead.key.length;
/* ctx_len is in 8-byte words: byte offset of the unused hmac area >> 3. */
452 lp->ctx_len = offsetof(struct otx2_ipsec_po_in_sa,
453 aes_gcm.hmac_key[0]) >> 3;
454 RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_AES_GCM_INB_CTX_LEN);
/* Non-AEAD: copy cipher and HMAC keys, ctx_len depends on hash algo. */
456 cipher_key = cipher_xform->cipher.key.data;
457 cipher_key_len = cipher_xform->cipher.key.length;
458 auth_key = auth_xform->auth.key.data;
459 auth_key_len = auth_xform->auth.key.length;
461 if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC) {
462 memcpy(sa->aes_gcm.hmac_key, auth_key, auth_key_len);
463 lp->ctx_len = offsetof(struct otx2_ipsec_po_in_sa,
464 aes_gcm.selector) >> 3;
465 } else if (auth_xform->auth.algo ==
466 RTE_CRYPTO_AUTH_SHA256_HMAC) {
467 memcpy(sa->sha2.hmac_key, auth_key, auth_key_len);
468 lp->ctx_len = offsetof(struct otx2_ipsec_po_in_sa,
473 if (cipher_key_len != 0)
474 memcpy(sa->cipher_key, cipher_key, cipher_key_len);
/* Build instruction word 7 and the inbound-process opcode word. */
479 inst.egrp = OTX2_CPT_EGRP_SE;
480 inst.cptr = rte_mempool_virt2iova(sa);
482 lp->cpt_inst_w7 = inst.u64[7];
483 lp->ucmd_opcode = (lp->ctx_len << 8) |
484 (OTX2_IPSEC_PO_PROCESS_IPSEC_INB);
486 set_session_misc_attributes(lp, crypto_xform,
487 auth_xform, cipher_xform);
/* Optional software anti-replay window, bounded by the PMD maximum. */
489 if (sa->replay_win_sz) {
490 if (sa->replay_win_sz > OTX2_IPSEC_MAX_REPLAY_WIN_SZ) {
491 otx2_err("Replay window size is not supported");
494 sa->replay = rte_zmalloc(NULL, sizeof(struct otx2_ipsec_replay),
496 if (sa->replay == NULL)
499 /* Set window bottom to 1, base and top to size of window */
500 sa->replay->winb = 1;
501 sa->replay->wint = sa->replay_win_sz;
502 sa->replay->base = sa->replay_win_sz;
/* Flush the SA context to hardware through QP 0. */
507 return otx2_cpt_enq_sa_write(lp, crypto_dev->data->queue_pairs[0],
508 OTX2_IPSEC_PO_WRITE_IPSEC_INB);
/*
 * Dispatch IPsec session creation: verify the xform combination, then
 * delegate to the inbound or outbound creator based on SA direction.
 * Requires queue pair 0 to be configured first, since SA write commands
 * are enqueued through it.
 */
512 crypto_sec_ipsec_session_create(struct rte_cryptodev *crypto_dev,
513 struct rte_security_ipsec_xform *ipsec,
514 struct rte_crypto_sym_xform *crypto_xform,
515 struct rte_security_session *sess)
519 if (crypto_dev->data->queue_pairs[0] == NULL) {
520 otx2_err("Setup cpt queue pair before creating sec session");
/* Validate the ipsec/crypto xform pairing before building any state. */
524 ret = ipsec_po_xform_verify(ipsec, crypto_xform);
528 if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
529 return crypto_sec_ipsec_inb_session_create(crypto_dev, ipsec,
532 return crypto_sec_ipsec_outb_session_create(crypto_dev, ipsec,
/*
 * rte_security .session_create callback: allocate session private data
 * from @mempool, attach it to @sess, and build the protocol-specific
 * session (only lookaside-protocol IPsec is handled here). On failure the
 * private data is returned to the pool and detached from the session.
 * NOTE(review): listing elides error returns between visible lines.
 */
537 otx2_crypto_sec_session_create(void *device,
538 struct rte_security_session_conf *conf,
539 struct rte_security_session *sess,
540 struct rte_mempool *mempool)
542 struct otx2_sec_session *priv;
/* Only lookaside-protocol action type is supported by this PMD. */
545 if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
/* Datapath stores the session in an mbuf dynfield; register it now. */
548 if (rte_security_dynfield_register() < 0)
551 if (rte_mempool_get(mempool, (void **)&priv)) {
552 otx2_err("Could not allocate security session private data");
556 set_sec_session_private_data(sess, priv);
558 priv->userdata = conf->userdata;
560 if (conf->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
561 ret = crypto_sec_ipsec_session_create(device, &conf->ipsec,
/* Failure path: release private data and detach it from the session. */
573 rte_mempool_put(mempool, priv);
574 set_sec_session_private_data(sess, NULL);
/*
 * rte_security .session_destroy callback: return the session private data
 * to its originating mempool (looked up from the object itself) and clear
 * the session's private-data pointer.
 */
579 otx2_crypto_sec_session_destroy(void *device __rte_unused,
580 struct rte_security_session *sess)
582 struct otx2_sec_session *priv;
583 struct rte_mempool *sess_mp;
585 priv = get_sec_session_private_data(sess);
/* Recover the owning mempool directly from the object address. */
590 sess_mp = rte_mempool_from_obj(priv);
592 set_sec_session_private_data(sess, NULL);
593 rte_mempool_put(sess_mp, priv);
/* rte_security .session_get_size callback: size of the PMD's private data. */
599 otx2_crypto_sec_session_get_size(void *device __rte_unused)
601 return sizeof(struct otx2_sec_session);
/*
 * rte_security .set_pkt_metadata callback: stash the security session
 * pointer in the mbuf's security dynamic field for the datapath.
 */
605 otx2_crypto_sec_set_pkt_mdata(void *device __rte_unused,
606 struct rte_security_session *session,
607 struct rte_mbuf *m, void *params __rte_unused)
609 /* Set security session as the pkt metadata */
610 *rte_security_dynfield(m) = (rte_security_dynfield_t)session;
/*
 * rte_security .get_userdata callback: the metadata word is itself the
 * userdata pointer; hand it back to the caller.
 */
616 otx2_crypto_sec_get_userdata(void *device __rte_unused, uint64_t md,
619 /* Retrieve userdata */
620 *userdata = (void *)md;
/* rte_security ops table exported through the device's security context. */
625 static struct rte_security_ops otx2_crypto_sec_ops = {
626 .session_create = otx2_crypto_sec_session_create,
627 .session_destroy = otx2_crypto_sec_session_destroy,
628 .session_get_size = otx2_crypto_sec_session_get_size,
629 .set_pkt_metadata = otx2_crypto_sec_set_pkt_mdata,
630 .get_userdata = otx2_crypto_sec_get_userdata,
631 .capabilities_get = otx2_crypto_sec_capabilities_get
/*
 * Allocate and attach a security context to the crypto device, wiring in
 * this PMD's rte_security ops table.
 * NOTE(review): the NULL-check on the allocation and other field
 * initializers appear on lines elided from this listing.
 */
635 otx2_crypto_sec_ctx_create(struct rte_cryptodev *cdev)
637 struct rte_security_ctx *ctx;
639 ctx = rte_malloc("otx2_cpt_dev_sec_ctx",
640 sizeof(struct rte_security_ctx), 0);
647 ctx->ops = &otx2_crypto_sec_ops;
650 cdev->security_ctx = ctx;
/* Free the security context created by otx2_crypto_sec_ctx_create(). */
656 otx2_crypto_sec_ctx_destroy(struct rte_cryptodev *cdev)
658 rte_free(cdev->security_ctx);