1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (C) 2020 Marvell International Ltd.
5 #include <rte_cryptodev.h>
7 #include <rte_ethdev.h>
9 #include <rte_malloc.h>
10 #include <rte_security.h>
11 #include <rte_security_driver.h>
14 #include "otx2_cryptodev.h"
15 #include "otx2_cryptodev_capabilities.h"
16 #include "otx2_cryptodev_hw_access.h"
17 #include "otx2_cryptodev_ops.h"
18 #include "otx2_cryptodev_sec.h"
19 #include "otx2_security.h"
/*
 * Precompute per-packet length bookkeeping for a lookaside IPsec session:
 * outer tunnel header size, ESP/AH overhead, optional UDP encapsulation,
 * IV/ICV lengths and the cipher pad-roundup granularity. Results are
 * accumulated into lp->partial_len / lp->roundup_len / lp->roundup_byte.
 *
 * NOTE(review): this view of the file is a lossy extraction (the embedded
 * original line numbers have gaps), so braces, return statements and some
 * error paths are not visible here. Only comments are added; all code
 * lines are left byte-identical.
 */
22 ipsec_lp_len_precalc(struct rte_security_ipsec_xform *ipsec,
23 struct rte_crypto_sym_xform *xform,
24 struct otx2_sec_session_ipsec_lp *lp)
26 struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
/* Outer header size depends on the tunnel IP version. */
28 if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
29 lp->partial_len = sizeof(struct rte_ipv4_hdr);
30 else if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV6)
31 lp->partial_len = sizeof(struct rte_ipv6_hdr);
/* ESP adds header + trailer; AH adds a fixed header only. */
35 if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
36 lp->partial_len += sizeof(struct rte_esp_hdr);
37 lp->roundup_len = sizeof(struct rte_esp_tail);
38 } else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH) {
39 lp->partial_len += OTX2_SEC_AH_HDR_LEN;
/* UDP encapsulation (NAT-T style) adds one UDP header. */
44 if (ipsec->options.udp_encap)
45 lp->partial_len += sizeof(struct rte_udp_hdr);
/* AEAD (AES-GCM) carries its own IV and ICV; no separate auth xform. */
47 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
48 if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
49 lp->partial_len += OTX2_SEC_AES_GCM_IV_LEN;
50 lp->partial_len += OTX2_SEC_AES_GCM_MAC_LEN;
51 lp->roundup_byte = OTX2_SEC_AES_GCM_ROUNDUP_BYTE_LEN;
/*
 * Non-AEAD: the xform chain order depends on direction —
 * egress is cipher-then-auth, ingress is auth-then-cipher.
 */
58 if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
60 auth_xform = xform->next;
61 } else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
63 cipher_xform = xform->next;
68 if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
69 lp->partial_len += OTX2_SEC_AES_CBC_IV_LEN;
70 lp->roundup_byte = OTX2_SEC_AES_CBC_ROUNDUP_BYTE_LEN;
75 if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
76 lp->partial_len += OTX2_SEC_SHA1_HMAC_LEN;
/*
 * Synchronously write an IPsec SA context to CPT hardware.
 *
 * Builds a single CPT instruction (opcode + SA context length/pointers),
 * submits it over the queue pair's LMT line, then busy-polls the result
 * word until the hardware flips the completion code or a timeout elapses.
 * Hardware and microcode completion codes are translated to log messages
 * (the visible code suggests corresponding error returns exist in the
 * elided lines). The meta-pool buffer used for the result is returned
 * to the pool on every visible exit path.
 *
 * NOTE(review): lossy extraction — the `do {`, `switch (...)` headers,
 * `break`/`return` statements and closing braces are not visible in this
 * view. Code lines are left byte-identical; only comments are added.
 */
84 otx2_cpt_enq_sa_write(struct otx2_sec_session_ipsec_lp *lp,
85 struct otx2_cpt_qp *qptr, uint8_t opcode)
87 uint64_t lmt_status, time_out;
88 void *lmtline = qptr->lmtline;
89 struct otx2_cpt_inst_s inst;
90 struct otx2_cpt_res *res;
/* Result memory comes from the qp's meta pool; bail out if exhausted. */
94 if (unlikely(rte_mempool_get(qptr->meta_info.pool,
95 (void **)&mdata) < 0))
/* Result word must be 16-byte aligned for the hardware to update it. */
98 res = (struct otx2_cpt_res *)RTE_PTR_ALIGN(mdata, 16);
99 res->compcode = CPT_9X_COMP_E_NOTDONE;
/* ctx_len is encoded into bits 8+ of the microcode opcode field. */
101 inst.opcode = opcode | (lp->ctx_len << 8);
/* dlen is in bytes; ctx_len is stored in 8-byte words, hence << 3. */
104 inst.dlen = lp->ctx_len << 3;
105 inst.dptr = rte_mempool_virt2iova(lp);
107 inst.cptr = rte_mempool_virt2iova(lp);
108 inst.egrp = OTX2_CPT_EGRP_SE;
113 inst.res_addr = rte_mempool_virt2iova(res);
118 /* Copy CPT command to LMTLINE */
119 otx2_lmt_mov(lmtline, &inst, 2);
120 lmt_status = otx2_lmt_submit(qptr->lf_nq_reg);
/* Retry the LMT submit until the hardware accepts the command. */
121 } while (lmt_status == 0);
/* Bounded busy-wait for hardware completion. */
123 time_out = rte_get_timer_cycles() +
124 DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
126 while (res->compcode == CPT_9X_COMP_E_NOTDONE) {
127 if (rte_get_timer_cycles() > time_out) {
128 rte_mempool_put(qptr->meta_info.pool, mdata);
129 otx2_err("Request timed out");
/* Map hardware completion codes to diagnostics. */
135 if (unlikely(res->compcode != CPT_9X_COMP_E_GOOD)) {
138 case CPT_9X_COMP_E_INSTERR:
139 otx2_err("Request failed with instruction error");
141 case CPT_9X_COMP_E_FAULT:
142 otx2_err("Request failed with DMA fault");
144 case CPT_9X_COMP_E_HWERR:
145 otx2_err("Request failed with hardware error");
148 otx2_err("Request failed with unknown hardware "
149 "completion code : 0x%x", ret);
/* Microcode-level status is checked separately from the HW status. */
154 if (unlikely(res->uc_compcode != OTX2_IPSEC_PO_CC_SUCCESS)) {
155 ret = res->uc_compcode;
157 case OTX2_IPSEC_PO_CC_AUTH_UNSUPPORTED:
158 otx2_err("Invalid auth type");
160 case OTX2_IPSEC_PO_CC_ENCRYPT_UNSUPPORTED:
161 otx2_err("Invalid encrypt type");
164 otx2_err("Request failed with unknown microcode "
165 "completion code : 0x%x", ret);
/* Return the result buffer to the meta pool before leaving. */
170 rte_mempool_put(qptr->meta_info.pool, mdata);
/*
 * Cache per-session crypto attributes (IV offset/length, AAD length,
 * digest length) in the lookaside session so the datapath does not have
 * to walk the xform chain per packet. For AEAD these come from the single
 * AEAD xform; otherwise from the separate cipher and auth xforms.
 *
 * NOTE(review): lossy extraction — braces/else are not all visible.
 * Code lines are left byte-identical; only comments are added.
 */
175 set_session_misc_attributes(struct otx2_sec_session_ipsec_lp *sess,
176 struct rte_crypto_sym_xform *crypto_xform,
177 struct rte_crypto_sym_xform *auth_xform,
178 struct rte_crypto_sym_xform *cipher_xform)
180 if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
181 sess->iv_offset = crypto_xform->aead.iv.offset;
182 sess->iv_length = crypto_xform->aead.iv.length;
183 sess->aad_length = crypto_xform->aead.aad_length;
184 sess->mac_len = crypto_xform->aead.digest_length;
/* Non-AEAD: IV from the cipher xform, digest from the auth xform. */
186 sess->iv_offset = cipher_xform->cipher.iv.offset;
187 sess->iv_length = cipher_xform->cipher.iv.length;
188 sess->auth_iv_offset = auth_xform->auth.iv.offset;
189 sess->auth_iv_length = auth_xform->auth.iv.length;
190 sess->mac_len = auth_xform->auth.digest_length;
/* Microcode parameters: per-packet IV generation, no extra param2. */
193 sess->ucmd_param1 = OTX2_IPSEC_PO_PER_PKT_IV;
194 sess->ucmd_param2 = 0;
/*
 * Create an egress (outbound) lookaside-IPsec session.
 *
 * Populates the outbound SA (otx2_ipsec_po_out_sa): SA control word,
 * precomputed packet-length deltas, the outer IPv4/IPv6 tunnel header
 * template (with optional UDP encapsulation on port 4500), cipher/auth
 * keys, and the CPT instruction word cached in the session. Finally the
 * SA is flushed to hardware via otx2_cpt_enq_sa_write() on queue pair 0.
 *
 * NOTE(review): lossy extraction — opening braces, error checks and
 * some assignments (e.g. where `sa`/`ctl` are initialized) are not
 * visible in this view. Code lines are left byte-identical; only
 * comments are added.
 */
198 crypto_sec_ipsec_outb_session_create(struct rte_cryptodev *crypto_dev,
199 struct rte_security_ipsec_xform *ipsec,
200 struct rte_crypto_sym_xform *crypto_xform,
201 struct rte_security_session *sec_sess)
203 struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
204 const uint8_t *cipher_key, *auth_key;
205 struct otx2_sec_session_ipsec_lp *lp;
206 struct otx2_ipsec_po_sa_ctl *ctl;
207 int cipher_key_len, auth_key_len;
208 struct otx2_ipsec_po_out_sa *sa;
209 struct rte_ipv6_hdr *ip6 = NULL;
210 struct rte_ipv4_hdr *ip = NULL;
211 struct otx2_sec_session *sess;
212 struct otx2_cpt_inst_s inst;
215 sess = get_sec_session_private_data(sec_sess);
216 sess->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
217 lp = &sess->ipsec.lp;
/* Reject re-registration of an already-initialized SA. */
222 otx2_err("SA already registered");
226 memset(sa, 0, sizeof(struct otx2_ipsec_po_out_sa));
228 /* Initialize lookaside ipsec private data */
232 lp->tunnel_type = ipsec->tunnel.type;
234 ret = ipsec_po_sa_ctl_set(ipsec, crypto_xform, ctl);
238 ret = ipsec_lp_len_precalc(ipsec, crypto_xform, lp);
/* Tunnel mode: build the outer IP header template inside the SA. */
242 if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
243 /* Start ip id from 1 */
246 if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
/*
 * The template location and SA context length depend on the
 * algorithm (AES-GCM vs SHA1 layouts differ in the SA union).
 * ctx_len is stored in 8-byte words (>> 3 after 8-byte align).
 */
248 if (ctl->enc_type == OTX2_IPSEC_PO_SA_ENC_AES_GCM) {
249 if (ipsec->options.udp_encap) {
/* NAT-T: IKE/ESP UDP encapsulation uses port 4500 on both ends. */
250 sa->aes_gcm.template.ip4.udp_src = 4500;
251 sa->aes_gcm.template.ip4.udp_dst = 4500;
253 ip = &sa->aes_gcm.template.ip4.ipv4_hdr;
254 ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
255 aes_gcm.template) + sizeof(
256 sa->aes_gcm.template.ip4);
257 ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
258 lp->ctx_len = ctx_len >> 3;
259 } else if (ctl->auth_type ==
260 OTX2_IPSEC_PO_SA_AUTH_SHA1) {
261 if (ipsec->options.udp_encap) {
262 sa->sha1.template.ip4.udp_src = 4500;
263 sa->sha1.template.ip4.udp_dst = 4500;
265 ip = &sa->sha1.template.ip4.ipv4_hdr;
266 ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
267 sha1.template) + sizeof(
268 sa->sha1.template.ip4);
269 ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
270 lp->ctx_len = ctx_len >> 3;
/* Fill the outer IPv4 header fields from the IPsec xform. */
272 ip->version_ihl = RTE_IPV4_VHL_DEF;
273 ip->next_proto_id = IPPROTO_ESP;
274 ip->time_to_live = ipsec->tunnel.ipv4.ttl;
275 ip->type_of_service |= (ipsec->tunnel.ipv4.dscp << 2);
276 if (ipsec->tunnel.ipv4.df)
/* Don't-Fragment is bit 14 of fragment_offset/flags. */
277 ip->fragment_offset = BIT(14);
278 memcpy(&ip->src_addr, &ipsec->tunnel.ipv4.src_ip,
279 sizeof(struct in_addr));
280 memcpy(&ip->dst_addr, &ipsec->tunnel.ipv4.dst_ip,
281 sizeof(struct in_addr));
282 } else if (ipsec->tunnel.type ==
283 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
/* Same template/ctx_len selection, IPv6 variants of the union. */
285 if (ctl->enc_type == OTX2_IPSEC_PO_SA_ENC_AES_GCM) {
286 if (ipsec->options.udp_encap) {
287 sa->aes_gcm.template.ip6.udp_src = 4500;
288 sa->aes_gcm.template.ip6.udp_dst = 4500;
290 ip6 = &sa->aes_gcm.template.ip6.ipv6_hdr;
291 ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
292 aes_gcm.template) + sizeof(
293 sa->aes_gcm.template.ip6);
294 ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
295 lp->ctx_len = ctx_len >> 3;
296 } else if (ctl->auth_type ==
297 OTX2_IPSEC_PO_SA_AUTH_SHA1) {
298 if (ipsec->options.udp_encap) {
299 sa->sha1.template.ip6.udp_src = 4500;
300 sa->sha1.template.ip6.udp_dst = 4500;
302 ip6 = &sa->sha1.template.ip6.ipv6_hdr;
303 ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
304 sha1.template) + sizeof(
305 sa->sha1.template.ip6);
306 ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
307 lp->ctx_len = ctx_len >> 3;
/* Outer IPv6 header: version 6, DSCP and flow label from xform. */
310 ip6->vtc_flow = rte_cpu_to_be_32(0x60000000 |
311 ((ipsec->tunnel.ipv6.dscp <<
312 RTE_IPV6_HDR_TC_SHIFT) &
313 RTE_IPV6_HDR_TC_MASK) |
314 ((ipsec->tunnel.ipv6.flabel <<
315 RTE_IPV6_HDR_FL_SHIFT) &
316 RTE_IPV6_HDR_FL_MASK));
317 ip6->hop_limits = ipsec->tunnel.ipv6.hlimit;
318 ip6->proto = (ipsec->proto ==
319 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
320 IPPROTO_ESP : IPPROTO_AH;
321 memcpy(&ip6->src_addr, &ipsec->tunnel.ipv6.src_addr,
322 sizeof(struct in6_addr));
323 memcpy(&ip6->dst_addr, &ipsec->tunnel.ipv6.dst_addr,
324 sizeof(struct in6_addr));
/* Egress chain order: cipher first, then auth. */
332 cipher_xform = crypto_xform;
333 auth_xform = crypto_xform->next;
/* Program keys: AEAD uses one key + salt-derived GCM nonce. */
338 if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
339 if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
340 memcpy(sa->iv.gcm.nonce, &ipsec->salt, 4);
341 cipher_key = crypto_xform->aead.key.data;
342 cipher_key_len = crypto_xform->aead.key.length;
344 cipher_key = cipher_xform->cipher.key.data;
345 cipher_key_len = cipher_xform->cipher.key.length;
346 auth_key = auth_xform->auth.key.data;
347 auth_key_len = auth_xform->auth.key.length;
349 if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
350 memcpy(sa->sha1.hmac_key, auth_key, auth_key_len);
353 if (cipher_key_len != 0)
354 memcpy(sa->cipher_key, cipher_key, cipher_key_len);
/* Cache the CPT instruction word and microcode opcode for datapath. */
359 inst.egrp = OTX2_CPT_EGRP_SE;
360 inst.cptr = rte_mempool_virt2iova(sa);
362 lp->cpt_inst_w7 = inst.u64[7];
363 lp->ucmd_opcode = (lp->ctx_len << 8) |
364 (OTX2_IPSEC_PO_PROCESS_IPSEC_OUTB);
366 set_session_misc_attributes(lp, crypto_xform,
367 auth_xform, cipher_xform);
/* Push the SA to hardware synchronously via queue pair 0. */
369 return otx2_cpt_enq_sa_write(lp, crypto_dev->data->queue_pairs[0],
370 OTX2_IPSEC_PO_WRITE_IPSEC_OUTB);
/*
 * Create an ingress (inbound) lookaside-IPsec session.
 *
 * Populates the inbound SA (otx2_ipsec_po_in_sa): SA control word,
 * cipher/auth keys, context length derived from the SA layout, and —
 * when anti-replay is requested — an otx2_ipsec_replay window structure.
 * The CPT instruction word is cached in the session and the SA is
 * flushed to hardware via otx2_cpt_enq_sa_write() on queue pair 0.
 *
 * NOTE(review): lossy extraction — opening braces, error checks and
 * some assignments (e.g. where `sa`/`ctl` are initialized and the
 * rte_zmalloc size/flags split across elided lines) are not visible.
 * Code lines are left byte-identical; only comments are added.
 */
374 crypto_sec_ipsec_inb_session_create(struct rte_cryptodev *crypto_dev,
375 struct rte_security_ipsec_xform *ipsec,
376 struct rte_crypto_sym_xform *crypto_xform,
377 struct rte_security_session *sec_sess)
379 struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
380 const uint8_t *cipher_key, *auth_key;
381 struct otx2_sec_session_ipsec_lp *lp;
382 struct otx2_ipsec_po_sa_ctl *ctl;
383 int cipher_key_len, auth_key_len;
384 struct otx2_ipsec_po_in_sa *sa;
385 struct otx2_sec_session *sess;
386 struct otx2_cpt_inst_s inst;
389 sess = get_sec_session_private_data(sec_sess);
390 sess->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
391 lp = &sess->ipsec.lp;
/* Reject re-registration of an already-initialized SA. */
397 otx2_err("SA already registered");
401 memset(sa, 0, sizeof(struct otx2_ipsec_po_in_sa));
402 sa->replay_win_sz = ipsec->replay_win_sz;
404 ret = ipsec_po_sa_ctl_set(ipsec, crypto_xform, ctl);
408 lp->tunnel_type = ipsec->tunnel.type;
/* Ingress chain order: auth first, then cipher. */
409 auth_xform = crypto_xform;
410 cipher_xform = crypto_xform->next;
415 if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
416 if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
417 memcpy(sa->iv.gcm.nonce, &ipsec->salt, 4);
418 cipher_key = crypto_xform->aead.key.data;
419 cipher_key_len = crypto_xform->aead.key.length;
/* ctx_len counts 8-byte words up to the end of the GCM key region. */
421 lp->ctx_len = offsetof(struct otx2_ipsec_po_in_sa,
422 aes_gcm.hmac_key[0]) >> 3;
423 RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_AES_GCM_INB_CTX_LEN);
425 cipher_key = cipher_xform->cipher.key.data;
426 cipher_key_len = cipher_xform->cipher.key.length;
427 auth_key = auth_xform->auth.key.data;
428 auth_key_len = auth_xform->auth.key.length;
430 if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
431 memcpy(sa->aes_gcm.hmac_key, auth_key, auth_key_len);
433 lp->ctx_len = offsetof(struct otx2_ipsec_po_in_sa,
434 aes_gcm.selector) >> 3;
437 if (cipher_key_len != 0)
438 memcpy(sa->cipher_key, cipher_key, cipher_key_len);
/* Cache the CPT instruction word and microcode opcode for datapath. */
443 inst.egrp = OTX2_CPT_EGRP_SE;
444 inst.cptr = rte_mempool_virt2iova(sa);
446 lp->cpt_inst_w7 = inst.u64[7];
447 lp->ucmd_opcode = (lp->ctx_len << 8) |
448 (OTX2_IPSEC_PO_PROCESS_IPSEC_INB);
450 set_session_misc_attributes(lp, crypto_xform,
451 auth_xform, cipher_xform);
/* Optional anti-replay window, bounded by the driver maximum. */
453 if (sa->replay_win_sz) {
454 if (sa->replay_win_sz > OTX2_IPSEC_MAX_REPLAY_WIN_SZ) {
455 otx2_err("Replay window size is not supported");
458 sa->replay = rte_zmalloc(NULL, sizeof(struct otx2_ipsec_replay),
460 if (sa->replay == NULL)
463 /* Set window bottom to 1, base and top to size of window */
464 sa->replay->winb = 1;
465 sa->replay->wint = sa->replay_win_sz;
466 sa->replay->base = sa->replay_win_sz;
/* Push the SA to hardware synchronously via queue pair 0. */
471 return otx2_cpt_enq_sa_write(lp, crypto_dev->data->queue_pairs[0],
472 OTX2_IPSEC_PO_WRITE_IPSEC_INB);
/*
 * Dispatch IPsec session creation by direction after validating the
 * xform chain. Requires queue pair 0 to be configured first, since the
 * SA write instruction is enqueued through it.
 *
 * NOTE(review): lossy extraction — braces/returns and some call
 * arguments are not visible. Code lines are left byte-identical.
 */
476 crypto_sec_ipsec_session_create(struct rte_cryptodev *crypto_dev,
477 struct rte_security_ipsec_xform *ipsec,
478 struct rte_crypto_sym_xform *crypto_xform,
479 struct rte_security_session *sess)
483 if (crypto_dev->data->queue_pairs[0] == NULL) {
484 otx2_err("Setup cpt queue pair before creating sec session");
488 ret = ipsec_po_xform_verify(ipsec, crypto_xform);
492 if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
493 return crypto_sec_ipsec_inb_session_create(crypto_dev, ipsec,
496 return crypto_sec_ipsec_outb_session_create(crypto_dev, ipsec,
/*
 * rte_security .session_create callback.
 *
 * Accepts only LOOKASIDE_PROTOCOL actions, registers the security
 * dynfield, allocates the private session from the caller's mempool,
 * attaches it to the security session, then delegates to the IPsec
 * path for RTE_SECURITY_PROTOCOL_IPSEC. On failure (elided lines) the
 * private data is returned to the mempool and detached.
 *
 * NOTE(review): lossy extraction — braces/returns are not all visible.
 * Code lines are left byte-identical; only comments are added.
 */
501 otx2_crypto_sec_session_create(void *device,
502 struct rte_security_session_conf *conf,
503 struct rte_security_session *sess,
504 struct rte_mempool *mempool)
506 struct otx2_sec_session *priv;
509 if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
/* Dynfield must be registered before the datapath can stash sessions. */
512 if (rte_security_dynfield_register() < 0)
515 if (rte_mempool_get(mempool, (void **)&priv)) {
516 otx2_err("Could not allocate security session private data");
520 set_sec_session_private_data(sess, priv);
522 priv->userdata = conf->userdata;
524 if (conf->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
525 ret = crypto_sec_ipsec_session_create(device, &conf->ipsec,
/* Error path: release private data and detach it from the session. */
537 rte_mempool_put(mempool, priv);
538 set_sec_session_private_data(sess, NULL);
/*
 * rte_security .session_destroy callback: detach the private data from
 * the session and return it to its originating mempool (recovered via
 * rte_mempool_from_obj, so no mempool pointer needs to be stored).
 *
 * NOTE(review): lossy extraction — the NULL-priv early return visible
 * in the upstream driver is elided here. Code lines left byte-identical.
 */
543 otx2_crypto_sec_session_destroy(void *device __rte_unused,
544 struct rte_security_session *sess)
546 struct otx2_sec_session *priv;
547 struct rte_mempool *sess_mp;
549 priv = get_sec_session_private_data(sess);
554 sess_mp = rte_mempool_from_obj(priv);
556 set_sec_session_private_data(sess, NULL);
557 rte_mempool_put(sess_mp, priv);
/*
 * rte_security .session_get_size callback: size of the private session
 * object callers must provision in their session mempool.
 */
563 otx2_crypto_sec_session_get_size(void *device __rte_unused)
565 return sizeof(struct otx2_sec_session);
/*
 * rte_security .set_pkt_metadata callback: stash the security session
 * pointer in the mbuf's security dynfield so the datapath can retrieve
 * it per packet.
 */
569 otx2_crypto_sec_set_pkt_mdata(void *device __rte_unused,
570 struct rte_security_session *session,
571 struct rte_mbuf *m, void *params __rte_unused)
573 /* Set security session as the pkt metadata */
574 *rte_security_dynfield(m) = (rte_security_dynfield_t)session;
/*
 * rte_security .get_userdata callback: the metadata word carries the
 * userdata pointer directly; just cast it back out.
 */
580 otx2_crypto_sec_get_userdata(void *device __rte_unused, uint64_t md,
583 /* Retrieve userdata */
584 *userdata = (void *)md;
/* rte_security operations table exported via the device's security ctx. */
589 static struct rte_security_ops otx2_crypto_sec_ops = {
590 .session_create = otx2_crypto_sec_session_create,
591 .session_destroy = otx2_crypto_sec_session_destroy,
592 .session_get_size = otx2_crypto_sec_session_get_size,
593 .set_pkt_metadata = otx2_crypto_sec_set_pkt_mdata,
594 .get_userdata = otx2_crypto_sec_get_userdata,
595 .capabilities_get = otx2_crypto_sec_capabilities_get
/*
 * Allocate and attach a security context to the crypto device, wiring
 * in the otx2 security ops table. The allocation-failure return and the
 * ctx->device assignment present upstream are in elided lines.
 */
599 otx2_crypto_sec_ctx_create(struct rte_cryptodev *cdev)
601 struct rte_security_ctx *ctx;
603 ctx = rte_malloc("otx2_cpt_dev_sec_ctx",
604 sizeof(struct rte_security_ctx), 0);
611 ctx->ops = &otx2_crypto_sec_ops;
614 cdev->security_ctx = ctx;
/* Free the security context allocated by otx2_crypto_sec_ctx_create(). */
620 otx2_crypto_sec_ctx_destroy(struct rte_cryptodev *cdev)
622 rte_free(cdev->security_ctx);