/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */

#include <rte_cryptodev.h>
#include <rte_esp.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_malloc.h>
#include <rte_security.h>
#include <rte_security_driver.h>
#include <rte_udp.h>

#include "otx2_cryptodev.h"
#include "otx2_cryptodev_capabilities.h"
#include "otx2_cryptodev_hw_access.h"
#include "otx2_cryptodev_ops.h"
#include "otx2_cryptodev_sec.h"
#include "otx2_security.h"

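/*
 * Precompute the fixed per-packet length overheads for an SA so the
 * datapath does not re-derive them: partial_len accumulates the outer
 * IP header, ESP/AH header, optional UDP encapsulation, IV and ICV
 * sizes, while roundup_len/roundup_byte describe the ESP trailer
 * padding granularity.
 */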
static int
ipsec_lp_len_precalc(struct rte_security_ipsec_xform *ipsec,
		     struct rte_crypto_sym_xform *xform,
		     struct otx2_sec_session_ipsec_lp *lp)
{
	struct rte_crypto_sym_xform *cipher_xform, *auth_xform;

	if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
		lp->partial_len = sizeof(struct rte_ipv4_hdr);
	else if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV6)
		lp->partial_len = sizeof(struct rte_ipv6_hdr);
	else
		return -EINVAL;

	if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
		lp->partial_len += sizeof(struct rte_esp_hdr);
		lp->roundup_len = sizeof(struct rte_esp_tail);
	} else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH) {
		lp->partial_len += OTX2_SEC_AH_HDR_LEN;
	} else {
		return -EINVAL;
	}

	if (ipsec->options.udp_encap)
		lp->partial_len += sizeof(struct rte_udp_hdr);

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
			lp->partial_len += OTX2_SEC_AES_GCM_IV_LEN;
			lp->partial_len += OTX2_SEC_AES_GCM_MAC_LEN;
			lp->roundup_byte = OTX2_SEC_AES_GCM_ROUNDUP_BYTE_LEN;
			return 0;
		}
		return -EINVAL;
	}

	if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		cipher_xform = xform;
		auth_xform = xform->next;
	} else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		auth_xform = xform;
		cipher_xform = xform->next;
	} else {
		return -EINVAL;
	}

	if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
		lp->partial_len += OTX2_SEC_AES_CBC_IV_LEN;
		lp->roundup_byte = OTX2_SEC_AES_CBC_ROUNDUP_BYTE_LEN;
	} else {
		return -EINVAL;
	}

	if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
		lp->partial_len += OTX2_SEC_SHA1_HMAC_LEN;
	else
		return -EINVAL;

	return 0;
}

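/*
 * Synchronously write an SA context to CPT. A single SA write instruction
 * (OTX2_IPSEC_PO_WRITE_IPSEC_*) carrying the SA image is copied to the
 * LMTLINE and submitted; the result word is then busy-polled until the
 * microcode completes or DEFAULT_COMMAND_TIMEOUT expires. Slow path only,
 * used at session setup.
 */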
static int
otx2_cpt_enq_sa_write(struct otx2_sec_session_ipsec_lp *lp,
		      struct otx2_cpt_qp *qptr, uint8_t opcode)
{
	uint64_t lmt_status, time_out;
	void *lmtline = qptr->lmtline;
	struct otx2_cpt_inst_s inst;
	struct otx2_cpt_res *res;
	uint64_t *mdata;
	int ret = 0;

	if (unlikely(rte_mempool_get(qptr->meta_info.pool,
				     (void **)&mdata) < 0))
		return -ENOMEM;

	res = (struct otx2_cpt_res *)RTE_PTR_ALIGN(mdata, 16);
	res->compcode = CPT_9X_COMP_E_NOTDONE;

	inst.opcode = opcode | (lp->ctx_len << 8);
	inst.param1 = 0;
	inst.param2 = 0;
	inst.dlen = lp->ctx_len << 3;
	inst.dptr = rte_mempool_virt2iova(lp);
	inst.rptr = 0;
	inst.cptr = rte_mempool_virt2iova(lp);
	inst.egrp = OTX2_CPT_EGRP_SE;

	inst.u64[0] = 0;
	inst.u64[2] = 0;
	inst.u64[3] = 0;
	inst.res_addr = rte_mempool_virt2iova(res);

	rte_io_wmb();

	do {
		/* Copy CPT command to LMTLINE */
		otx2_lmt_mov(lmtline, &inst, 2);
		lmt_status = otx2_lmt_submit(qptr->lf_nq_reg);
	} while (lmt_status == 0);

	time_out = rte_get_timer_cycles() +
			DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();

	while (res->compcode == CPT_9X_COMP_E_NOTDONE) {
		if (rte_get_timer_cycles() > time_out) {
			rte_mempool_put(qptr->meta_info.pool, mdata);
			otx2_err("Request timed out");
			return -ETIMEDOUT;
		}
		rte_io_rmb();
	}

	if (unlikely(res->compcode != CPT_9X_COMP_E_GOOD)) {
		ret = res->compcode;
		switch (ret) {
		case CPT_9X_COMP_E_INSTERR:
			otx2_err("Request failed with instruction error");
			break;
		case CPT_9X_COMP_E_FAULT:
			otx2_err("Request failed with DMA fault");
			break;
		case CPT_9X_COMP_E_HWERR:
			otx2_err("Request failed with hardware error");
			break;
		default:
			otx2_err("Request failed with unknown hardware "
				 "completion code : 0x%x", ret);
		}
		goto mempool_put;
	}

	if (unlikely(res->uc_compcode != OTX2_IPSEC_PO_CC_SUCCESS)) {
		ret = res->uc_compcode;
		switch (ret) {
		case OTX2_IPSEC_PO_CC_AUTH_UNSUPPORTED:
			otx2_err("Invalid auth type");
			break;
		case OTX2_IPSEC_PO_CC_ENCRYPT_UNSUPPORTED:
			otx2_err("Invalid encrypt type");
			break;
		default:
			otx2_err("Request failed with unknown microcode "
				 "completion code : 0x%x", ret);
		}
	}

mempool_put:
	rte_mempool_put(qptr->meta_info.pool, mdata);
	return ret;
}

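/*
 * Cache the IV/AAD/digest layout from the crypto transforms in the session
 * so the datapath can build CPT requests without walking the xform chain:
 * an AEAD transform supplies all fields, while cipher+auth chains use the
 * respective transforms.
 */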
static void
set_session_misc_attributes(struct otx2_sec_session_ipsec_lp *sess,
			    struct rte_crypto_sym_xform *crypto_xform,
			    struct rte_crypto_sym_xform *auth_xform,
			    struct rte_crypto_sym_xform *cipher_xform)
{
	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		sess->iv_offset = crypto_xform->aead.iv.offset;
		sess->iv_length = crypto_xform->aead.iv.length;
		sess->aad_length = crypto_xform->aead.aad_length;
		sess->mac_len = crypto_xform->aead.digest_length;
	} else {
		sess->iv_offset = cipher_xform->cipher.iv.offset;
		sess->iv_length = cipher_xform->cipher.iv.length;
		sess->auth_iv_offset = auth_xform->auth.iv.offset;
		sess->auth_iv_length = auth_xform->auth.iv.length;
		sess->mac_len = auth_xform->auth.digest_length;
	}

	sess->ucmd_param1 = OTX2_IPSEC_PO_PER_PKT_IV;
	sess->ucmd_param2 = 0;
}

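/*
 * Create an egress (outbound) lookaside IPsec session: verify the SA slot
 * is unused, fill the SA control word and the length precalculations,
 * build the outer IP header template for tunnel mode, copy in the cipher
 * key, then flush the SA to CPT via otx2_cpt_enq_sa_write().
 */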
static int
crypto_sec_ipsec_outb_session_create(struct rte_cryptodev *crypto_dev,
				     struct rte_security_ipsec_xform *ipsec,
				     struct rte_crypto_sym_xform *crypto_xform,
				     struct rte_security_session *sec_sess)
{
	struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
	const uint8_t *cipher_key, *auth_key;
	struct otx2_sec_session_ipsec_lp *lp;
	struct otx2_ipsec_po_sa_ctl *ctl;
	int cipher_key_len, auth_key_len;
	struct otx2_ipsec_po_out_sa *sa;
	struct otx2_sec_session *sess;
	struct otx2_cpt_inst_s inst;
	struct rte_ipv6_hdr *ip6;
	struct rte_ipv4_hdr *ip;
	int ret;

	sess = get_sec_session_private_data(sec_sess);
	sess->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	lp = &sess->ipsec.lp;

	sa = &lp->out_sa;
	ctl = &sa->ctl;
	if (ctl->valid) {
		otx2_err("SA already registered");
		return -EINVAL;
	}

	memset(sa, 0, sizeof(struct otx2_ipsec_po_out_sa));

	/* Initialize lookaside ipsec private data */
	lp->ip_id = 0;
	lp->seq_lo = 1;
	lp->seq_hi = 0;
	lp->tunnel_type = ipsec->tunnel.type;

	ret = ipsec_po_sa_ctl_set(ipsec, crypto_xform, ctl);
	if (ret)
		return ret;

	ret = ipsec_lp_len_precalc(ipsec, crypto_xform, lp);
	if (ret)
		return ret;

	memcpy(sa->iv.gcm.nonce, &ipsec->salt, 4);

	if (ipsec->options.udp_encap) {
		sa->udp_src = 4500;
		sa->udp_dst = 4500;
	}

	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		/* Start ip id from 1 */
		lp->ip_id = 1;

		if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			ip = &sa->template.ipv4_hdr;
			ip->version_ihl = RTE_IPV4_VHL_DEF;
			ip->next_proto_id = IPPROTO_ESP;
			ip->time_to_live = ipsec->tunnel.ipv4.ttl;
			ip->type_of_service |= (ipsec->tunnel.ipv4.dscp << 2);
			if (ipsec->tunnel.ipv4.df)
				ip->fragment_offset = BIT(14);
			memcpy(&ip->src_addr, &ipsec->tunnel.ipv4.src_ip,
			       sizeof(struct in_addr));
			memcpy(&ip->dst_addr, &ipsec->tunnel.ipv4.dst_ip,
			       sizeof(struct in_addr));
		} else if (ipsec->tunnel.type ==
			   RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
			ip6 = &sa->template.ipv6_hdr;
			ip6->vtc_flow = rte_cpu_to_be_32(0x60000000 |
				((ipsec->tunnel.ipv6.dscp <<
					RTE_IPV6_HDR_TC_SHIFT) &
					RTE_IPV6_HDR_TC_MASK) |
				((ipsec->tunnel.ipv6.flabel <<
					RTE_IPV6_HDR_FL_SHIFT) &
					RTE_IPV6_HDR_FL_MASK));
			ip6->hop_limits = ipsec->tunnel.ipv6.hlimit;
			ip6->proto = (ipsec->proto ==
				      RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
				      IPPROTO_ESP : IPPROTO_AH;
			memcpy(&ip6->src_addr, &ipsec->tunnel.ipv6.src_addr,
			       sizeof(struct in6_addr));
			memcpy(&ip6->dst_addr, &ipsec->tunnel.ipv6.dst_addr,
			       sizeof(struct in6_addr));
		} else {
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}

	cipher_xform = crypto_xform;
	auth_xform = crypto_xform->next;

	cipher_key_len = 0;
	auth_key_len = 0;

	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		cipher_key = crypto_xform->aead.key.data;
		cipher_key_len = crypto_xform->aead.key.length;

		lp->ctx_len = sizeof(struct otx2_ipsec_po_out_sa);
		lp->ctx_len >>= 3;
		RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_AES_GCM_OUTB_CTX_LEN);
	} else {
		cipher_key = cipher_xform->cipher.key.data;
		cipher_key_len = cipher_xform->cipher.key.length;
		auth_key = auth_xform->auth.key.data;
		auth_key_len = auth_xform->auth.key.length;

		/* TODO: check the ctx len for supporting ALGO */
		lp->ctx_len = sizeof(struct otx2_ipsec_po_out_sa) >> 3;
		RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_MAX_OUTB_CTX_LEN);
	}

	if (cipher_key_len != 0)
		memcpy(sa->cipher_key, cipher_key, cipher_key_len);
	else
		return -EINVAL;

	/* Use OPAD & IPAD */
	RTE_SET_USED(auth_key);
	RTE_SET_USED(auth_key_len);

	inst.u64[7] = 0;
	inst.egrp = OTX2_CPT_EGRP_SE;
	inst.cptr = rte_mempool_virt2iova(sa);

	lp->cpt_inst_w7 = inst.u64[7];
	lp->ucmd_opcode = (lp->ctx_len << 8) |
			  (OTX2_IPSEC_PO_PROCESS_IPSEC_OUTB);

	set_session_misc_attributes(lp, crypto_xform,
				    auth_xform, cipher_xform);

	return otx2_cpt_enq_sa_write(lp, crypto_dev->data->queue_pairs[0],
				     OTX2_IPSEC_PO_WRITE_IPSEC_OUTB);
}

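/*
 * Create an ingress (inbound) lookaside IPsec session. Mirrors the egress
 * path minus the outer header template, and additionally allocates the
 * anti-replay window state when a replay window size is requested.
 */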
static int
crypto_sec_ipsec_inb_session_create(struct rte_cryptodev *crypto_dev,
				    struct rte_security_ipsec_xform *ipsec,
				    struct rte_crypto_sym_xform *crypto_xform,
				    struct rte_security_session *sec_sess)
{
	struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
	struct otx2_sec_session_ipsec_lp *lp;
	struct otx2_ipsec_po_sa_ctl *ctl;
	const uint8_t *cipher_key, *auth_key;
	int cipher_key_len, auth_key_len;
	struct otx2_ipsec_po_in_sa *sa;
	struct otx2_sec_session *sess;
	struct otx2_cpt_inst_s inst;
	int ret;

	sess = get_sec_session_private_data(sec_sess);
	sess->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
	lp = &sess->ipsec.lp;

	sa = &lp->in_sa;
	ctl = &sa->ctl;
	if (ctl->valid) {
		otx2_err("SA already registered");
		return -EINVAL;
	}

	memset(sa, 0, sizeof(struct otx2_ipsec_po_in_sa));
	sa->replay_win_sz = ipsec->replay_win_sz;

	ret = ipsec_po_sa_ctl_set(ipsec, crypto_xform, ctl);
	if (ret)
		return ret;

	lp->tunnel_type = ipsec->tunnel.type;
	auth_xform = crypto_xform;
	cipher_xform = crypto_xform->next;

	cipher_key_len = 0;
	auth_key_len = 0;

	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
			memcpy(sa->iv.gcm.nonce, &ipsec->salt, 4);
		cipher_key = crypto_xform->aead.key.data;
		cipher_key_len = crypto_xform->aead.key.length;

		lp->ctx_len = offsetof(struct otx2_ipsec_po_in_sa,
				       aes_gcm.hmac_key[0]) >> 3;
		RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_AES_GCM_INB_CTX_LEN);
	} else {
		cipher_key = cipher_xform->cipher.key.data;
		cipher_key_len = cipher_xform->cipher.key.length;
		auth_key = auth_xform->auth.key.data;
		auth_key_len = auth_xform->auth.key.length;

		/* TODO: check the ctx len for supporting ALGO */
		lp->ctx_len = sizeof(struct otx2_ipsec_po_in_sa) >> 2;
		RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_MAX_INB_CTX_LEN);
	}

	if (cipher_key_len != 0)
		memcpy(sa->cipher_key, cipher_key, cipher_key_len);
	else
		return -EINVAL;

	/* Use OPAD & IPAD */
	RTE_SET_USED(auth_key);
	RTE_SET_USED(auth_key_len);

	inst.u64[7] = 0;
	inst.egrp = OTX2_CPT_EGRP_SE;
	inst.cptr = rte_mempool_virt2iova(sa);

	lp->cpt_inst_w7 = inst.u64[7];
	lp->ucmd_opcode = (lp->ctx_len << 8) |
			  (OTX2_IPSEC_PO_PROCESS_IPSEC_INB);

	set_session_misc_attributes(lp, crypto_xform,
				    auth_xform, cipher_xform);

	if (sa->replay_win_sz) {
		if (sa->replay_win_sz > OTX2_IPSEC_MAX_REPLAY_WIN_SZ) {
			otx2_err("Replay window size is not supported");
			return -ENOTSUP;
		}
		sa->replay = rte_zmalloc(NULL, sizeof(struct otx2_ipsec_replay),
					 0);
		if (sa->replay == NULL)
			return -ENOMEM;

		/* Set window bottom to 1, base and top to size of window */
		sa->replay->winb = 1;
		sa->replay->wint = sa->replay_win_sz;
		sa->replay->base = sa->replay_win_sz;
	}

	return otx2_cpt_enq_sa_write(lp, crypto_dev->data->queue_pairs[0],
				     OTX2_IPSEC_PO_WRITE_IPSEC_INB);
}

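/*
 * Session create entry for the IPsec protocol: queue pair 0 must already
 * be configured, since the SA write request is enqueued on it.
 */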
static int
crypto_sec_ipsec_session_create(struct rte_cryptodev *crypto_dev,
				struct rte_security_ipsec_xform *ipsec,
				struct rte_crypto_sym_xform *crypto_xform,
				struct rte_security_session *sess)
{
	int ret;

	if (crypto_dev->data->queue_pairs[0] == NULL) {
		otx2_err("Setup cpt queue pair before creating sec session");
		return -EPERM;
	}

	ret = ipsec_po_xform_verify(ipsec, crypto_xform);
	if (ret)
		return ret;

	if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		return crypto_sec_ipsec_inb_session_create(crypto_dev, ipsec,
							   crypto_xform, sess);

	return crypto_sec_ipsec_outb_session_create(crypto_dev, ipsec,
						    crypto_xform, sess);
}

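/*
 * rte_security session_create callback (LOOKASIDE_PROTOCOL only).
 *
 * A minimal application-side sketch (assumed names, error handling
 * elided; the exact rte_security_session_create() signature varies
 * across DPDK releases):
 *
 *	struct rte_security_ctx *sec_ctx =
 *		rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = ipsec_xform,
 *		.crypto_xform = &aead_xform,
 *		.userdata = app_cookie,
 *	};
 *	struct rte_security_session *sess =
 *		rte_security_session_create(sec_ctx, &conf, sess_mp,
 *					    priv_mp);
 */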
static int
otx2_crypto_sec_session_create(void *device,
			       struct rte_security_session_conf *conf,
			       struct rte_security_session *sess,
			       struct rte_mempool *mempool)
{
	struct otx2_sec_session *priv;
	int ret;

	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
		return -ENOTSUP;

	if (rte_security_dynfield_register() < 0)
		return -rte_errno;

	if (rte_mempool_get(mempool, (void **)&priv)) {
		otx2_err("Could not allocate security session private data");
		return -ENOMEM;
	}

	set_sec_session_private_data(sess, priv);

	priv->userdata = conf->userdata;

	if (conf->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
		ret = crypto_sec_ipsec_session_create(device, &conf->ipsec,
						      conf->crypto_xform,
						      sess);
	else
		ret = -ENOTSUP;

	if (ret)
		goto mempool_put;

	return 0;

mempool_put:
	rte_mempool_put(mempool, priv);
	set_sec_session_private_data(sess, NULL);
	return ret;
}

static int
otx2_crypto_sec_session_destroy(void *device __rte_unused,
				struct rte_security_session *sess)
{
	struct otx2_sec_session *priv;
	struct rte_mempool *sess_mp;

	priv = get_sec_session_private_data(sess);
	if (priv == NULL)
		return 0;

	sess_mp = rte_mempool_from_obj(priv);

	set_sec_session_private_data(sess, NULL);
	rte_mempool_put(sess_mp, priv);

	return 0;
}

static unsigned int
otx2_crypto_sec_session_get_size(void *device __rte_unused)
{
	return sizeof(struct otx2_sec_session);
}

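/*
 * Datapath hook: stash the security session pointer in the mbuf's security
 * dynamic field (registered in session_create) so the enqueue path can
 * locate the SA for LOOKASIDE_PROTOCOL packets.
 */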
static int
otx2_crypto_sec_set_pkt_mdata(void *device __rte_unused,
			      struct rte_security_session *session,
			      struct rte_mbuf *m, void *params __rte_unused)
{
	/* Set security session as the pkt metadata */
	*rte_security_dynfield(m) = (rte_security_dynfield_t)session;

	return 0;
}

static int
otx2_crypto_sec_get_userdata(void *device __rte_unused, uint64_t md,
			     void **userdata)
{
	/* Retrieve userdata */
	*userdata = (void *)md;

	return 0;
}

static struct rte_security_ops otx2_crypto_sec_ops = {
	.session_create = otx2_crypto_sec_session_create,
	.session_destroy = otx2_crypto_sec_session_destroy,
	.session_get_size = otx2_crypto_sec_session_get_size,
	.set_pkt_metadata = otx2_crypto_sec_set_pkt_mdata,
	.get_userdata = otx2_crypto_sec_get_userdata,
	.capabilities_get = otx2_crypto_sec_capabilities_get
};

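/*
 * Attach an rte_security context to the cryptodev so applications can
 * reach otx2_crypto_sec_ops via rte_cryptodev_get_sec_ctx().
 */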
int
otx2_crypto_sec_ctx_create(struct rte_cryptodev *cdev)
{
	struct rte_security_ctx *ctx;

	ctx = rte_malloc("otx2_cpt_dev_sec_ctx",
			 sizeof(struct rte_security_ctx), 0);
	if (ctx == NULL)
		return -ENOMEM;

	/* Populate ctx */
	ctx->device = cdev;
	ctx->ops = &otx2_crypto_sec_ops;
	ctx->sess_cnt = 0;

	cdev->security_ctx = ctx;

	return 0;
}

void
otx2_crypto_sec_ctx_destroy(struct rte_cryptodev *cdev)
{
	rte_free(cdev->security_ctx);
}