1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (C) 2020 Marvell International Ltd.
5 #include <rte_cryptodev.h>
7 #include <rte_ethdev.h>
9 #include <rte_malloc.h>
10 #include <rte_security.h>
11 #include <rte_security_driver.h>
14 #include "otx2_cryptodev.h"
15 #include "otx2_cryptodev_capabilities.h"
16 #include "otx2_cryptodev_hw_access.h"
17 #include "otx2_cryptodev_ops.h"
18 #include "otx2_cryptodev_sec.h"
19 #include "otx2_security.h"
/*
 * ipsec_lp_len_precalc() - precompute fixed per-packet length overheads
 * for a lookaside-IPsec session.
 *
 * Fills lp->partial_len (fixed header + ICV bytes added to each packet),
 * lp->roundup_len (trailer added after padding) and lp->roundup_byte
 * (cipher padding granularity) from the IPsec transform and the crypto
 * xform chain.
 *
 * NOTE(review): this listing is elided (the embedded line numbers jump);
 * braces, else branches and the return statements between the visible
 * lines are not shown here.
 */
22 ipsec_lp_len_precalc(struct rte_security_ipsec_xform *ipsec,
23 struct rte_crypto_sym_xform *xform,
24 struct otx2_sec_session_ipsec_lp *lp)
26 struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
/* Outer IPv4 header is always part of the fixed per-packet overhead. */
28 lp->partial_len = sizeof(struct rte_ipv4_hdr);
30 if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
31 lp->partial_len += sizeof(struct rte_esp_hdr);
/* ESP tail (pad length + next header) is appended after roundup. */
32 lp->roundup_len = sizeof(struct rte_esp_tail);
33 } else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH) {
34 lp->partial_len += OTX2_SEC_AH_HDR_LEN;
/* NAT-T UDP encapsulation adds one UDP header per packet. */
39 if (ipsec->options.udp_encap)
40 lp->partial_len += sizeof(struct rte_udp_hdr);
/* AEAD (AES-GCM): per-packet IV and ICV, GCM roundup granularity. */
42 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
43 if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
44 lp->partial_len += OTX2_SEC_AES_GCM_IV_LEN;
45 lp->partial_len += OTX2_SEC_AES_GCM_MAC_LEN;
46 lp->roundup_byte = OTX2_SEC_AES_GCM_ROUNDUP_BYTE_LEN;
/*
 * Non-AEAD chain: xform order depends on direction —
 * egress is cipher-then-auth, ingress is auth-then-cipher.
 */
53 if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
55 auth_xform = xform->next;
56 } else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
58 cipher_xform = xform->next;
/* AES-CBC: per-packet IV plus block-size padding granularity. */
63 if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
64 lp->partial_len += OTX2_SEC_AES_CBC_IV_LEN;
65 lp->roundup_byte = OTX2_SEC_AES_CBC_ROUNDUP_BYTE_LEN;
/* Truncated SHA1-HMAC ICV length. */
70 if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
71 lp->partial_len += OTX2_SEC_SHA1_HMAC_LEN;
/*
 * otx2_cpt_enq_sa_write() - synchronously write an SA context to CPT.
 *
 * Builds a CPT instruction whose data pointer is the IOVA of the
 * session's SA image, submits it over the queue pair's LMT line, then
 * busy-waits (bounded by DEFAULT_COMMAND_TIMEOUT) on the result word
 * for the hardware and microcode completion codes, logging a specific
 * error for each failure code.
 *
 * NOTE(review): listing is elided — the 'mdata' and 'ret' declarations,
 * the opening 'do {', break/return statements and closing braces between
 * the visible lines are not shown.
 */
79 otx2_cpt_enq_sa_write(struct otx2_sec_session_ipsec_lp *lp,
80 struct otx2_cpt_qp *qptr, uint8_t opcode)
82 uint64_t lmt_status, time_out;
83 void *lmtline = qptr->lmtline;
84 struct otx2_cpt_inst_s inst;
85 struct otx2_cpt_res *res;
/* Result word is carved out of the qp's meta mempool. */
89 if (unlikely(rte_mempool_get(qptr->meta_info.pool,
90 (void **)&mdata) < 0))
/* Hardware requires a 16-byte aligned result address. */
93 res = (struct otx2_cpt_res *)RTE_PTR_ALIGN(mdata, 16);
94 res->compcode = CPT_9X_COMP_E_NOTDONE;
/* ctx_len rides in bits 15:8 of the microcode opcode word. */
96 inst.opcode = opcode | (lp->ctx_len << 8);
/* ctx_len is in 8-byte words; dlen is in bytes. */
99 inst.dlen = lp->ctx_len << 3;
100 inst.dptr = rte_mempool_virt2iova(lp);
102 inst.cptr = rte_mempool_virt2iova(lp);
103 inst.egrp = OTX2_CPT_EGRP_SE;
108 inst.res_addr = rte_mempool_virt2iova(res);
113 /* Copy CPT command to LMTLINE */
114 otx2_lmt_mov(lmtline, &inst, 2);
115 lmt_status = otx2_lmt_submit(qptr->lf_nq_reg);
/* Retry the LMT store until the submit sticks. */
116 } while (lmt_status == 0);
118 time_out = rte_get_timer_cycles() +
119 DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
/* Poll for hardware completion, bailing out on timeout. */
121 while (res->compcode == CPT_9X_COMP_E_NOTDONE) {
122 if (rte_get_timer_cycles() > time_out) {
123 rte_mempool_put(qptr->meta_info.pool, mdata);
124 otx2_err("Request timed out");
/* Map each hardware completion code to a diagnostic. */
130 if (unlikely(res->compcode != CPT_9X_COMP_E_GOOD)) {
133 case CPT_9X_COMP_E_INSTERR:
134 otx2_err("Request failed with instruction error");
136 case CPT_9X_COMP_E_FAULT:
137 otx2_err("Request failed with DMA fault");
139 case CPT_9X_COMP_E_HWERR:
140 otx2_err("Request failed with hardware error");
143 otx2_err("Request failed with unknown hardware "
144 "completion code : 0x%x", ret);
/* Hardware was fine; now check the microcode completion code. */
149 if (unlikely(res->uc_compcode != OTX2_IPSEC_PO_CC_SUCCESS)) {
150 ret = res->uc_compcode;
152 case OTX2_IPSEC_PO_CC_AUTH_UNSUPPORTED:
153 otx2_err("Invalid auth type");
155 case OTX2_IPSEC_PO_CC_ENCRYPT_UNSUPPORTED:
156 otx2_err("Invalid encrypt type");
159 otx2_err("Request failed with unknown microcode "
160 "completion code : 0x%x", ret);
/* Release the result buffer on the success path as well. */
165 rte_mempool_put(qptr->meta_info.pool, mdata);
/*
 * set_session_misc_attributes() - cache datapath-needed offsets/lengths
 * in the session: IV offset/length, AAD length (AEAD), auth IV
 * offset/length (chained mode) and digest length, plus the fixed
 * microcode per-packet parameter words.
 *
 * NOTE(review): listing is elided; the 'else' joining the AEAD and
 * chained branches and the braces are not visible here.
 */
170 set_session_misc_attributes(struct otx2_sec_session_ipsec_lp *sess,
171 struct rte_crypto_sym_xform *crypto_xform,
172 struct rte_crypto_sym_xform *auth_xform,
173 struct rte_crypto_sym_xform *cipher_xform)
/* AEAD: one IV, AAD and digest all come from the single xform. */
175 if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
176 sess->iv_offset = crypto_xform->aead.iv.offset;
177 sess->iv_length = crypto_xform->aead.iv.length;
178 sess->aad_length = crypto_xform->aead.aad_length;
179 sess->mac_len = crypto_xform->aead.digest_length;
/* Chained cipher+auth: cipher IV and a separate auth IV. */
181 sess->iv_offset = cipher_xform->cipher.iv.offset;
182 sess->iv_length = cipher_xform->cipher.iv.length;
183 sess->auth_iv_offset = auth_xform->auth.iv.offset;
184 sess->auth_iv_length = auth_xform->auth.iv.length;
185 sess->mac_len = auth_xform->auth.digest_length;
/* Microcode param1: IV is carried per packet, not in the SA. */
188 sess->ucmd_param1 = OTX2_IPSEC_PO_PER_PKT_IV;
189 sess->ucmd_param2 = 0;
193 crypto_sec_ipsec_outb_session_create(struct rte_cryptodev *crypto_dev,
194 struct rte_security_ipsec_xform *ipsec,
195 struct rte_crypto_sym_xform *crypto_xform,
196 struct rte_security_session *sec_sess)
198 struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
199 const uint8_t *cipher_key, *auth_key;
200 struct otx2_sec_session_ipsec_lp *lp;
201 struct otx2_ipsec_po_sa_ctl *ctl;
202 int cipher_key_len, auth_key_len;
203 struct otx2_ipsec_po_out_sa *sa;
204 struct otx2_sec_session *sess;
205 struct otx2_cpt_inst_s inst;
206 struct rte_ipv4_hdr *ip;
209 sess = get_sec_session_private_data(sec_sess);
210 lp = &sess->ipsec.lp;
215 otx2_err("SA already registered");
219 memset(sa, 0, sizeof(struct otx2_ipsec_po_out_sa));
221 /* Initialize lookaside ipsec private data */
226 ret = ipsec_po_sa_ctl_set(ipsec, crypto_xform, ctl);
230 ret = ipsec_lp_len_precalc(ipsec, crypto_xform, lp);
234 memcpy(sa->iv.gcm.nonce, &ipsec->salt, 4);
236 if (ipsec->options.udp_encap) {
241 if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
242 /* Start ip id from 1 */
245 if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
246 ip = &sa->template.ipv4_hdr;
247 ip->version_ihl = RTE_IPV4_VHL_DEF;
248 ip->next_proto_id = IPPROTO_ESP;
249 ip->time_to_live = ipsec->tunnel.ipv4.ttl;
250 ip->type_of_service |= (ipsec->tunnel.ipv4.dscp << 2);
251 if (ipsec->tunnel.ipv4.df)
252 ip->fragment_offset = BIT(14);
253 memcpy(&ip->src_addr, &ipsec->tunnel.ipv4.src_ip,
254 sizeof(struct in_addr));
255 memcpy(&ip->dst_addr, &ipsec->tunnel.ipv4.dst_ip,
256 sizeof(struct in_addr));
264 cipher_xform = crypto_xform;
265 auth_xform = crypto_xform->next;
270 if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
271 cipher_key = crypto_xform->aead.key.data;
272 cipher_key_len = crypto_xform->aead.key.length;
274 lp->ctx_len = sizeof(struct otx2_ipsec_po_out_sa);
276 RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_AES_GCM_OUTB_CTX_LEN);
278 cipher_key = cipher_xform->cipher.key.data;
279 cipher_key_len = cipher_xform->cipher.key.length;
280 auth_key = auth_xform->auth.key.data;
281 auth_key_len = auth_xform->auth.key.length;
283 /* TODO: check the ctx len for supporting ALGO */
284 lp->ctx_len = sizeof(struct otx2_ipsec_po_out_sa) >> 3;
285 RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_MAX_OUTB_CTX_LEN);
288 if (cipher_key_len != 0)
289 memcpy(sa->cipher_key, cipher_key, cipher_key_len);
293 /* Use OPAD & IPAD */
294 RTE_SET_USED(auth_key);
295 RTE_SET_USED(auth_key_len);
298 inst.egrp = OTX2_CPT_EGRP_SE;
299 inst.cptr = rte_mempool_virt2iova(sa);
301 lp->ucmd_w3 = inst.u64[7];
302 lp->ucmd_opcode = (lp->ctx_len << 8) |
303 (OTX2_IPSEC_PO_PROCESS_IPSEC_OUTB);
305 set_session_misc_attributes(lp, crypto_xform,
306 auth_xform, cipher_xform);
308 return otx2_cpt_enq_sa_write(lp, crypto_dev->data->queue_pairs[0],
309 OTX2_IPSEC_PO_WRITE_IPSEC_OUTB);
313 crypto_sec_ipsec_inb_session_create(struct rte_cryptodev *crypto_dev,
314 struct rte_security_ipsec_xform *ipsec,
315 struct rte_crypto_sym_xform *crypto_xform,
316 struct rte_security_session *sec_sess)
318 struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
319 struct otx2_sec_session_ipsec_lp *lp;
320 struct otx2_ipsec_po_sa_ctl *ctl;
321 const uint8_t *cipher_key, *auth_key;
322 int cipher_key_len, auth_key_len;
323 struct otx2_ipsec_po_in_sa *sa;
324 struct otx2_sec_session *sess;
325 struct otx2_cpt_inst_s inst;
328 sess = get_sec_session_private_data(sec_sess);
329 lp = &sess->ipsec.lp;
335 otx2_err("SA already registered");
339 memset(sa, 0, sizeof(struct otx2_ipsec_po_in_sa));
341 ret = ipsec_po_sa_ctl_set(ipsec, crypto_xform, ctl);
345 auth_xform = crypto_xform;
346 cipher_xform = crypto_xform->next;
351 if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
352 if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
353 memcpy(sa->iv.gcm.nonce, &ipsec->salt, 4);
354 cipher_key = crypto_xform->aead.key.data;
355 cipher_key_len = crypto_xform->aead.key.length;
357 lp->ctx_len = offsetof(struct otx2_ipsec_po_in_sa,
358 aes_gcm.hmac_key[0]) >> 3;
359 RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_AES_GCM_INB_CTX_LEN);
361 cipher_key = cipher_xform->cipher.key.data;
362 cipher_key_len = cipher_xform->cipher.key.length;
363 auth_key = auth_xform->auth.key.data;
364 auth_key_len = auth_xform->auth.key.length;
366 /* TODO: check the ctx len for supporting ALGO */
367 lp->ctx_len = sizeof(struct otx2_ipsec_po_in_sa) >> 2;
368 RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_MAX_INB_CTX_LEN);
371 if (cipher_key_len != 0)
372 memcpy(sa->cipher_key, cipher_key, cipher_key_len);
376 /* Use OPAD & IPAD */
377 RTE_SET_USED(auth_key);
378 RTE_SET_USED(auth_key_len);
381 inst.egrp = OTX2_CPT_EGRP_SE;
382 inst.cptr = rte_mempool_virt2iova(sa);
384 lp->ucmd_w3 = inst.u64[7];
385 lp->ucmd_opcode = (lp->ctx_len << 8) |
386 (OTX2_IPSEC_PO_PROCESS_IPSEC_INB);
388 set_session_misc_attributes(lp, crypto_xform,
389 auth_xform, cipher_xform);
391 return otx2_cpt_enq_sa_write(lp, crypto_dev->data->queue_pairs[0],
392 OTX2_IPSEC_PO_WRITE_IPSEC_INB);
/*
 * crypto_sec_ipsec_session_create() - validate the xform chain and
 * dispatch to the inbound or outbound session-create helper based on
 * the SA direction.
 *
 * NOTE(review): listing is elided; the 'ret' declaration, the error
 * returns and the trailing call arguments are not visible here.
 */
396 crypto_sec_ipsec_session_create(struct rte_cryptodev *crypto_dev,
397 struct rte_security_ipsec_xform *ipsec,
398 struct rte_crypto_sym_xform *crypto_xform,
399 struct rte_security_session *sess)
/* SA write is enqueued via qp 0, so it must be set up first. */
403 if (crypto_dev->data->queue_pairs[0] == NULL) {
404 otx2_err("Setup cpt queue pair before creating sec session");
408 ret = ipsec_po_xform_verify(ipsec, crypto_xform);
412 if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
413 return crypto_sec_ipsec_inb_session_create(crypto_dev, ipsec,
416 return crypto_sec_ipsec_outb_session_create(crypto_dev, ipsec,
/*
 * rte_security .session_create callback.
 *
 * Allocates session private data from @mempool, attaches it to @sess,
 * and creates the protocol session (only LOOKASIDE_PROTOCOL IPsec is
 * handled). On failure the private data is detached and returned to
 * the pool.
 *
 * NOTE(review): listing is elided; the early-return error codes and
 * the success-path return are not visible here.
 */
421 otx2_crypto_sec_session_create(void *device,
422 struct rte_security_session_conf *conf,
423 struct rte_security_session *sess,
424 struct rte_mempool *mempool)
426 struct otx2_sec_session *priv;
/* Only lookaside-protocol offload is supported by this PMD. */
429 if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
432 if (rte_mempool_get(mempool, (void **)&priv)) {
433 otx2_err("Could not allocate security session private data");
437 set_sec_session_private_data(sess, priv);
439 priv->userdata = conf->userdata;
441 if (conf->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
442 ret = crypto_sec_ipsec_session_create(device, &conf->ipsec,
/* Error path: undo the allocation and detach from the session. */
454 rte_mempool_put(mempool, priv);
455 set_sec_session_private_data(sess, NULL);
/*
 * rte_security .session_destroy callback: return the session private
 * data to its originating mempool and detach it from the session.
 */
460 otx2_crypto_sec_session_destroy(void *device __rte_unused,
461 struct rte_security_session *sess)
463 struct otx2_sec_session *priv;
464 struct rte_mempool *sess_mp;
466 priv = get_sec_session_private_data(sess);
/* Recover the owning pool from the object; none is stored in priv. */
471 sess_mp = rte_mempool_from_obj(priv);
473 set_sec_session_private_data(sess, NULL);
474 rte_mempool_put(sess_mp, priv);
/*
 * rte_security .session_get_size callback: size of the per-session
 * private data this PMD allocates from the session mempool.
 */
480 otx2_crypto_sec_session_get_size(void *device __rte_unused)
482 return sizeof(struct otx2_sec_session);
/*
 * rte_security .set_pkt_metadata callback: stash the security session
 * pointer in the mbuf's udata64 so the datapath can look it up per
 * packet.
 */
486 otx2_crypto_sec_set_pkt_mdata(void *device __rte_unused,
487 struct rte_security_session *session,
488 struct rte_mbuf *m, void *params __rte_unused)
490 /* Set security session as the pkt metadata */
491 m->udata64 = (uint64_t)session;
/*
 * rte_security .get_userdata callback: the metadata word is handed
 * back to the caller as the opaque userdata pointer.
 */
497 otx2_crypto_sec_get_userdata(void *device __rte_unused, uint64_t md,
500 /* Retrieve userdata */
501 *userdata = (void *)md;
/* rte_security operations exposed by this crypto PMD. */
506 static struct rte_security_ops otx2_crypto_sec_ops = {
507 .session_create = otx2_crypto_sec_session_create,
508 .session_destroy = otx2_crypto_sec_session_destroy,
509 .session_get_size = otx2_crypto_sec_session_get_size,
510 .set_pkt_metadata = otx2_crypto_sec_set_pkt_mdata,
511 .get_userdata = otx2_crypto_sec_get_userdata,
512 .capabilities_get = otx2_crypto_sec_capabilities_get
/*
 * Allocate a rte_security context wired to otx2_crypto_sec_ops and
 * attach it to the crypto device, enabling the rte_security API on
 * this PMD.
 *
 * NOTE(review): listing is elided; the NULL check on the rte_malloc
 * result and the return statements are not visible here.
 */
516 otx2_crypto_sec_ctx_create(struct rte_cryptodev *cdev)
518 struct rte_security_ctx *ctx;
520 ctx = rte_malloc("otx2_cpt_dev_sec_ctx",
521 sizeof(struct rte_security_ctx), 0);
528 ctx->ops = &otx2_crypto_sec_ops;
531 cdev->security_ctx = ctx;
/* Free the security context attached by otx2_crypto_sec_ctx_create(). */
537 otx2_crypto_sec_ctx_destroy(struct rte_cryptodev *cdev)
539 rte_free(cdev->security_ctx);