drivers/crypto/octeontx2/otx2_cryptodev_sec.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */

#include <rte_cryptodev.h>
#include <rte_esp.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_malloc.h>
#include <rte_security.h>
#include <rte_security_driver.h>
#include <rte_udp.h>

#include "otx2_cryptodev.h"
#include "otx2_cryptodev_capabilities.h"
#include "otx2_cryptodev_hw_access.h"
#include "otx2_cryptodev_ops.h"
#include "otx2_cryptodev_sec.h"
#include "otx2_security.h"

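/*
 * Precompute the fixed per-packet length overheads for this SA. partial_len
 * accumulates the tunnel IP header, ESP/AH header, optional UDP encapsulation
 * header, IV and ICV sizes, while roundup_byte/roundup_len capture the cipher
 * block alignment and ESP trailer. The lookaside datapath presumably uses
 * these values to size the output buffer without re-deriving them per packet.
 */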
static int
ipsec_lp_len_precalc(struct rte_security_ipsec_xform *ipsec,
		struct rte_crypto_sym_xform *xform,
		struct otx2_sec_session_ipsec_lp *lp)
{
	struct rte_crypto_sym_xform *cipher_xform, *auth_xform;

	if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
		lp->partial_len = sizeof(struct rte_ipv4_hdr);
	else if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV6)
		lp->partial_len = sizeof(struct rte_ipv6_hdr);
	else
		return -EINVAL;

	if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
		lp->partial_len += sizeof(struct rte_esp_hdr);
		lp->roundup_len = sizeof(struct rte_esp_tail);
	} else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH) {
		lp->partial_len += OTX2_SEC_AH_HDR_LEN;
	} else {
		return -EINVAL;
	}

	if (ipsec->options.udp_encap)
		lp->partial_len += sizeof(struct rte_udp_hdr);

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
			lp->partial_len += OTX2_SEC_AES_GCM_IV_LEN;
			lp->partial_len += OTX2_SEC_AES_GCM_MAC_LEN;
			lp->roundup_byte = OTX2_SEC_AES_GCM_ROUNDUP_BYTE_LEN;
			return 0;
		} else {
			return -EINVAL;
		}
	}

	if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		cipher_xform = xform;
		auth_xform = xform->next;
	} else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		auth_xform = xform;
		cipher_xform = xform->next;
	} else {
		return -EINVAL;
	}

	if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
		lp->partial_len += OTX2_SEC_AES_CBC_IV_LEN;
		lp->roundup_byte = OTX2_SEC_AES_CBC_ROUNDUP_BYTE_LEN;
	} else {
		return -EINVAL;
	}

	if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
		lp->partial_len += OTX2_SEC_SHA1_HMAC_LEN;
	else
		return -EINVAL;

	return 0;
}

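/*
 * Synchronously push an SA write command to the CPT hardware. The instruction
 * carries the microcode WRITE_SA opcode with the SA context as its input, is
 * submitted through the queue pair's LMTLINE, and the result word is then
 * polled until the hardware reports completion or the timeout expires.
 */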
static int
otx2_cpt_enq_sa_write(struct otx2_sec_session_ipsec_lp *lp,
		      struct otx2_cpt_qp *qptr, uint8_t opcode)
{
	uint64_t lmt_status, time_out;
	void *lmtline = qptr->lmtline;
	struct otx2_cpt_inst_s inst;
	struct otx2_cpt_res *res;
	uint64_t *mdata;
	int ret = 0;

	if (unlikely(rte_mempool_get(qptr->meta_info.pool,
				     (void **)&mdata) < 0))
		return -ENOMEM;

	res = (struct otx2_cpt_res *)RTE_PTR_ALIGN(mdata, 16);
	res->compcode = CPT_9X_COMP_E_NOTDONE;

	inst.opcode = opcode | (lp->ctx_len << 8);
	inst.param1 = 0;
	inst.param2 = 0;
	inst.dlen = lp->ctx_len << 3;
	inst.dptr = rte_mempool_virt2iova(lp);
	inst.rptr = 0;
	inst.cptr = rte_mempool_virt2iova(lp);
	inst.egrp = OTX2_CPT_EGRP_SE;

	inst.u64[0] = 0;
	inst.u64[2] = 0;
	inst.u64[3] = 0;
	inst.res_addr = rte_mempool_virt2iova(res);

	rte_io_wmb();

	do {
		/* Copy CPT command to LMTLINE */
		otx2_lmt_mov(lmtline, &inst, 2);
		lmt_status = otx2_lmt_submit(qptr->lf_nq_reg);
	} while (lmt_status == 0);

	time_out = rte_get_timer_cycles() +
			DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();

	while (res->compcode == CPT_9X_COMP_E_NOTDONE) {
		if (rte_get_timer_cycles() > time_out) {
			rte_mempool_put(qptr->meta_info.pool, mdata);
			otx2_err("Request timed out");
			return -ETIMEDOUT;
		}
		rte_io_rmb();
	}

	if (unlikely(res->compcode != CPT_9X_COMP_E_GOOD)) {
		ret = res->compcode;
		switch (ret) {
		case CPT_9X_COMP_E_INSTERR:
			otx2_err("Request failed with instruction error");
			break;
		case CPT_9X_COMP_E_FAULT:
			otx2_err("Request failed with DMA fault");
			break;
		case CPT_9X_COMP_E_HWERR:
			otx2_err("Request failed with hardware error");
			break;
		default:
			otx2_err("Request failed with unknown hardware "
				 "completion code : 0x%x", ret);
		}
		goto mempool_put;
	}

	if (unlikely(res->uc_compcode != OTX2_IPSEC_PO_CC_SUCCESS)) {
		ret = res->uc_compcode;
		switch (ret) {
		case OTX2_IPSEC_PO_CC_AUTH_UNSUPPORTED:
			otx2_err("Invalid auth type");
			break;
		case OTX2_IPSEC_PO_CC_ENCRYPT_UNSUPPORTED:
			otx2_err("Invalid encrypt type");
			break;
		default:
			otx2_err("Request failed with unknown microcode "
				 "completion code : 0x%x", ret);
		}
	}

mempool_put:
	rte_mempool_put(qptr->meta_info.pool, mdata);
	return ret;
}

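/*
 * Record the IV/digest layout of the crypto transforms in the session so the
 * datapath can locate the per-operation IV and authentication tag. AEAD
 * sessions carry a single IV and an AAD length; cipher+auth chains keep
 * separate cipher and auth IV offsets. Per-packet IV generation is requested
 * through the microcode param1 field.
 */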
static void
set_session_misc_attributes(struct otx2_sec_session_ipsec_lp *sess,
			    struct rte_crypto_sym_xform *crypto_xform,
			    struct rte_crypto_sym_xform *auth_xform,
			    struct rte_crypto_sym_xform *cipher_xform)
{
	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		sess->iv_offset = crypto_xform->aead.iv.offset;
		sess->iv_length = crypto_xform->aead.iv.length;
		sess->aad_length = crypto_xform->aead.aad_length;
		sess->mac_len = crypto_xform->aead.digest_length;
	} else {
		sess->iv_offset = cipher_xform->cipher.iv.offset;
		sess->iv_length = cipher_xform->cipher.iv.length;
		sess->auth_iv_offset = auth_xform->auth.iv.offset;
		sess->auth_iv_length = auth_xform->auth.iv.length;
		sess->mac_len = auth_xform->auth.digest_length;
	}

	sess->ucmd_param1 = OTX2_IPSEC_PO_PER_PKT_IV;
	sess->ucmd_param2 = 0;
}

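/*
 * Create an outbound (egress) lookaside IPsec session: populate the outbound
 * SA context with the tunnel header template, salt, UDP encapsulation ports
 * and cipher key, precompute the length overheads, build the CPT instruction
 * word used by the datapath, and finally flush the SA to hardware through
 * otx2_cpt_enq_sa_write(). Only tunnel mode is accepted here.
 */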
static int
crypto_sec_ipsec_outb_session_create(struct rte_cryptodev *crypto_dev,
				     struct rte_security_ipsec_xform *ipsec,
				     struct rte_crypto_sym_xform *crypto_xform,
				     struct rte_security_session *sec_sess)
{
	struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
	const uint8_t *cipher_key, *auth_key;
	struct otx2_sec_session_ipsec_lp *lp;
	struct otx2_ipsec_po_sa_ctl *ctl;
	int cipher_key_len, auth_key_len;
	struct otx2_ipsec_po_out_sa *sa;
	struct otx2_sec_session *sess;
	struct otx2_cpt_inst_s inst;
	struct rte_ipv6_hdr *ip6;
	struct rte_ipv4_hdr *ip;
	int ret;

	sess = get_sec_session_private_data(sec_sess);
	sess->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	lp = &sess->ipsec.lp;

	sa = &lp->out_sa;
	ctl = &sa->ctl;
	if (ctl->valid) {
		otx2_err("SA already registered");
		return -EINVAL;
	}

	memset(sa, 0, sizeof(struct otx2_ipsec_po_out_sa));

	/* Initialize lookaside ipsec private data */
	lp->ip_id = 0;
	lp->seq_lo = 1;
	lp->seq_hi = 0;
	lp->tunnel_type = ipsec->tunnel.type;

	ret = ipsec_po_sa_ctl_set(ipsec, crypto_xform, ctl);
	if (ret)
		return ret;

	ret = ipsec_lp_len_precalc(ipsec, crypto_xform, lp);
	if (ret)
		return ret;

	memcpy(sa->iv.gcm.nonce, &ipsec->salt, 4);

	if (ipsec->options.udp_encap) {
		/* Standard NAT-T (UDP encapsulation) port */
		sa->udp_src = 4500;
		sa->udp_dst = 4500;
	}

	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		/* Start ip id from 1 */
		lp->ip_id = 1;

		if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			ip = &sa->template.ipv4_hdr;
			ip->version_ihl = RTE_IPV4_VHL_DEF;
			ip->next_proto_id = IPPROTO_ESP;
			ip->time_to_live = ipsec->tunnel.ipv4.ttl;
			ip->type_of_service |= (ipsec->tunnel.ipv4.dscp << 2);
			if (ipsec->tunnel.ipv4.df)
				ip->fragment_offset = BIT(14);
			memcpy(&ip->src_addr, &ipsec->tunnel.ipv4.src_ip,
				sizeof(struct in_addr));
			memcpy(&ip->dst_addr, &ipsec->tunnel.ipv4.dst_ip,
				sizeof(struct in_addr));
		} else if (ipsec->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
			ip6 = &sa->template.ipv6_hdr;
			ip6->vtc_flow = rte_cpu_to_be_32(0x60000000 |
				((ipsec->tunnel.ipv6.dscp <<
					RTE_IPV6_HDR_TC_SHIFT) &
					RTE_IPV6_HDR_TC_MASK) |
				((ipsec->tunnel.ipv6.flabel <<
					RTE_IPV6_HDR_FL_SHIFT) &
					RTE_IPV6_HDR_FL_MASK));
			ip6->hop_limits = ipsec->tunnel.ipv6.hlimit;
			ip6->proto = (ipsec->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			memcpy(&ip6->src_addr, &ipsec->tunnel.ipv6.src_addr,
				sizeof(struct in6_addr));
			memcpy(&ip6->dst_addr, &ipsec->tunnel.ipv6.dst_addr,
				sizeof(struct in6_addr));
		} else {
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}

	cipher_xform = crypto_xform;
	auth_xform = crypto_xform->next;

	cipher_key_len = 0;
	auth_key_len = 0;

	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		cipher_key = crypto_xform->aead.key.data;
		cipher_key_len = crypto_xform->aead.key.length;

		lp->ctx_len = sizeof(struct otx2_ipsec_po_out_sa);
		lp->ctx_len >>= 3;
		RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_AES_GCM_OUTB_CTX_LEN);
	} else {
		cipher_key = cipher_xform->cipher.key.data;
		cipher_key_len = cipher_xform->cipher.key.length;
		auth_key = auth_xform->auth.key.data;
		auth_key_len = auth_xform->auth.key.length;

		/* TODO: check the ctx len for supporting ALGO */
		lp->ctx_len = sizeof(struct otx2_ipsec_po_out_sa) >> 3;
		RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_MAX_OUTB_CTX_LEN);
	}

	if (cipher_key_len != 0)
		memcpy(sa->cipher_key, cipher_key, cipher_key_len);
	else
		return -EINVAL;

	/* Use OPAD & IPAD */
	RTE_SET_USED(auth_key);
	RTE_SET_USED(auth_key_len);

	inst.u64[7] = 0;
	inst.egrp = OTX2_CPT_EGRP_SE;
	inst.cptr = rte_mempool_virt2iova(sa);

	lp->cpt_inst_w7 = inst.u64[7];
	lp->ucmd_opcode = (lp->ctx_len << 8) |
				(OTX2_IPSEC_PO_PROCESS_IPSEC_OUTB);

	set_session_misc_attributes(lp, crypto_xform,
				    auth_xform, cipher_xform);

	return otx2_cpt_enq_sa_write(lp, crypto_dev->data->queue_pairs[0],
				     OTX2_IPSEC_PO_WRITE_IPSEC_OUTB);
}

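/*
 * Create an inbound (ingress) lookaside IPsec session: program the inbound SA
 * with the control word, salt/nonce and cipher key, set up optional
 * anti-replay state, build the datapath opcode, and flush the SA to hardware
 * through otx2_cpt_enq_sa_write().
 */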
static int
crypto_sec_ipsec_inb_session_create(struct rte_cryptodev *crypto_dev,
				    struct rte_security_ipsec_xform *ipsec,
				    struct rte_crypto_sym_xform *crypto_xform,
				    struct rte_security_session *sec_sess)
{
	struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
	struct otx2_sec_session_ipsec_lp *lp;
	struct otx2_ipsec_po_sa_ctl *ctl;
	const uint8_t *cipher_key, *auth_key;
	int cipher_key_len, auth_key_len;
	struct otx2_ipsec_po_in_sa *sa;
	struct otx2_sec_session *sess;
	struct otx2_cpt_inst_s inst;
	int ret;

	sess = get_sec_session_private_data(sec_sess);
	sess->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
	lp = &sess->ipsec.lp;

	sa = &lp->in_sa;
	ctl = &sa->ctl;

	if (ctl->valid) {
		otx2_err("SA already registered");
		return -EINVAL;
	}

	memset(sa, 0, sizeof(struct otx2_ipsec_po_in_sa));
	sa->replay_win_sz = ipsec->replay_win_sz;

	ret = ipsec_po_sa_ctl_set(ipsec, crypto_xform, ctl);
	if (ret)
		return ret;

	lp->tunnel_type = ipsec->tunnel.type;
	auth_xform = crypto_xform;
	cipher_xform = crypto_xform->next;

	cipher_key_len = 0;
	auth_key_len = 0;

	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
			memcpy(sa->iv.gcm.nonce, &ipsec->salt, 4);
		cipher_key = crypto_xform->aead.key.data;
		cipher_key_len = crypto_xform->aead.key.length;

		lp->ctx_len = offsetof(struct otx2_ipsec_po_in_sa,
					    aes_gcm.hmac_key[0]) >> 3;
		RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_AES_GCM_INB_CTX_LEN);
	} else {
		cipher_key = cipher_xform->cipher.key.data;
		cipher_key_len = cipher_xform->cipher.key.length;
		auth_key = auth_xform->auth.key.data;
		auth_key_len = auth_xform->auth.key.length;

		/* TODO: check the ctx len for supporting ALGO */
		lp->ctx_len = sizeof(struct otx2_ipsec_po_in_sa) >> 2;
		RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_MAX_INB_CTX_LEN);
	}

	if (cipher_key_len != 0)
		memcpy(sa->cipher_key, cipher_key, cipher_key_len);
	else
		return -EINVAL;

	/* Use OPAD & IPAD */
	RTE_SET_USED(auth_key);
	RTE_SET_USED(auth_key_len);

	inst.u64[7] = 0;
	inst.egrp = OTX2_CPT_EGRP_SE;
	inst.cptr = rte_mempool_virt2iova(sa);

	lp->cpt_inst_w7 = inst.u64[7];
	lp->ucmd_opcode = (lp->ctx_len << 8) |
				(OTX2_IPSEC_PO_PROCESS_IPSEC_INB);

	set_session_misc_attributes(lp, crypto_xform,
				    auth_xform, cipher_xform);

	if (sa->replay_win_sz) {
		if (sa->replay_win_sz > OTX2_IPSEC_MAX_REPLAY_WIN_SZ) {
			otx2_err("Replay window size is not supported");
			return -ENOTSUP;
		}
		sa->replay = rte_zmalloc(NULL, sizeof(struct otx2_ipsec_replay),
				0);
		if (sa->replay == NULL)
			return -ENOMEM;

		/* Set window bottom to 1, base and top to size of window */
		sa->replay->winb = 1;
		sa->replay->wint = sa->replay_win_sz;
		sa->replay->base = sa->replay_win_sz;
		sa->esn_low = 0;
		sa->esn_hi = 0;
	}

	return otx2_cpt_enq_sa_write(lp, crypto_dev->data->queue_pairs[0],
				     OTX2_IPSEC_PO_WRITE_IPSEC_INB);
}

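/*
 * Validate the IPsec transform pair and dispatch the session to the inbound
 * or outbound setup path. Queue pair 0 must already be configured, since the
 * SA is written to hardware through it.
 */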
static int
crypto_sec_ipsec_session_create(struct rte_cryptodev *crypto_dev,
				struct rte_security_ipsec_xform *ipsec,
				struct rte_crypto_sym_xform *crypto_xform,
				struct rte_security_session *sess)
{
	int ret;

	if (crypto_dev->data->queue_pairs[0] == NULL) {
		otx2_err("Setup cpt queue pair before creating sec session");
		return -EPERM;
	}

	ret = ipsec_po_xform_verify(ipsec, crypto_xform);
	if (ret)
		return ret;

	if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		return crypto_sec_ipsec_inb_session_create(crypto_dev, ipsec,
							   crypto_xform, sess);
	else
		return crypto_sec_ipsec_outb_session_create(crypto_dev, ipsec,
							    crypto_xform, sess);
}

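/*
 * rte_security session_create operation for the lookaside crypto PMD. Only
 * RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL with the IPsec protocol is
 * supported; the caller supplies the IPsec xform (tunnel mode, ESP or AH)
 * together with a crypto xform chain (AES-GCM, or AES-CBC with SHA1-HMAC, as
 * per the length precalculation above). Session private data is taken from
 * the caller-provided mempool and released again on any setup failure.
 */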
static int
otx2_crypto_sec_session_create(void *device,
			       struct rte_security_session_conf *conf,
			       struct rte_security_session *sess,
			       struct rte_mempool *mempool)
{
	struct otx2_sec_session *priv;
	int ret;

	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
		return -ENOTSUP;

	if (rte_security_dynfield_register() < 0)
		return -rte_errno;

	if (rte_mempool_get(mempool, (void **)&priv)) {
		otx2_err("Could not allocate security session private data");
		return -ENOMEM;
	}

	set_sec_session_private_data(sess, priv);

	priv->userdata = conf->userdata;

	if (conf->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
		ret = crypto_sec_ipsec_session_create(device, &conf->ipsec,
						      conf->crypto_xform,
						      sess);
	else
		ret = -ENOTSUP;

	if (ret)
		goto mempool_put;

	return 0;

mempool_put:
	rte_mempool_put(mempool, priv);
	set_sec_session_private_data(sess, NULL);
	return ret;
}

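/*
 * Release a security session: return the private data to the mempool it was
 * allocated from and clear the back-reference held in the generic session.
 */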
static int
otx2_crypto_sec_session_destroy(void *device __rte_unused,
				struct rte_security_session *sess)
{
	struct otx2_sec_session *priv;
	struct rte_mempool *sess_mp;

	priv = get_sec_session_private_data(sess);

	if (priv == NULL)
		return 0;

	sess_mp = rte_mempool_from_obj(priv);

	set_sec_session_private_data(sess, NULL);
	rte_mempool_put(sess_mp, priv);

	return 0;
}

static unsigned int
otx2_crypto_sec_session_get_size(void *device __rte_unused)
{
	return sizeof(struct otx2_sec_session);
}

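/*
 * Attach the security session to a packet through the rte_security dynamic
 * mbuf field, so the lookaside datapath can presumably retrieve it when it
 * builds the CPT instruction for that packet.
 */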
static int
otx2_crypto_sec_set_pkt_mdata(void *device __rte_unused,
			      struct rte_security_session *session,
			      struct rte_mbuf *m, void *params __rte_unused)
{
	/* Set security session as the pkt metadata */
	*rte_security_dynfield(m) = (rte_security_dynfield_t)session;

	return 0;
}

static int
otx2_crypto_sec_get_userdata(void *device __rte_unused, uint64_t md,
			     void **userdata)
{
	/* Retrieve userdata */
	*userdata = (void *)md;

	return 0;
}

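/* rte_security operations exposed by the lookaside crypto PMD */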
static struct rte_security_ops otx2_crypto_sec_ops = {
	.session_create		= otx2_crypto_sec_session_create,
	.session_destroy	= otx2_crypto_sec_session_destroy,
	.session_get_size	= otx2_crypto_sec_session_get_size,
	.set_pkt_metadata	= otx2_crypto_sec_set_pkt_mdata,
	.get_userdata		= otx2_crypto_sec_get_userdata,
	.capabilities_get	= otx2_crypto_sec_capabilities_get
};

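/*
 * Allocate and attach the rte_security context for the crypto device so that
 * applications can reach the operations above through the rte_security API.
 * The context is freed again in otx2_crypto_sec_ctx_destroy().
 */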
int
otx2_crypto_sec_ctx_create(struct rte_cryptodev *cdev)
{
	struct rte_security_ctx *ctx;

	ctx = rte_malloc("otx2_cpt_dev_sec_ctx",
			 sizeof(struct rte_security_ctx), 0);

	if (ctx == NULL)
		return -ENOMEM;

	/* Populate ctx */
	ctx->device = cdev;
	ctx->ops = &otx2_crypto_sec_ops;
	ctx->sess_cnt = 0;

	cdev->security_ctx = ctx;

	return 0;
}

void
otx2_crypto_sec_ctx_destroy(struct rte_cryptodev *cdev)
{
	rte_free(cdev->security_ctx);
}