dpdk.git: drivers/crypto/octeontx2/otx2_cryptodev_sec.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */

#include <rte_cryptodev.h>
#include <rte_esp.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_malloc.h>
#include <rte_security.h>
#include <rte_security_driver.h>
#include <rte_udp.h>

#include "otx2_cryptodev.h"
#include "otx2_cryptodev_capabilities.h"
#include "otx2_cryptodev_hw_access.h"
#include "otx2_cryptodev_ops.h"
#include "otx2_cryptodev_sec.h"
#include "otx2_security.h"

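/*
 * Precompute the fixed per-packet length overheads for a lookaside IPsec
 * session: outer IP header (tunnel mode), ESP/AH header, optional UDP
 * encapsulation, IV and ICV sizes, and the cipher block round-up length.
 */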
static int
ipsec_lp_len_precalc(struct rte_security_ipsec_xform *ipsec,
		struct rte_crypto_sym_xform *xform,
		struct otx2_sec_session_ipsec_lp *lp)
{
	struct rte_crypto_sym_xform *cipher_xform, *auth_xform;

	lp->partial_len = 0;
	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			lp->partial_len = sizeof(struct rte_ipv4_hdr);
		else if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV6)
			lp->partial_len = sizeof(struct rte_ipv6_hdr);
		else
			return -EINVAL;
	}

	if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
		lp->partial_len += sizeof(struct rte_esp_hdr);
		lp->roundup_len = sizeof(struct rte_esp_tail);
	} else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH) {
		lp->partial_len += OTX2_SEC_AH_HDR_LEN;
	} else {
		return -EINVAL;
	}

	if (ipsec->options.udp_encap)
		lp->partial_len += sizeof(struct rte_udp_hdr);

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
			lp->partial_len += OTX2_SEC_AES_GCM_IV_LEN;
			lp->partial_len += OTX2_SEC_AES_GCM_MAC_LEN;
			lp->roundup_byte = OTX2_SEC_AES_GCM_ROUNDUP_BYTE_LEN;
			return 0;
		} else {
			return -EINVAL;
		}
	}

	if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		cipher_xform = xform;
		auth_xform = xform->next;
	} else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		auth_xform = xform;
		cipher_xform = xform->next;
	} else {
		return -EINVAL;
	}

	if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
		lp->partial_len += OTX2_SEC_AES_CBC_IV_LEN;
		lp->roundup_byte = OTX2_SEC_AES_CBC_ROUNDUP_BYTE_LEN;
	} else {
		return -EINVAL;
	}

	if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
		lp->partial_len += OTX2_SEC_SHA1_HMAC_LEN;
	else if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA256_HMAC)
		lp->partial_len += OTX2_SEC_SHA2_HMAC_LEN;
	else
		return -EINVAL;

	return 0;
}

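/*
 * Write an SA context to the CPT hardware: build a single CPT instruction
 * carrying the SA write opcode and context length, submit it through the
 * queue pair's LMTLINE, then poll the result word (with a timeout) and
 * decode the hardware and microcode completion codes.
 */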
static int
otx2_cpt_enq_sa_write(struct otx2_sec_session_ipsec_lp *lp,
		      struct otx2_cpt_qp *qptr, uint8_t opcode)
{
	uint64_t lmt_status, time_out;
	void *lmtline = qptr->lmtline;
	struct otx2_cpt_inst_s inst;
	struct otx2_cpt_res *res;
	uint64_t *mdata;
	int ret = 0;

	if (unlikely(rte_mempool_get(qptr->meta_info.pool,
				     (void **)&mdata) < 0))
		return -ENOMEM;

	res = (struct otx2_cpt_res *)RTE_PTR_ALIGN(mdata, 16);
	res->compcode = CPT_9X_COMP_E_NOTDONE;

	inst.opcode = opcode | (lp->ctx_len << 8);
	inst.param1 = 0;
	inst.param2 = 0;
	inst.dlen = lp->ctx_len << 3;
	inst.dptr = rte_mempool_virt2iova(lp);
	inst.rptr = 0;
	inst.cptr = rte_mempool_virt2iova(lp);
	inst.egrp = OTX2_CPT_EGRP_SE;

	inst.u64[0] = 0;
	inst.u64[2] = 0;
	inst.u64[3] = 0;
	inst.res_addr = rte_mempool_virt2iova(res);

	rte_io_wmb();

	do {
		/* Copy CPT command to LMTLINE */
		otx2_lmt_mov(lmtline, &inst, 2);
		lmt_status = otx2_lmt_submit(qptr->lf_nq_reg);
	} while (lmt_status == 0);

	time_out = rte_get_timer_cycles() +
			DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();

	while (res->compcode == CPT_9X_COMP_E_NOTDONE) {
		if (rte_get_timer_cycles() > time_out) {
			rte_mempool_put(qptr->meta_info.pool, mdata);
			otx2_err("Request timed out");
			return -ETIMEDOUT;
		}
		rte_io_rmb();
	}

	if (unlikely(res->compcode != CPT_9X_COMP_E_GOOD)) {
		ret = res->compcode;
		switch (ret) {
		case CPT_9X_COMP_E_INSTERR:
			otx2_err("Request failed with instruction error");
			break;
		case CPT_9X_COMP_E_FAULT:
			otx2_err("Request failed with DMA fault");
			break;
		case CPT_9X_COMP_E_HWERR:
			otx2_err("Request failed with hardware error");
			break;
		default:
			otx2_err("Request failed with unknown hardware "
				 "completion code : 0x%x", ret);
		}
		goto mempool_put;
	}

	if (unlikely(res->uc_compcode != OTX2_IPSEC_PO_CC_SUCCESS)) {
		ret = res->uc_compcode;
		switch (ret) {
		case OTX2_IPSEC_PO_CC_AUTH_UNSUPPORTED:
			otx2_err("Invalid auth type");
			break;
		case OTX2_IPSEC_PO_CC_ENCRYPT_UNSUPPORTED:
			otx2_err("Invalid encrypt type");
			break;
		default:
			otx2_err("Request failed with unknown microcode "
				 "completion code : 0x%x", ret);
		}
	}

mempool_put:
	rte_mempool_put(qptr->meta_info.pool, mdata);
	return ret;
}

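/*
 * Cache the IV/AAD/digest layout of the crypto transform in the lookaside
 * session so the datapath can build per-packet microcode commands without
 * re-parsing the xform chain.
 */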
static void
set_session_misc_attributes(struct otx2_sec_session_ipsec_lp *sess,
			    struct rte_crypto_sym_xform *crypto_xform,
			    struct rte_crypto_sym_xform *auth_xform,
			    struct rte_crypto_sym_xform *cipher_xform)
{
	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		sess->iv_offset = crypto_xform->aead.iv.offset;
		sess->iv_length = crypto_xform->aead.iv.length;
		sess->aad_length = crypto_xform->aead.aad_length;
		sess->mac_len = crypto_xform->aead.digest_length;
	} else {
		sess->iv_offset = cipher_xform->cipher.iv.offset;
		sess->iv_length = cipher_xform->cipher.iv.length;
		sess->auth_iv_offset = auth_xform->auth.iv.offset;
		sess->auth_iv_length = auth_xform->auth.iv.length;
		sess->mac_len = auth_xform->auth.digest_length;
	}

	sess->ucmd_param1 = OTX2_IPSEC_PO_PER_PKT_IV;
	sess->ucmd_param2 = 0;
}

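/*
 * Create an outbound (egress) lookaside IPsec session: fill the outer IP
 * template and SA control word, copy the cipher/auth keys, compute the
 * context length for the selected algorithms and flush the SA to hardware
 * with a WRITE_IPSEC_OUTB request.
 */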
static int
crypto_sec_ipsec_outb_session_create(struct rte_cryptodev *crypto_dev,
				     struct rte_security_ipsec_xform *ipsec,
				     struct rte_crypto_sym_xform *crypto_xform,
				     struct rte_security_session *sec_sess)
{
	struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
	struct otx2_ipsec_po_ip_template *template = NULL;
	const uint8_t *cipher_key, *auth_key;
	struct otx2_sec_session_ipsec_lp *lp;
	struct otx2_ipsec_po_sa_ctl *ctl;
	int cipher_key_len, auth_key_len;
	struct otx2_ipsec_po_out_sa *sa;
	struct otx2_sec_session *sess;
	struct otx2_cpt_inst_s inst;
	struct rte_ipv6_hdr *ip6;
	struct rte_ipv4_hdr *ip;
	int ret, ctx_len;

	sess = get_sec_session_private_data(sec_sess);
	sess->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	lp = &sess->ipsec.lp;

	sa = &lp->out_sa;
	ctl = &sa->ctl;
	if (ctl->valid) {
		otx2_err("SA already registered");
		return -EINVAL;
	}

	memset(sa, 0, sizeof(struct otx2_ipsec_po_out_sa));

	/* Initialize lookaside ipsec private data */
	lp->mode_type = OTX2_IPSEC_PO_TRANSPORT;
	lp->ip_id = 0;
	lp->seq_lo = 1;
	lp->seq_hi = 0;

	ret = ipsec_po_sa_ctl_set(ipsec, crypto_xform, ctl);
	if (ret)
		return ret;

	ret = ipsec_lp_len_precalc(ipsec, crypto_xform, lp);
	if (ret)
		return ret;

	/* Start ip id from 1 */
	lp->ip_id = 1;

	if (ctl->enc_type == OTX2_IPSEC_PO_SA_ENC_AES_GCM) {
		template = &sa->aes_gcm.template;
		ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
				aes_gcm.template) + sizeof(
				sa->aes_gcm.template.ip4);
		ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
		lp->ctx_len = ctx_len >> 3;
	} else if (ctl->auth_type == OTX2_IPSEC_PO_SA_AUTH_SHA1) {
		template = &sa->sha1.template;
		ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
				sha1.template) + sizeof(
				sa->sha1.template.ip4);
		ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
		lp->ctx_len = ctx_len >> 3;
	} else if (ctl->auth_type == OTX2_IPSEC_PO_SA_AUTH_SHA2_256) {
		template = &sa->sha2.template;
		ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
				sha2.template) + sizeof(
				sa->sha2.template.ip4);
		ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
		lp->ctx_len = ctx_len >> 3;
	} else {
		return -EINVAL;
	}

	ip = &template->ip4.ipv4_hdr;
	if (ipsec->options.udp_encap) {
		ip->next_proto_id = IPPROTO_UDP;
		template->ip4.udp_src = rte_be_to_cpu_16(4500);
		template->ip4.udp_dst = rte_be_to_cpu_16(4500);
	} else {
		ip->next_proto_id = IPPROTO_ESP;
	}

	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			lp->mode_type = OTX2_IPSEC_PO_TUNNEL_IPV4;
			ip->version_ihl = RTE_IPV4_VHL_DEF;
			ip->time_to_live = ipsec->tunnel.ipv4.ttl;
			ip->type_of_service |= (ipsec->tunnel.ipv4.dscp << 2);
			if (ipsec->tunnel.ipv4.df)
				ip->fragment_offset = BIT(14);
			memcpy(&ip->src_addr, &ipsec->tunnel.ipv4.src_ip,
				sizeof(struct in_addr));
			memcpy(&ip->dst_addr, &ipsec->tunnel.ipv4.dst_ip,
				sizeof(struct in_addr));
		} else if (ipsec->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {

			lp->mode_type = OTX2_IPSEC_PO_TUNNEL_IPV6;
			if (ctl->enc_type == OTX2_IPSEC_PO_SA_ENC_AES_GCM) {
				template = &sa->aes_gcm.template;
				ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
						aes_gcm.template) + sizeof(
						sa->aes_gcm.template.ip6);
				ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
				lp->ctx_len = ctx_len >> 3;
			} else if (ctl->auth_type ==
					OTX2_IPSEC_PO_SA_AUTH_SHA1) {
				template = &sa->sha1.template;
				ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
						sha1.template) + sizeof(
						sa->sha1.template.ip6);
				ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
				lp->ctx_len = ctx_len >> 3;
			} else if (ctl->auth_type ==
					OTX2_IPSEC_PO_SA_AUTH_SHA2_256) {
				template = &sa->sha2.template;
				ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
						sha2.template) + sizeof(
						sa->sha2.template.ip6);
				ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
				lp->ctx_len = ctx_len >> 3;
			} else {
				return -EINVAL;
			}

			ip6 = &template->ip6.ipv6_hdr;
			if (ipsec->options.udp_encap) {
				ip6->proto = IPPROTO_UDP;
				template->ip6.udp_src = rte_be_to_cpu_16(4500);
				template->ip6.udp_dst = rte_be_to_cpu_16(4500);
			} else {
				ip6->proto = (ipsec->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			}
			ip6->vtc_flow = rte_cpu_to_be_32(0x60000000 |
				((ipsec->tunnel.ipv6.dscp <<
					RTE_IPV6_HDR_TC_SHIFT) &
					RTE_IPV6_HDR_TC_MASK) |
				((ipsec->tunnel.ipv6.flabel <<
					RTE_IPV6_HDR_FL_SHIFT) &
					RTE_IPV6_HDR_FL_MASK));
			ip6->hop_limits = ipsec->tunnel.ipv6.hlimit;
			memcpy(&ip6->src_addr, &ipsec->tunnel.ipv6.src_addr,
				sizeof(struct in6_addr));
			memcpy(&ip6->dst_addr, &ipsec->tunnel.ipv6.dst_addr,
				sizeof(struct in6_addr));
		}
	}

	cipher_xform = crypto_xform;
	auth_xform = crypto_xform->next;

	cipher_key_len = 0;
	auth_key_len = 0;

	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
			memcpy(sa->iv.gcm.nonce, &ipsec->salt, 4);
		cipher_key = crypto_xform->aead.key.data;
		cipher_key_len = crypto_xform->aead.key.length;
	} else {
		cipher_key = cipher_xform->cipher.key.data;
		cipher_key_len = cipher_xform->cipher.key.length;
		auth_key = auth_xform->auth.key.data;
		auth_key_len = auth_xform->auth.key.length;

		if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
			memcpy(sa->sha1.hmac_key, auth_key, auth_key_len);
		else if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA256_HMAC)
			memcpy(sa->sha2.hmac_key, auth_key, auth_key_len);
	}

	if (cipher_key_len != 0)
		memcpy(sa->cipher_key, cipher_key, cipher_key_len);
	else
		return -EINVAL;

	inst.u64[7] = 0;
	inst.egrp = OTX2_CPT_EGRP_SE;
	inst.cptr = rte_mempool_virt2iova(sa);

	lp->cpt_inst_w7 = inst.u64[7];
	lp->ucmd_opcode = (lp->ctx_len << 8) |
				(OTX2_IPSEC_PO_PROCESS_IPSEC_OUTB);

	set_session_misc_attributes(lp, crypto_xform,
				    auth_xform, cipher_xform);

	return otx2_cpt_enq_sa_write(lp, crypto_dev->data->queue_pairs[0],
				     OTX2_IPSEC_PO_WRITE_IPSEC_OUTB);
}

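/*
 * Create an inbound (ingress) lookaside IPsec session: program the SA
 * control word, copy keys and salt, size the context for the negotiated
 * algorithms, optionally allocate anti-replay state, and flush the SA to
 * hardware with a WRITE_IPSEC_INB request.
 */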
static int
crypto_sec_ipsec_inb_session_create(struct rte_cryptodev *crypto_dev,
				    struct rte_security_ipsec_xform *ipsec,
				    struct rte_crypto_sym_xform *crypto_xform,
				    struct rte_security_session *sec_sess)
{
	struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
	const uint8_t *cipher_key, *auth_key;
	struct otx2_sec_session_ipsec_lp *lp;
	struct otx2_ipsec_po_sa_ctl *ctl;
	int cipher_key_len, auth_key_len;
	struct otx2_ipsec_po_in_sa *sa;
	struct otx2_sec_session *sess;
	struct otx2_cpt_inst_s inst;
	int ret;

	sess = get_sec_session_private_data(sec_sess);
	sess->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
	lp = &sess->ipsec.lp;

	sa = &lp->in_sa;
	ctl = &sa->ctl;

	if (ctl->valid) {
		otx2_err("SA already registered");
		return -EINVAL;
	}

	memset(sa, 0, sizeof(struct otx2_ipsec_po_in_sa));
	sa->replay_win_sz = ipsec->replay_win_sz;

	ret = ipsec_po_sa_ctl_set(ipsec, crypto_xform, ctl);
	if (ret)
		return ret;

	lp->mode_type = OTX2_IPSEC_PO_TRANSPORT;

	auth_xform = crypto_xform;
	cipher_xform = crypto_xform->next;

	cipher_key_len = 0;
	auth_key_len = 0;

	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
		lp->mode_type = (ipsec->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
				OTX2_IPSEC_PO_TUNNEL_IPV4 :
				OTX2_IPSEC_PO_TUNNEL_IPV6;

	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
			memcpy(sa->iv.gcm.nonce, &ipsec->salt, 4);
		cipher_key = crypto_xform->aead.key.data;
		cipher_key_len = crypto_xform->aead.key.length;

		lp->ctx_len = offsetof(struct otx2_ipsec_po_in_sa,
					    aes_gcm.hmac_key[0]) >> 3;
		RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_AES_GCM_INB_CTX_LEN);
	} else {
		cipher_key = cipher_xform->cipher.key.data;
		cipher_key_len = cipher_xform->cipher.key.length;
		auth_key = auth_xform->auth.key.data;
		auth_key_len = auth_xform->auth.key.length;

		if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC) {
			memcpy(sa->aes_gcm.hmac_key, auth_key, auth_key_len);
			lp->ctx_len = offsetof(struct otx2_ipsec_po_in_sa,
						    aes_gcm.selector) >> 3;
		} else if (auth_xform->auth.algo ==
				RTE_CRYPTO_AUTH_SHA256_HMAC) {
			memcpy(sa->sha2.hmac_key, auth_key, auth_key_len);
			lp->ctx_len = offsetof(struct otx2_ipsec_po_in_sa,
						    sha2.selector) >> 3;
		}
	}

	if (cipher_key_len != 0)
		memcpy(sa->cipher_key, cipher_key, cipher_key_len);
	else
		return -EINVAL;

	inst.u64[7] = 0;
	inst.egrp = OTX2_CPT_EGRP_SE;
	inst.cptr = rte_mempool_virt2iova(sa);

	lp->cpt_inst_w7 = inst.u64[7];
	lp->ucmd_opcode = (lp->ctx_len << 8) |
				(OTX2_IPSEC_PO_PROCESS_IPSEC_INB);

	set_session_misc_attributes(lp, crypto_xform,
				    auth_xform, cipher_xform);

	if (sa->replay_win_sz) {
		if (sa->replay_win_sz > OTX2_IPSEC_MAX_REPLAY_WIN_SZ) {
			otx2_err("Replay window size is not supported");
			return -ENOTSUP;
		}
		sa->replay = rte_zmalloc(NULL, sizeof(struct otx2_ipsec_replay),
				0);
		if (sa->replay == NULL)
			return -ENOMEM;

		/* Set window bottom to 1, base and top to size of window */
		sa->replay->winb = 1;
		sa->replay->wint = sa->replay_win_sz;
		sa->replay->base = sa->replay_win_sz;
		sa->esn_low = 0;
		sa->esn_hi = 0;
	}

	return otx2_cpt_enq_sa_write(lp, crypto_dev->data->queue_pairs[0],
				     OTX2_IPSEC_PO_WRITE_IPSEC_INB);
}

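/*
 * Common IPsec session entry point: verify that a CPT queue pair is
 * configured and that the crypto xform chain matches the IPsec transform,
 * then dispatch to the inbound or outbound session constructor.
 */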
static int
crypto_sec_ipsec_session_create(struct rte_cryptodev *crypto_dev,
				struct rte_security_ipsec_xform *ipsec,
				struct rte_crypto_sym_xform *crypto_xform,
				struct rte_security_session *sess)
{
	int ret;

	if (crypto_dev->data->queue_pairs[0] == NULL) {
		otx2_err("Setup cpt queue pair before creating sec session");
		return -EPERM;
	}

	ret = ipsec_po_xform_verify(ipsec, crypto_xform);
	if (ret)
		return ret;

	if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		return crypto_sec_ipsec_inb_session_create(crypto_dev, ipsec,
							   crypto_xform, sess);
	else
		return crypto_sec_ipsec_outb_session_create(crypto_dev, ipsec,
							    crypto_xform, sess);
}

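/*
 * rte_security session_create callback: only the lookaside protocol action
 * is supported. Allocate the per-session private data from the caller's
 * mempool, register the security dynfield and build the protocol-specific
 * (IPsec) session state; on failure, return the private data to the pool.
 */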
static int
otx2_crypto_sec_session_create(void *device,
			       struct rte_security_session_conf *conf,
			       struct rte_security_session *sess,
			       struct rte_mempool *mempool)
{
	struct otx2_sec_session *priv;
	int ret;

	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
		return -ENOTSUP;

	if (rte_security_dynfield_register() < 0)
		return -rte_errno;

	if (rte_mempool_get(mempool, (void **)&priv)) {
		otx2_err("Could not allocate security session private data");
		return -ENOMEM;
	}

	set_sec_session_private_data(sess, priv);

	priv->userdata = conf->userdata;

	if (conf->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
		ret = crypto_sec_ipsec_session_create(device, &conf->ipsec,
						      conf->crypto_xform,
						      sess);
	else
		ret = -ENOTSUP;

	if (ret)
		goto mempool_put;

	return 0;

mempool_put:
	rte_mempool_put(mempool, priv);
	set_sec_session_private_data(sess, NULL);
	return ret;
}

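/*
 * rte_security session_destroy callback: return the private data to the
 * mempool it was allocated from and detach it from the session.
 */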
static int
otx2_crypto_sec_session_destroy(void *device __rte_unused,
				struct rte_security_session *sess)
{
	struct otx2_sec_session *priv;
	struct rte_mempool *sess_mp;

	priv = get_sec_session_private_data(sess);

	if (priv == NULL)
		return 0;

	sess_mp = rte_mempool_from_obj(priv);

	set_sec_session_private_data(sess, NULL);
	rte_mempool_put(sess_mp, priv);

	return 0;
}

static unsigned int
otx2_crypto_sec_session_get_size(void *device __rte_unused)
{
	return sizeof(struct otx2_sec_session);
}

static int
otx2_crypto_sec_set_pkt_mdata(void *device __rte_unused,
			      struct rte_security_session *session,
			      struct rte_mbuf *m, void *params __rte_unused)
{
	/* Set security session as the pkt metadata */
	*rte_security_dynfield(m) = (rte_security_dynfield_t)session;

	return 0;
}

static int
otx2_crypto_sec_get_userdata(void *device __rte_unused, uint64_t md,
			     void **userdata)
{
	/* Retrieve userdata */
	*userdata = (void *)md;

	return 0;
}

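/*
 * rte_security operations exposed by the OCTEON TX2 crypto PMD.
 *
 * Rough application-side usage sketch (illustrative assumption only, not
 * taken from this file; the exact rte_security_session_create() arguments
 * differ between DPDK releases):
 *
 *	void *sec_ctx = rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = ipsec_xform,
 *		.crypto_xform = &crypto_xform,
 *	};
 *	sess = rte_security_session_create(sec_ctx, &conf, ...);
 */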
static struct rte_security_ops otx2_crypto_sec_ops = {
	.session_create		= otx2_crypto_sec_session_create,
	.session_destroy	= otx2_crypto_sec_session_destroy,
	.session_get_size	= otx2_crypto_sec_session_get_size,
	.set_pkt_metadata	= otx2_crypto_sec_set_pkt_mdata,
	.get_userdata		= otx2_crypto_sec_get_userdata,
	.capabilities_get	= otx2_crypto_sec_capabilities_get
};

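/*
 * Allocate and attach the rte_security context so applications can reach
 * the security ops above through the cryptodev's security_ctx pointer.
 */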
int
otx2_crypto_sec_ctx_create(struct rte_cryptodev *cdev)
{
	struct rte_security_ctx *ctx;

	ctx = rte_malloc("otx2_cpt_dev_sec_ctx",
			 sizeof(struct rte_security_ctx), 0);

	if (ctx == NULL)
		return -ENOMEM;

	/* Populate ctx */
	ctx->device = cdev;
	ctx->ops = &otx2_crypto_sec_ops;
	ctx->sess_cnt = 0;

	cdev->security_ctx = ctx;

	return 0;
}

void
otx2_crypto_sec_ctx_destroy(struct rte_cryptodev *cdev)
{
	rte_free(cdev->security_ctx);
}