doc: add tested platforms with Mellanox NICs
[dpdk.git] / drivers / crypto / octeontx2 / otx2_cryptodev_sec.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (C) 2020 Marvell International Ltd.
3  */
4
5 #include <rte_cryptodev.h>
6 #include <rte_esp.h>
7 #include <rte_ethdev.h>
8 #include <rte_ip.h>
9 #include <rte_malloc.h>
10 #include <rte_security.h>
11 #include <rte_security_driver.h>
12 #include <rte_udp.h>
13
14 #include "otx2_cryptodev.h"
15 #include "otx2_cryptodev_capabilities.h"
16 #include "otx2_cryptodev_hw_access.h"
17 #include "otx2_cryptodev_ops.h"
18 #include "otx2_cryptodev_sec.h"
19 #include "otx2_security.h"
20
21 static int
22 ipsec_lp_len_precalc(struct rte_security_ipsec_xform *ipsec,
23                 struct rte_crypto_sym_xform *xform,
24                 struct otx2_sec_session_ipsec_lp *lp)
25 {
26         struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
27
28         lp->partial_len = 0;
29         if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
30                 if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
31                         lp->partial_len = sizeof(struct rte_ipv4_hdr);
32                 else if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV6)
33                         lp->partial_len = sizeof(struct rte_ipv6_hdr);
34                 else
35                         return -EINVAL;
36         }
37
38         if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
39                 lp->partial_len += sizeof(struct rte_esp_hdr);
40                 lp->roundup_len = sizeof(struct rte_esp_tail);
41         } else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH) {
42                 lp->partial_len += OTX2_SEC_AH_HDR_LEN;
43         } else {
44                 return -EINVAL;
45         }
46
47         if (ipsec->options.udp_encap)
48                 lp->partial_len += sizeof(struct rte_udp_hdr);
49
50         if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
51                 if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
52                         lp->partial_len += OTX2_SEC_AES_GCM_IV_LEN;
53                         lp->partial_len += OTX2_SEC_AES_GCM_MAC_LEN;
54                         lp->roundup_byte = OTX2_SEC_AES_GCM_ROUNDUP_BYTE_LEN;
55                         return 0;
56                 } else {
57                         return -EINVAL;
58                 }
59         }
60
61         if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
62                 cipher_xform = xform;
63                 auth_xform = xform->next;
64         } else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
65                 auth_xform = xform;
66                 cipher_xform = xform->next;
67         } else {
68                 return -EINVAL;
69         }
70
71         if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
72                 lp->partial_len += OTX2_SEC_AES_CBC_IV_LEN;
73                 lp->roundup_byte = OTX2_SEC_AES_CBC_ROUNDUP_BYTE_LEN;
74         } else {
75                 return -EINVAL;
76         }
77
78         if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
79                 lp->partial_len += OTX2_SEC_SHA1_HMAC_LEN;
80         else if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA256_HMAC)
81                 lp->partial_len += OTX2_SEC_SHA2_HMAC_LEN;
82         else
83                 return -EINVAL;
84
85         return 0;
86 }
87
/*
 * Synchronously submit a WRITE_IPSEC_{INB,OUTB} command to CPT to flush
 * the SA context held in @lp to hardware, then poll for completion.
 *
 * @lp:     lookaside session whose SA context is to be written
 * @qptr:   CPT queue pair used for the submission
 * @opcode: microcode opcode selecting the inbound or outbound SA write
 *
 * Returns 0 on success, -ENOMEM if no meta buffer is available,
 * -ETIMEDOUT if the command does not complete within
 * DEFAULT_COMMAND_TIMEOUT, or the (non-zero) hardware/microcode
 * completion code on failure.
 */
static int
otx2_cpt_enq_sa_write(struct otx2_sec_session_ipsec_lp *lp,
		      struct otx2_cpt_qp *qptr, uint8_t opcode)
{
	uint64_t lmt_status, time_out;
	void *lmtline = qptr->lmtline;
	struct otx2_cpt_inst_s inst;
	struct otx2_cpt_res *res;
	uint64_t *mdata;
	int ret = 0;

	/* The completion word lives in a meta buffer from the queue's pool */
	if (unlikely(rte_mempool_get(qptr->meta_info.pool,
				     (void **)&mdata) < 0))
		return -ENOMEM;

	/* Hardware writes the result at a 16B-aligned address */
	res = (struct otx2_cpt_res *)RTE_PTR_ALIGN(mdata, 16);
	res->compcode = CPT_9X_COMP_E_NOTDONE;

	/* ctx_len is in 8-byte words (see session create); dlen is bytes */
	inst.opcode = opcode | (lp->ctx_len << 8);
	inst.param1 = 0;
	inst.param2 = 0;
	inst.dlen = lp->ctx_len << 3;
	inst.dptr = rte_mempool_virt2iova(lp);
	inst.rptr = 0;
	inst.cptr = rte_mempool_virt2iova(lp);
	inst.egrp  = OTX2_CPT_EGRP_SE;

	/* Clear the remaining instruction words not set via named fields */
	inst.u64[0] = 0;
	inst.u64[2] = 0;
	inst.u64[3] = 0;
	inst.res_addr = rte_mempool_virt2iova(res);

	/* Make the instruction globally visible before LMT submission */
	rte_io_wmb();

	do {
		/* Copy CPT command to LMTLINE */
		otx2_lmt_mov(lmtline, &inst, 2);
		lmt_status = otx2_lmt_submit(qptr->lf_nq_reg);
	} while (lmt_status == 0); /* retry until the LMT store commits */

	time_out = rte_get_timer_cycles() +
			DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();

	/* Busy-poll the completion word hardware updates when done */
	while (res->compcode == CPT_9X_COMP_E_NOTDONE) {
		if (rte_get_timer_cycles() > time_out) {
			rte_mempool_put(qptr->meta_info.pool, mdata);
			otx2_err("Request timed out");
			return -ETIMEDOUT;
		}
		rte_io_rmb();
	}

	if (unlikely(res->compcode != CPT_9X_COMP_E_GOOD)) {
		/* Hardware completion error: propagate the raw code */
		ret = res->compcode;
		switch (ret) {
		case CPT_9X_COMP_E_INSTERR:
			otx2_err("Request failed with instruction error");
			break;
		case CPT_9X_COMP_E_FAULT:
			otx2_err("Request failed with DMA fault");
			break;
		case CPT_9X_COMP_E_HWERR:
			otx2_err("Request failed with hardware error");
			break;
		default:
			otx2_err("Request failed with unknown hardware "
				 "completion code : 0x%x", ret);
		}
		goto mempool_put;
	}

	if (unlikely(res->uc_compcode != OTX2_IPSEC_PO_CC_SUCCESS)) {
		/* Microcode-level completion error */
		ret = res->uc_compcode;
		switch (ret) {
		case OTX2_IPSEC_PO_CC_AUTH_UNSUPPORTED:
			otx2_err("Invalid auth type");
			break;
		case OTX2_IPSEC_PO_CC_ENCRYPT_UNSUPPORTED:
			otx2_err("Invalid encrypt type");
			break;
		default:
			otx2_err("Request failed with unknown microcode "
				 "completion code : 0x%x", ret);
		}
	}

mempool_put:
	rte_mempool_put(qptr->meta_info.pool, mdata);
	return ret;
}
178
179 static void
180 set_session_misc_attributes(struct otx2_sec_session_ipsec_lp *sess,
181                             struct rte_crypto_sym_xform *crypto_xform,
182                             struct rte_crypto_sym_xform *auth_xform,
183                             struct rte_crypto_sym_xform *cipher_xform)
184 {
185         if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
186                 sess->iv_offset = crypto_xform->aead.iv.offset;
187                 sess->iv_length = crypto_xform->aead.iv.length;
188                 sess->aad_length = crypto_xform->aead.aad_length;
189                 sess->mac_len = crypto_xform->aead.digest_length;
190         } else {
191                 sess->iv_offset = cipher_xform->cipher.iv.offset;
192                 sess->iv_length = cipher_xform->cipher.iv.length;
193                 sess->auth_iv_offset = auth_xform->auth.iv.offset;
194                 sess->auth_iv_length = auth_xform->auth.iv.length;
195                 sess->mac_len = auth_xform->auth.digest_length;
196         }
197 }
198
/*
 * Create an egress (outbound) lookaside IPsec session: populate the
 * outbound SA (control word, IP/UDP header template, keys), compute the
 * microcode command words, and flush the SA to CPT with a
 * WRITE_IPSEC_OUTB command via queue pair 0.
 *
 * Returns 0 on success, a negative errno on invalid parameters, or the
 * error from otx2_cpt_enq_sa_write() on SA write failure.
 */
static int
crypto_sec_ipsec_outb_session_create(struct rte_cryptodev *crypto_dev,
				     struct rte_security_ipsec_xform *ipsec,
				     struct rte_crypto_sym_xform *crypto_xform,
				     struct rte_security_session *sec_sess)
{
	struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
	struct otx2_ipsec_po_ip_template *template = NULL;
	const uint8_t *cipher_key, *auth_key;
	struct otx2_sec_session_ipsec_lp *lp;
	struct otx2_ipsec_po_sa_ctl *ctl;
	int cipher_key_len, auth_key_len;
	struct otx2_ipsec_po_out_sa *sa;
	struct otx2_sec_session *sess;
	struct otx2_cpt_inst_s inst;
	struct rte_ipv6_hdr *ip6;
	struct rte_ipv4_hdr *ip;
	int ret, ctx_len;

	sess = get_sec_session_private_data(sec_sess);
	sess->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	lp = &sess->ipsec.lp;

	sa = &lp->out_sa;
	ctl = &sa->ctl;
	if (ctl->valid) {
		otx2_err("SA already registered");
		return -EINVAL;
	}

	memset(sa, 0, sizeof(struct otx2_ipsec_po_out_sa));

	/* Initialize lookaside ipsec private data */
	lp->ip_id = 0;
	lp->seq_lo = 1;
	lp->seq_hi = 0;

	ret = ipsec_po_sa_ctl_set(ipsec, crypto_xform, ctl);
	if (ret)
		return ret;

	ret = ipsec_lp_len_precalc(ipsec, crypto_xform, lp);
	if (ret)
		return ret;

	/* Start ip id from 1 */
	lp->ip_id = 1;

	/*
	 * Pick the IP header template slot for the negotiated algorithms.
	 * ctx_len is the SA size in 8-byte words, assuming an IPv4
	 * template; it is recomputed below for IPv6 tunnels.
	 */
	if (ctl->enc_type == OTX2_IPSEC_PO_SA_ENC_AES_GCM) {
		template = &sa->aes_gcm.template;
		ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
				aes_gcm.template) + sizeof(
				sa->aes_gcm.template.ip4);
		ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
		lp->ctx_len = ctx_len >> 3;
	} else if (ctl->auth_type ==
			OTX2_IPSEC_PO_SA_AUTH_SHA1) {
		template = &sa->sha1.template;
		ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
				sha1.template) + sizeof(
				sa->sha1.template.ip4);
		ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
		lp->ctx_len = ctx_len >> 3;
	} else if (ctl->auth_type ==
			OTX2_IPSEC_PO_SA_AUTH_SHA2_256) {
		template = &sa->sha2.template;
		ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
				sha2.template) + sizeof(
				sa->sha2.template.ip4);
		ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
		lp->ctx_len = ctx_len >> 3;
	} else {
		return -EINVAL;
	}
	ip = &template->ip4.ipv4_hdr;
	if (ipsec->options.udp_encap) {
		/*
		 * NAT-T encapsulation: UDP port 4500 stored byte-swapped.
		 * NOTE(review): rte_cpu_to_be_16() would be the
		 * conventional direction here; the swap result is
		 * identical, so behavior is unchanged.
		 */
		ip->next_proto_id = IPPROTO_UDP;
		template->ip4.udp_src = rte_be_to_cpu_16(4500);
		template->ip4.udp_dst = rte_be_to_cpu_16(4500);
	} else {
		ip->next_proto_id = IPPROTO_ESP;
	}

	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			/* Fill the outer IPv4 header template */
			ip->version_ihl = RTE_IPV4_VHL_DEF;
			ip->time_to_live = ipsec->tunnel.ipv4.ttl;
			ip->type_of_service |= (ipsec->tunnel.ipv4.dscp << 2);
			if (ipsec->tunnel.ipv4.df)
				ip->fragment_offset = BIT(14);
			memcpy(&ip->src_addr, &ipsec->tunnel.ipv4.src_ip,
				sizeof(struct in_addr));
			memcpy(&ip->dst_addr, &ipsec->tunnel.ipv4.dst_ip,
				sizeof(struct in_addr));
		} else if (ipsec->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {

			/*
			 * IPv6 tunnel: re-select the template and
			 * recompute ctx_len using the larger ip6
			 * template size.
			 */
			if (ctl->enc_type == OTX2_IPSEC_PO_SA_ENC_AES_GCM) {
				template = &sa->aes_gcm.template;
				ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
						aes_gcm.template) + sizeof(
						sa->aes_gcm.template.ip6);
				ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
				lp->ctx_len = ctx_len >> 3;
			} else if (ctl->auth_type ==
					OTX2_IPSEC_PO_SA_AUTH_SHA1) {
				template = &sa->sha1.template;
				ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
						sha1.template) + sizeof(
						sa->sha1.template.ip6);
				ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
				lp->ctx_len = ctx_len >> 3;
			} else if (ctl->auth_type ==
					OTX2_IPSEC_PO_SA_AUTH_SHA2_256) {
				template = &sa->sha2.template;
				ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
						sha2.template) + sizeof(
						sa->sha2.template.ip6);
				ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
				lp->ctx_len = ctx_len >> 3;
			} else {
				return -EINVAL;
			}

			/* Fill the outer IPv6 header template */
			ip6 = &template->ip6.ipv6_hdr;
			if (ipsec->options.udp_encap) {
				ip6->proto = IPPROTO_UDP;
				template->ip6.udp_src = rte_be_to_cpu_16(4500);
				template->ip6.udp_dst = rte_be_to_cpu_16(4500);
			} else {
				ip6->proto = (ipsec->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			}
			/* version 6 + DSCP (traffic class) + flow label */
			ip6->vtc_flow = rte_cpu_to_be_32(0x60000000 |
				((ipsec->tunnel.ipv6.dscp <<
					RTE_IPV6_HDR_TC_SHIFT) &
					RTE_IPV6_HDR_TC_MASK) |
				((ipsec->tunnel.ipv6.flabel <<
					RTE_IPV6_HDR_FL_SHIFT) &
					RTE_IPV6_HDR_FL_MASK));
			ip6->hop_limits = ipsec->tunnel.ipv6.hlimit;
			memcpy(&ip6->src_addr, &ipsec->tunnel.ipv6.src_addr,
				sizeof(struct in6_addr));
			memcpy(&ip6->dst_addr, &ipsec->tunnel.ipv6.dst_addr,
				sizeof(struct in6_addr));
		}
	}

	/* For egress, the cipher transform comes first in the chain */
	cipher_xform = crypto_xform;
	auth_xform = crypto_xform->next;

	cipher_key_len = 0;
	auth_key_len = 0;

	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		/* GCM salt becomes the fixed nonce part of the IV */
		if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
			memcpy(sa->iv.gcm.nonce, &ipsec->salt, 4);
		cipher_key = crypto_xform->aead.key.data;
		cipher_key_len = crypto_xform->aead.key.length;
	} else {
		cipher_key = cipher_xform->cipher.key.data;
		cipher_key_len = cipher_xform->cipher.key.length;
		auth_key = auth_xform->auth.key.data;
		auth_key_len = auth_xform->auth.key.length;

		if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
			memcpy(sa->sha1.hmac_key, auth_key, auth_key_len);
		else if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA256_HMAC)
			memcpy(sa->sha2.hmac_key, auth_key, auth_key_len);
	}

	if (cipher_key_len != 0)
		memcpy(sa->cipher_key, cipher_key, cipher_key_len);
	else
		return -EINVAL;

	/* Build instruction word 7 (engine group + SA context pointer) */
	inst.u64[7] = 0;
	inst.egrp = OTX2_CPT_EGRP_SE;
	inst.cptr = rte_mempool_virt2iova(sa);

	lp->cpt_inst_w7 = inst.u64[7];
	lp->ucmd_opcode = (lp->ctx_len << 8) |
				(OTX2_IPSEC_PO_PROCESS_IPSEC_OUTB);

	/* Set per packet IV and IKEv2 bits */
	lp->ucmd_param1 = BIT(11) | BIT(9);
	lp->ucmd_param2 = 0;

	set_session_misc_attributes(lp, crypto_xform,
				    auth_xform, cipher_xform);

	return otx2_cpt_enq_sa_write(lp, crypto_dev->data->queue_pairs[0],
				     OTX2_IPSEC_PO_WRITE_IPSEC_OUTB);
}
394
395 static int
396 crypto_sec_ipsec_inb_session_create(struct rte_cryptodev *crypto_dev,
397                                     struct rte_security_ipsec_xform *ipsec,
398                                     struct rte_crypto_sym_xform *crypto_xform,
399                                     struct rte_security_session *sec_sess)
400 {
401         struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
402         const uint8_t *cipher_key, *auth_key;
403         struct otx2_sec_session_ipsec_lp *lp;
404         struct otx2_ipsec_po_sa_ctl *ctl;
405         int cipher_key_len, auth_key_len;
406         struct otx2_ipsec_po_in_sa *sa;
407         struct otx2_sec_session *sess;
408         struct otx2_cpt_inst_s inst;
409         int ret;
410
411         sess = get_sec_session_private_data(sec_sess);
412         sess->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
413         lp = &sess->ipsec.lp;
414
415         sa = &lp->in_sa;
416         ctl = &sa->ctl;
417
418         if (ctl->valid) {
419                 otx2_err("SA already registered");
420                 return -EINVAL;
421         }
422
423         memset(sa, 0, sizeof(struct otx2_ipsec_po_in_sa));
424         sa->replay_win_sz = ipsec->replay_win_sz;
425
426         ret = ipsec_po_sa_ctl_set(ipsec, crypto_xform, ctl);
427         if (ret)
428                 return ret;
429
430         auth_xform = crypto_xform;
431         cipher_xform = crypto_xform->next;
432
433         cipher_key_len = 0;
434         auth_key_len = 0;
435
436         if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
437                 if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
438                         memcpy(sa->iv.gcm.nonce, &ipsec->salt, 4);
439                 cipher_key = crypto_xform->aead.key.data;
440                 cipher_key_len = crypto_xform->aead.key.length;
441
442                 lp->ctx_len = offsetof(struct otx2_ipsec_po_in_sa,
443                                             aes_gcm.hmac_key[0]) >> 3;
444                 RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_AES_GCM_INB_CTX_LEN);
445         } else {
446                 cipher_key = cipher_xform->cipher.key.data;
447                 cipher_key_len = cipher_xform->cipher.key.length;
448                 auth_key = auth_xform->auth.key.data;
449                 auth_key_len = auth_xform->auth.key.length;
450
451                 if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC) {
452                         memcpy(sa->aes_gcm.hmac_key, auth_key, auth_key_len);
453                         lp->ctx_len = offsetof(struct otx2_ipsec_po_in_sa,
454                                                     aes_gcm.selector) >> 3;
455                 } else if (auth_xform->auth.algo ==
456                                 RTE_CRYPTO_AUTH_SHA256_HMAC) {
457                         memcpy(sa->sha2.hmac_key, auth_key, auth_key_len);
458                         lp->ctx_len = offsetof(struct otx2_ipsec_po_in_sa,
459                                                     sha2.selector) >> 3;
460                 }
461         }
462
463         if (cipher_key_len != 0)
464                 memcpy(sa->cipher_key, cipher_key, cipher_key_len);
465         else
466                 return -EINVAL;
467
468         inst.u64[7] = 0;
469         inst.egrp = OTX2_CPT_EGRP_SE;
470         inst.cptr = rte_mempool_virt2iova(sa);
471
472         lp->cpt_inst_w7 = inst.u64[7];
473         lp->ucmd_opcode = (lp->ctx_len << 8) |
474                                 (OTX2_IPSEC_PO_PROCESS_IPSEC_INB);
475         lp->ucmd_param1 = 0;
476
477         /* Set IKEv2 bit */
478         lp->ucmd_param2 = BIT(12);
479
480         set_session_misc_attributes(lp, crypto_xform,
481                                     auth_xform, cipher_xform);
482
483         if (sa->replay_win_sz) {
484                 if (sa->replay_win_sz > OTX2_IPSEC_MAX_REPLAY_WIN_SZ) {
485                         otx2_err("Replay window size is not supported");
486                         return -ENOTSUP;
487                 }
488                 sa->replay = rte_zmalloc(NULL, sizeof(struct otx2_ipsec_replay),
489                                 0);
490                 if (sa->replay == NULL)
491                         return -ENOMEM;
492
493                 /* Set window bottom to 1, base and top to size of window */
494                 sa->replay->winb = 1;
495                 sa->replay->wint = sa->replay_win_sz;
496                 sa->replay->base = sa->replay_win_sz;
497                 sa->esn_low = 0;
498                 sa->esn_hi = 0;
499         }
500
501         return otx2_cpt_enq_sa_write(lp, crypto_dev->data->queue_pairs[0],
502                                      OTX2_IPSEC_PO_WRITE_IPSEC_INB);
503 }
504
505 static int
506 crypto_sec_ipsec_session_create(struct rte_cryptodev *crypto_dev,
507                                 struct rte_security_ipsec_xform *ipsec,
508                                 struct rte_crypto_sym_xform *crypto_xform,
509                                 struct rte_security_session *sess)
510 {
511         int ret;
512
513         if (crypto_dev->data->queue_pairs[0] == NULL) {
514                 otx2_err("Setup cpt queue pair before creating sec session");
515                 return -EPERM;
516         }
517
518         ret = ipsec_po_xform_verify(ipsec, crypto_xform);
519         if (ret)
520                 return ret;
521
522         if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
523                 return crypto_sec_ipsec_inb_session_create(crypto_dev, ipsec,
524                                                            crypto_xform, sess);
525         else
526                 return crypto_sec_ipsec_outb_session_create(crypto_dev, ipsec,
527                                                             crypto_xform, sess);
528 }
529
530 static int
531 otx2_crypto_sec_session_create(void *device,
532                                struct rte_security_session_conf *conf,
533                                struct rte_security_session *sess,
534                                struct rte_mempool *mempool)
535 {
536         struct otx2_sec_session *priv;
537         int ret;
538
539         if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
540                 return -ENOTSUP;
541
542         if (rte_security_dynfield_register() < 0)
543                 return -rte_errno;
544
545         if (rte_mempool_get(mempool, (void **)&priv)) {
546                 otx2_err("Could not allocate security session private data");
547                 return -ENOMEM;
548         }
549
550         set_sec_session_private_data(sess, priv);
551
552         priv->userdata = conf->userdata;
553
554         if (conf->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
555                 ret = crypto_sec_ipsec_session_create(device, &conf->ipsec,
556                                                       conf->crypto_xform,
557                                                       sess);
558         else
559                 ret = -ENOTSUP;
560
561         if (ret)
562                 goto mempool_put;
563
564         return 0;
565
566 mempool_put:
567         rte_mempool_put(mempool, priv);
568         set_sec_session_private_data(sess, NULL);
569         return ret;
570 }
571
572 static int
573 otx2_crypto_sec_session_destroy(void *device __rte_unused,
574                                 struct rte_security_session *sess)
575 {
576         struct otx2_sec_session *priv;
577         struct rte_mempool *sess_mp;
578
579         priv = get_sec_session_private_data(sess);
580
581         if (priv == NULL)
582                 return 0;
583
584         sess_mp = rte_mempool_from_obj(priv);
585
586         memset(priv, 0, sizeof(*priv));
587
588         set_sec_session_private_data(sess, NULL);
589         rte_mempool_put(sess_mp, priv);
590
591         return 0;
592 }
593
/* Size of the driver-private session data; used to size session mempools */
static unsigned int
otx2_crypto_sec_session_get_size(void *device __rte_unused)
{
	return sizeof(struct otx2_sec_session);
}
599
/*
 * rte_security set_pkt_metadata callback: store the security session
 * pointer in the mbuf's security dynamic field so the datapath can
 * locate the SA for this packet.
 */
static int
otx2_crypto_sec_set_pkt_mdata(void *device __rte_unused,
			      struct rte_security_session *session,
			      struct rte_mbuf *m, void *params __rte_unused)
{
	/* Set security session as the pkt metadata */
	*rte_security_dynfield(m) = (rte_security_dynfield_t)session;

	return 0;
}
610
/*
 * rte_security get_userdata callback: the 64-bit metadata word is
 * interpreted directly as the application userdata pointer.
 * NOTE(review): presumably @md carries the value stored in
 * priv->userdata at session create — confirm against the datapath.
 */
static int
otx2_crypto_sec_get_userdata(void *device __rte_unused, uint64_t md,
			     void **userdata)
{
	/* Retrieve userdata  */
	*userdata = (void *)md;

	return 0;
}
620
/* rte_security operations exposed through the device's security context */
static struct rte_security_ops otx2_crypto_sec_ops = {
	.session_create		= otx2_crypto_sec_session_create,
	.session_destroy	= otx2_crypto_sec_session_destroy,
	.session_get_size	= otx2_crypto_sec_session_get_size,
	.set_pkt_metadata	= otx2_crypto_sec_set_pkt_mdata,
	.get_userdata		= otx2_crypto_sec_get_userdata,
	.capabilities_get	= otx2_crypto_sec_capabilities_get
};
629
630 int
631 otx2_crypto_sec_ctx_create(struct rte_cryptodev *cdev)
632 {
633         struct rte_security_ctx *ctx;
634
635         ctx = rte_malloc("otx2_cpt_dev_sec_ctx",
636                          sizeof(struct rte_security_ctx), 0);
637
638         if (ctx == NULL)
639                 return -ENOMEM;
640
641         /* Populate ctx */
642         ctx->device = cdev;
643         ctx->ops = &otx2_crypto_sec_ops;
644         ctx->sess_cnt = 0;
645
646         cdev->security_ctx = ctx;
647
648         return 0;
649 }
650
651 void
652 otx2_crypto_sec_ctx_destroy(struct rte_cryptodev *cdev)
653 {
654         rte_free(cdev->security_ctx);
655 }