crypto/cnxk: add copy and set DF
drivers/crypto/cnxk/cn9k_ipsec.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <cryptodev_pmd.h>
#include <rte_ip.h>
#include <rte_security.h>
#include <rte_security_driver.h>

#include "cnxk_cryptodev.h"
#include "cnxk_cryptodev_ops.h"
#include "cnxk_ipsec.h"
#include "cnxk_security.h"
#include "cn9k_ipsec.h"

#include "roc_api.h"

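/*
 * Write an SA to CPT hardware context memory. The SA contents are submitted
 * as a single CPT instruction through the queue pair's LMTLINE, and the
 * function busy-polls the result memory until the hardware reports
 * completion or the timeout expires.
 */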
static inline int
cn9k_cpt_enq_sa_write(struct cn9k_ipsec_sa *sa, struct cnxk_cpt_qp *qp,
                      uint8_t opcode, size_t ctx_len)
{
        struct roc_cpt *roc_cpt = qp->lf.roc_cpt;
        uint64_t lmtline = qp->lmtline.lmt_base;
        uint64_t io_addr = qp->lmtline.io_addr;
        uint64_t lmt_status, time_out;
        struct cpt_cn9k_res_s *res;
        struct cpt_inst_s inst;
        uint64_t *mdata;
        int ret = 0;

        if (unlikely(rte_mempool_get(qp->meta_info.pool, (void **)&mdata) < 0))
                return -ENOMEM;

        res = (struct cpt_cn9k_res_s *)RTE_PTR_ALIGN(mdata, 16);
        res->compcode = CPT_COMP_NOT_DONE;

        inst.w4.s.opcode_major = opcode;
        inst.w4.s.opcode_minor = ctx_len >> 3;
        inst.w4.s.param1 = 0;
        inst.w4.s.param2 = 0;
        inst.w4.s.dlen = ctx_len;
        inst.dptr = rte_mempool_virt2iova(sa);
        inst.rptr = 0;
        inst.w7.s.cptr = rte_mempool_virt2iova(sa);
        inst.w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];

        inst.w0.u64 = 0;
        inst.w2.u64 = 0;
        inst.w3.u64 = 0;
        inst.res_addr = rte_mempool_virt2iova(res);

        rte_io_wmb();

        do {
                /* Copy CPT command to LMTLINE */
                roc_lmt_mov((void *)lmtline, &inst, 2);
                lmt_status = roc_lmt_submit_ldeor(io_addr);
        } while (lmt_status == 0);

        time_out = rte_get_timer_cycles() +
                   DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();

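        /* Busy-poll until the hardware updates the result or the timeout hits */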
        while (res->compcode == CPT_COMP_NOT_DONE) {
                if (rte_get_timer_cycles() > time_out) {
                        rte_mempool_put(qp->meta_info.pool, mdata);
                        plt_err("Request timed out");
                        return -ETIMEDOUT;
                }
                rte_io_rmb();
        }

        if (unlikely(res->compcode != CPT_COMP_GOOD)) {
                ret = res->compcode;
                switch (ret) {
                case CPT_COMP_INSTERR:
                        plt_err("Request failed with instruction error");
                        break;
                case CPT_COMP_FAULT:
                        plt_err("Request failed with DMA fault");
                        break;
                case CPT_COMP_HWERR:
                        plt_err("Request failed with hardware error");
                        break;
                default:
                        plt_err("Request failed with unknown hardware "
                                "completion code : 0x%x",
                                ret);
                }
                ret = -EINVAL;
                goto mempool_put;
        }

        if (unlikely(res->uc_compcode != ROC_IE_ON_UCC_SUCCESS)) {
                ret = res->uc_compcode;
                switch (ret) {
                case ROC_IE_ON_AUTH_UNSUPPORTED:
                        plt_err("Invalid auth type");
                        break;
                case ROC_IE_ON_ENCRYPT_UNSUPPORTED:
                        plt_err("Invalid encrypt type");
                        break;
                default:
                        plt_err("Request failed with unknown microcode "
                                "completion code : 0x%x",
                                ret);
                }
                ret = -ENOTSUP;
        }

mempool_put:
        rte_mempool_put(qp->meta_info.pool, mdata);
        return ret;
}

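/*
 * Translate the rte_security IPsec and crypto transforms into the SA control
 * word: direction, mode, protocol, cipher/auth selection, AES key length and
 * the ESN, UDP encapsulation and copy-DF options. Marks the SA valid last.
 */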
static inline int
ipsec_sa_ctl_set(struct rte_security_ipsec_xform *ipsec,
                 struct rte_crypto_sym_xform *crypto_xform,
                 struct roc_ie_on_sa_ctl *ctl)
{
        struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
        int aes_key_len = 0;

        if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
                ctl->direction = ROC_IE_SA_DIR_OUTBOUND;
                cipher_xform = crypto_xform;
                auth_xform = crypto_xform->next;
        } else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
                ctl->direction = ROC_IE_SA_DIR_INBOUND;
                auth_xform = crypto_xform;
                cipher_xform = crypto_xform->next;
        } else {
                return -EINVAL;
        }

        if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
                if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
                        ctl->outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
                else if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV6)
                        ctl->outer_ip_ver = ROC_IE_SA_IP_VERSION_6;
                else
                        return -EINVAL;
        }

        if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
                ctl->ipsec_mode = ROC_IE_SA_MODE_TRANSPORT;
                ctl->outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
        } else if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
                ctl->ipsec_mode = ROC_IE_SA_MODE_TUNNEL;
        else
                return -EINVAL;

        if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
                ctl->ipsec_proto = ROC_IE_SA_PROTOCOL_AH;
        else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
                ctl->ipsec_proto = ROC_IE_SA_PROTOCOL_ESP;
        else
                return -EINVAL;

        if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
                switch (crypto_xform->aead.algo) {
                case RTE_CRYPTO_AEAD_AES_GCM:
                        ctl->enc_type = ROC_IE_ON_SA_ENC_AES_GCM;
                        aes_key_len = crypto_xform->aead.key.length;
                        break;
                default:
                        plt_err("Unsupported AEAD algorithm");
                        return -ENOTSUP;
                }
        } else {
                switch (cipher_xform->cipher.algo) {
                case RTE_CRYPTO_CIPHER_NULL:
                        ctl->enc_type = ROC_IE_ON_SA_ENC_NULL;
                        break;
                case RTE_CRYPTO_CIPHER_AES_CBC:
                        ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CBC;
                        aes_key_len = cipher_xform->cipher.key.length;
                        break;
                case RTE_CRYPTO_CIPHER_AES_CTR:
                        ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CTR;
                        aes_key_len = cipher_xform->cipher.key.length;
                        break;
                default:
                        plt_err("Unsupported cipher algorithm");
                        return -ENOTSUP;
                }

                switch (auth_xform->auth.algo) {
                case RTE_CRYPTO_AUTH_NULL:
                        ctl->auth_type = ROC_IE_ON_SA_AUTH_NULL;
                        break;
                case RTE_CRYPTO_AUTH_MD5_HMAC:
                        ctl->auth_type = ROC_IE_ON_SA_AUTH_MD5;
                        break;
                case RTE_CRYPTO_AUTH_SHA1_HMAC:
                        ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA1;
                        break;
                case RTE_CRYPTO_AUTH_SHA224_HMAC:
                        ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA2_224;
                        break;
                case RTE_CRYPTO_AUTH_SHA256_HMAC:
                        ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA2_256;
                        break;
                case RTE_CRYPTO_AUTH_SHA384_HMAC:
                        ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA2_384;
                        break;
                case RTE_CRYPTO_AUTH_SHA512_HMAC:
                        ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA2_512;
                        break;
                case RTE_CRYPTO_AUTH_AES_GMAC:
                        ctl->auth_type = ROC_IE_ON_SA_AUTH_AES_GMAC;
                        break;
                case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
                        ctl->auth_type = ROC_IE_ON_SA_AUTH_AES_XCBC_128;
                        break;
                default:
                        plt_err("Unsupported auth algorithm");
                        return -ENOTSUP;
                }
        }

        /* Set AES key length */
        if (ctl->enc_type == ROC_IE_ON_SA_ENC_AES_CBC ||
            ctl->enc_type == ROC_IE_ON_SA_ENC_AES_CCM ||
            ctl->enc_type == ROC_IE_ON_SA_ENC_AES_CTR ||
            ctl->enc_type == ROC_IE_ON_SA_ENC_AES_GCM ||
            ctl->auth_type == ROC_IE_ON_SA_AUTH_AES_GMAC) {
                switch (aes_key_len) {
                case 16:
                        ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_128;
                        break;
                case 24:
                        ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_192;
                        break;
                case 32:
                        ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_256;
                        break;
                default:
                        plt_err("Invalid AES key length");
                        return -EINVAL;
                }
        }

        if (ipsec->options.esn)
                ctl->esn_en = 1;

        if (ipsec->options.udp_encap == 1)
                ctl->encap_type = ROC_IE_ON_SA_ENCAP_UDP;

        ctl->copy_df = ipsec->options.copy_df;

        ctl->spi = rte_cpu_to_be_32(ipsec->spi);

        rte_io_wmb();

        ctl->valid = 1;

        return 0;
}

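/*
 * Fill the fields shared by inbound and outbound SAs: the control word,
 * the GCM nonce/salt when applicable and the cipher key.
 */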
static inline int
fill_ipsec_common_sa(struct rte_security_ipsec_xform *ipsec,
                     struct rte_crypto_sym_xform *crypto_xform,
                     struct roc_ie_on_common_sa *common_sa)
{
        struct rte_crypto_sym_xform *cipher_xform;
        const uint8_t *cipher_key;
        int cipher_key_len = 0;
        int ret;

        if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
                cipher_xform = crypto_xform->next;
        else
                cipher_xform = crypto_xform;

        ret = ipsec_sa_ctl_set(ipsec, crypto_xform, &common_sa->ctl);
        if (ret)
                return ret;

        if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
                if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
                        memcpy(common_sa->iv.gcm.nonce, &ipsec->salt, 4);
                cipher_key = crypto_xform->aead.key.data;
                cipher_key_len = crypto_xform->aead.key.length;
        } else {
                cipher_key = cipher_xform->cipher.key.data;
                cipher_key_len = cipher_xform->cipher.key.length;
        }

        if (cipher_key_len != 0)
                memcpy(common_sa->cipher_key, cipher_key, cipher_key_len);

        return 0;
}

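/*
 * Create a lookaside outbound (egress) SA: populate the outbound SA and the
 * outer IP header template, precompute the per-packet CPT instruction words
 * and flush the SA to hardware context memory.
 */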
static int
cn9k_ipsec_outb_sa_create(struct cnxk_cpt_qp *qp,
                          struct rte_security_ipsec_xform *ipsec,
                          struct rte_crypto_sym_xform *crypto_xform,
                          struct rte_security_session *sec_sess)
{
        struct rte_crypto_sym_xform *auth_xform = crypto_xform->next;
        struct roc_ie_on_ip_template *template = NULL;
        struct roc_cpt *roc_cpt = qp->lf.roc_cpt;
        union roc_on_ipsec_outb_param1 param1;
        struct cnxk_cpt_inst_tmpl *inst_tmpl;
        struct roc_ie_on_outb_sa *out_sa;
        struct cn9k_sec_session *sess;
        struct roc_ie_on_sa_ctl *ctl;
        struct cn9k_ipsec_sa *sa;
        struct rte_ipv6_hdr *ip6;
        struct rte_ipv4_hdr *ip4;
        const uint8_t *auth_key;
        union cpt_inst_w4 w4;
        union cpt_inst_w7 w7;
        int auth_key_len = 0;
        size_t ctx_len;
        int ret;

        sess = get_sec_session_private_data(sec_sess);
        sa = &sess->sa;
        out_sa = &sa->out_sa;
        ctl = &out_sa->common_sa.ctl;

        memset(sa, 0, sizeof(struct cn9k_ipsec_sa));

        /* Initialize lookaside IPsec private data */
        sa->dir = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
        /* Start ip id from 1 */
        sa->ip_id = 1;
        sa->seq_lo = 1;
        sa->seq_hi = 0;

        ret = fill_ipsec_common_sa(ipsec, crypto_xform, &out_sa->common_sa);
        if (ret)
                return ret;

        ret = cnxk_ipsec_outb_rlens_get(&sa->rlens, ipsec, crypto_xform);
        if (ret)
                return ret;

        if (ctl->enc_type == ROC_IE_ON_SA_ENC_AES_GCM ||
            ctl->auth_type == ROC_IE_ON_SA_AUTH_NULL) {
                template = &out_sa->aes_gcm.template;
                ctx_len = offsetof(struct roc_ie_on_outb_sa, aes_gcm.template);
        } else {
                switch (ctl->auth_type) {
                case ROC_IE_ON_SA_AUTH_SHA1:
                        template = &out_sa->sha1.template;
                        ctx_len = offsetof(struct roc_ie_on_outb_sa,
                                           sha1.template);
                        break;
                case ROC_IE_ON_SA_AUTH_SHA2_256:
                case ROC_IE_ON_SA_AUTH_SHA2_384:
                case ROC_IE_ON_SA_AUTH_SHA2_512:
                        template = &out_sa->sha2.template;
                        ctx_len = offsetof(struct roc_ie_on_outb_sa,
                                           sha2.template);
                        break;
                case ROC_IE_ON_SA_AUTH_AES_XCBC_128:
                        template = &out_sa->aes_xcbc.template;
                        ctx_len = offsetof(struct roc_ie_on_outb_sa,
                                           aes_xcbc.template);
                        break;
                default:
                        plt_err("Unsupported auth algorithm");
                        return -EINVAL;
                }
        }

        ip4 = (struct rte_ipv4_hdr *)&template->ip4.ipv4_hdr;
        if (ipsec->options.udp_encap) {
                ip4->next_proto_id = IPPROTO_UDP;
                template->ip4.udp_src = rte_be_to_cpu_16(4500);
                template->ip4.udp_dst = rte_be_to_cpu_16(4500);
        } else {
                ip4->next_proto_id = IPPROTO_ESP;
        }

        if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
                if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
                        uint16_t frag_off = 0;
                        ctx_len += sizeof(template->ip4);

                        ip4->version_ihl = RTE_IPV4_VHL_DEF;
                        ip4->time_to_live = ipsec->tunnel.ipv4.ttl;
                        ip4->type_of_service |= (ipsec->tunnel.ipv4.dscp << 2);
                        if (ipsec->tunnel.ipv4.df)
                                frag_off |= RTE_IPV4_HDR_DF_FLAG;
                        ip4->fragment_offset = rte_cpu_to_be_16(frag_off);

                        memcpy(&ip4->src_addr, &ipsec->tunnel.ipv4.src_ip,
                               sizeof(struct in_addr));
                        memcpy(&ip4->dst_addr, &ipsec->tunnel.ipv4.dst_ip,
                               sizeof(struct in_addr));
                } else if (ipsec->tunnel.type ==
                           RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
                        ctx_len += sizeof(template->ip6);

                        ip6 = (struct rte_ipv6_hdr *)&template->ip6.ipv6_hdr;
                        if (ipsec->options.udp_encap) {
                                ip6->proto = IPPROTO_UDP;
                                template->ip6.udp_src = rte_be_to_cpu_16(4500);
                                template->ip6.udp_dst = rte_be_to_cpu_16(4500);
                        } else {
                                ip6->proto = (ipsec->proto ==
                                              RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
                                                     IPPROTO_ESP :
                                                     IPPROTO_AH;
                        }
                        ip6->vtc_flow =
                                rte_cpu_to_be_32(0x60000000 |
                                                 ((ipsec->tunnel.ipv6.dscp
                                                   << RTE_IPV6_HDR_TC_SHIFT) &
                                                  RTE_IPV6_HDR_TC_MASK) |
                                                 ((ipsec->tunnel.ipv6.flabel
                                                   << RTE_IPV6_HDR_FL_SHIFT) &
                                                  RTE_IPV6_HDR_FL_MASK));
                        ip6->hop_limits = ipsec->tunnel.ipv6.hlimit;
                        memcpy(&ip6->src_addr, &ipsec->tunnel.ipv6.src_addr,
                               sizeof(struct in6_addr));
                        memcpy(&ip6->dst_addr, &ipsec->tunnel.ipv6.dst_addr,
                               sizeof(struct in6_addr));
                }
        } else
                ctx_len += sizeof(template->ip4);

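        /* Round the context length up to an 8-byte boundary; opcode_minor
         * carries the length in 8-byte words.
         */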
        ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);

        if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
                sa->cipher_iv_off = crypto_xform->aead.iv.offset;
                sa->cipher_iv_len = crypto_xform->aead.iv.length;
        } else {
                sa->cipher_iv_off = crypto_xform->cipher.iv.offset;
                sa->cipher_iv_len = crypto_xform->cipher.iv.length;

                auth_key = auth_xform->auth.key.data;
                auth_key_len = auth_xform->auth.key.length;

                switch (auth_xform->auth.algo) {
                case RTE_CRYPTO_AUTH_NULL:
                        break;
                case RTE_CRYPTO_AUTH_SHA1_HMAC:
                        memcpy(out_sa->sha1.hmac_key, auth_key, auth_key_len);
                        break;
                case RTE_CRYPTO_AUTH_SHA256_HMAC:
                case RTE_CRYPTO_AUTH_SHA384_HMAC:
                case RTE_CRYPTO_AUTH_SHA512_HMAC:
                        memcpy(out_sa->sha2.hmac_key, auth_key, auth_key_len);
                        break;
                case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
                        memcpy(out_sa->aes_xcbc.key, auth_key, auth_key_len);
                        break;
                default:
                        plt_err("Unsupported auth algorithm %u",
                                auth_xform->auth.algo);
                        return -ENOTSUP;
                }
        }

        inst_tmpl = &sa->inst;

        w4.u64 = 0;
        w4.s.opcode_major = ROC_IE_ON_MAJOR_OP_PROCESS_OUTBOUND_IPSEC;
        w4.s.opcode_minor = ctx_len >> 3;

        param1.u16 = 0;
        param1.s.ikev2 = 1;
        param1.s.per_pkt_iv = 1;
        w4.s.param1 = param1.u16;

        inst_tmpl->w4 = w4.u64;

        w7.u64 = 0;
        w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
        w7.s.cptr = rte_mempool_virt2iova(out_sa);
        inst_tmpl->w7 = w7.u64;

        return cn9k_cpt_enq_sa_write(
                sa, qp, ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_OUTBOUND, ctx_len);
}

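/*
 * Create a lookaside inbound (ingress) SA: fill the inbound SA and
 * authentication key, precompute the per-packet CPT instruction words,
 * initialize the anti-replay window when requested and flush the SA to
 * hardware context memory.
 */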
static int
cn9k_ipsec_inb_sa_create(struct cnxk_cpt_qp *qp,
                         struct rte_security_ipsec_xform *ipsec,
                         struct rte_crypto_sym_xform *crypto_xform,
                         struct rte_security_session *sec_sess)
{
        struct rte_crypto_sym_xform *auth_xform = crypto_xform;
        struct roc_cpt *roc_cpt = qp->lf.roc_cpt;
        union roc_on_ipsec_inb_param2 param2;
        struct cnxk_cpt_inst_tmpl *inst_tmpl;
        struct roc_ie_on_inb_sa *in_sa;
        struct cn9k_sec_session *sess;
        struct cn9k_ipsec_sa *sa;
        const uint8_t *auth_key;
        union cpt_inst_w4 w4;
        union cpt_inst_w7 w7;
        int auth_key_len = 0;
        size_t ctx_len = 0;
        int ret;

        sess = get_sec_session_private_data(sec_sess);
        sa = &sess->sa;
        in_sa = &sa->in_sa;

        memset(sa, 0, sizeof(struct cn9k_ipsec_sa));

        sa->dir = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
        sa->replay_win_sz = ipsec->replay_win_sz;

        ret = fill_ipsec_common_sa(ipsec, crypto_xform, &in_sa->common_sa);
        if (ret)
                return ret;

        if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD ||
            auth_xform->auth.algo == RTE_CRYPTO_AUTH_NULL) {
                ctx_len = offsetof(struct roc_ie_on_inb_sa,
                                   sha1_or_gcm.hmac_key[0]);
        } else {
                auth_key = auth_xform->auth.key.data;
                auth_key_len = auth_xform->auth.key.length;

                switch (auth_xform->auth.algo) {
                case RTE_CRYPTO_AUTH_NULL:
                        break;
                case RTE_CRYPTO_AUTH_SHA1_HMAC:
                        memcpy(in_sa->sha1_or_gcm.hmac_key, auth_key,
                               auth_key_len);
                        ctx_len = offsetof(struct roc_ie_on_inb_sa,
                                           sha1_or_gcm.selector);
                        break;
                case RTE_CRYPTO_AUTH_SHA256_HMAC:
                case RTE_CRYPTO_AUTH_SHA384_HMAC:
                case RTE_CRYPTO_AUTH_SHA512_HMAC:
                        memcpy(in_sa->sha2.hmac_key, auth_key, auth_key_len);
                        ctx_len = offsetof(struct roc_ie_on_inb_sa,
                                           sha2.selector);
                        break;
                case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
                        memcpy(in_sa->aes_xcbc.key, auth_key, auth_key_len);
                        ctx_len = offsetof(struct roc_ie_on_inb_sa,
                                           aes_xcbc.selector);
                        break;
                default:
                        plt_err("Unsupported auth algorithm %u",
                                auth_xform->auth.algo);
                        return -ENOTSUP;
                }
        }

        inst_tmpl = &sa->inst;

        w4.u64 = 0;
        w4.s.opcode_major = ROC_IE_ON_MAJOR_OP_PROCESS_INBOUND_IPSEC;
        w4.s.opcode_minor = ctx_len >> 3;

        param2.u16 = 0;
        param2.s.ikev2 = 1;
        w4.s.param2 = param2.u16;

        inst_tmpl->w4 = w4.u64;

        w7.u64 = 0;
        w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
        w7.s.cptr = rte_mempool_virt2iova(in_sa);
        inst_tmpl->w7 = w7.u64;

        if (sa->replay_win_sz) {
                if (sa->replay_win_sz > CNXK_ON_AR_WIN_SIZE_MAX) {
                        plt_err("Replay window size:%u is not supported",
                                sa->replay_win_sz);
                        return -ENOTSUP;
                }

                /* Set window bottom to 1, base and top to size of window */
                sa->ar.winb = 1;
                sa->ar.wint = sa->replay_win_sz;
                sa->ar.base = sa->replay_win_sz;

                in_sa->common_sa.esn_low = 0;
                in_sa->common_sa.esn_hi = 0;
        }

        return cn9k_cpt_enq_sa_write(
                sa, qp, ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_INBOUND, ctx_len);
}

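/*
 * Reject configurations that the CN9K IPsec microcode does not support:
 * SA lifetime limits and a few transport mode cipher/auth combinations.
 */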
static inline int
cn9k_ipsec_xform_verify(struct rte_security_ipsec_xform *ipsec,
                        struct rte_crypto_sym_xform *crypto)
{
        if (ipsec->life.bytes_hard_limit != 0 ||
            ipsec->life.bytes_soft_limit != 0 ||
            ipsec->life.packets_hard_limit != 0 ||
            ipsec->life.packets_soft_limit != 0)
                return -ENOTSUP;

        if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
                enum rte_crypto_sym_xform_type type = crypto->type;

                if (type == RTE_CRYPTO_SYM_XFORM_AEAD) {
                        if ((crypto->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) &&
                            (crypto->aead.key.length == 32)) {
                                plt_err("Transport mode AES-256-GCM is not supported");
                                return -ENOTSUP;
                        }
                } else {
                        struct rte_crypto_cipher_xform *cipher;
                        struct rte_crypto_auth_xform *auth;

                        if (crypto->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
                                cipher = &crypto->cipher;
                                auth = &crypto->next->auth;
                        } else {
                                cipher = &crypto->next->cipher;
                                auth = &crypto->auth;
                        }

                        if ((cipher->algo == RTE_CRYPTO_CIPHER_AES_CBC) &&
                            (auth->algo == RTE_CRYPTO_AUTH_SHA256_HMAC)) {
                                plt_err("Transport mode AES-CBC SHA2 HMAC 256 is not supported");
                                return -ENOTSUP;
                        }

                        if ((cipher->algo == RTE_CRYPTO_CIPHER_AES_CBC) &&
                            (auth->algo == RTE_CRYPTO_AUTH_SHA384_HMAC)) {
                                plt_err("Transport mode AES-CBC SHA2 HMAC 384 is not supported");
                                return -ENOTSUP;
                        }

                        if ((cipher->algo == RTE_CRYPTO_CIPHER_AES_CBC) &&
                            (auth->algo == RTE_CRYPTO_AUTH_SHA512_HMAC)) {
                                plt_err("Transport mode AES-CBC SHA2 HMAC 512 is not supported");
                                return -ENOTSUP;
                        }

                        if ((cipher->algo == RTE_CRYPTO_CIPHER_AES_CBC) &&
                            (auth->algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC)) {
                                plt_err("Transport mode AES-CBC AES-XCBC is not supported");
                                return -ENOTSUP;
                        }
                }
        }

        return 0;
}

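/*
 * Validate the transforms and dispatch to inbound or outbound SA creation.
 * Queue pair 0 must already be set up since it is used to write the SA.
 */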
static int
cn9k_ipsec_session_create(void *dev,
                          struct rte_security_ipsec_xform *ipsec_xform,
                          struct rte_crypto_sym_xform *crypto_xform,
                          struct rte_security_session *sess)
{
        struct rte_cryptodev *crypto_dev = dev;
        struct cnxk_cpt_qp *qp;
        int ret;

        qp = crypto_dev->data->queue_pairs[0];
        if (qp == NULL) {
                plt_err("CPT queue pairs need to be setup for creating security"
                        " session");
                return -EPERM;
        }

        ret = cnxk_ipsec_xform_verify(ipsec_xform, crypto_xform);
        if (ret)
                return ret;

        ret = cn9k_ipsec_xform_verify(ipsec_xform, crypto_xform);
        if (ret)
                return ret;

        if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
                return cn9k_ipsec_inb_sa_create(qp, ipsec_xform, crypto_xform,
                                                sess);
        else
                return cn9k_ipsec_outb_sa_create(qp, ipsec_xform, crypto_xform,
                                                 sess);
}

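/* rte_security session_create callback for lookaside protocol (IPsec) */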
static int
cn9k_sec_session_create(void *device, struct rte_security_session_conf *conf,
                        struct rte_security_session *sess,
                        struct rte_mempool *mempool)
{
        struct cn9k_sec_session *priv;
        int ret;

        if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
                return -EINVAL;

        if (rte_mempool_get(mempool, (void **)&priv)) {
                plt_err("Could not allocate security session private data");
                return -ENOMEM;
        }

        memset(priv, 0, sizeof(*priv));

        set_sec_session_private_data(sess, priv);

        if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) {
                ret = -ENOTSUP;
                goto mempool_put;
        }

        ret = cn9k_ipsec_session_create(device, &conf->ipsec,
                                        conf->crypto_xform, sess);
        if (ret)
                goto mempool_put;

        return 0;

mempool_put:
        rte_mempool_put(mempool, priv);
        set_sec_session_private_data(sess, NULL);
        return ret;
}

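/*
 * rte_security session_destroy callback: clear the SA valid bit and release
 * the session private data back to its mempool.
 */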
static int
cn9k_sec_session_destroy(void *device __rte_unused,
                         struct rte_security_session *sess)
{
        struct roc_ie_on_outb_sa *out_sa;
        struct cn9k_sec_session *priv;
        struct rte_mempool *sess_mp;
        struct roc_ie_on_sa_ctl *ctl;
        struct cn9k_ipsec_sa *sa;

        priv = get_sec_session_private_data(sess);
        if (priv == NULL)
                return 0;

        sa = &priv->sa;
        out_sa = &sa->out_sa;

        ctl = &out_sa->common_sa.ctl;
        ctl->valid = 0;

        rte_io_wmb();

        sess_mp = rte_mempool_from_obj(priv);

        memset(priv, 0, sizeof(*priv));

        set_sec_session_private_data(sess, NULL);
        rte_mempool_put(sess_mp, priv);

        return 0;
}

static unsigned int
cn9k_sec_session_get_size(void *device __rte_unused)
{
        return sizeof(struct cn9k_sec_session);
}

/* Update platform specific security ops */
void
cn9k_sec_ops_override(void)
{
        /* Update platform specific ops */
        cnxk_sec_ops.session_create = cn9k_sec_session_create;
        cnxk_sec_ops.session_destroy = cn9k_sec_session_destroy;
        cnxk_sec_ops.session_get_size = cn9k_sec_session_get_size;
}