/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_udp.h>

#include "cnxk_security.h"

#include "roc_api.h"

static void
ipsec_hmac_opad_ipad_gen(struct rte_crypto_sym_xform *auth_xform,
                         uint8_t *hmac_opad_ipad)
{
        const uint8_t *key = auth_xform->auth.key.data;
        uint32_t length = auth_xform->auth.key.length;
        uint8_t opad[128] = {[0 ... 127] = 0x5c};
        uint8_t ipad[128] = {[0 ... 127] = 0x36};
        uint32_t i;

        /* HMAC OPAD and IPAD */
        for (i = 0; i < 127 && i < length; i++) {
                opad[i] = opad[i] ^ key[i];
                ipad[i] = ipad[i] ^ key[i];
        }

        /* Precompute hash of HMAC OPAD and IPAD to avoid
         * per packet computation
         */
        switch (auth_xform->auth.algo) {
        case RTE_CRYPTO_AUTH_SHA1_HMAC:
                roc_hash_sha1_gen(opad, (uint32_t *)&hmac_opad_ipad[0]);
                roc_hash_sha1_gen(ipad, (uint32_t *)&hmac_opad_ipad[24]);
                break;
        default:
                break;
        }
}

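/*
 * Editorial note on the precomputation above (a sketch, not driver API):
 * RFC 2104 defines HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)). Since
 * K, opad and ipad are fixed for the lifetime of the SA, the digests of
 * (K ^ opad) and (K ^ ipad) can be computed once at SA creation and stored
 * in hmac_opad_ipad[]; the datapath then only hashes the packet payload on
 * top of the saved state. A minimal illustration, assuming a generic
 * incremental SHA-1 API (sha1_init/update/final are illustrative names,
 * not part of this driver, and 64 is the SHA-1 block size):
 *
 *      struct sha1_ctx inner, outer;
 *
 *      sha1_init(&inner);
 *      sha1_update(&inner, k_ipad, 64);        // fixed per SA
 *      sha1_update(&inner, pkt, pkt_len);      // per packet
 *      sha1_final(&inner, inner_digest);
 *
 *      sha1_init(&outer);
 *      sha1_update(&outer, k_opad, 64);        // fixed per SA
 *      sha1_update(&outer, inner_digest, 20);
 *      sha1_final(&outer, hmac);
 */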
static int
ot_ipsec_sa_common_param_fill(union roc_ot_ipsec_sa_word2 *w2,
                              uint8_t *cipher_key, uint8_t *salt_key,
                              uint8_t *hmac_opad_ipad,
                              struct rte_security_ipsec_xform *ipsec_xfrm,
                              struct rte_crypto_sym_xform *crypto_xfrm)
{
        struct rte_crypto_sym_xform *auth_xfrm, *cipher_xfrm;
        const uint8_t *key;
        uint32_t *tmp_salt;
        uint64_t *tmp_key;
        int length, i;

        /* Set direction */
        switch (ipsec_xfrm->direction) {
        case RTE_SECURITY_IPSEC_SA_DIR_INGRESS:
                w2->s.dir = ROC_IE_SA_DIR_INBOUND;
                auth_xfrm = crypto_xfrm;
                cipher_xfrm = crypto_xfrm->next;
                break;
        case RTE_SECURITY_IPSEC_SA_DIR_EGRESS:
                w2->s.dir = ROC_IE_SA_DIR_OUTBOUND;
                cipher_xfrm = crypto_xfrm;
                auth_xfrm = crypto_xfrm->next;
                break;
        default:
                return -EINVAL;
        }

        /* Set protocol - ESP vs AH */
        switch (ipsec_xfrm->proto) {
        case RTE_SECURITY_IPSEC_SA_PROTO_ESP:
                w2->s.protocol = ROC_IE_SA_PROTOCOL_ESP;
                break;
        case RTE_SECURITY_IPSEC_SA_PROTO_AH:
                w2->s.protocol = ROC_IE_SA_PROTOCOL_AH;
                break;
        default:
                return -EINVAL;
        }

        /* Set mode - transport vs tunnel */
        switch (ipsec_xfrm->mode) {
        case RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT:
                w2->s.mode = ROC_IE_SA_MODE_TRANSPORT;
                break;
        case RTE_SECURITY_IPSEC_SA_MODE_TUNNEL:
                w2->s.mode = ROC_IE_SA_MODE_TUNNEL;
                break;
        default:
                return -EINVAL;
        }

        /* Set encryption algorithm */
        if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
                key = crypto_xfrm->aead.key.data;
                length = crypto_xfrm->aead.key.length;

                switch (crypto_xfrm->aead.algo) {
                case RTE_CRYPTO_AEAD_AES_GCM:
                        w2->s.enc_type = ROC_IE_OT_SA_ENC_AES_GCM;
                        w2->s.auth_type = ROC_IE_OT_SA_AUTH_NULL;
                        memcpy(salt_key, &ipsec_xfrm->salt, 4);
                        tmp_salt = (uint32_t *)salt_key;
                        *tmp_salt = rte_be_to_cpu_32(*tmp_salt);
                        break;
                default:
                        return -ENOTSUP;
                }
        } else {
                switch (cipher_xfrm->cipher.algo) {
                case RTE_CRYPTO_CIPHER_AES_CBC:
                        w2->s.enc_type = ROC_IE_OT_SA_ENC_AES_CBC;
                        break;
                default:
                        return -ENOTSUP;
                }

                switch (auth_xfrm->auth.algo) {
                case RTE_CRYPTO_AUTH_SHA1_HMAC:
                        w2->s.auth_type = ROC_IE_OT_SA_AUTH_SHA1;
                        break;
                default:
                        return -ENOTSUP;
                }

                key = cipher_xfrm->cipher.key.data;
                length = cipher_xfrm->cipher.key.length;

                ipsec_hmac_opad_ipad_gen(auth_xfrm, hmac_opad_ipad);

                tmp_key = (uint64_t *)hmac_opad_ipad;
                for (i = 0;
                     i < (int)(ROC_CTX_MAX_OPAD_IPAD_LEN / sizeof(uint64_t));
                     i++)
                        tmp_key[i] = rte_be_to_cpu_64(tmp_key[i]);
        }

        /* Set encapsulation type */
        if (ipsec_xfrm->options.udp_encap)
                w2->s.encap_type = ROC_IE_OT_SA_ENCAP_UDP;

        w2->s.spi = ipsec_xfrm->spi;

        /* Copy encryption key */
        memcpy(cipher_key, key, length);
        tmp_key = (uint64_t *)cipher_key;
        for (i = 0; i < (int)(ROC_CTX_MAX_CKEY_LEN / sizeof(uint64_t)); i++)
                tmp_key[i] = rte_be_to_cpu_64(tmp_key[i]);

        switch (length) {
        case ROC_CPT_AES128_KEY_LEN:
                w2->s.aes_key_len = ROC_IE_SA_AES_KEY_LEN_128;
                break;
        case ROC_CPT_AES192_KEY_LEN:
                w2->s.aes_key_len = ROC_IE_SA_AES_KEY_LEN_192;
                break;
        case ROC_CPT_AES256_KEY_LEN:
                w2->s.aes_key_len = ROC_IE_SA_AES_KEY_LEN_256;
                break;
        default:
                return -EINVAL;
        }

        if (ipsec_xfrm->life.packets_soft_limit != 0 ||
            ipsec_xfrm->life.packets_hard_limit != 0) {
                if (ipsec_xfrm->life.bytes_soft_limit != 0 ||
                    ipsec_xfrm->life.bytes_hard_limit != 0) {
                        plt_err("Expiry tracking with both packets & bytes is not supported");
                        return -EINVAL;
                }
                w2->s.life_unit = ROC_IE_OT_SA_LIFE_UNIT_PKTS;
        }

        if (ipsec_xfrm->life.bytes_soft_limit != 0 ||
            ipsec_xfrm->life.bytes_hard_limit != 0) {
                if (ipsec_xfrm->life.packets_soft_limit != 0 ||
                    ipsec_xfrm->life.packets_hard_limit != 0) {
                        plt_err("Expiry tracking with both packets & bytes is not supported");
                        return -EINVAL;
                }
                w2->s.life_unit = ROC_IE_OT_SA_LIFE_UNIT_OCTETS;
        }

        return 0;
}

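/*
 * Editorial example: how the crypto_xfrm chain handed to the helper above
 * is expected to be ordered, derived from its direction switch. For
 * AES-CBC + HMAC-SHA1, an ingress SA chains auth -> cipher and an egress
 * SA chains cipher -> auth; an AEAD SA (AES-GCM) is a single xform. A
 * rough sketch of an egress chain, where cbc_key (16 bytes) and hmac_key
 * (20 bytes) are hypothetical caller-owned buffers:
 *
 *      struct rte_crypto_sym_xform auth = {
 *              .type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *              .auth = {
 *                      .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *                      .key = { .data = hmac_key, .length = 20 },
 *              },
 *              .next = NULL,
 *      };
 *      struct rte_crypto_sym_xform cipher = {
 *              .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *              .cipher = {
 *                      .algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *                      .key = { .data = cbc_key, .length = 16 },
 *              },
 *              .next = &auth,
 *      };
 *
 * &cipher would then be passed as crypto_xfrm for an egress SA.
 */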
static size_t
ot_ipsec_inb_ctx_size(struct roc_ot_ipsec_inb_sa *sa)
{
        size_t size;

        /* Variable based on Anti-replay Window */
        size = offsetof(struct roc_ot_ipsec_inb_sa, ctx) +
               offsetof(struct roc_ot_ipsec_inb_ctx_update_reg, ar_winbits);

        if (sa->w0.s.ar_win)
                size += (1 << (sa->w0.s.ar_win - 1)) * sizeof(uint64_t);

        return size;
}

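/*
 * Editorial worked example: w0.s.ar_win is encoded as
 * log2(replay_win_sz) - 5 (see cnxk_ot_ipsec_inb_sa_fill() below), so the
 * ar_winbits area above grows as (1 << (ar_win - 1)) * 8 bytes:
 *
 *      replay_win_sz = 64   -> ar_win = 1 -> 8 bytes of ar_winbits
 *      replay_win_sz = 1024 -> ar_win = 5 -> 128 bytes of ar_winbits
 *
 * i.e. exactly replay_win_sz / 8 bytes of window bitmap per SA.
 */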
static int
ot_ipsec_inb_tunnel_hdr_fill(struct roc_ot_ipsec_inb_sa *sa,
                             struct rte_security_ipsec_xform *ipsec_xfrm)
{
        struct rte_security_ipsec_tunnel_param *tunnel;

        if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
                return 0;

        if (ipsec_xfrm->options.tunnel_hdr_verify == 0)
                return 0;

        tunnel = &ipsec_xfrm->tunnel;

        switch (tunnel->type) {
        case RTE_SECURITY_IPSEC_TUNNEL_IPV4:
                sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
                memcpy(&sa->outer_hdr.ipv4.src_addr, &tunnel->ipv4.src_ip,
                       sizeof(struct in_addr));
                memcpy(&sa->outer_hdr.ipv4.dst_addr, &tunnel->ipv4.dst_ip,
                       sizeof(struct in_addr));

                /* IP Source and Dest are in LE/CPU endian */
                sa->outer_hdr.ipv4.src_addr =
                        rte_be_to_cpu_32(sa->outer_hdr.ipv4.src_addr);
                sa->outer_hdr.ipv4.dst_addr =
                        rte_be_to_cpu_32(sa->outer_hdr.ipv4.dst_addr);

                break;
        case RTE_SECURITY_IPSEC_TUNNEL_IPV6:
                sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_6;
                memcpy(&sa->outer_hdr.ipv6.src_addr, &tunnel->ipv6.src_addr,
                       sizeof(struct in6_addr));
                memcpy(&sa->outer_hdr.ipv6.dst_addr, &tunnel->ipv6.dst_addr,
                       sizeof(struct in6_addr));

                break;
        default:
                return -EINVAL;
        }

        switch (ipsec_xfrm->options.tunnel_hdr_verify) {
        case RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR:
                sa->w2.s.ip_hdr_verify = ROC_IE_OT_SA_IP_HDR_VERIFY_DST_ADDR;
                break;
        case RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR:
                sa->w2.s.ip_hdr_verify =
                        ROC_IE_OT_SA_IP_HDR_VERIFY_SRC_DST_ADDR;
                break;
        default:
                return -ENOTSUP;
        }

        return 0;
}

int
cnxk_ot_ipsec_inb_sa_fill(struct roc_ot_ipsec_inb_sa *sa,
                          struct rte_security_ipsec_xform *ipsec_xfrm,
                          struct rte_crypto_sym_xform *crypto_xfrm)
{
        union roc_ot_ipsec_sa_word2 w2;
        uint32_t replay_win_sz;
        size_t offset;
        int rc;

        w2.u64 = 0;
        rc = ot_ipsec_sa_common_param_fill(&w2, sa->cipher_key, sa->w8.s.salt,
                                           sa->hmac_opad_ipad, ipsec_xfrm,
                                           crypto_xfrm);
        if (rc)
                return rc;

        /* Update common word2 data */
        sa->w2.u64 = w2.u64;

        /* Only power-of-two window sizes are supported */
        replay_win_sz = ipsec_xfrm->replay_win_sz;
        if (replay_win_sz) {
                if (!rte_is_power_of_2(replay_win_sz) ||
                    replay_win_sz > ROC_AR_WIN_SIZE_MAX)
                        return -ENOTSUP;

                sa->w0.s.ar_win = rte_log2_u32(replay_win_sz) - 5;
        }

        rc = ot_ipsec_inb_tunnel_hdr_fill(sa, ipsec_xfrm);
        if (rc)
                return rc;

        /* Default options for pkt_out and pkt_fmt are with
         * second pass meta and no defrag.
         */
        sa->w0.s.pkt_format = ROC_IE_OT_SA_PKT_FMT_META;
        sa->w0.s.pkt_output = ROC_IE_OT_SA_PKT_OUTPUT_HW_BASED_DEFRAG;
        sa->w0.s.pkind = ROC_OT_CPT_META_PKIND;

        /* ESN */
        sa->w2.s.esn_en = !!ipsec_xfrm->options.esn;
        if (ipsec_xfrm->options.udp_encap) {
                sa->w10.s.udp_src_port = 4500;
                sa->w10.s.udp_dst_port = 4500;
        }

        offset = offsetof(struct roc_ot_ipsec_inb_sa, ctx);
        /* Word offset for HW managed SA field */
        sa->w0.s.hw_ctx_off = offset / 8;
        /* Context push size for inbound spans up to hw_ctx including
         * ar_base field, in 8-byte units
         */
        sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off + 1;
        /* Entire context size in 128B units */
        sa->w0.s.ctx_size =
                (PLT_ALIGN_CEIL(ot_ipsec_inb_ctx_size(sa), ROC_CTX_UNIT_128B) /
                 ROC_CTX_UNIT_128B) -
                1;

        /* CPT MC triggers expiry when counter value changes from 2 to 1. To
         * mitigate this behaviour add 1 to the life counter values provided.
         */

        if (ipsec_xfrm->life.bytes_soft_limit) {
                sa->ctx.soft_life = ipsec_xfrm->life.bytes_soft_limit + 1;
                sa->w0.s.soft_life_dec = 1;
        }

        if (ipsec_xfrm->life.packets_soft_limit) {
                sa->ctx.soft_life = ipsec_xfrm->life.packets_soft_limit + 1;
                sa->w0.s.soft_life_dec = 1;
        }

        if (ipsec_xfrm->life.bytes_hard_limit) {
                sa->ctx.hard_life = ipsec_xfrm->life.bytes_hard_limit + 1;
                sa->w0.s.hard_life_dec = 1;
        }

        if (ipsec_xfrm->life.packets_hard_limit) {
                sa->ctx.hard_life = ipsec_xfrm->life.packets_hard_limit + 1;
                sa->w0.s.hard_life_dec = 1;
        }

        /* There are two words of CPT_CTX_HW_S for ucode to skip */
        sa->w0.s.ctx_hdr_size = 1;
        sa->w0.s.aop_valid = 1;

        rte_wmb();

        /* Enable SA */
        sa->w2.s.valid = 1;
        return 0;
}

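/*
 * Editorial usage sketch for cnxk_ot_ipsec_inb_sa_fill() (not part of the
 * driver): an ingress ESP SA using AES-128-GCM with a 64-entry replay
 * window. The SA memory is assumed to be zero-initialised and suitably
 * aligned by the caller; gcm_key (16 bytes) and salt_be32 are hypothetical
 * caller-provided values.
 *
 *      struct rte_crypto_sym_xform aead = {
 *              .type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *              .aead = {
 *                      .algo = RTE_CRYPTO_AEAD_AES_GCM,
 *                      .key = { .data = gcm_key, .length = 16 },
 *              },
 *      };
 *      struct rte_security_ipsec_xform ipsec = {
 *              .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
 *              .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *              .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *              .spi = 0x100,
 *              .salt = salt_be32,
 *              .replay_win_sz = 64,
 *      };
 *
 *      rc = cnxk_ot_ipsec_inb_sa_fill(sa, &ipsec, &aead);
 *      if (rc)
 *              return rc;
 */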
int
cnxk_ot_ipsec_outb_sa_fill(struct roc_ot_ipsec_outb_sa *sa,
                           struct rte_security_ipsec_xform *ipsec_xfrm,
                           struct rte_crypto_sym_xform *crypto_xfrm)
{
        struct rte_security_ipsec_tunnel_param *tunnel = &ipsec_xfrm->tunnel;
        union roc_ot_ipsec_sa_word2 w2;
        size_t offset;
        int rc;

        w2.u64 = 0;
        rc = ot_ipsec_sa_common_param_fill(&w2, sa->cipher_key, sa->iv.s.salt,
                                           sa->hmac_opad_ipad, ipsec_xfrm,
                                           crypto_xfrm);
        if (rc)
                return rc;

        /* Update common word2 data */
        sa->w2.u64 = w2.u64;

        if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
                goto skip_tunnel_info;

        /* Tunnel header info */
        switch (tunnel->type) {
        case RTE_SECURITY_IPSEC_TUNNEL_IPV4:
                sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
                memcpy(&sa->outer_hdr.ipv4.src_addr, &tunnel->ipv4.src_ip,
                       sizeof(struct in_addr));
                memcpy(&sa->outer_hdr.ipv4.dst_addr, &tunnel->ipv4.dst_ip,
                       sizeof(struct in_addr));

                /* IP Source and Dest are in LE/CPU endian */
                sa->outer_hdr.ipv4.src_addr =
                        rte_be_to_cpu_32(sa->outer_hdr.ipv4.src_addr);
                sa->outer_hdr.ipv4.dst_addr =
                        rte_be_to_cpu_32(sa->outer_hdr.ipv4.dst_addr);

                /* Outer header DF bit source */
                if (!ipsec_xfrm->options.copy_df) {
                        sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
                                ROC_IE_OT_SA_COPY_FROM_SA;
                        sa->w10.s.ipv4_df_or_ipv6_flw_lbl = tunnel->ipv4.df;
                } else {
                        sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
                                ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
                }

                /* Outer header DSCP source */
                if (!ipsec_xfrm->options.copy_dscp) {
                        sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_SA;
                        sa->w10.s.dscp = tunnel->ipv4.dscp;
                } else {
                        sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
                }
                break;
        case RTE_SECURITY_IPSEC_TUNNEL_IPV6:
                sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_6;
                memcpy(&sa->outer_hdr.ipv6.src_addr, &tunnel->ipv6.src_addr,
                       sizeof(struct in6_addr));
                memcpy(&sa->outer_hdr.ipv6.dst_addr, &tunnel->ipv6.dst_addr,
                       sizeof(struct in6_addr));

                /* Outer header flow label source */
                if (!ipsec_xfrm->options.copy_flabel) {
                        sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
                                ROC_IE_OT_SA_COPY_FROM_SA;

                        sa->w10.s.ipv4_df_or_ipv6_flw_lbl = tunnel->ipv6.flabel;
                } else {
                        sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
                                ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
                }

                /* Outer header DSCP source */
                if (!ipsec_xfrm->options.copy_dscp) {
                        sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_SA;
                        sa->w10.s.dscp = tunnel->ipv6.dscp;
                } else {
                        sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
                }
                break;
        default:
                return -EINVAL;
        }

        /* Default options of DSCP and Flow label/DF */
        sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_SA;
        sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src = ROC_IE_OT_SA_COPY_FROM_SA;

skip_tunnel_info:
        /* ESN */
        sa->w0.s.esn_en = !!ipsec_xfrm->options.esn;

        if (ipsec_xfrm->options.udp_encap) {
                sa->w10.s.udp_src_port = 4500;
                sa->w10.s.udp_dst_port = 4500;
        }

        offset = offsetof(struct roc_ot_ipsec_outb_sa, ctx);
        /* Word offset for HW managed SA field */
        sa->w0.s.hw_ctx_off = offset / 8;
        /* Context push size is up to hmac_opad_ipad */
        sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off;
        /* Entire context size in 128B units */
        offset = sizeof(struct roc_ot_ipsec_outb_sa);
        sa->w0.s.ctx_size = (PLT_ALIGN_CEIL(offset, ROC_CTX_UNIT_128B) /
                             ROC_CTX_UNIT_128B) -
                            1;

        /* IPID gen */
        sa->w2.s.ipid_gen = 1;

        /* CPT MC triggers expiry when counter value changes from 2 to 1. To
         * mitigate this behaviour add 1 to the life counter values provided.
         */

        if (ipsec_xfrm->life.bytes_soft_limit) {
                sa->ctx.soft_life = ipsec_xfrm->life.bytes_soft_limit + 1;
                sa->w0.s.soft_life_dec = 1;
        }

        if (ipsec_xfrm->life.packets_soft_limit) {
                sa->ctx.soft_life = ipsec_xfrm->life.packets_soft_limit + 1;
                sa->w0.s.soft_life_dec = 1;
        }

        if (ipsec_xfrm->life.bytes_hard_limit) {
                sa->ctx.hard_life = ipsec_xfrm->life.bytes_hard_limit + 1;
                sa->w0.s.hard_life_dec = 1;
        }

        if (ipsec_xfrm->life.packets_hard_limit) {
                sa->ctx.hard_life = ipsec_xfrm->life.packets_hard_limit + 1;
                sa->w0.s.hard_life_dec = 1;
        }

        /* There are two words of CPT_CTX_HW_S for ucode to skip */
        sa->w0.s.ctx_hdr_size = 1;
        sa->w0.s.aop_valid = 1;

        rte_wmb();

        /* Enable SA */
        sa->w2.s.valid = 1;
        return 0;
}

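/*
 * Editorial usage sketch for cnxk_ot_ipsec_outb_sa_fill() (not part of the
 * driver): an egress ESP SA in IPv4 tunnel mode where DF and DSCP come
 * from the SA rather than the inner header (copy_df/copy_dscp left at 0,
 * which maps to ROC_IE_OT_SA_COPY_FROM_SA above). src_ip_be/dst_ip_be are
 * hypothetical addresses in network byte order, and cbc_auth_chain stands
 * for an egress cipher->auth xform chain such as the one sketched after
 * ot_ipsec_sa_common_param_fill().
 *
 *      struct rte_security_ipsec_xform ipsec = {
 *              .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *              .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *              .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *              .spi = 0x200,
 *              .tunnel = {
 *                      .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
 *                      .ipv4 = {
 *                              .src_ip.s_addr = src_ip_be,
 *                              .dst_ip.s_addr = dst_ip_be,
 *                              .dscp = 0,
 *                              .df = 1,
 *                      },
 *              },
 *      };
 *
 *      rc = cnxk_ot_ipsec_outb_sa_fill(sa, &ipsec, cbc_auth_chain);
 */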
bool
cnxk_ot_ipsec_inb_sa_valid(struct roc_ot_ipsec_inb_sa *sa)
{
        return !!sa->w2.s.valid;
}

bool
cnxk_ot_ipsec_outb_sa_valid(struct roc_ot_ipsec_outb_sa *sa)
{
        return !!sa->w2.s.valid;
}

uint8_t
cnxk_ipsec_ivlen_get(enum rte_crypto_cipher_algorithm c_algo,
                     enum rte_crypto_auth_algorithm a_algo,
                     enum rte_crypto_aead_algorithm aead_algo)
{
        uint8_t ivlen = 0;

        if (aead_algo == RTE_CRYPTO_AEAD_AES_GCM)
                ivlen = 8;

        switch (c_algo) {
        case RTE_CRYPTO_CIPHER_AES_CTR:
                ivlen = 8;
                break;
        case RTE_CRYPTO_CIPHER_3DES_CBC:
                ivlen = ROC_CPT_DES_BLOCK_LENGTH;
                break;
        case RTE_CRYPTO_CIPHER_AES_CBC:
                ivlen = ROC_CPT_AES_BLOCK_LENGTH;
                break;
        default:
                break;
        }

        switch (a_algo) {
        case RTE_CRYPTO_AUTH_AES_GMAC:
                ivlen = 8;
                break;
        default:
                break;
        }

        return ivlen;
}

uint8_t
cnxk_ipsec_icvlen_get(enum rte_crypto_cipher_algorithm c_algo,
                      enum rte_crypto_auth_algorithm a_algo,
                      enum rte_crypto_aead_algorithm aead_algo)
{
        uint8_t icv = 0;

        (void)c_algo;

        switch (a_algo) {
        case RTE_CRYPTO_AUTH_NULL:
                icv = 0;
                break;
        case RTE_CRYPTO_AUTH_SHA1_HMAC:
                icv = 12;
                break;
        case RTE_CRYPTO_AUTH_SHA256_HMAC:
        case RTE_CRYPTO_AUTH_AES_GMAC:
                icv = 16;
                break;
        case RTE_CRYPTO_AUTH_SHA384_HMAC:
                icv = 24;
                break;
        case RTE_CRYPTO_AUTH_SHA512_HMAC:
                icv = 32;
                break;
        default:
                break;
        }

        switch (aead_algo) {
        case RTE_CRYPTO_AEAD_AES_GCM:
                icv = 16;
                break;
        default:
                break;
        }

        return icv;
}

uint8_t
cnxk_ipsec_outb_roundup_byte(enum rte_crypto_cipher_algorithm c_algo,
                             enum rte_crypto_aead_algorithm aead_algo)
{
        uint8_t roundup_byte = 4;

        if (aead_algo == RTE_CRYPTO_AEAD_AES_GCM)
                return roundup_byte;

        switch (c_algo) {
        case RTE_CRYPTO_CIPHER_AES_CTR:
                roundup_byte = 4;
                break;
        case RTE_CRYPTO_CIPHER_AES_CBC:
                roundup_byte = 16;
                break;
        case RTE_CRYPTO_CIPHER_3DES_CBC:
                roundup_byte = 8;
                break;
        case RTE_CRYPTO_CIPHER_NULL:
                roundup_byte = 4;
                break;
        default:
                break;
        }

        return roundup_byte;
}

int
cnxk_ipsec_outb_rlens_get(struct cnxk_ipsec_outb_rlens *rlens,
                          struct rte_security_ipsec_xform *ipsec_xfrm,
                          struct rte_crypto_sym_xform *crypto_xfrm)
{
        struct rte_security_ipsec_tunnel_param *tunnel = &ipsec_xfrm->tunnel;
        enum rte_crypto_cipher_algorithm c_algo = RTE_CRYPTO_CIPHER_NULL;
        enum rte_crypto_auth_algorithm a_algo = RTE_CRYPTO_AUTH_NULL;
        enum rte_crypto_aead_algorithm aead_algo = 0;
        uint16_t partial_len = 0;
        uint8_t roundup_byte = 0;
        int8_t roundup_len = 0;

        memset(rlens, 0, sizeof(struct cnxk_ipsec_outb_rlens));

        /* Get Cipher and Auth algo */
        if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
                aead_algo = crypto_xfrm->aead.algo;
        } else {
                if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
                        c_algo = crypto_xfrm->cipher.algo;
                else
                        a_algo = crypto_xfrm->auth.algo;

                if (crypto_xfrm->next) {
                        if (crypto_xfrm->next->type ==
                            RTE_CRYPTO_SYM_XFORM_CIPHER)
                                c_algo = crypto_xfrm->next->cipher.algo;
                        else
                                a_algo = crypto_xfrm->next->auth.algo;
                }
        }

        if (ipsec_xfrm->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
                partial_len = ROC_CPT_ESP_HDR_LEN;
                roundup_len = ROC_CPT_ESP_TRL_LEN;
        } else {
                partial_len = ROC_CPT_AH_HDR_LEN;
        }

        if (ipsec_xfrm->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
                if (tunnel->type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
                        partial_len += ROC_CPT_TUNNEL_IPV4_HDR_LEN;
                else
                        partial_len += ROC_CPT_TUNNEL_IPV6_HDR_LEN;
        }

        partial_len += cnxk_ipsec_ivlen_get(c_algo, a_algo, aead_algo);
        partial_len += cnxk_ipsec_icvlen_get(c_algo, a_algo, aead_algo);
        roundup_byte = cnxk_ipsec_outb_roundup_byte(c_algo, aead_algo);

        if (ipsec_xfrm->options.udp_encap)
                partial_len += sizeof(struct rte_udp_hdr);

        rlens->partial_len = partial_len;
        rlens->roundup_len = roundup_len;
        rlens->roundup_byte = roundup_byte;
        rlens->max_extended_len = partial_len + roundup_len + roundup_byte;
        return 0;
}
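
/*
 * Editorial worked example for cnxk_ipsec_outb_rlens_get() (assuming the
 * ROC_CPT_* macros carry the standard on-wire sizes: ESP header 8B, ESP
 * trailer pad-len/next-header 2B, outer IPv4 header 20B): for an ESP SA in
 * IPv4 tunnel mode with AES-128-GCM and no UDP encapsulation,
 *
 *      partial_len      = 8 (ESP) + 20 (outer IPv4) + 8 (IV) + 16 (ICV) = 52
 *      roundup_len      = 2 (ESP trailer)
 *      roundup_byte     = 4 (GCM pads the payload to a 4-byte boundary)
 *      max_extended_len = 52 + 2 + 4 = 58
 *
 * i.e. a caller can budget up to 58 extra bytes of per-packet expansion for
 * this SA, before adding any UDP encapsulation overhead.
 */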