1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
7 #include "cnxk_security.h"
12 ipsec_hmac_opad_ipad_gen(struct rte_crypto_sym_xform *auth_xform,
13 uint8_t *hmac_opad_ipad)
15 const uint8_t *key = auth_xform->auth.key.data;
16 uint32_t length = auth_xform->auth.key.length;
17 uint8_t opad[128] = {[0 ... 127] = 0x5c};
18 uint8_t ipad[128] = {[0 ... 127] = 0x36};
21 /* HMAC OPAD and IPAD */
22 for (i = 0; i < 127 && i < length; i++) {
23 opad[i] = opad[i] ^ key[i];
24 ipad[i] = ipad[i] ^ key[i];
27 /* Precompute hash of HMAC OPAD and IPAD to avoid
28 * per packet computation
30 switch (auth_xform->auth.algo) {
31 case RTE_CRYPTO_AUTH_SHA1_HMAC:
32 roc_hash_sha1_gen(opad, (uint32_t *)&hmac_opad_ipad[0]);
33 roc_hash_sha1_gen(ipad, (uint32_t *)&hmac_opad_ipad[24]);
/*
 * Fill SA word2 plus key/salt/HMAC material common to cn10k (OT) inbound
 * and outbound IPsec SAs from the rte_security / rte_crypto transforms.
 *
 * NOTE(review): this listing is truncated -- the embedded original line
 * numbers skip values, so braces, break/return statements and default
 * cases are not visible here. Comments describe only the visible lines.
 */
41 ot_ipsec_sa_common_param_fill(union roc_ot_ipsec_sa_word2 *w2,
42 uint8_t *cipher_key, uint8_t *salt_key,
43 uint8_t *hmac_opad_ipad,
44 struct rte_security_ipsec_xform *ipsec_xfrm,
45 struct rte_crypto_sym_xform *crypto_xfrm)
47 struct rte_crypto_sym_xform *auth_xfrm, *cipher_xfrm;
/* Direction fixes the transform chain order: ingress is auth->cipher,
 * egress is cipher->auth (matches ipsec_xfrm_verify() below).
 */
54 switch (ipsec_xfrm->direction) {
55 case RTE_SECURITY_IPSEC_SA_DIR_INGRESS:
56 w2->s.dir = ROC_IE_SA_DIR_INBOUND;
57 auth_xfrm = crypto_xfrm;
58 cipher_xfrm = crypto_xfrm->next;
60 case RTE_SECURITY_IPSEC_SA_DIR_EGRESS:
61 w2->s.dir = ROC_IE_SA_DIR_OUTBOUND;
62 cipher_xfrm = crypto_xfrm;
63 auth_xfrm = crypto_xfrm->next;
69 /* Set protocol - ESP vs AH */
70 switch (ipsec_xfrm->proto) {
71 case RTE_SECURITY_IPSEC_SA_PROTO_ESP:
72 w2->s.protocol = ROC_IE_SA_PROTOCOL_ESP;
74 case RTE_SECURITY_IPSEC_SA_PROTO_AH:
75 w2->s.protocol = ROC_IE_SA_PROTOCOL_AH;
81 /* Set mode - transport vs tunnel */
82 switch (ipsec_xfrm->mode) {
83 case RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT:
84 w2->s.mode = ROC_IE_SA_MODE_TRANSPORT;
86 case RTE_SECURITY_IPSEC_SA_MODE_TUNNEL:
87 w2->s.mode = ROC_IE_SA_MODE_TUNNEL;
/* AEAD (AES-GCM) carries both encryption and authentication; the
 * 4-byte salt comes from the IPsec xform, not the crypto key.
 */
93 /* Set encryption algorithm */
94 if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
95 key = crypto_xfrm->aead.key.data;
96 length = crypto_xfrm->aead.key.length;
98 switch (crypto_xfrm->aead.algo) {
99 case RTE_CRYPTO_AEAD_AES_GCM:
100 w2->s.enc_type = ROC_IE_OT_SA_ENC_AES_GCM;
101 w2->s.auth_type = ROC_IE_OT_SA_AUTH_NULL;
102 memcpy(salt_key, &ipsec_xfrm->salt, 4);
/* Salt is stored in CPU byte order in the SA */
103 tmp_salt = (uint32_t *)salt_key;
104 *tmp_salt = rte_be_to_cpu_32(*tmp_salt);
/* Non-AEAD: separate cipher and auth transforms */
110 switch (cipher_xfrm->cipher.algo) {
111 case RTE_CRYPTO_CIPHER_AES_CBC:
112 w2->s.enc_type = ROC_IE_OT_SA_ENC_AES_CBC;
118 switch (auth_xfrm->auth.algo) {
119 case RTE_CRYPTO_AUTH_NULL:
120 w2->s.auth_type = ROC_IE_OT_SA_AUTH_NULL;
122 case RTE_CRYPTO_AUTH_SHA1_HMAC:
123 w2->s.auth_type = ROC_IE_OT_SA_AUTH_SHA1;
124 ipsec_hmac_opad_ipad_gen(auth_xfrm, hmac_opad_ipad);
/* HW expects the precomputed opad/ipad words byte-swapped */
126 tmp_key = (uint64_t *)hmac_opad_ipad;
127 for (i = 0; i < (int)(ROC_CTX_MAX_OPAD_IPAD_LEN /
130 tmp_key[i] = rte_be_to_cpu_64(tmp_key[i]);
136 key = cipher_xfrm->cipher.key.data;
137 length = cipher_xfrm->cipher.key.length;
140 /* Set encapsulation type */
141 if (ipsec_xfrm->options.udp_encap)
142 w2->s.encap_type = ROC_IE_OT_SA_ENCAP_UDP;
144 w2->s.spi = ipsec_xfrm->spi;
/* Copy the cipher key and swap each 64-bit word for the HW layout */
146 /* Copy encryption key */
147 memcpy(cipher_key, key, length);
148 tmp_key = (uint64_t *)cipher_key;
149 for (i = 0; i < (int)(ROC_CTX_MAX_CKEY_LEN / sizeof(uint64_t)); i++)
150 tmp_key[i] = rte_be_to_cpu_64(tmp_key[i]);
/* AES key-size field from the key length (switch head not visible) */
153 case ROC_CPT_AES128_KEY_LEN:
154 w2->s.aes_key_len = ROC_IE_SA_AES_KEY_LEN_128;
156 case ROC_CPT_AES192_KEY_LEN:
157 w2->s.aes_key_len = ROC_IE_SA_AES_KEY_LEN_192;
159 case ROC_CPT_AES256_KEY_LEN:
160 w2->s.aes_key_len = ROC_IE_SA_AES_KEY_LEN_256;
/* SA lifetime: packet-based and byte-based expiry are mutually
 * exclusive; mixing the two is rejected with an error.
 */
166 if (ipsec_xfrm->life.packets_soft_limit != 0 ||
167 ipsec_xfrm->life.packets_hard_limit != 0) {
168 if (ipsec_xfrm->life.bytes_soft_limit != 0 ||
169 ipsec_xfrm->life.bytes_hard_limit != 0) {
170 plt_err("Expiry tracking with both packets & bytes is not supported");
173 w2->s.life_unit = ROC_IE_OT_SA_LIFE_UNIT_PKTS;
176 if (ipsec_xfrm->life.bytes_soft_limit != 0 ||
177 ipsec_xfrm->life.bytes_hard_limit != 0) {
178 if (ipsec_xfrm->life.packets_soft_limit != 0 ||
179 ipsec_xfrm->life.packets_hard_limit != 0) {
180 plt_err("Expiry tracking with both packets & bytes is not supported");
183 w2->s.life_unit = ROC_IE_OT_SA_LIFE_UNIT_OCTETS;
190 ot_ipsec_inb_ctx_size(struct roc_ot_ipsec_inb_sa *sa)
194 /* Variable based on Anti-replay Window */
195 size = offsetof(struct roc_ot_ipsec_inb_sa, ctx) +
196 offsetof(struct roc_ot_ipsec_inb_ctx_update_reg, ar_winbits);
199 size += (1 << (sa->w0.s.ar_win - 1)) * sizeof(uint64_t);
/*
 * Program outer-tunnel header fields of the inbound SA, used only when
 * tunnel-header verification is requested. No-op for transport mode or when
 * tunnel_hdr_verify is off.
 *
 * NOTE(review): listing is truncated (returns, breaks and the default
 * cases of both switches are not visible).
 */
205 ot_ipsec_inb_tunnel_hdr_fill(struct roc_ot_ipsec_inb_sa *sa,
206 struct rte_security_ipsec_xform *ipsec_xfrm)
208 struct rte_security_ipsec_tunnel_param *tunnel;
/* Nothing to verify in transport mode or with verification disabled */
210 if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
213 if (ipsec_xfrm->options.tunnel_hdr_verify == 0)
216 tunnel = &ipsec_xfrm->tunnel;
218 switch (tunnel->type) {
219 case RTE_SECURITY_IPSEC_TUNNEL_IPV4:
220 sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
221 memcpy(&sa->outer_hdr.ipv4.src_addr, &tunnel->ipv4.src_ip,
222 sizeof(struct in_addr));
223 memcpy(&sa->outer_hdr.ipv4.dst_addr, &tunnel->ipv4.dst_ip,
224 sizeof(struct in_addr));
226 /* IP Source and Dest are in LE/CPU endian */
227 sa->outer_hdr.ipv4.src_addr =
228 rte_be_to_cpu_32(sa->outer_hdr.ipv4.src_addr);
229 sa->outer_hdr.ipv4.dst_addr =
230 rte_be_to_cpu_32(sa->outer_hdr.ipv4.dst_addr);
233 case RTE_SECURITY_IPSEC_TUNNEL_IPV6:
234 sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_6;
/* IPv6 addresses copied as-is (no endian swap, unlike IPv4 above) */
235 memcpy(&sa->outer_hdr.ipv6.src_addr, &tunnel->ipv6.src_addr,
236 sizeof(struct in6_addr));
237 memcpy(&sa->outer_hdr.ipv6.dst_addr, &tunnel->ipv6.dst_addr,
238 sizeof(struct in6_addr));
/* Map requested verification level to the HW ip_hdr_verify field */
245 switch (ipsec_xfrm->options.tunnel_hdr_verify) {
246 case RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR:
247 sa->w2.s.ip_hdr_verify = ROC_IE_OT_SA_IP_HDR_VERIFY_DST_ADDR;
249 case RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR:
250 sa->w2.s.ip_hdr_verify =
251 ROC_IE_OT_SA_IP_HDR_VERIFY_SRC_DST_ADDR;
/*
 * Populate a complete cn10k (OT) inbound IPsec SA: common crypto params,
 * anti-replay window, tunnel verification, packet format options, context
 * sizing and lifetime counters.
 *
 * NOTE(review): listing is truncated -- error-handling returns, SA memset,
 * and several assignments are not visible here.
 */
261 cnxk_ot_ipsec_inb_sa_fill(struct roc_ot_ipsec_inb_sa *sa,
262 struct rte_security_ipsec_xform *ipsec_xfrm,
263 struct rte_crypto_sym_xform *crypto_xfrm)
265 union roc_ot_ipsec_sa_word2 w2;
266 uint32_t replay_win_sz;
/* Fill direction/proto/mode/keys shared with the outbound path */
271 rc = ot_ipsec_sa_common_param_fill(&w2, sa->cipher_key, sa->w8.s.salt,
272 sa->hmac_opad_ipad, ipsec_xfrm,
277 /* Update common word2 data */
280 /* Only power-of-two window sizes are supported */
281 replay_win_sz = ipsec_xfrm->replay_win_sz;
283 if (!rte_is_power_of_2(replay_win_sz) ||
284 replay_win_sz > ROC_AR_WIN_SIZE_MAX)
/* ar_win is log2(window) - 5, i.e. encoded relative to a 32-entry base */
287 sa->w0.s.ar_win = rte_log2_u32(replay_win_sz) - 5;
290 rc = ot_ipsec_inb_tunnel_hdr_fill(sa, ipsec_xfrm);
294 /* Default options for pkt_out and pkt_fmt are with
295 * second pass meta and no defrag.
297 sa->w0.s.pkt_format = ROC_IE_OT_SA_PKT_FMT_META;
298 sa->w0.s.pkt_output = ROC_IE_OT_SA_PKT_OUTPUT_HW_BASED_DEFRAG;
299 sa->w0.s.pkind = ROC_OT_CPT_META_PKIND;
/* ESN and UDP encapsulation (NAT-T, fixed port 4500) options */
302 sa->w2.s.esn_en = !!ipsec_xfrm->options.esn;
303 if (ipsec_xfrm->options.udp_encap) {
304 sa->w10.s.udp_src_port = 4500;
305 sa->w10.s.udp_dst_port = 4500;
308 if (ipsec_xfrm->options.udp_ports_verify)
309 sa->w2.s.udp_ports_verify = 1;
311 offset = offsetof(struct roc_ot_ipsec_inb_sa, ctx);
312 /* Word offset for HW managed SA field */
313 sa->w0.s.hw_ctx_off = offset / 8;
314 /* Context push size for inbound spans up to hw_ctx including
315 * ar_base field, in 8b units
317 sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off + 1;
318 /* Entire context size in 128B units */
320 (PLT_ALIGN_CEIL(ot_ipsec_inb_ctx_size(sa), ROC_CTX_UNIT_128B) /
/* Lifetime counters:
325 * CPT MC triggers expiry when counter value changes from 2 to 1. To
326 * mitigate this behaviour add 1 to the life counter values provided.
 */
329 if (ipsec_xfrm->life.bytes_soft_limit) {
330 sa->ctx.soft_life = ipsec_xfrm->life.bytes_soft_limit + 1;
331 sa->w0.s.soft_life_dec = 1;
334 if (ipsec_xfrm->life.packets_soft_limit) {
335 sa->ctx.soft_life = ipsec_xfrm->life.packets_soft_limit + 1;
336 sa->w0.s.soft_life_dec = 1;
339 if (ipsec_xfrm->life.bytes_hard_limit) {
340 sa->ctx.hard_life = ipsec_xfrm->life.bytes_hard_limit + 1;
341 sa->w0.s.hard_life_dec = 1;
344 if (ipsec_xfrm->life.packets_hard_limit) {
345 sa->ctx.hard_life = ipsec_xfrm->life.packets_hard_limit + 1;
346 sa->w0.s.hard_life_dec = 1;
349 /* There are two words of CPT_CTX_HW_S for ucode to skip */
350 sa->w0.s.ctx_hdr_size = 1;
351 sa->w0.s.aop_valid = 1;
352 sa->w0.s.et_ovrwr = 1;
/*
 * Populate a complete cn10k (OT) outbound IPsec SA: common crypto params,
 * outer tunnel header (IPv4/IPv6), DF/flow-label/DSCP sources, UDP encap,
 * context sizing and lifetime counters.
 *
 * NOTE(review): listing is truncated -- error-handling returns, breaks,
 * else-branch braces and the skip_tunnel_info label target are not
 * fully visible here.
 */
362 cnxk_ot_ipsec_outb_sa_fill(struct roc_ot_ipsec_outb_sa *sa,
363 struct rte_security_ipsec_xform *ipsec_xfrm,
364 struct rte_crypto_sym_xform *crypto_xfrm)
366 struct rte_security_ipsec_tunnel_param *tunnel = &ipsec_xfrm->tunnel;
367 union roc_ot_ipsec_sa_word2 w2;
/* Fill direction/proto/mode/keys shared with the inbound path */
372 rc = ot_ipsec_sa_common_param_fill(&w2, sa->cipher_key, sa->iv.s.salt,
373 sa->hmac_opad_ipad, ipsec_xfrm,
378 /* Update common word2 data */
381 if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
382 goto skip_tunnel_info;
384 /* Tunnel header info */
385 switch (tunnel->type) {
386 case RTE_SECURITY_IPSEC_TUNNEL_IPV4:
387 sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
388 memcpy(&sa->outer_hdr.ipv4.src_addr, &tunnel->ipv4.src_ip,
389 sizeof(struct in_addr));
390 memcpy(&sa->outer_hdr.ipv4.dst_addr, &tunnel->ipv4.dst_ip,
391 sizeof(struct in_addr));
393 /* IP Source and Dest seems to be in LE/CPU endian */
394 sa->outer_hdr.ipv4.src_addr =
395 rte_be_to_cpu_32(sa->outer_hdr.ipv4.src_addr);
396 sa->outer_hdr.ipv4.dst_addr =
397 rte_be_to_cpu_32(sa->outer_hdr.ipv4.dst_addr);
/* DF bit: either fixed from the SA or copied from the inner header */
399 /* Outer header DF bit source */
400 if (!ipsec_xfrm->options.copy_df) {
401 sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
402 ROC_IE_OT_SA_COPY_FROM_SA;
403 sa->w10.s.ipv4_df_or_ipv6_flw_lbl = tunnel->ipv4.df;
405 sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
406 ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
409 /* Outer header DSCP source */
410 if (!ipsec_xfrm->options.copy_dscp) {
411 sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_SA;
412 sa->w10.s.dscp = tunnel->ipv4.dscp;
414 sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
417 case RTE_SECURITY_IPSEC_TUNNEL_IPV6:
418 sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_6;
/* IPv6 addresses copied as-is (no endian swap, unlike IPv4 above) */
419 memcpy(&sa->outer_hdr.ipv6.src_addr, &tunnel->ipv6.src_addr,
420 sizeof(struct in6_addr));
421 memcpy(&sa->outer_hdr.ipv6.dst_addr, &tunnel->ipv6.dst_addr,
422 sizeof(struct in6_addr));
/* Flow label shares the same SA field as the IPv4 DF source */
424 /* Outer header flow label source */
425 if (!ipsec_xfrm->options.copy_flabel) {
426 sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
427 ROC_IE_OT_SA_COPY_FROM_SA;
429 sa->w10.s.ipv4_df_or_ipv6_flw_lbl = tunnel->ipv6.flabel;
431 sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
432 ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
435 /* Outer header DSCP source */
436 if (!ipsec_xfrm->options.copy_dscp) {
437 sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_SA;
438 sa->w10.s.dscp = tunnel->ipv6.dscp;
440 sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
/* ESN and UDP encapsulation (NAT-T, fixed port 4500) options */
449 sa->w0.s.esn_en = !!ipsec_xfrm->options.esn;
451 if (ipsec_xfrm->options.udp_encap) {
452 sa->w10.s.udp_src_port = 4500;
453 sa->w10.s.udp_dst_port = 4500;
456 offset = offsetof(struct roc_ot_ipsec_outb_sa, ctx);
457 /* Word offset for HW managed SA field */
458 sa->w0.s.hw_ctx_off = offset / 8;
459 /* Context push size is up to hmac_opad_ipad */
460 sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off;
461 /* Entire context size in 128B units */
462 offset = sizeof(struct roc_ot_ipsec_outb_sa);
463 sa->w0.s.ctx_size = (PLT_ALIGN_CEIL(offset, ROC_CTX_UNIT_128B) /
/* Enable HW IP-ID generation for the outer header */
468 sa->w2.s.ipid_gen = 1;
/* Lifetime counters:
471 * CPT MC triggers expiry when counter value changes from 2 to 1. To
472 * mitigate this behaviour add 1 to the life counter values provided.
 */
475 if (ipsec_xfrm->life.bytes_soft_limit) {
476 sa->ctx.soft_life = ipsec_xfrm->life.bytes_soft_limit + 1;
477 sa->w0.s.soft_life_dec = 1;
480 if (ipsec_xfrm->life.packets_soft_limit) {
481 sa->ctx.soft_life = ipsec_xfrm->life.packets_soft_limit + 1;
482 sa->w0.s.soft_life_dec = 1;
485 if (ipsec_xfrm->life.bytes_hard_limit) {
486 sa->ctx.hard_life = ipsec_xfrm->life.bytes_hard_limit + 1;
487 sa->w0.s.hard_life_dec = 1;
490 if (ipsec_xfrm->life.packets_hard_limit) {
491 sa->ctx.hard_life = ipsec_xfrm->life.packets_hard_limit + 1;
492 sa->w0.s.hard_life_dec = 1;
495 /* There are two words of CPT_CTX_HW_S for ucode to skip */
496 sa->w0.s.ctx_hdr_size = 1;
497 sa->w0.s.aop_valid = 1;
507 cnxk_ot_ipsec_inb_sa_valid(struct roc_ot_ipsec_inb_sa *sa)
509 return !!sa->w2.s.valid;
513 cnxk_ot_ipsec_outb_sa_valid(struct roc_ot_ipsec_outb_sa *sa)
515 return !!sa->w2.s.valid;
519 ipsec_xfrm_verify(struct rte_security_ipsec_xform *ipsec_xfrm,
520 struct rte_crypto_sym_xform *crypto_xfrm)
522 if (crypto_xfrm->next == NULL)
525 if (ipsec_xfrm->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
526 if (crypto_xfrm->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
527 crypto_xfrm->next->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
530 if (crypto_xfrm->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
531 crypto_xfrm->next->type != RTE_CRYPTO_SYM_XFORM_AUTH)
/*
 * Fill the cn9k (ONF) SA control word plus key/salt/HMAC material from the
 * rte_security / rte_crypto transforms. Counterpart of the OT variant above
 * but targeting the roc_ie_onf_sa_ctl layout.
 *
 * NOTE(review): listing is truncated -- braces, break/return statements,
 * default cases and the key-length switch head are not visible here.
 */
539 onf_ipsec_sa_common_param_fill(struct roc_ie_onf_sa_ctl *ctl, uint8_t *salt,
540 uint8_t *cipher_key, uint8_t *hmac_opad_ipad,
541 struct rte_security_ipsec_xform *ipsec_xfrm,
542 struct rte_crypto_sym_xform *crypto_xfrm)
544 struct rte_crypto_sym_xform *auth_xfrm, *cipher_xfrm;
545 int rc, length, auth_key_len;
546 const uint8_t *key = NULL;
/* Direction fixes the transform chain order: ingress is auth->cipher,
 * egress is cipher->auth.
 */
549 switch (ipsec_xfrm->direction) {
550 case RTE_SECURITY_IPSEC_SA_DIR_INGRESS:
551 ctl->direction = ROC_IE_SA_DIR_INBOUND;
552 auth_xfrm = crypto_xfrm;
553 cipher_xfrm = crypto_xfrm->next;
555 case RTE_SECURITY_IPSEC_SA_DIR_EGRESS:
556 ctl->direction = ROC_IE_SA_DIR_OUTBOUND;
557 cipher_xfrm = crypto_xfrm;
558 auth_xfrm = crypto_xfrm->next;
564 /* Set protocol - ESP vs AH */
565 switch (ipsec_xfrm->proto) {
566 case RTE_SECURITY_IPSEC_SA_PROTO_ESP:
567 ctl->ipsec_proto = ROC_IE_SA_PROTOCOL_ESP;
569 case RTE_SECURITY_IPSEC_SA_PROTO_AH:
575 /* Set mode - transport vs tunnel */
576 switch (ipsec_xfrm->mode) {
577 case RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT:
578 ctl->ipsec_mode = ROC_IE_SA_MODE_TRANSPORT;
580 case RTE_SECURITY_IPSEC_SA_MODE_TUNNEL:
581 ctl->ipsec_mode = ROC_IE_SA_MODE_TUNNEL;
/* AEAD (AES-GCM) path: combined enc+auth, salt from the IPsec xform */
587 /* Set encryption algorithm */
588 if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
589 length = crypto_xfrm->aead.key.length;
591 switch (crypto_xfrm->aead.algo) {
592 case RTE_CRYPTO_AEAD_AES_GCM:
593 ctl->enc_type = ROC_IE_ON_SA_ENC_AES_GCM;
594 ctl->auth_type = ROC_IE_ON_SA_AUTH_NULL;
595 memcpy(salt, &ipsec_xfrm->salt, 4);
596 key = crypto_xfrm->aead.key.data;
/* Non-AEAD path: validate chain order, then map cipher/auth algos */
603 rc = ipsec_xfrm_verify(ipsec_xfrm, crypto_xfrm);
607 switch (cipher_xfrm->cipher.algo) {
608 case RTE_CRYPTO_CIPHER_AES_CBC:
609 ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CBC;
615 switch (auth_xfrm->auth.algo) {
616 case RTE_CRYPTO_AUTH_SHA1_HMAC:
617 ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA1;
/* HMAC key length must fit the precomputed opad/ipad scheme */
622 auth_key_len = auth_xfrm->auth.key.length;
623 if (auth_key_len < 20 || auth_key_len > 64)
626 key = cipher_xfrm->cipher.key.data;
627 length = cipher_xfrm->cipher.key.length;
629 ipsec_hmac_opad_ipad_gen(auth_xfrm, hmac_opad_ipad);
/* AES key-size field from the key length (switch head not visible) */
633 case ROC_CPT_AES128_KEY_LEN:
634 ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_128;
636 case ROC_CPT_AES192_KEY_LEN:
637 ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_192;
639 case ROC_CPT_AES256_KEY_LEN:
640 ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_256;
646 memcpy(cipher_key, key, length);
648 if (ipsec_xfrm->options.esn)
/* SPI stored big-endian in the ONF control word, unlike the OT SA */
651 ctl->spi = rte_cpu_to_be_32(ipsec_xfrm->spi);
/*
 * Populate a cn9k (ONF) inbound IPsec SA via the common param filler.
 *
 * NOTE(review): listing is truncated -- remaining trailing arguments,
 * error handling and the return are not visible here.
 */
656 cnxk_onf_ipsec_inb_sa_fill(struct roc_onf_ipsec_inb_sa *sa,
657 struct rte_security_ipsec_xform *ipsec_xfrm,
658 struct rte_crypto_sym_xform *crypto_xfrm)
660 struct roc_ie_onf_sa_ctl *ctl = &sa->ctl;
663 rc = onf_ipsec_sa_common_param_fill(ctl, sa->nonce, sa->cipher_key,
664 sa->hmac_key, ipsec_xfrm,
/*
 * Populate a cn9k (ONF) outbound IPsec SA: common params plus outer tunnel
 * IPv4 addresses when in tunnel mode.
 *
 * NOTE(review): listing is truncated -- the IPv6 case body, the
 * skip_tunnel_info label target and the return are not visible here.
 */
677 cnxk_onf_ipsec_outb_sa_fill(struct roc_onf_ipsec_outb_sa *sa,
678 struct rte_security_ipsec_xform *ipsec_xfrm,
679 struct rte_crypto_sym_xform *crypto_xfrm)
681 struct rte_security_ipsec_tunnel_param *tunnel = &ipsec_xfrm->tunnel;
682 struct roc_ie_onf_sa_ctl *ctl = &sa->ctl;
685 /* Fill common params */
686 rc = onf_ipsec_sa_common_param_fill(ctl, sa->nonce, sa->cipher_key,
687 sa->hmac_key, ipsec_xfrm,
692 if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
693 goto skip_tunnel_info;
695 /* Tunnel header info */
696 switch (tunnel->type) {
697 case RTE_SECURITY_IPSEC_TUNNEL_IPV4:
698 memcpy(&sa->ip_src, &tunnel->ipv4.src_ip,
699 sizeof(struct in_addr));
700 memcpy(&sa->ip_dst, &tunnel->ipv4.dst_ip,
701 sizeof(struct in_addr));
703 case RTE_SECURITY_IPSEC_TUNNEL_IPV6:
718 cnxk_onf_ipsec_inb_sa_valid(struct roc_onf_ipsec_inb_sa *sa)
720 return !!sa->ctl.valid;
724 cnxk_onf_ipsec_outb_sa_valid(struct roc_onf_ipsec_outb_sa *sa)
726 return !!sa->ctl.valid;
/*
 * Return the IV length in bytes contributed by the selected cipher/auth/
 * AEAD algorithm combination.
 *
 * NOTE(review): listing is truncated -- the ivlen declaration, the
 * assigned values for GCM/CTR/GMAC, breaks, switch heads/defaults and the
 * return are not visible here.
 */
730 cnxk_ipsec_ivlen_get(enum rte_crypto_cipher_algorithm c_algo,
731 enum rte_crypto_auth_algorithm a_algo,
732 enum rte_crypto_aead_algorithm aead_algo)
736 if (aead_algo == RTE_CRYPTO_AEAD_AES_GCM)
740 case RTE_CRYPTO_CIPHER_AES_CTR:
743 case RTE_CRYPTO_CIPHER_3DES_CBC:
744 ivlen = ROC_CPT_DES_BLOCK_LENGTH;
746 case RTE_CRYPTO_CIPHER_AES_CBC:
747 ivlen = ROC_CPT_AES_BLOCK_LENGTH;
754 case RTE_CRYPTO_AUTH_AES_GMAC:
/*
 * Return the ICV (integrity check value) length in bytes for the selected
 * auth or AEAD algorithm.
 *
 * NOTE(review): listing is truncated -- only the case labels survive; the
 * assigned lengths, breaks, switch heads/defaults and the return are not
 * visible here.
 */
765 cnxk_ipsec_icvlen_get(enum rte_crypto_cipher_algorithm c_algo,
766 enum rte_crypto_auth_algorithm a_algo,
767 enum rte_crypto_aead_algorithm aead_algo)
774 case RTE_CRYPTO_AUTH_NULL:
777 case RTE_CRYPTO_AUTH_SHA1_HMAC:
780 case RTE_CRYPTO_AUTH_SHA256_HMAC:
781 case RTE_CRYPTO_AUTH_AES_GMAC:
784 case RTE_CRYPTO_AUTH_SHA384_HMAC:
787 case RTE_CRYPTO_AUTH_SHA512_HMAC:
795 case RTE_CRYPTO_AEAD_AES_GCM:
/*
 * Return the cipher block-alignment ("roundup") in bytes used to pad the
 * outbound ESP payload; defaults to 4 (minimum ESP alignment).
 *
 * NOTE(review): listing is truncated -- the per-case assigned values,
 * breaks, switch head/default and the return are not visible here.
 */
806 cnxk_ipsec_outb_roundup_byte(enum rte_crypto_cipher_algorithm c_algo,
807 enum rte_crypto_aead_algorithm aead_algo)
809 uint8_t roundup_byte = 4;
811 if (aead_algo == RTE_CRYPTO_AEAD_AES_GCM)
815 case RTE_CRYPTO_CIPHER_AES_CTR:
818 case RTE_CRYPTO_CIPHER_AES_CBC:
821 case RTE_CRYPTO_CIPHER_3DES_CBC:
824 case RTE_CRYPTO_CIPHER_NULL:
/*
 * Compute outbound length overheads for an SA: fixed partial length
 * (ESP/AH header, tunnel header, IV, ICV, optional UDP encap), the trailer
 * length and the cipher roundup, and the resulting worst-case expansion.
 *
 * NOTE(review): listing is truncated -- else branches, closing braces and
 * the function tail (past the last visible line) are not shown here.
 */
835 cnxk_ipsec_outb_rlens_get(struct cnxk_ipsec_outb_rlens *rlens,
836 struct rte_security_ipsec_xform *ipsec_xfrm,
837 struct rte_crypto_sym_xform *crypto_xfrm)
839 struct rte_security_ipsec_tunnel_param *tunnel = &ipsec_xfrm->tunnel;
840 enum rte_crypto_cipher_algorithm c_algo = RTE_CRYPTO_CIPHER_NULL;
841 enum rte_crypto_auth_algorithm a_algo = RTE_CRYPTO_AUTH_NULL;
842 enum rte_crypto_aead_algorithm aead_algo = 0;
843 uint16_t partial_len = 0;
844 uint8_t roundup_byte = 0;
845 int8_t roundup_len = 0;
847 memset(rlens, 0, sizeof(struct cnxk_ipsec_outb_rlens));
/* Extract the algorithms from either the AEAD transform or the
 * chained cipher/auth transforms.
 */
849 /* Get Cipher and Auth algo */
850 if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
851 aead_algo = crypto_xfrm->aead.algo;
853 if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
854 c_algo = crypto_xfrm->cipher.algo;
856 a_algo = crypto_xfrm->auth.algo;
858 if (crypto_xfrm->next) {
859 if (crypto_xfrm->next->type ==
860 RTE_CRYPTO_SYM_XFORM_CIPHER)
861 c_algo = crypto_xfrm->next->cipher.algo;
863 a_algo = crypto_xfrm->next->auth.algo;
/* Fixed protocol header/trailer contribution */
867 if (ipsec_xfrm->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
868 partial_len = ROC_CPT_ESP_HDR_LEN;
869 roundup_len = ROC_CPT_ESP_TRL_LEN;
871 partial_len = ROC_CPT_AH_HDR_LEN;
/* Outer tunnel header contribution (IPv4 vs IPv6) */
874 if (ipsec_xfrm->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
875 if (tunnel->type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
876 partial_len += ROC_CPT_TUNNEL_IPV4_HDR_LEN;
878 partial_len += ROC_CPT_TUNNEL_IPV6_HDR_LEN;
/* Algorithm-dependent IV/ICV and padding alignment */
881 partial_len += cnxk_ipsec_ivlen_get(c_algo, a_algo, aead_algo);
882 partial_len += cnxk_ipsec_icvlen_get(c_algo, a_algo, aead_algo);
883 roundup_byte = cnxk_ipsec_outb_roundup_byte(c_algo, aead_algo);
885 if (ipsec_xfrm->options.udp_encap)
886 partial_len += sizeof(struct rte_udp_hdr);
888 rlens->partial_len = partial_len;
889 rlens->roundup_len = roundup_len;
890 rlens->roundup_byte = roundup_byte;
/* Worst case: every overhead plus a full alignment block */
891 rlens->max_extended_len = partial_len + roundup_len + roundup_byte;