1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
7 #include "cnxk_security.h"
/*
 * Precompute the HMAC inner (ipad) and outer (opad) pads XORed with the
 * auth key, then hash one block of each so per-packet HMAC processing
 * can start from the precomputed state. For SHA1 the opad hash is
 * written at hmac_opad_ipad[0] and the ipad hash at hmac_opad_ipad[24].
 *
 * NOTE(review): the loop bound is `i < 127`, which leaves key byte 127
 * un-XORed for 128-byte keys — looks like an off-by-one (should likely
 * be 128); confirm against the microcode's expectations.
 */
ipsec_hmac_opad_ipad_gen(struct rte_crypto_sym_xform *auth_xform,
			 uint8_t *hmac_opad_ipad)
	const uint8_t *key = auth_xform->auth.key.data;
	uint32_t length = auth_xform->auth.key.length;
	/* Standard HMAC pad constants (RFC 2104): 0x5c outer, 0x36 inner */
	uint8_t opad[128] = {[0 ... 127] = 0x5c};
	uint8_t ipad[128] = {[0 ... 127] = 0x36};

	/* HMAC OPAD and IPAD */
	for (i = 0; i < 127 && i < length; i++) {
		opad[i] = opad[i] ^ key[i];
		ipad[i] = ipad[i] ^ key[i];

	/* Precompute hash of HMAC OPAD and IPAD to avoid
	 * per packet computation
	 */
	switch (auth_xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		roc_hash_sha1_gen(opad, (uint32_t *)&hmac_opad_ipad[0]);
		roc_hash_sha1_gen(ipad, (uint32_t *)&hmac_opad_ipad[24]);
/*
 * Fill SA fields common to cn10k (OT) inbound and outbound IPsec SAs:
 * direction, protocol, mode, cipher/auth algorithm selection, salt,
 * key material and lifetime (expiry) unit.
 *
 * Key material written to cipher_key / hmac_opad_ipad is converted
 * from BE to CPU endianness in 64-bit words for the hardware.
 */
ot_ipsec_sa_common_param_fill(union roc_ot_ipsec_sa_word2 *w2,
			      uint8_t *cipher_key, uint8_t *salt_key,
			      uint8_t *hmac_opad_ipad,
			      struct rte_security_ipsec_xform *ipsec_xfrm,
			      struct rte_crypto_sym_xform *crypto_xfrm)
	struct rte_crypto_sym_xform *auth_xfrm, *cipher_xfrm;

	/* Direction also fixes the transform chain order:
	 * ingress = auth then cipher, egress = cipher then auth.
	 */
	switch (ipsec_xfrm->direction) {
	case RTE_SECURITY_IPSEC_SA_DIR_INGRESS:
		w2->s.dir = ROC_IE_SA_DIR_INBOUND;
		auth_xfrm = crypto_xfrm;
		cipher_xfrm = crypto_xfrm->next;
	case RTE_SECURITY_IPSEC_SA_DIR_EGRESS:
		w2->s.dir = ROC_IE_SA_DIR_OUTBOUND;
		cipher_xfrm = crypto_xfrm;
		auth_xfrm = crypto_xfrm->next;

	/* Set protocol - ESP vs AH */
	switch (ipsec_xfrm->proto) {
	case RTE_SECURITY_IPSEC_SA_PROTO_ESP:
		w2->s.protocol = ROC_IE_SA_PROTOCOL_ESP;
	case RTE_SECURITY_IPSEC_SA_PROTO_AH:
		w2->s.protocol = ROC_IE_SA_PROTOCOL_AH;

	/* Set mode - transport vs tunnel */
	switch (ipsec_xfrm->mode) {
	case RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT:
		w2->s.mode = ROC_IE_SA_MODE_TRANSPORT;
	case RTE_SECURITY_IPSEC_SA_MODE_TUNNEL:
		w2->s.mode = ROC_IE_SA_MODE_TUNNEL;

	/* Set encryption algorithm */
	if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		key = crypto_xfrm->aead.key.data;
		length = crypto_xfrm->aead.key.length;

		switch (crypto_xfrm->aead.algo) {
		case RTE_CRYPTO_AEAD_AES_GCM:
			w2->s.enc_type = ROC_IE_OT_SA_ENC_AES_GCM;
			w2->s.auth_type = ROC_IE_OT_SA_AUTH_NULL;
			/* 4-byte salt from the IPsec transform; stored
			 * in CPU endianness for the hardware.
			 */
			memcpy(salt_key, &ipsec_xfrm->salt, 4);
			tmp_salt = (uint32_t *)salt_key;
			*tmp_salt = rte_be_to_cpu_32(*tmp_salt);

		/* Chained (non-AEAD) case: pick cipher algorithm */
		switch (cipher_xfrm->cipher.algo) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
			w2->s.enc_type = ROC_IE_OT_SA_ENC_AES_CBC;

		/* Pick auth algorithm */
		switch (auth_xfrm->auth.algo) {
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
			w2->s.auth_type = ROC_IE_OT_SA_AUTH_SHA1;

		key = cipher_xfrm->cipher.key.data;
		length = cipher_xfrm->cipher.key.length;

		/* Precompute HMAC pads, then swap them to CPU-endian
		 * 64-bit words for the hardware.
		 */
		ipsec_hmac_opad_ipad_gen(auth_xfrm, hmac_opad_ipad);

		tmp_key = (uint64_t *)hmac_opad_ipad;
		     i < (int)(ROC_CTX_MAX_OPAD_IPAD_LEN / sizeof(uint64_t));
			tmp_key[i] = rte_be_to_cpu_64(tmp_key[i]);

	/* Set encapsulation type */
	if (ipsec_xfrm->options.udp_encap)
		w2->s.encap_type = ROC_IE_OT_SA_ENCAP_UDP;

	w2->s.spi = ipsec_xfrm->spi;

	/* Copy encryption key */
	memcpy(cipher_key, key, length);
	tmp_key = (uint64_t *)cipher_key;
	for (i = 0; i < (int)(ROC_CTX_MAX_CKEY_LEN / sizeof(uint64_t)); i++)
		tmp_key[i] = rte_be_to_cpu_64(tmp_key[i]);

	/* Derive the AES key-length field from the key size in bytes */
	case ROC_CPT_AES128_KEY_LEN:
		w2->s.aes_key_len = ROC_IE_SA_AES_KEY_LEN_128;
	case ROC_CPT_AES192_KEY_LEN:
		w2->s.aes_key_len = ROC_IE_SA_AES_KEY_LEN_192;
	case ROC_CPT_AES256_KEY_LEN:
		w2->s.aes_key_len = ROC_IE_SA_AES_KEY_LEN_256;

	/* Lifetime unit: packet and byte limits are mutually exclusive */
	if (ipsec_xfrm->life.packets_soft_limit != 0 ||
	    ipsec_xfrm->life.packets_hard_limit != 0) {
		if (ipsec_xfrm->life.bytes_soft_limit != 0 ||
		    ipsec_xfrm->life.bytes_hard_limit != 0) {
			plt_err("Expiry tracking with both packets & bytes is not supported");

		w2->s.life_unit = ROC_IE_OT_SA_LIFE_UNIT_PKTS;

	if (ipsec_xfrm->life.bytes_soft_limit != 0 ||
	    ipsec_xfrm->life.bytes_hard_limit != 0) {
		if (ipsec_xfrm->life.packets_soft_limit != 0 ||
		    ipsec_xfrm->life.packets_hard_limit != 0) {
			plt_err("Expiry tracking with both packets & bytes is not supported");

		w2->s.life_unit = ROC_IE_OT_SA_LIFE_UNIT_OCTETS;
/*
 * Size of an inbound SA context: the fixed part up to the HW
 * context-update region plus the anti-replay window bitmap, whose
 * length depends on the configured AR window (sa->w0.s.ar_win).
 */
ot_ipsec_inb_ctx_size(struct roc_ot_ipsec_inb_sa *sa)

	/* Variable based on Anti-replay Window */
	size = offsetof(struct roc_ot_ipsec_inb_sa, ctx) +
	       offsetof(struct roc_ot_ipsec_inb_ctx_update_reg, ar_winbits);

		/* AR window bitmap: 2^(ar_win - 1) 64-bit words */
		size += (1 << (sa->w0.s.ar_win - 1)) * sizeof(uint64_t);
/*
 * Program the expected outer tunnel header for an inbound cn10k SA so
 * the hardware can verify it. Only applies in tunnel mode when
 * tunnel_hdr_verify is enabled: copies outer src/dst addresses and
 * sets the verification level (dst-only or src+dst).
 */
ot_ipsec_inb_tunnel_hdr_fill(struct roc_ot_ipsec_inb_sa *sa,
			     struct rte_security_ipsec_xform *ipsec_xfrm)
	struct rte_security_ipsec_tunnel_param *tunnel;

	/* Nothing to program in transport mode */
	if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)

	/* Header verification not requested */
	if (ipsec_xfrm->options.tunnel_hdr_verify == 0)

	tunnel = &ipsec_xfrm->tunnel;

	switch (tunnel->type) {
	case RTE_SECURITY_IPSEC_TUNNEL_IPV4:
		sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
		memcpy(&sa->outer_hdr.ipv4.src_addr, &tunnel->ipv4.src_ip,
		       sizeof(struct in_addr));
		memcpy(&sa->outer_hdr.ipv4.dst_addr, &tunnel->ipv4.dst_ip,
		       sizeof(struct in_addr));

		/* IP Source and Dest are in LE/CPU endian */
		sa->outer_hdr.ipv4.src_addr =
			rte_be_to_cpu_32(sa->outer_hdr.ipv4.src_addr);
		sa->outer_hdr.ipv4.dst_addr =
			rte_be_to_cpu_32(sa->outer_hdr.ipv4.dst_addr);

	case RTE_SECURITY_IPSEC_TUNNEL_IPV6:
		sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_6;
		memcpy(&sa->outer_hdr.ipv6.src_addr, &tunnel->ipv6.src_addr,
		       sizeof(struct in6_addr));
		memcpy(&sa->outer_hdr.ipv6.dst_addr, &tunnel->ipv6.dst_addr,
		       sizeof(struct in6_addr));

	/* Verification level: destination only, or source + destination */
	switch (ipsec_xfrm->options.tunnel_hdr_verify) {
	case RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR:
		sa->w2.s.ip_hdr_verify = ROC_IE_OT_SA_IP_HDR_VERIFY_DST_ADDR;
	case RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR:
		sa->w2.s.ip_hdr_verify =
			ROC_IE_OT_SA_IP_HDR_VERIFY_SRC_DST_ADDR;
/*
 * Fill a cn10k (OT) inbound IPsec SA: common parameters, anti-replay
 * window, tunnel-header verification, packet format/output, UDP encap
 * and soft/hard expiry limits.
 */
cnxk_ot_ipsec_inb_sa_fill(struct roc_ot_ipsec_inb_sa *sa,
			  struct rte_security_ipsec_xform *ipsec_xfrm,
			  struct rte_crypto_sym_xform *crypto_xfrm)
	union roc_ot_ipsec_sa_word2 w2;
	uint32_t replay_win_sz;

	rc = ot_ipsec_sa_common_param_fill(&w2, sa->cipher_key, sa->w8.s.salt,
					   sa->hmac_opad_ipad, ipsec_xfrm,

	/* Update common word2 data */

	/* Only power-of-two window sizes are supported */
	replay_win_sz = ipsec_xfrm->replay_win_sz;

	if (!rte_is_power_of_2(replay_win_sz) ||
	    replay_win_sz > ROC_AR_WIN_SIZE_MAX)

	/* HW encodes the AR window as log2(size) - 5 */
	sa->w0.s.ar_win = rte_log2_u32(replay_win_sz) - 5;

	rc = ot_ipsec_inb_tunnel_hdr_fill(sa, ipsec_xfrm);

	/* Default options for pkt_out and pkt_fmt are with
	 * second pass meta and no defrag.
	 * NOTE(review): pkt_output below selects HW_BASED_DEFRAG, which
	 * contradicts "no defrag" above — confirm which is intended.
	 */
	sa->w0.s.pkt_format = ROC_IE_OT_SA_PKT_FMT_META;
	sa->w0.s.pkt_output = ROC_IE_OT_SA_PKT_OUTPUT_HW_BASED_DEFRAG;
	sa->w0.s.pkind = ROC_OT_CPT_META_PKIND;

	/* ESN enable and optional UDP (NAT-T) encapsulation on port 4500 */
	sa->w2.s.esn_en = !!ipsec_xfrm->options.esn;
	if (ipsec_xfrm->options.udp_encap) {
		sa->w10.s.udp_src_port = 4500;
		sa->w10.s.udp_dst_port = 4500;

	if (ipsec_xfrm->options.udp_ports_verify)
		sa->w2.s.udp_ports_verify = 1;

	offset = offsetof(struct roc_ot_ipsec_inb_sa, ctx);
	/* Word offset for HW managed SA field */
	sa->w0.s.hw_ctx_off = offset / 8;
	/* Context push size for inbound spans up to hw_ctx including
	 * ar_base field, in 8b units
	 */
	sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off + 1;
	/* Entire context size in 128B units */
		(PLT_ALIGN_CEIL(ot_ipsec_inb_ctx_size(sa), ROC_CTX_UNIT_128B) /

	/*
	 * CPT MC triggers expiry when counter value changes from 2 to 1. To
	 * mitigate this behaviour add 1 to the life counter values provided.
	 */
	if (ipsec_xfrm->life.bytes_soft_limit) {
		sa->ctx.soft_life = ipsec_xfrm->life.bytes_soft_limit + 1;
		sa->w0.s.soft_life_dec = 1;

	if (ipsec_xfrm->life.packets_soft_limit) {
		sa->ctx.soft_life = ipsec_xfrm->life.packets_soft_limit + 1;
		sa->w0.s.soft_life_dec = 1;

	if (ipsec_xfrm->life.bytes_hard_limit) {
		sa->ctx.hard_life = ipsec_xfrm->life.bytes_hard_limit + 1;
		sa->w0.s.hard_life_dec = 1;

	if (ipsec_xfrm->life.packets_hard_limit) {
		sa->ctx.hard_life = ipsec_xfrm->life.packets_hard_limit + 1;
		sa->w0.s.hard_life_dec = 1;

	/* There are two words of CPT_CTX_HW_S for ucode to skip */
	sa->w0.s.ctx_hdr_size = 1;
	sa->w0.s.aop_valid = 1;
	sa->w0.s.et_ovrwr = 1;
/*
 * Fill a cn10k (OT) outbound IPsec SA: common parameters, outer tunnel
 * header (addresses plus DF/flow-label and DSCP sources), ESN, UDP
 * encapsulation, context sizing and soft/hard expiry limits.
 */
cnxk_ot_ipsec_outb_sa_fill(struct roc_ot_ipsec_outb_sa *sa,
			   struct rte_security_ipsec_xform *ipsec_xfrm,
			   struct rte_crypto_sym_xform *crypto_xfrm)
	struct rte_security_ipsec_tunnel_param *tunnel = &ipsec_xfrm->tunnel;
	union roc_ot_ipsec_sa_word2 w2;

	rc = ot_ipsec_sa_common_param_fill(&w2, sa->cipher_key, sa->iv.s.salt,
					   sa->hmac_opad_ipad, ipsec_xfrm,

	/* Update common word2 data */

	if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
		goto skip_tunnel_info;

	/* Tunnel header info */
	switch (tunnel->type) {
	case RTE_SECURITY_IPSEC_TUNNEL_IPV4:
		sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
		memcpy(&sa->outer_hdr.ipv4.src_addr, &tunnel->ipv4.src_ip,
		       sizeof(struct in_addr));
		memcpy(&sa->outer_hdr.ipv4.dst_addr, &tunnel->ipv4.dst_ip,
		       sizeof(struct in_addr));

		/* IP Source and Dest seems to be in LE/CPU endian */
		sa->outer_hdr.ipv4.src_addr =
			rte_be_to_cpu_32(sa->outer_hdr.ipv4.src_addr);
		sa->outer_hdr.ipv4.dst_addr =
			rte_be_to_cpu_32(sa->outer_hdr.ipv4.dst_addr);

		/* Outer header DF bit source: from SA when copy_df is
		 * off, otherwise copied from the inner IP header.
		 */
		if (!ipsec_xfrm->options.copy_df) {
			sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
				ROC_IE_OT_SA_COPY_FROM_SA;
			sa->w10.s.ipv4_df_or_ipv6_flw_lbl = tunnel->ipv4.df;

			sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
				ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;

		/* Outer header DSCP source */
		if (!ipsec_xfrm->options.copy_dscp) {
			sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_SA;
			sa->w10.s.dscp = tunnel->ipv4.dscp;

			sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;

	case RTE_SECURITY_IPSEC_TUNNEL_IPV6:
		sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_6;
		memcpy(&sa->outer_hdr.ipv6.src_addr, &tunnel->ipv6.src_addr,
		       sizeof(struct in6_addr));
		memcpy(&sa->outer_hdr.ipv6.dst_addr, &tunnel->ipv6.dst_addr,
		       sizeof(struct in6_addr));

		/* Outer header flow label source: from SA when
		 * copy_flabel is off, otherwise from the inner header.
		 */
		if (!ipsec_xfrm->options.copy_flabel) {
			sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
				ROC_IE_OT_SA_COPY_FROM_SA;

			sa->w10.s.ipv4_df_or_ipv6_flw_lbl = tunnel->ipv6.flabel;

			sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
				ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;

		/* Outer header DSCP source */
		if (!ipsec_xfrm->options.copy_dscp) {
			sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_SA;
			sa->w10.s.dscp = tunnel->ipv6.dscp;

			sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;

	/* Transport mode: default DSCP and Flow label/DF taken from SA */
	sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_SA;
	sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src = ROC_IE_OT_SA_COPY_FROM_SA;

	/* ESN enable and optional UDP (NAT-T) encapsulation on port 4500 */
	sa->w0.s.esn_en = !!ipsec_xfrm->options.esn;

	if (ipsec_xfrm->options.udp_encap) {
		sa->w10.s.udp_src_port = 4500;
		sa->w10.s.udp_dst_port = 4500;

	offset = offsetof(struct roc_ot_ipsec_outb_sa, ctx);
	/* Word offset for HW managed SA field */
	sa->w0.s.hw_ctx_off = offset / 8;
	/* Context push size is up to hmac_opad_ipad */
	sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off;
	/* Entire context size in 128B units */
	offset = sizeof(struct roc_ot_ipsec_outb_sa);
	sa->w0.s.ctx_size = (PLT_ALIGN_CEIL(offset, ROC_CTX_UNIT_128B) /

	/* Enable IP ID generation for the outer header */
	sa->w2.s.ipid_gen = 1;

	/*
	 * CPT MC triggers expiry when counter value changes from 2 to 1. To
	 * mitigate this behaviour add 1 to the life counter values provided.
	 */
	if (ipsec_xfrm->life.bytes_soft_limit) {
		sa->ctx.soft_life = ipsec_xfrm->life.bytes_soft_limit + 1;
		sa->w0.s.soft_life_dec = 1;

	if (ipsec_xfrm->life.packets_soft_limit) {
		sa->ctx.soft_life = ipsec_xfrm->life.packets_soft_limit + 1;
		sa->w0.s.soft_life_dec = 1;

	if (ipsec_xfrm->life.bytes_hard_limit) {
		sa->ctx.hard_life = ipsec_xfrm->life.bytes_hard_limit + 1;
		sa->w0.s.hard_life_dec = 1;

	if (ipsec_xfrm->life.packets_hard_limit) {
		sa->ctx.hard_life = ipsec_xfrm->life.packets_hard_limit + 1;
		sa->w0.s.hard_life_dec = 1;

	/* There are two words of CPT_CTX_HW_S for ucode to skip */
	sa->w0.s.ctx_hdr_size = 1;
	sa->w0.s.aop_valid = 1;
/* Report whether the cn10k inbound SA's word2 valid bit is set (1/0). */
cnxk_ot_ipsec_inb_sa_valid(struct roc_ot_ipsec_inb_sa *sa)
	return !!sa->w2.s.valid;
/* Report whether the cn10k outbound SA's word2 valid bit is set (1/0). */
cnxk_ot_ipsec_outb_sa_valid(struct roc_ot_ipsec_outb_sa *sa)
	return !!sa->w2.s.valid;
/*
 * Validate a chained (non-AEAD) crypto transform against the IPsec
 * direction: ingress must be auth -> cipher, egress cipher -> auth.
 */
ipsec_xfrm_verify(struct rte_security_ipsec_xform *ipsec_xfrm,
		  struct rte_crypto_sym_xform *crypto_xfrm)
	/* A chained transform requires a second xform in the list */
	if (crypto_xfrm->next == NULL)

	if (ipsec_xfrm->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		if (crypto_xfrm->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
		    crypto_xfrm->next->type != RTE_CRYPTO_SYM_XFORM_CIPHER)

		if (crypto_xfrm->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
		    crypto_xfrm->next->type != RTE_CRYPTO_SYM_XFORM_AUTH)
/*
 * Fill the cn9k (ONF) SA control word and key material shared by
 * inbound and outbound SAs: direction, protocol, mode, algorithms,
 * keys, salt and SPI.
 */
onf_ipsec_sa_common_param_fill(struct roc_ie_onf_sa_ctl *ctl, uint8_t *salt,
			       uint8_t *cipher_key, uint8_t *hmac_opad_ipad,
			       struct rte_security_ipsec_xform *ipsec_xfrm,
			       struct rte_crypto_sym_xform *crypto_xfrm)
	struct rte_crypto_sym_xform *auth_xfrm, *cipher_xfrm;
	int rc, length, auth_key_len;
	const uint8_t *key = NULL;

	/* Direction fixes the chain order:
	 * ingress = auth then cipher, egress = cipher then auth.
	 */
	switch (ipsec_xfrm->direction) {
	case RTE_SECURITY_IPSEC_SA_DIR_INGRESS:
		ctl->direction = ROC_IE_SA_DIR_INBOUND;
		auth_xfrm = crypto_xfrm;
		cipher_xfrm = crypto_xfrm->next;
	case RTE_SECURITY_IPSEC_SA_DIR_EGRESS:
		ctl->direction = ROC_IE_SA_DIR_OUTBOUND;
		cipher_xfrm = crypto_xfrm;
		auth_xfrm = crypto_xfrm->next;

	/* Set protocol - ESP vs AH */
	switch (ipsec_xfrm->proto) {
	case RTE_SECURITY_IPSEC_SA_PROTO_ESP:
		ctl->ipsec_proto = ROC_IE_SA_PROTOCOL_ESP;
	case RTE_SECURITY_IPSEC_SA_PROTO_AH:

	/* Set mode - transport vs tunnel */
	switch (ipsec_xfrm->mode) {
	case RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT:
		ctl->ipsec_mode = ROC_IE_SA_MODE_TRANSPORT;
	case RTE_SECURITY_IPSEC_SA_MODE_TUNNEL:
		ctl->ipsec_mode = ROC_IE_SA_MODE_TUNNEL;

	/* Set encryption algorithm */
	if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		length = crypto_xfrm->aead.key.length;

		switch (crypto_xfrm->aead.algo) {
		case RTE_CRYPTO_AEAD_AES_GCM:
			ctl->enc_type = ROC_IE_ON_SA_ENC_AES_GCM;
			ctl->auth_type = ROC_IE_ON_SA_AUTH_NULL;
			/* 4-byte salt from the IPsec transform */
			memcpy(salt, &ipsec_xfrm->salt, 4);
			key = crypto_xfrm->aead.key.data;

	/* Chained cipher+auth: verify ordering matches direction */
	rc = ipsec_xfrm_verify(ipsec_xfrm, crypto_xfrm);

	switch (cipher_xfrm->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CBC;

	switch (auth_xfrm->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA1;

	/* Reject auth keys outside the 20..64 byte range */
	auth_key_len = auth_xfrm->auth.key.length;
	if (auth_key_len < 20 || auth_key_len > 64)

	key = cipher_xfrm->cipher.key.data;
	length = cipher_xfrm->cipher.key.length;

	/* Precompute HMAC opad/ipad hashes */
	ipsec_hmac_opad_ipad_gen(auth_xfrm, hmac_opad_ipad);

	/* Derive the AES key-length field from the key size in bytes */
	case ROC_CPT_AES128_KEY_LEN:
		ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_128;
	case ROC_CPT_AES192_KEY_LEN:
		ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_192;
	case ROC_CPT_AES256_KEY_LEN:
		ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_256;

	memcpy(cipher_key, key, length);

	if (ipsec_xfrm->options.esn)

	/* SPI is stored big-endian in the cn9k SA control word */
	ctl->spi = rte_cpu_to_be_32(ipsec_xfrm->spi);
/*
 * Fill a cn9k (ONF) inbound IPsec SA via the common parameter helper.
 */
cnxk_onf_ipsec_inb_sa_fill(struct roc_onf_ipsec_inb_sa *sa,
			   struct rte_security_ipsec_xform *ipsec_xfrm,
			   struct rte_crypto_sym_xform *crypto_xfrm)
	struct roc_ie_onf_sa_ctl *ctl = &sa->ctl;

	rc = onf_ipsec_sa_common_param_fill(ctl, sa->nonce, sa->cipher_key,
					    sa->hmac_key, ipsec_xfrm,
/*
 * Fill a cn9k (ONF) outbound IPsec SA: common parameters plus the
 * outer tunnel src/dst addresses when in tunnel mode.
 */
cnxk_onf_ipsec_outb_sa_fill(struct roc_onf_ipsec_outb_sa *sa,
			    struct rte_security_ipsec_xform *ipsec_xfrm,
			    struct rte_crypto_sym_xform *crypto_xfrm)
	struct rte_security_ipsec_tunnel_param *tunnel = &ipsec_xfrm->tunnel;
	struct roc_ie_onf_sa_ctl *ctl = &sa->ctl;

	/* Fill common params */
	rc = onf_ipsec_sa_common_param_fill(ctl, sa->nonce, sa->cipher_key,
					    sa->hmac_key, ipsec_xfrm,

	if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
		goto skip_tunnel_info;

	/* Tunnel header info */
	switch (tunnel->type) {
	case RTE_SECURITY_IPSEC_TUNNEL_IPV4:
		memcpy(&sa->ip_src, &tunnel->ipv4.src_ip,
		       sizeof(struct in_addr));
		memcpy(&sa->ip_dst, &tunnel->ipv4.dst_ip,
		       sizeof(struct in_addr));
	case RTE_SECURITY_IPSEC_TUNNEL_IPV6:
/* Report whether the cn9k inbound SA's control valid bit is set (1/0). */
cnxk_onf_ipsec_inb_sa_valid(struct roc_onf_ipsec_inb_sa *sa)
	return !!sa->ctl.valid;
/* Report whether the cn9k outbound SA's control valid bit is set (1/0). */
cnxk_onf_ipsec_outb_sa_valid(struct roc_onf_ipsec_outb_sa *sa)
	return !!sa->ctl.valid;
/*
 * IV length in bytes for the given cipher/auth/AEAD combination.
 */
cnxk_ipsec_ivlen_get(enum rte_crypto_cipher_algorithm c_algo,
		     enum rte_crypto_auth_algorithm a_algo,
		     enum rte_crypto_aead_algorithm aead_algo)

	/* AES-GCM AEAD is checked first and takes precedence */
	if (aead_algo == RTE_CRYPTO_AEAD_AES_GCM)

	case RTE_CRYPTO_CIPHER_AES_CTR:

	case RTE_CRYPTO_CIPHER_3DES_CBC:
		ivlen = ROC_CPT_DES_BLOCK_LENGTH;

	case RTE_CRYPTO_CIPHER_AES_CBC:
		ivlen = ROC_CPT_AES_BLOCK_LENGTH;

	case RTE_CRYPTO_AUTH_AES_GMAC:
/*
 * ICV (integrity check value) length in bytes for the given
 * auth/AEAD algorithm combination.
 */
cnxk_ipsec_icvlen_get(enum rte_crypto_cipher_algorithm c_algo,
		      enum rte_crypto_auth_algorithm a_algo,
		      enum rte_crypto_aead_algorithm aead_algo)

	case RTE_CRYPTO_AUTH_NULL:

	case RTE_CRYPTO_AUTH_SHA1_HMAC:

	case RTE_CRYPTO_AUTH_SHA256_HMAC:
	case RTE_CRYPTO_AUTH_AES_GMAC:

	case RTE_CRYPTO_AUTH_SHA384_HMAC:

	case RTE_CRYPTO_AUTH_SHA512_HMAC:

	case RTE_CRYPTO_AEAD_AES_GCM:
/*
 * Padding roundup granularity in bytes for outbound packets, derived
 * from the cipher/AEAD algorithm (defaults to 4).
 */
cnxk_ipsec_outb_roundup_byte(enum rte_crypto_cipher_algorithm c_algo,
			     enum rte_crypto_aead_algorithm aead_algo)
	/* ESP requires at least 4-byte alignment */
	uint8_t roundup_byte = 4;

	if (aead_algo == RTE_CRYPTO_AEAD_AES_GCM)

	case RTE_CRYPTO_CIPHER_AES_CTR:

	case RTE_CRYPTO_CIPHER_AES_CBC:

	case RTE_CRYPTO_CIPHER_3DES_CBC:

	case RTE_CRYPTO_CIPHER_NULL:
/*
 * Precompute outbound packet length expansion for an SA into rlens:
 * fixed partial length (protocol header, outer IP header, IV, ICV and
 * optional UDP encap) plus the worst-case padding roundup, so the
 * datapath can size packets without recomputing per packet.
 */
cnxk_ipsec_outb_rlens_get(struct cnxk_ipsec_outb_rlens *rlens,
			  struct rte_security_ipsec_xform *ipsec_xfrm,
			  struct rte_crypto_sym_xform *crypto_xfrm)
	struct rte_security_ipsec_tunnel_param *tunnel = &ipsec_xfrm->tunnel;
	enum rte_crypto_cipher_algorithm c_algo = RTE_CRYPTO_CIPHER_NULL;
	enum rte_crypto_auth_algorithm a_algo = RTE_CRYPTO_AUTH_NULL;
	enum rte_crypto_aead_algorithm aead_algo = 0;
	uint16_t partial_len = 0;
	uint8_t roundup_byte = 0;
	int8_t roundup_len = 0;

	memset(rlens, 0, sizeof(struct cnxk_ipsec_outb_rlens));

	/* Get Cipher and Auth algo */
	if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		aead_algo = crypto_xfrm->aead.algo;

	if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
		c_algo = crypto_xfrm->cipher.algo;

		a_algo = crypto_xfrm->auth.algo;

	/* Second xform of a chained transform, if present */
	if (crypto_xfrm->next) {
		if (crypto_xfrm->next->type ==
		    RTE_CRYPTO_SYM_XFORM_CIPHER)
			c_algo = crypto_xfrm->next->cipher.algo;

			a_algo = crypto_xfrm->next->auth.algo;

	/* Fixed protocol overhead: ESP header + trailer, or AH header */
	if (ipsec_xfrm->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
		partial_len = ROC_CPT_ESP_HDR_LEN;
		roundup_len = ROC_CPT_ESP_TRL_LEN;

		partial_len = ROC_CPT_AH_HDR_LEN;

	/* Outer IP header overhead in tunnel mode */
	if (ipsec_xfrm->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		if (tunnel->type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			partial_len += ROC_CPT_TUNNEL_IPV4_HDR_LEN;

			partial_len += ROC_CPT_TUNNEL_IPV6_HDR_LEN;

	partial_len += cnxk_ipsec_ivlen_get(c_algo, a_algo, aead_algo);
	partial_len += cnxk_ipsec_icvlen_get(c_algo, a_algo, aead_algo);
	roundup_byte = cnxk_ipsec_outb_roundup_byte(c_algo, aead_algo);

	/* UDP (NAT-T) encapsulation adds a UDP header */
	if (ipsec_xfrm->options.udp_encap)
		partial_len += sizeof(struct rte_udp_hdr);

	rlens->partial_len = partial_len;
	rlens->roundup_len = roundup_len;
	rlens->roundup_byte = roundup_byte;
	/* Worst-case total expansion */
	rlens->max_extended_len = partial_len + roundup_len + roundup_byte;