1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
7 #include "cnxk_security.h"
/* Precompute the HMAC inner/outer pad partial hashes (RFC 2104) for an
 * auth transform and store them into the SA's hmac_opad_ipad area, so the
 * hardware/microcode does not have to recompute them per packet.
 *
 * NOTE: this listing is truncated; break statements and closing braces
 * between cases are not visible here.
 */
12 ipsec_hmac_opad_ipad_gen(struct rte_crypto_sym_xform *auth_xform,
13 uint8_t *hmac_opad_ipad)
15 const uint8_t *key = auth_xform->auth.key.data;
16 uint32_t length = auth_xform->auth.key.length;
/* opad/ipad start as the standard HMAC pad constants (0x5c / 0x36). */
17 uint8_t opad[128] = {[0 ... 127] = 0x5c};
18 uint8_t ipad[128] = {[0 ... 127] = 0x36};
21 /* HMAC OPAD and IPAD */
/* XOR the key into the pads. NOTE(review): bound is 127, so byte 127 of a
 * full 128-byte key would never be XORed — presumably supported key lengths
 * are shorter; confirm against the capability limits enforced by callers.
 */
22 for (i = 0; i < 127 && i < length; i++) {
23 opad[i] = opad[i] ^ key[i];
24 ipad[i] = ipad[i] ^ key[i];
27 /* Precompute hash of HMAC OPAD and IPAD to avoid
28 * per packet computation
30 switch (auth_xform->auth.algo) {
31 case RTE_CRYPTO_AUTH_SHA1_HMAC:
/* NOTE(review): SHA1 ipad hash is stored at offset 24 (20-byte SHA1 state
 * rounded up), unlike the SHA2 cases which use offset 64 — verify this
 * matches the microcode's expected opad/ipad layout.
 */
32 roc_hash_sha1_gen(opad, (uint32_t *)&hmac_opad_ipad[0]);
33 roc_hash_sha1_gen(ipad, (uint32_t *)&hmac_opad_ipad[24]);
35 case RTE_CRYPTO_AUTH_SHA256_HMAC:
36 roc_hash_sha256_gen(opad, (uint32_t *)&hmac_opad_ipad[0]);
37 roc_hash_sha256_gen(ipad, (uint32_t *)&hmac_opad_ipad[64]);
39 case RTE_CRYPTO_AUTH_SHA384_HMAC:
/* SHA-384 uses the SHA-512 engine with a 384-bit digest parameter. */
40 roc_hash_sha512_gen(opad, (uint64_t *)&hmac_opad_ipad[0], 384);
41 roc_hash_sha512_gen(ipad, (uint64_t *)&hmac_opad_ipad[64], 384);
43 case RTE_CRYPTO_AUTH_SHA512_HMAC:
44 roc_hash_sha512_gen(opad, (uint64_t *)&hmac_opad_ipad[0], 512);
45 roc_hash_sha512_gen(ipad, (uint64_t *)&hmac_opad_ipad[64], 512);
/* Fill the SA fields shared by inbound and outbound cn10k ("ot") IPsec SAs:
 * word2 control bits (direction, protocol, mode, algorithms, key length,
 * SPI, lifetime units), the cipher key, the salt (for AES-GCM) and the
 * precomputed HMAC opad/ipad.
 *
 * NOTE: this listing is truncated; break/return statements, error paths and
 * some declarations between the visible lines are not shown.
 */
53 ot_ipsec_sa_common_param_fill(union roc_ot_ipsec_sa_word2 *w2,
54 uint8_t *cipher_key, uint8_t *salt_key,
55 uint8_t *hmac_opad_ipad,
56 struct rte_security_ipsec_xform *ipsec_xfrm,
57 struct rte_crypto_sym_xform *crypto_xfrm)
59 struct rte_crypto_sym_xform *auth_xfrm, *cipher_xfrm;
/* Direction decides both the SA direction bit and the expected chain
 * ordering: inbound is auth->cipher, outbound is cipher->auth.
 */
66 switch (ipsec_xfrm->direction) {
67 case RTE_SECURITY_IPSEC_SA_DIR_INGRESS:
68 w2->s.dir = ROC_IE_SA_DIR_INBOUND;
69 auth_xfrm = crypto_xfrm;
70 cipher_xfrm = crypto_xfrm->next;
72 case RTE_SECURITY_IPSEC_SA_DIR_EGRESS:
73 w2->s.dir = ROC_IE_SA_DIR_OUTBOUND;
74 cipher_xfrm = crypto_xfrm;
75 auth_xfrm = crypto_xfrm->next;
81 /* Set protocol - ESP vs AH */
82 switch (ipsec_xfrm->proto) {
83 case RTE_SECURITY_IPSEC_SA_PROTO_ESP:
84 w2->s.protocol = ROC_IE_SA_PROTOCOL_ESP;
86 case RTE_SECURITY_IPSEC_SA_PROTO_AH:
87 w2->s.protocol = ROC_IE_SA_PROTOCOL_AH;
93 /* Set mode - transport vs tunnel */
94 switch (ipsec_xfrm->mode) {
95 case RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT:
96 w2->s.mode = ROC_IE_SA_MODE_TRANSPORT;
98 case RTE_SECURITY_IPSEC_SA_MODE_TUNNEL:
99 w2->s.mode = ROC_IE_SA_MODE_TUNNEL;
105 /* Set encryption algorithm */
/* AEAD (combined cipher+auth) path: auth is NULL, salt comes from the
 * ipsec xform.
 */
106 if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
107 key = crypto_xfrm->aead.key.data;
108 length = crypto_xfrm->aead.key.length;
110 switch (crypto_xfrm->aead.algo) {
111 case RTE_CRYPTO_AEAD_AES_GCM:
112 w2->s.enc_type = ROC_IE_OT_SA_ENC_AES_GCM;
113 w2->s.auth_type = ROC_IE_OT_SA_AUTH_NULL;
/* Salt is stored byte-swapped to CPU order — presumably the hardware
 * expects it that way; confirm against the ROC SA layout.
 */
114 memcpy(salt_key, &ipsec_xfrm->salt, 4);
115 tmp_salt = (uint32_t *)salt_key;
116 *tmp_salt = rte_be_to_cpu_32(*tmp_salt);
/* Non-AEAD path: separate cipher and auth transforms. */
122 switch (cipher_xfrm->cipher.algo) {
123 case RTE_CRYPTO_CIPHER_AES_CBC:
124 w2->s.enc_type = ROC_IE_OT_SA_ENC_AES_CBC;
126 case RTE_CRYPTO_CIPHER_AES_CTR:
127 w2->s.enc_type = ROC_IE_OT_SA_ENC_AES_CTR;
133 switch (auth_xfrm->auth.algo) {
134 case RTE_CRYPTO_AUTH_NULL:
135 w2->s.auth_type = ROC_IE_OT_SA_AUTH_NULL;
137 case RTE_CRYPTO_AUTH_SHA1_HMAC:
138 w2->s.auth_type = ROC_IE_OT_SA_AUTH_SHA1;
140 case RTE_CRYPTO_AUTH_SHA256_HMAC:
141 w2->s.auth_type = ROC_IE_OT_SA_AUTH_SHA2_256;
143 case RTE_CRYPTO_AUTH_SHA384_HMAC:
144 w2->s.auth_type = ROC_IE_OT_SA_AUTH_SHA2_384;
146 case RTE_CRYPTO_AUTH_SHA512_HMAC:
147 w2->s.auth_type = ROC_IE_OT_SA_AUTH_SHA2_512;
/* Precompute opad/ipad, then byte-swap the whole area into CPU order
 * 64 bits at a time for the SA context.
 */
153 ipsec_hmac_opad_ipad_gen(auth_xfrm, hmac_opad_ipad);
155 tmp_key = (uint64_t *)hmac_opad_ipad;
157 i < (int)(ROC_CTX_MAX_OPAD_IPAD_LEN / sizeof(uint64_t));
159 tmp_key[i] = rte_be_to_cpu_64(tmp_key[i]);
161 key = cipher_xfrm->cipher.key.data;
162 length = cipher_xfrm->cipher.key.length;
165 /* Set encapsulation type */
166 if (ipsec_xfrm->options.udp_encap)
167 w2->s.encap_type = ROC_IE_OT_SA_ENCAP_UDP;
169 w2->s.spi = ipsec_xfrm->spi;
171 /* Copy encryption key */
/* Cipher key is also stored byte-swapped to CPU order, 64 bits at a time. */
172 memcpy(cipher_key, key, length);
173 tmp_key = (uint64_t *)cipher_key;
174 for (i = 0; i < (int)(ROC_CTX_MAX_CKEY_LEN / sizeof(uint64_t)); i++)
175 tmp_key[i] = rte_be_to_cpu_64(tmp_key[i]);
/* AES key length encoding from the byte length of the key. */
178 case ROC_CPT_AES128_KEY_LEN:
179 w2->s.aes_key_len = ROC_IE_SA_AES_KEY_LEN_128;
181 case ROC_CPT_AES192_KEY_LEN:
182 w2->s.aes_key_len = ROC_IE_SA_AES_KEY_LEN_192;
184 case ROC_CPT_AES256_KEY_LEN:
185 w2->s.aes_key_len = ROC_IE_SA_AES_KEY_LEN_256;
/* Lifetime tracking: packet-based and byte-based expiry are mutually
 * exclusive on this hardware; reject configs that set both.
 */
191 if (ipsec_xfrm->life.packets_soft_limit != 0 ||
192 ipsec_xfrm->life.packets_hard_limit != 0) {
193 if (ipsec_xfrm->life.bytes_soft_limit != 0 ||
194 ipsec_xfrm->life.bytes_hard_limit != 0) {
195 plt_err("Expiry tracking with both packets & bytes is not supported");
198 w2->s.life_unit = ROC_IE_OT_SA_LIFE_UNIT_PKTS;
201 if (ipsec_xfrm->life.bytes_soft_limit != 0 ||
202 ipsec_xfrm->life.bytes_hard_limit != 0) {
203 if (ipsec_xfrm->life.packets_soft_limit != 0 ||
204 ipsec_xfrm->life.packets_hard_limit != 0) {
205 plt_err("Expiry tracking with both packets & bytes is not supported");
208 w2->s.life_unit = ROC_IE_OT_SA_LIFE_UNIT_OCTETS;
/* Compute the inbound SA context size in bytes: the fixed part up to the
 * anti-replay window bits, plus a window-size-dependent number of 64-bit
 * words (window of 2^(ar_win-1)... entries — derived from sa->w0.s.ar_win).
 */
215 ot_ipsec_inb_ctx_size(struct roc_ot_ipsec_inb_sa *sa)
219 /* Variable based on Anti-replay Window */
220 size = offsetof(struct roc_ot_ipsec_inb_sa, ctx) +
221 offsetof(struct roc_ot_ipsec_inb_ctx_update_reg, ar_winbits);
/* Each unit of ar_win doubles the winbits storage; guarded elsewhere
 * (ar_win == 0 case not visible in this listing).
 */
224 size += (1 << (sa->w0.s.ar_win - 1)) * sizeof(uint64_t);
/* For tunnel-mode inbound SAs with tunnel-header verification enabled,
 * record the expected outer IPv4/IPv6 addresses in the SA and set the
 * header-verify mode (dst only, or src+dst).
 * No-ops when not in tunnel mode or when verification is disabled.
 */
230 ot_ipsec_inb_tunnel_hdr_fill(struct roc_ot_ipsec_inb_sa *sa,
231 struct rte_security_ipsec_xform *ipsec_xfrm)
233 struct rte_security_ipsec_tunnel_param *tunnel;
235 if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
238 if (ipsec_xfrm->options.tunnel_hdr_verify == 0)
241 tunnel = &ipsec_xfrm->tunnel;
243 switch (tunnel->type) {
244 case RTE_SECURITY_IPSEC_TUNNEL_IPV4:
245 sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
246 memcpy(&sa->outer_hdr.ipv4.src_addr, &tunnel->ipv4.src_ip,
247 sizeof(struct in_addr));
248 memcpy(&sa->outer_hdr.ipv4.dst_addr, &tunnel->ipv4.dst_ip,
249 sizeof(struct in_addr));
251 /* IP Source and Dest are in LE/CPU endian */
252 sa->outer_hdr.ipv4.src_addr =
253 rte_be_to_cpu_32(sa->outer_hdr.ipv4.src_addr);
254 sa->outer_hdr.ipv4.dst_addr =
255 rte_be_to_cpu_32(sa->outer_hdr.ipv4.dst_addr);
258 case RTE_SECURITY_IPSEC_TUNNEL_IPV6:
259 sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_6;
/* IPv6 addresses are copied as-is (no byte swap), unlike IPv4 above. */
260 memcpy(&sa->outer_hdr.ipv6.src_addr, &tunnel->ipv6.src_addr,
261 sizeof(struct in6_addr));
262 memcpy(&sa->outer_hdr.ipv6.dst_addr, &tunnel->ipv6.dst_addr,
263 sizeof(struct in6_addr));
270 switch (ipsec_xfrm->options.tunnel_hdr_verify) {
271 case RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR:
272 sa->w2.s.ip_hdr_verify = ROC_IE_OT_SA_IP_HDR_VERIFY_DST_ADDR;
274 case RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR:
275 sa->w2.s.ip_hdr_verify =
276 ROC_IE_OT_SA_IP_HDR_VERIFY_SRC_DST_ADDR;
/* Populate a cn10k inbound IPsec SA from the security/crypto transforms:
 * common word2 fields, anti-replay window, tunnel-header verification,
 * packet format/output options, UDP encapsulation, context sizing and
 * soft/hard lifetime counters.
 *
 * NOTE: this listing is truncated; error-path returns and some statements
 * between the visible lines are not shown.
 */
286 cnxk_ot_ipsec_inb_sa_fill(struct roc_ot_ipsec_inb_sa *sa,
287 struct rte_security_ipsec_xform *ipsec_xfrm,
288 struct rte_crypto_sym_xform *crypto_xfrm)
290 union roc_ot_ipsec_sa_word2 w2;
291 uint32_t replay_win_sz;
296 rc = ot_ipsec_sa_common_param_fill(&w2, sa->cipher_key, sa->w8.s.salt,
297 sa->hmac_opad_ipad, ipsec_xfrm,
302 /* Updata common word2 data */
305 /* Only support power-of-two window sizes supported */
306 replay_win_sz = ipsec_xfrm->replay_win_sz;
308 if (!rte_is_power_of_2(replay_win_sz) ||
309 replay_win_sz > ROC_AR_WIN_SIZE_MAX)
/* ar_win encodes log2(window) with an offset of 5 (i.e. minimum 64). */
312 sa->w0.s.ar_win = rte_log2_u32(replay_win_sz) - 5;
315 rc = ot_ipsec_inb_tunnel_hdr_fill(sa, ipsec_xfrm);
319 /* Default options for pkt_out and pkt_fmt are with
320 * second pass meta and no defrag.
322 sa->w0.s.pkt_format = ROC_IE_OT_SA_PKT_FMT_META;
323 sa->w0.s.pkt_output = ROC_IE_OT_SA_PKT_OUTPUT_HW_BASED_DEFRAG;
324 sa->w0.s.pkind = ROC_OT_CPT_META_PKIND;
327 sa->w2.s.esn_en = !!ipsec_xfrm->options.esn;
/* 4500 is the IANA-assigned UDP port for IPsec NAT traversal. */
328 if (ipsec_xfrm->options.udp_encap) {
329 sa->w10.s.udp_src_port = 4500;
330 sa->w10.s.udp_dst_port = 4500;
333 if (ipsec_xfrm->options.udp_ports_verify)
334 sa->w2.s.udp_ports_verify = 1;
336 offset = offsetof(struct roc_ot_ipsec_inb_sa, ctx);
337 /* Word offset for HW managed SA field */
338 sa->w0.s.hw_ctx_off = offset / 8;
339 /* Context push size for inbound spans up to hw_ctx including
340 * ar_base field, in 8b units
342 sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off + 1;
343 /* Entire context size in 128B units */
345 (PLT_ALIGN_CEIL(ot_ipsec_inb_ctx_size(sa), ROC_CTX_UNIT_128B) /
350 * CPT MC triggers expiry when counter value changes from 2 to 1. To
351 * mitigate this behaviour add 1 to the life counter values provided.
354 if (ipsec_xfrm->life.bytes_soft_limit) {
355 sa->ctx.soft_life = ipsec_xfrm->life.bytes_soft_limit + 1;
356 sa->w0.s.soft_life_dec = 1;
359 if (ipsec_xfrm->life.packets_soft_limit) {
360 sa->ctx.soft_life = ipsec_xfrm->life.packets_soft_limit + 1;
361 sa->w0.s.soft_life_dec = 1;
364 if (ipsec_xfrm->life.bytes_hard_limit) {
365 sa->ctx.hard_life = ipsec_xfrm->life.bytes_hard_limit + 1;
366 sa->w0.s.hard_life_dec = 1;
369 if (ipsec_xfrm->life.packets_hard_limit) {
370 sa->ctx.hard_life = ipsec_xfrm->life.packets_hard_limit + 1;
371 sa->w0.s.hard_life_dec = 1;
374 /* There are two words of CPT_CTX_HW_S for ucode to skip */
375 sa->w0.s.ctx_hdr_size = 1;
376 sa->w0.s.aop_valid = 1;
377 sa->w0.s.et_ovrwr = 1;
/* Populate a cn10k outbound IPsec SA: common word2 fields, tunnel outer
 * header (addresses, DF/flow-label and DSCP sources), UDP encapsulation,
 * context sizing, IP-ID generation and soft/hard lifetime counters.
 *
 * NOTE: this listing is truncated; error-path returns, break statements
 * and some lines between the visible ones are not shown.
 */
387 cnxk_ot_ipsec_outb_sa_fill(struct roc_ot_ipsec_outb_sa *sa,
388 struct rte_security_ipsec_xform *ipsec_xfrm,
389 struct rte_crypto_sym_xform *crypto_xfrm)
391 struct rte_security_ipsec_tunnel_param *tunnel = &ipsec_xfrm->tunnel;
392 union roc_ot_ipsec_sa_word2 w2;
397 rc = ot_ipsec_sa_common_param_fill(&w2, sa->cipher_key, sa->iv.s.salt,
398 sa->hmac_opad_ipad, ipsec_xfrm,
403 /* Update common word2 data */
406 if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
407 goto skip_tunnel_info;
409 /* Tunnel header info */
410 switch (tunnel->type) {
411 case RTE_SECURITY_IPSEC_TUNNEL_IPV4:
412 sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
413 memcpy(&sa->outer_hdr.ipv4.src_addr, &tunnel->ipv4.src_ip,
414 sizeof(struct in_addr));
415 memcpy(&sa->outer_hdr.ipv4.dst_addr, &tunnel->ipv4.dst_ip,
416 sizeof(struct in_addr));
418 /* IP Source and Dest seems to be in LE/CPU endian */
419 sa->outer_hdr.ipv4.src_addr =
420 rte_be_to_cpu_32(sa->outer_hdr.ipv4.src_addr);
421 sa->outer_hdr.ipv4.dst_addr =
422 rte_be_to_cpu_32(sa->outer_hdr.ipv4.dst_addr);
424 /* Outer header DF bit source */
/* copy_df == 0 means the DF bit comes from the SA; otherwise it is
 * copied from the inner IP header.
 */
425 if (!ipsec_xfrm->options.copy_df) {
426 sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
427 ROC_IE_OT_SA_COPY_FROM_SA;
428 sa->w10.s.ipv4_df_or_ipv6_flw_lbl = tunnel->ipv4.df;
430 sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
431 ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
434 /* Outer header DSCP source */
435 if (!ipsec_xfrm->options.copy_dscp) {
436 sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_SA;
437 sa->w10.s.dscp = tunnel->ipv4.dscp;
439 sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
442 case RTE_SECURITY_IPSEC_TUNNEL_IPV6:
443 sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_6;
444 memcpy(&sa->outer_hdr.ipv6.src_addr, &tunnel->ipv6.src_addr,
445 sizeof(struct in6_addr));
446 memcpy(&sa->outer_hdr.ipv6.dst_addr, &tunnel->ipv6.dst_addr,
447 sizeof(struct in6_addr));
449 /* Outer header flow label source */
/* Same SA field doubles as IPv4 DF / IPv6 flow label depending on
 * outer IP version (see the w2/w10 field names).
 */
450 if (!ipsec_xfrm->options.copy_flabel) {
451 sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
452 ROC_IE_OT_SA_COPY_FROM_SA;
454 sa->w10.s.ipv4_df_or_ipv6_flw_lbl = tunnel->ipv6.flabel;
456 sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
457 ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
460 /* Outer header DSCP source */
461 if (!ipsec_xfrm->options.copy_dscp) {
462 sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_SA;
463 sa->w10.s.dscp = tunnel->ipv6.dscp;
465 sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
474 sa->w0.s.esn_en = !!ipsec_xfrm->options.esn;
/* 4500 is the IANA-assigned UDP port for IPsec NAT traversal. */
476 if (ipsec_xfrm->options.udp_encap) {
477 sa->w10.s.udp_src_port = 4500;
478 sa->w10.s.udp_dst_port = 4500;
481 offset = offsetof(struct roc_ot_ipsec_outb_sa, ctx);
482 /* Word offset for HW managed SA field */
483 sa->w0.s.hw_ctx_off = offset / 8;
484 /* Context push size is up to hmac_opad_ipad */
485 sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off;
486 /* Entire context size in 128B units */
487 offset = sizeof(struct roc_ot_ipsec_outb_sa);
488 sa->w0.s.ctx_size = (PLT_ALIGN_CEIL(offset, ROC_CTX_UNIT_128B) /
/* Enable hardware IP-ID generation for the outer header. */
493 sa->w2.s.ipid_gen = 1;
496 * CPT MC triggers expiry when counter value changes from 2 to 1. To
497 * mitigate this behaviour add 1 to the life counter values provided.
500 if (ipsec_xfrm->life.bytes_soft_limit) {
501 sa->ctx.soft_life = ipsec_xfrm->life.bytes_soft_limit + 1;
502 sa->w0.s.soft_life_dec = 1;
505 if (ipsec_xfrm->life.packets_soft_limit) {
506 sa->ctx.soft_life = ipsec_xfrm->life.packets_soft_limit + 1;
507 sa->w0.s.soft_life_dec = 1;
510 if (ipsec_xfrm->life.bytes_hard_limit) {
511 sa->ctx.hard_life = ipsec_xfrm->life.bytes_hard_limit + 1;
512 sa->w0.s.hard_life_dec = 1;
515 if (ipsec_xfrm->life.packets_hard_limit) {
516 sa->ctx.hard_life = ipsec_xfrm->life.packets_hard_limit + 1;
517 sa->w0.s.hard_life_dec = 1;
520 /* There are two words of CPT_CTX_HW_S for ucode to skip */
521 sa->w0.s.ctx_hdr_size = 1;
522 sa->w0.s.aop_valid = 1;
/* Return true when the cn10k inbound SA's valid bit (word2) is set. */
532 cnxk_ot_ipsec_inb_sa_valid(struct roc_ot_ipsec_inb_sa *sa)
534 return !!sa->w2.s.valid;
/* Return true when the cn10k outbound SA's valid bit (word2) is set. */
538 cnxk_ot_ipsec_outb_sa_valid(struct roc_ot_ipsec_outb_sa *sa)
540 return !!sa->w2.s.valid;
/* Validate the crypto transform chain against the SA direction:
 * ingress requires auth followed by cipher, egress requires cipher
 * followed by auth. A single (unchained) transform is rejected here —
 * AEAD is handled before this is called (see the caller at line 628).
 */
544 ipsec_xfrm_verify(struct rte_security_ipsec_xform *ipsec_xfrm,
545 struct rte_crypto_sym_xform *crypto_xfrm)
547 if (crypto_xfrm->next == NULL)
550 if (ipsec_xfrm->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
551 if (crypto_xfrm->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
552 crypto_xfrm->next->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
555 if (crypto_xfrm->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
556 crypto_xfrm->next->type != RTE_CRYPTO_SYM_XFORM_AUTH)
/* Fill the SA control fields shared by inbound and outbound cn9k ("onf")
 * IPsec SAs: direction, protocol, mode, cipher/auth algorithm selection,
 * AES key length, cipher key, salt, HMAC opad/ipad and SPI.
 *
 * NOTE: this listing is truncated; break/return statements and some lines
 * between the visible ones are not shown.
 */
564 onf_ipsec_sa_common_param_fill(struct roc_ie_onf_sa_ctl *ctl, uint8_t *salt,
565 uint8_t *cipher_key, uint8_t *hmac_opad_ipad,
566 struct rte_security_ipsec_xform *ipsec_xfrm,
567 struct rte_crypto_sym_xform *crypto_xfrm)
569 struct rte_crypto_sym_xform *auth_xfrm, *cipher_xfrm;
570 int rc, length, auth_key_len;
571 const uint8_t *key = NULL;
/* Direction decides the chain ordering: inbound is auth->cipher,
 * outbound is cipher->auth (mirrors the cn10k variant above).
 */
574 switch (ipsec_xfrm->direction) {
575 case RTE_SECURITY_IPSEC_SA_DIR_INGRESS:
576 ctl->direction = ROC_IE_SA_DIR_INBOUND;
577 auth_xfrm = crypto_xfrm;
578 cipher_xfrm = crypto_xfrm->next;
580 case RTE_SECURITY_IPSEC_SA_DIR_EGRESS:
581 ctl->direction = ROC_IE_SA_DIR_OUTBOUND;
582 cipher_xfrm = crypto_xfrm;
583 auth_xfrm = crypto_xfrm->next;
589 /* Set protocol - ESP vs AH */
590 switch (ipsec_xfrm->proto) {
591 case RTE_SECURITY_IPSEC_SA_PROTO_ESP:
592 ctl->ipsec_proto = ROC_IE_SA_PROTOCOL_ESP;
/* AH case falls through to an error in the full source — presumably AH
 * is unsupported on this path; the handling is not visible here.
 */
594 case RTE_SECURITY_IPSEC_SA_PROTO_AH:
600 /* Set mode - transport vs tunnel */
601 switch (ipsec_xfrm->mode) {
602 case RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT:
603 ctl->ipsec_mode = ROC_IE_SA_MODE_TRANSPORT;
605 case RTE_SECURITY_IPSEC_SA_MODE_TUNNEL:
606 ctl->ipsec_mode = ROC_IE_SA_MODE_TUNNEL;
612 /* Set encryption algorithm */
613 if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
614 length = crypto_xfrm->aead.key.length;
616 switch (crypto_xfrm->aead.algo) {
617 case RTE_CRYPTO_AEAD_AES_GCM:
618 ctl->enc_type = ROC_IE_ON_SA_ENC_AES_GCM;
619 ctl->auth_type = ROC_IE_ON_SA_AUTH_NULL;
620 memcpy(salt, &ipsec_xfrm->salt, 4);
621 key = crypto_xfrm->aead.key.data;
/* Non-AEAD path: validate chain ordering before using the transforms. */
628 rc = ipsec_xfrm_verify(ipsec_xfrm, crypto_xfrm);
632 switch (cipher_xfrm->cipher.algo) {
633 case RTE_CRYPTO_CIPHER_AES_CBC:
634 ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CBC;
636 case RTE_CRYPTO_CIPHER_AES_CTR:
637 ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CTR;
643 switch (auth_xfrm->auth.algo) {
644 case RTE_CRYPTO_AUTH_SHA1_HMAC:
645 ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA1;
/* Auth key must fit the SHA1 HMAC range accepted by this hardware. */
650 auth_key_len = auth_xfrm->auth.key.length;
651 if (auth_key_len < 20 || auth_key_len > 64)
654 key = cipher_xfrm->cipher.key.data;
655 length = cipher_xfrm->cipher.key.length;
657 ipsec_hmac_opad_ipad_gen(auth_xfrm, hmac_opad_ipad);
/* AES key length encoding from the byte length of the key. */
661 case ROC_CPT_AES128_KEY_LEN:
662 ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_128;
664 case ROC_CPT_AES192_KEY_LEN:
665 ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_192;
667 case ROC_CPT_AES256_KEY_LEN:
668 ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_256;
674 memcpy(cipher_key, key, length);
676 if (ipsec_xfrm->options.esn)
/* SPI is stored big-endian in the cn9k SA (unlike the cn10k path). */
679 ctl->spi = rte_cpu_to_be_32(ipsec_xfrm->spi);
/* Populate a cn9k inbound IPsec SA: delegates the shared fields to
 * onf_ipsec_sa_common_param_fill() (nonce acts as the salt area).
 */
684 cnxk_onf_ipsec_inb_sa_fill(struct roc_onf_ipsec_inb_sa *sa,
685 struct rte_security_ipsec_xform *ipsec_xfrm,
686 struct rte_crypto_sym_xform *crypto_xfrm)
688 struct roc_ie_onf_sa_ctl *ctl = &sa->ctl;
691 rc = onf_ipsec_sa_common_param_fill(ctl, sa->nonce, sa->cipher_key,
692 sa->hmac_key, ipsec_xfrm,
/* Populate a cn9k outbound IPsec SA: shared fields via
 * onf_ipsec_sa_common_param_fill(), plus tunnel outer addresses for
 * tunnel mode (IPv4 shown; the IPv6 handling is not visible in this
 * truncated listing).
 */
705 cnxk_onf_ipsec_outb_sa_fill(struct roc_onf_ipsec_outb_sa *sa,
706 struct rte_security_ipsec_xform *ipsec_xfrm,
707 struct rte_crypto_sym_xform *crypto_xfrm)
709 struct rte_security_ipsec_tunnel_param *tunnel = &ipsec_xfrm->tunnel;
710 struct roc_ie_onf_sa_ctl *ctl = &sa->ctl;
713 /* Fill common params */
714 rc = onf_ipsec_sa_common_param_fill(ctl, sa->nonce, sa->cipher_key,
715 sa->hmac_key, ipsec_xfrm,
720 if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
721 goto skip_tunnel_info;
723 /* Tunnel header info */
724 switch (tunnel->type) {
725 case RTE_SECURITY_IPSEC_TUNNEL_IPV4:
726 memcpy(&sa->ip_src, &tunnel->ipv4.src_ip,
727 sizeof(struct in_addr));
728 memcpy(&sa->ip_dst, &tunnel->ipv4.dst_ip,
729 sizeof(struct in_addr));
731 case RTE_SECURITY_IPSEC_TUNNEL_IPV6:
/* Return true when the cn9k inbound SA's control valid bit is set. */
746 cnxk_onf_ipsec_inb_sa_valid(struct roc_onf_ipsec_inb_sa *sa)
748 return !!sa->ctl.valid;
/* Return true when the cn9k outbound SA's control valid bit is set. */
752 cnxk_onf_ipsec_outb_sa_valid(struct roc_onf_ipsec_outb_sa *sa)
754 return !!sa->ctl.valid;
/* Return the on-wire IV length for the given cipher/auth/AEAD algorithm
 * combination. AES-GCM takes priority; otherwise the cipher (and in the
 * full source, AES-GMAC auth) determines the length.
 *
 * NOTE: truncated listing — return statements and some cases are missing.
 */
758 cnxk_ipsec_ivlen_get(enum rte_crypto_cipher_algorithm c_algo,
759 enum rte_crypto_auth_algorithm a_algo,
760 enum rte_crypto_aead_algorithm aead_algo)
764 if (aead_algo == RTE_CRYPTO_AEAD_AES_GCM)
768 case RTE_CRYPTO_CIPHER_AES_CTR:
771 case RTE_CRYPTO_CIPHER_3DES_CBC:
772 ivlen = ROC_CPT_DES_BLOCK_LENGTH;
774 case RTE_CRYPTO_CIPHER_AES_CBC:
775 ivlen = ROC_CPT_AES_BLOCK_LENGTH;
782 case RTE_CRYPTO_AUTH_AES_GMAC:
/* Return the ICV (integrity check value) length for the given
 * cipher/auth/AEAD combination.
 *
 * NOTE: truncated listing — the actual length assignments and return
 * statements per case are not visible here.
 */
793 cnxk_ipsec_icvlen_get(enum rte_crypto_cipher_algorithm c_algo,
794 enum rte_crypto_auth_algorithm a_algo,
795 enum rte_crypto_aead_algorithm aead_algo)
802 case RTE_CRYPTO_AUTH_NULL:
805 case RTE_CRYPTO_AUTH_SHA1_HMAC:
808 case RTE_CRYPTO_AUTH_SHA256_HMAC:
809 case RTE_CRYPTO_AUTH_AES_GMAC:
812 case RTE_CRYPTO_AUTH_SHA384_HMAC:
815 case RTE_CRYPTO_AUTH_SHA512_HMAC:
823 case RTE_CRYPTO_AEAD_AES_GCM:
/* Return the ESP padding roundup (cipher block alignment) in bytes for
 * outbound packets; defaults to 4 (minimum ESP alignment), overridden per
 * cipher. AES-GCM takes priority over the cipher algorithm.
 *
 * NOTE: truncated listing — the per-case assignments are not visible.
 */
834 cnxk_ipsec_outb_roundup_byte(enum rte_crypto_cipher_algorithm c_algo,
835 enum rte_crypto_aead_algorithm aead_algo)
837 uint8_t roundup_byte = 4;
839 if (aead_algo == RTE_CRYPTO_AEAD_AES_GCM)
843 case RTE_CRYPTO_CIPHER_AES_CTR:
846 case RTE_CRYPTO_CIPHER_AES_CBC:
849 case RTE_CRYPTO_CIPHER_3DES_CBC:
852 case RTE_CRYPTO_CIPHER_NULL:
/* Compute the outbound packet length expansion parameters (partial length,
 * roundup length/byte and maximum extended length) for an SA, based on
 * protocol (ESP/AH), mode (tunnel/transport), algorithms and UDP
 * encapsulation. Results are written into *rlens.
 *
 * NOTE: this listing is truncated and the function continues past the
 * visible lines (the final return is not shown).
 */
863 cnxk_ipsec_outb_rlens_get(struct cnxk_ipsec_outb_rlens *rlens,
864 struct rte_security_ipsec_xform *ipsec_xfrm,
865 struct rte_crypto_sym_xform *crypto_xfrm)
867 struct rte_security_ipsec_tunnel_param *tunnel = &ipsec_xfrm->tunnel;
868 enum rte_crypto_cipher_algorithm c_algo = RTE_CRYPTO_CIPHER_NULL;
869 enum rte_crypto_auth_algorithm a_algo = RTE_CRYPTO_AUTH_NULL;
870 enum rte_crypto_aead_algorithm aead_algo = 0;
871 uint16_t partial_len = 0;
872 uint8_t roundup_byte = 0;
873 int8_t roundup_len = 0;
875 memset(rlens, 0, sizeof(struct cnxk_ipsec_outb_rlens));
877 /* Get Cipher and Auth algo */
/* The chain may be AEAD alone, or cipher/auth in either order; pick the
 * algorithms out of whichever transforms are present.
 */
878 if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
879 aead_algo = crypto_xfrm->aead.algo;
881 if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
882 c_algo = crypto_xfrm->cipher.algo;
884 a_algo = crypto_xfrm->auth.algo;
886 if (crypto_xfrm->next) {
887 if (crypto_xfrm->next->type ==
888 RTE_CRYPTO_SYM_XFORM_CIPHER)
889 c_algo = crypto_xfrm->next->cipher.algo;
891 a_algo = crypto_xfrm->next->auth.algo;
/* Fixed header/trailer contribution of the IPsec protocol. */
895 if (ipsec_xfrm->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
896 partial_len = ROC_CPT_ESP_HDR_LEN;
897 roundup_len = ROC_CPT_ESP_TRL_LEN;
899 partial_len = ROC_CPT_AH_HDR_LEN;
/* Tunnel mode adds the outer IP header. */
902 if (ipsec_xfrm->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
903 if (tunnel->type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
904 partial_len += ROC_CPT_TUNNEL_IPV4_HDR_LEN;
906 partial_len += ROC_CPT_TUNNEL_IPV6_HDR_LEN;
909 partial_len += cnxk_ipsec_ivlen_get(c_algo, a_algo, aead_algo);
910 partial_len += cnxk_ipsec_icvlen_get(c_algo, a_algo, aead_algo);
911 roundup_byte = cnxk_ipsec_outb_roundup_byte(c_algo, aead_algo);
913 if (ipsec_xfrm->options.udp_encap)
914 partial_len += sizeof(struct rte_udp_hdr);
916 rlens->partial_len = partial_len;
917 rlens->roundup_len = roundup_len;
918 rlens->roundup_byte = roundup_byte;
/* Worst-case growth: fixed overhead plus maximal padding. */
919 rlens->max_extended_len = partial_len + roundup_len + roundup_byte;