1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2022 Marvell.
9 #include <rte_ethdev.h>
10 #include <rte_malloc.h>
11 #include <rte_security.h>
14 #include "test_security_inline_proto_vectors.h"
16 #ifdef RTE_EXEC_ENV_WINDOWS
18 test_inline_ipsec(void)
20 printf("Inline ipsec not supported on Windows, skipping test\n");
26 #define NB_ETHPORTS_USED 1
27 #define MEMPOOL_CACHE_SIZE 32
28 #define MAX_PKT_BURST 32
29 #define RTE_TEST_RX_DESC_DEFAULT 1024
30 #define RTE_TEST_TX_DESC_DEFAULT 1024
31 #define RTE_PORT_ALL (~(uint16_t)0x0)
33 #define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
34 #define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
35 #define RX_WTHRESH 0 /**< Default values of RX write-back threshold reg. */
37 #define TX_PTHRESH 32 /**< Default values of TX prefetch threshold reg. */
38 #define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
39 #define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
41 #define MAX_TRAFFIC_BURST 2048
44 extern struct ipsec_test_data pkt_aes_128_gcm;
45 extern struct ipsec_test_data pkt_aes_192_gcm;
46 extern struct ipsec_test_data pkt_aes_256_gcm;
47 extern struct ipsec_test_data pkt_aes_128_gcm_frag;
48 extern struct ipsec_test_data pkt_aes_128_cbc_null;
49 extern struct ipsec_test_data pkt_null_aes_xcbc;
50 extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha384;
51 extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha512;
53 static struct rte_mempool *mbufpool;
54 static struct rte_mempool *sess_pool;
55 static struct rte_mempool *sess_priv_pool;
56 /* ethernet addresses of ports */
57 static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
59 static struct rte_eth_conf port_conf = {
61 .mq_mode = RTE_ETH_MQ_RX_NONE,
63 .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM |
64 RTE_ETH_RX_OFFLOAD_SECURITY,
67 .mq_mode = RTE_ETH_MQ_TX_NONE,
68 .offloads = RTE_ETH_TX_OFFLOAD_SECURITY |
69 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
71 .lpbk_mode = 1, /* enable loopback */
74 static struct rte_eth_rxconf rx_conf = {
76 .pthresh = RX_PTHRESH,
77 .hthresh = RX_HTHRESH,
78 .wthresh = RX_WTHRESH,
83 static struct rte_eth_txconf tx_conf = {
85 .pthresh = TX_PTHRESH,
86 .hthresh = TX_HTHRESH,
87 .wthresh = TX_WTHRESH,
89 .tx_free_thresh = 32, /* Use PMD default values */
90 .tx_rs_thresh = 32, /* Use PMD default values */
95 static uint64_t link_mbps;
97 static struct rte_flow *default_flow[RTE_MAX_ETHPORTS];
99 /* Create Inline IPsec session */
/*
 * Create an inline-protocol IPsec security session on ethdev @portid for the
 * SA described by test vector @sa.  On success the caller's @sess, @ctx and
 * @ol_flags are filled in.  @sess_conf is the caller-provided conf whose
 * crypto_xform chain has already been pointed at the right xform structs.
 *
 * NOTE(review): this extract is truncated — several lines (return statements,
 * closing braces, tunnel-verify option assignments) are not visible here.
 */
101 create_inline_ipsec_session(struct ipsec_test_data *sa, uint16_t portid,
102 struct rte_security_session **sess, struct rte_security_ctx **ctx,
103 uint32_t *ol_flags, const struct ipsec_test_flags *flags,
104 struct rte_security_session_conf *sess_conf)
/* Fixed tunnel endpoint addresses used by all tunnel-mode test sessions. */
106 uint16_t src_v6[8] = {0x2607, 0xf8b0, 0x400c, 0x0c03, 0x0000, 0x0000,
108 uint16_t dst_v6[8] = {0x2001, 0x0470, 0xe5bf, 0xdead, 0x4957, 0x2174,
110 uint32_t src_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 2));
111 uint32_t dst_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 1));
112 struct rte_security_capability_idx sec_cap_idx;
113 const struct rte_security_capability *sec_cap;
114 enum rte_security_ipsec_sa_direction dir;
115 struct rte_security_ctx *sec_ctx;
/* Session is an inline-protocol IPsec session built from the SA's xform. */
118 sess_conf->action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
119 sess_conf->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
120 sess_conf->ipsec = sa->ipsec_xform;
122 dir = sa->ipsec_xform.direction;
123 verify = flags->tunnel_hdr_verify;
/* Tunnel header verification only applies on ingress. */
125 if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) && verify) {
126 if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR)
128 else if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR)
/* Tunnel mode: fill outer IPv4/IPv6 header fields into the session conf. */
132 if (sa->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
133 if (sa->ipsec_xform.tunnel.type ==
134 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
135 memcpy(&sess_conf->ipsec.tunnel.ipv4.src_ip, &src_v4,
137 memcpy(&sess_conf->ipsec.tunnel.ipv4.dst_ip, &dst_v4,
/* Outer DF bit is forced opposite to the inner one per test flags. */
140 if (flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
141 sess_conf->ipsec.tunnel.ipv4.df = 0;
143 if (flags->df == TEST_IPSEC_SET_DF_1_INNER_0)
144 sess_conf->ipsec.tunnel.ipv4.df = 1;
/* Likewise for the outer DSCP field (IPv4 and IPv6 variants). */
146 if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
147 sess_conf->ipsec.tunnel.ipv4.dscp = 0;
149 if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0)
150 sess_conf->ipsec.tunnel.ipv4.dscp =
153 if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
154 sess_conf->ipsec.tunnel.ipv6.dscp = 0;
156 if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0)
157 sess_conf->ipsec.tunnel.ipv6.dscp =
160 memcpy(&sess_conf->ipsec.tunnel.ipv6.src_addr, &src_v6,
162 memcpy(&sess_conf->ipsec.tunnel.ipv6.dst_addr, &dst_v6,
167 /* Save SA as userdata for the security session. When
168 * the packet is received, this userdata will be
169 * retrieved using the metadata from the packet.
171 * The PMD is expected to set similar metadata for other
172 * operations, like rte_eth_event, which are tied to
173 * security session. In such cases, the userdata could
174 * be obtained to uniquely identify the security
175 * parameters denoted.
178 sess_conf->userdata = (void *) sa;
/* The port must expose a security context to support inline IPsec. */
180 sec_ctx = (struct rte_security_ctx *)rte_eth_dev_get_sec_ctx(portid);
181 if (sec_ctx == NULL) {
182 printf("Ethernet device doesn't support security features.\n");
/* Look up device capabilities matching this session's proto/mode/dir. */
186 sec_cap_idx.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
187 sec_cap_idx.protocol = RTE_SECURITY_PROTOCOL_IPSEC;
188 sec_cap_idx.ipsec.proto = sess_conf->ipsec.proto;
189 sec_cap_idx.ipsec.mode = sess_conf->ipsec.mode;
190 sec_cap_idx.ipsec.direction = sess_conf->ipsec.direction;
191 sec_cap = rte_security_capability_get(sec_ctx, &sec_cap_idx);
192 if (sec_cap == NULL) {
193 printf("No capabilities registered\n");
/* Salt only applies to AEAD / AES-GMAC algorithms. */
197 if (sa->aead || sa->aes_gmac)
198 memcpy(&sess_conf->ipsec.salt, sa->salt.data,
199 RTE_MIN(sizeof(sess_conf->ipsec.salt), sa->salt.len));
201 /* Copy cipher session parameters */
203 rte_memcpy(sess_conf->crypto_xform, &sa->xform.aead,
204 sizeof(struct rte_crypto_sym_xform));
205 sess_conf->crypto_xform->aead.key.data = sa->key.data;
206 /* Verify crypto capabilities */
207 if (test_ipsec_crypto_caps_aead_verify(sec_cap,
208 sess_conf->crypto_xform) != 0) {
210 "Crypto capabilities not supported\n");
/* Chained mode: egress uses cipher-then-auth order, ingress the reverse. */
214 if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
215 rte_memcpy(&sess_conf->crypto_xform->cipher,
216 &sa->xform.chain.cipher.cipher,
217 sizeof(struct rte_crypto_cipher_xform));
219 rte_memcpy(&sess_conf->crypto_xform->next->auth,
220 &sa->xform.chain.auth.auth,
221 sizeof(struct rte_crypto_auth_xform));
222 sess_conf->crypto_xform->cipher.key.data =
224 sess_conf->crypto_xform->next->auth.key.data =
226 /* Verify crypto capabilities */
227 if (test_ipsec_crypto_caps_cipher_verify(sec_cap,
228 sess_conf->crypto_xform) != 0) {
230 "Cipher crypto capabilities not supported\n");
234 if (test_ipsec_crypto_caps_auth_verify(sec_cap,
235 sess_conf->crypto_xform->next) != 0) {
237 "Auth crypto capabilities not supported\n");
241 rte_memcpy(&sess_conf->crypto_xform->next->cipher,
242 &sa->xform.chain.cipher.cipher,
243 sizeof(struct rte_crypto_cipher_xform));
244 rte_memcpy(&sess_conf->crypto_xform->auth,
245 &sa->xform.chain.auth.auth,
246 sizeof(struct rte_crypto_auth_xform));
247 sess_conf->crypto_xform->auth.key.data =
249 sess_conf->crypto_xform->next->cipher.key.data =
252 /* Verify crypto capabilities */
253 if (test_ipsec_crypto_caps_cipher_verify(sec_cap,
254 sess_conf->crypto_xform->next) != 0) {
256 "Cipher crypto capabilities not supported\n");
260 if (test_ipsec_crypto_caps_auth_verify(sec_cap,
261 sess_conf->crypto_xform) != 0) {
263 "Auth crypto capabilities not supported\n");
/* Finally check the IPsec-level (non-crypto) capabilities. */
269 if (test_ipsec_sec_caps_verify(&sess_conf->ipsec, sec_cap, false) != 0)
272 if ((sa->ipsec_xform.direction ==
273 RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
274 (sa->ipsec_xform.options.iv_gen_disable == 1)) {
275 /* Set env variable when IV generation is disabled */
/* AEAD/GMAC vectors carry an 8-byte IV, others 16 bytes. */
278 int iv_len = (sa->aead || sa->aes_gmac) ? 8 : 16;
280 for (; j < iv_len; j++)
281 len += snprintf(arr+len, sizeof(arr) - len,
282 "0x%x, ", sa->iv.data[j]);
/* PMD reads the fixed IV from this env var for known-vector tests. */
283 setenv("ETH_SEC_IV_OVR", arr, 1);
286 *sess = rte_security_session_create(sec_ctx,
287 sess_conf, sess_pool, sess_priv_pool);
289 printf("SEC Session init failed.\n");
/* Report the device's offload flag requirements back to the caller. */
293 *ol_flags = sec_cap->ol_flags;
299 /* Check the link status of all ports in up to 3s, and print them finally */
/*
 * Polls link state of every port selected by @port_mask, up to
 * MAX_CHECK_TIME * CHECK_INTERVAL ms, then prints the final status.
 * Also records the first observed link speed into the global link_mbps.
 * NOTE(review): extract is truncated — some loop control lines missing.
 */
301 check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
303 #define CHECK_INTERVAL 100 /* 100ms */
304 #define MAX_CHECK_TIME 30 /* 3s (30 * 100ms) in total */
306 uint8_t count, all_ports_up, print_flag = 0;
307 struct rte_eth_link link;
309 char link_status[RTE_ETH_LINK_MAX_STR_LEN];
311 printf("Checking link statuses...\n");
313 for (count = 0; count <= MAX_CHECK_TIME; count++) {
315 for (portid = 0; portid < port_num; portid++) {
/* Skip ports not selected by the caller's mask. */
316 if ((port_mask & (1 << portid)) == 0)
318 memset(&link, 0, sizeof(link));
319 ret = rte_eth_link_get_nowait(portid, &link);
323 printf("Port %u link get failed: %s\n",
324 portid, rte_strerror(-ret));
328 /* print link status if flag set */
329 if (print_flag == 1) {
/* Remember the first non-zero link speed for later use. */
330 if (link.link_status && link_mbps == 0)
331 link_mbps = link.link_speed;
333 rte_eth_link_to_str(link_status,
334 sizeof(link_status), &link);
335 printf("Port %d %s\n", portid, link_status);
338 /* clear all_ports_up flag if any link down */
339 if (link.link_status == RTE_ETH_LINK_DOWN) {
344 /* after finally printing all link status, get out */
/* Not all ports up yet: wait before the next polling round. */
348 if (all_ports_up == 0) {
350 rte_delay_ms(CHECK_INTERVAL)
353 /* set the print_flag if all ports up or timeout */
354 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
360 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
362 char buf[RTE_ETHER_ADDR_FMT_SIZE];
363 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
364 printf("%s%s", name, buf);
/*
 * Copy @len bytes from flat buffer @buf into a (possibly multi-segment)
 * mbuf @pkt, starting at byte @offset into the chain.  Walks segments to
 * find the starting one, then fills segment by segment.
 * NOTE(review): extract is truncated — segment-advance statements inside
 * the copy loop are not visible here.
 */
368 copy_buf_to_pkt_segs(const uint8_t *buf, unsigned int len,
369 struct rte_mbuf *pkt, unsigned int offset)
371 unsigned int copied = 0;
372 unsigned int copy_len;
373 struct rte_mbuf *seg;
/* Skip whole segments until @offset lands inside @seg. */
377 while (offset >= seg->data_len) {
378 offset -= seg->data_len;
/* Room left in the current segment after the offset. */
381 copy_len = seg->data_len - offset;
382 seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
/* Fill complete segments while more than one segment of data remains. */
383 while (len > copy_len) {
384 rte_memcpy(seg_buf, buf + copied, (size_t) copy_len);
388 seg_buf = rte_pktmbuf_mtod(seg, void *);
/* Final partial copy into the last touched segment. */
390 rte_memcpy(seg_buf, buf + copied, (size_t) len);
393 static inline struct rte_mbuf *
/*
 * Allocate an mbuf from @mp and build a test packet: a dummy Ethernet
 * header (chosen v4/v6 by inspecting the first nibble of @data) followed
 * by @len bytes of @data.  Sets l2_len/l3_len for the offload path.
 * NOTE(review): extract is truncated — alloc-failure check and return
 * statement are not visible here.
 */
394 init_packet(struct rte_mempool *mp, const uint8_t *data, unsigned int len)
396 struct rte_mbuf *pkt;
398 pkt = rte_pktmbuf_alloc(mp);
/* High nibble of the first payload byte is the IP version field. */
401 if (((data[0] & 0xF0) >> 4) == IPVERSION) {
402 rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN),
403 &dummy_ipv4_eth_hdr, RTE_ETHER_HDR_LEN);
404 pkt->l3_len = sizeof(struct rte_ipv4_hdr);
406 rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN),
407 &dummy_ipv6_eth_hdr, RTE_ETHER_HDR_LEN);
408 pkt->l3_len = sizeof(struct rte_ipv6_hdr);
410 pkt->l2_len = RTE_ETHER_HDR_LEN;
/* Payload fits in the first segment: append directly; else spread it
 * across segments with copy_buf_to_pkt_segs(). */
412 if (pkt->buf_len > (len + RTE_ETHER_HDR_LEN))
413 rte_memcpy(rte_pktmbuf_append(pkt, len), data, len);
415 copy_buf_to_pkt_segs(data, len, pkt, RTE_ETHER_HDR_LEN);
/*
 * Lazily create the three mempools used by the suite: the packet mbuf
 * pool, the security session pool and the session private-data pool
 * (both sized from the device's reported session size).
 * NOTE(review): extract is truncated — error returns / closing braces
 * are not visible here.
 */
420 init_mempools(unsigned int nb_mbuf)
422 struct rte_security_ctx *sec_ctx;
423 uint16_t nb_sess = 512;
427 if (mbufpool == NULL) {
428 snprintf(s, sizeof(s), "mbuf_pool");
429 mbufpool = rte_pktmbuf_pool_create(s, nb_mbuf,
430 MEMPOOL_CACHE_SIZE, 0,
431 RTE_MBUF_DEFAULT_BUF_SIZE, SOCKET_ID_ANY);
432 if (mbufpool == NULL) {
433 printf("Cannot init mbuf pool\n");
436 printf("Allocated mbuf pool\n");
/* Session pools are sized by the device's security session size. */
439 sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
440 if (sec_ctx == NULL) {
441 printf("Device does not support Security ctx\n");
444 sess_sz = rte_security_session_get_size(sec_ctx);
445 if (sess_pool == NULL) {
446 snprintf(s, sizeof(s), "sess_pool");
447 sess_pool = rte_mempool_create(s, nb_sess, sess_sz,
448 MEMPOOL_CACHE_SIZE, 0,
449 NULL, NULL, NULL, NULL,
451 if (sess_pool == NULL) {
452 printf("Cannot init sess pool\n");
455 printf("Allocated sess pool\n");
457 if (sess_priv_pool == NULL) {
458 snprintf(s, sizeof(s), "sess_priv_pool");
459 sess_priv_pool = rte_mempool_create(s, nb_sess, sess_sz,
460 MEMPOOL_CACHE_SIZE, 0,
461 NULL, NULL, NULL, NULL,
463 if (sess_priv_pool == NULL) {
464 printf("Cannot init sess_priv pool\n");
467 printf("Allocated sess_priv pool\n");
/*
 * Install a catch-all rte_flow rule on @portid directing every ESP packet
 * to the SECURITY action, and remember it in default_flow[] so it can be
 * destroyed later.  NOTE(review): extract is truncated — attr.ingress
 * setup and return statements are not visible here.
 */
474 create_default_flow(uint16_t portid)
476 struct rte_flow_action action[2];
477 struct rte_flow_item pattern[2];
478 struct rte_flow_attr attr = {0};
479 struct rte_flow_error err;
480 struct rte_flow *flow;
483 /* Add the default rte_flow to enable SECURITY for all ESP packets */
485 pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP;
486 pattern[0].spec = NULL;
487 pattern[0].mask = NULL;
488 pattern[0].last = NULL;
489 pattern[1].type = RTE_FLOW_ITEM_TYPE_END;
491 action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
492 action[0].conf = NULL;
493 action[1].type = RTE_FLOW_ACTION_TYPE_END;
494 action[1].conf = NULL;
/* Validate first so unsupported drivers fail cleanly before create. */
498 ret = rte_flow_validate(portid, &attr, pattern, action, &err);
500 printf("\nValidate flow failed, ret = %d\n", ret);
503 flow = rte_flow_create(portid, &attr, pattern, action, &err);
505 printf("\nDefault flow rule create failed\n");
/* Keep the handle so destroy_default_flow() can tear it down. */
509 default_flow[portid] = flow;
515 destroy_default_flow(uint16_t portid)
517 struct rte_flow_error err;
520 if (!default_flow[portid])
522 ret = rte_flow_destroy(portid, default_flow[portid], &err);
524 printf("\nDefault flow rule destroy failed\n");
527 default_flow[portid] = NULL;
530 struct rte_mbuf **tx_pkts_burst;
531 struct rte_mbuf **rx_pkts_burst;
/*
 * Core datapath test: create an inline IPsec session for @td, transmit
 * nb_pkts test packets through the loopback port, receive them back and
 * run post-processing / stats verification.  Decrypted/encrypted results
 * are optionally captured in @res_d.
 * NOTE(review): extract is truncated — declarations of ret/i/j/ol_flags,
 * several braces and gotos/returns are not visible here.
 */
534 test_ipsec_inline_proto_process(struct ipsec_test_data *td,
535 struct ipsec_test_data *res_d,
538 const struct ipsec_test_flags *flags)
540 struct rte_security_session_conf sess_conf = {0};
541 struct rte_crypto_sym_xform cipher = {0};
542 struct rte_crypto_sym_xform auth = {0};
543 struct rte_crypto_sym_xform aead = {0};
544 struct rte_security_session *ses;
545 struct rte_security_ctx *ctx;
546 int nb_rx = 0, nb_sent;
550 memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * nb_pkts);
/* AEAD uses a single xform; chained mode orders cipher/auth by direction. */
553 sess_conf.crypto_xform = &aead;
555 if (td->ipsec_xform.direction ==
556 RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
557 sess_conf.crypto_xform = &cipher;
558 sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
559 sess_conf.crypto_xform->next = &auth;
560 sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
562 sess_conf.crypto_xform = &auth;
563 sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
564 sess_conf.crypto_xform->next = &cipher;
565 sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
569 /* Create Inline IPsec session. */
570 ret = create_inline_ipsec_session(td, port_id, &ses, &ctx,
571 &ol_flags, flags, &sess_conf);
/* Ingress needs the default ESP flow rule to steer packets to SECURITY. */
575 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
576 ret = create_default_flow(port_id);
580 for (i = 0; i < nb_pkts; i++) {
581 tx_pkts_burst[i] = init_packet(mbufpool, td->input_text.data,
583 if (tx_pkts_burst[i] == NULL) {
585 rte_pktmbuf_free(tx_pkts_burst[i]);
/* Apply per-flag packet mutations (corruption, header tweaks, ...). */
590 if (test_ipsec_pkt_update(rte_pktmbuf_mtod_offset(tx_pkts_burst[i],
591 uint8_t *, RTE_ETHER_HDR_LEN), flags)) {
593 rte_pktmbuf_free(tx_pkts_burst[i]);
/* Egress packets need the security offload flag (and metadata if the
 * device requires it) before transmission. */
598 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
599 if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
600 rte_security_set_pkt_metadata(ctx, ses,
601 tx_pkts_burst[i], NULL);
602 tx_pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
605 /* Send packet to ethdev for inline IPsec processing. */
606 nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_pkts);
607 if (nb_sent != nb_pkts) {
608 printf("\nUnable to TX %d packets", nb_pkts);
609 for ( ; nb_sent < nb_pkts; nb_sent++)
610 rte_pktmbuf_free(tx_pkts_burst[nb_sent]);
617 /* Receive back packet on loopback interface. */
620 nb_rx += rte_eth_rx_burst(port_id, 0, &rx_pkts_burst[nb_rx],
622 if (nb_rx >= nb_sent)
/* Retry a few rounds; loopback delivery is not instantaneous. */
624 } while (j++ < 5 || nb_rx == 0);
626 if (nb_rx != nb_sent) {
627 printf("\nUnable to RX all %d packets", nb_sent);
629 rte_pktmbuf_free(rx_pkts_burst[nb_rx]);
634 for (i = 0; i < nb_rx; i++) {
/* Strip the dummy Ethernet header before result verification. */
635 rte_pktmbuf_adj(rx_pkts_burst[i], RTE_ETHER_HDR_LEN);
637 ret = test_ipsec_post_process(rx_pkts_burst[i], td,
638 res_d, silent, flags);
639 if (ret != TEST_SUCCESS) {
640 for ( ; i < nb_rx; i++)
641 rte_pktmbuf_free(rx_pkts_burst[i]);
645 ret = test_ipsec_stats_verify(ctx, ses, flags,
646 td->ipsec_xform.direction);
647 if (ret != TEST_SUCCESS) {
648 for ( ; i < nb_rx; i++)
649 rte_pktmbuf_free(rx_pkts_burst[i]);
653 rte_pktmbuf_free(rx_pkts_burst[i]);
654 rx_pkts_burst[i] = NULL;
658 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
659 destroy_default_flow(port_id);
661 /* Destroy session so that other cases can create the session again */
662 rte_security_session_destroy(ctx, ses);
/*
 * Run outbound-then-inbound processing with @flags for every algorithm
 * combination in alg_list, counting passes and failures.
 * NOTE(review): extract is truncated — continue/return statements and
 * pass/fail counter updates are not visible here.
 */
669 test_ipsec_inline_proto_all(const struct ipsec_test_flags *flags)
671 struct ipsec_test_data td_outb;
672 struct ipsec_test_data td_inb;
673 unsigned int i, nb_pkts = 1, pass_cnt = 0, fail_cnt = 0;
/* Expiry / IV-generation cases need a full burst of packets. */
676 if (flags->iv_gen || flags->sa_expiry_pkts_soft ||
677 flags->sa_expiry_pkts_hard)
678 nb_pkts = IPSEC_TEST_PACKETS_MAX;
680 for (i = 0; i < RTE_DIM(alg_list); i++) {
681 test_ipsec_td_prepare(alg_list[i].param1,
686 enum rte_crypto_cipher_algorithm cipher_alg;
687 enum rte_crypto_auth_algorithm auth_alg;
689 cipher_alg = td_outb.xform.chain.cipher.cipher.algo;
690 auth_alg = td_outb.xform.chain.auth.auth.algo;
/* AES-GMAC combinations are only valid with NULL cipher. */
692 if (td_outb.aes_gmac && cipher_alg != RTE_CRYPTO_CIPHER_NULL)
695 /* ICV is not applicable for NULL auth */
696 if (flags->icv_corrupt &&
697 auth_alg == RTE_CRYPTO_AUTH_NULL)
700 /* IV is not applicable for NULL cipher */
702 cipher_alg == RTE_CRYPTO_CIPHER_NULL)
706 if (flags->udp_encap)
707 td_outb.ipsec_xform.options.udp_encap = 1;
/* Outbound pass: encrypts input, capturing result into td_inb. */
709 ret = test_ipsec_inline_proto_process(&td_outb, &td_inb, nb_pkts,
711 if (ret == TEST_SKIPPED)
714 if (ret == TEST_FAILED) {
715 printf("\n TEST FAILED");
716 test_ipsec_display_alg(alg_list[i].param1,
/* Derive the inbound vector from the captured outbound result. */
722 test_ipsec_td_update(&td_inb, &td_outb, 1, flags);
724 ret = test_ipsec_inline_proto_process(&td_inb, NULL, nb_pkts,
726 if (ret == TEST_SKIPPED)
729 if (ret == TEST_FAILED) {
730 printf("\n TEST FAILED");
731 test_ipsec_display_alg(alg_list[i].param1,
737 if (flags->display_alg)
738 test_ipsec_display_alg(alg_list[i].param1,
744 printf("Tests passed: %d, failed: %d", pass_cnt, fail_cnt);
/*
 * Per-test setup: start the test port, enable promiscuous mode and wait
 * for the link to come up.  NOTE(review): extract is truncated — return
 * statements are not visible here.
 */
755 ut_setup_inline_ipsec(void)
760 ret = rte_eth_dev_start(port_id);
762 printf("rte_eth_dev_start: err=%d, port=%d\n",
766 /* always enable promiscuous */
767 ret = rte_eth_promiscuous_enable(port_id);
769 printf("rte_eth_promiscuous_enable: err=%s, port=%d\n",
770 rte_strerror(-ret), port_id);
774 check_all_ports_link_status(1, RTE_PORT_ALL);
/*
 * Per-test teardown: stop every available ethdev, reporting (but not
 * failing on) stop errors.
 */
780 ut_teardown_inline_ipsec(void)
786 RTE_ETH_FOREACH_DEV(portid) {
787 ret = rte_eth_dev_stop(portid);
789 printf("rte_eth_dev_stop: err=%s, port=%u\n",
790 rte_strerror(-ret), portid);
/*
 * Suite-level setup: verify at least one port exists, create mempools and
 * TX/RX burst arrays, configure port 0 with one RX and one TX queue, and
 * populate the algorithm list.  NOTE(review): extract is truncated —
 * error returns and some allocation arguments are not visible here.
 */
795 inline_ipsec_testsuite_setup(void)
801 uint16_t nb_rx_queue = 1, nb_tx_queue = 1;
803 printf("Start inline IPsec test.\n");
805 nb_ports = rte_eth_dev_count_avail();
806 if (nb_ports < NB_ETHPORTS_USED) {
807 printf("At least %u port(s) used for test\n",
812 ret = init_mempools(NB_MBUF);
/* Burst arrays are allocated once and reused by every test case. */
816 if (tx_pkts_burst == NULL) {
817 tx_pkts_burst = (struct rte_mbuf **)rte_calloc("tx_buff",
820 RTE_CACHE_LINE_SIZE);
824 rx_pkts_burst = (struct rte_mbuf **)rte_calloc("rx_buff",
827 RTE_CACHE_LINE_SIZE);
832 printf("Generate %d packets\n", MAX_TRAFFIC_BURST);
834 nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
835 nb_txd = RTE_TEST_TX_DESC_DEFAULT;
837 /* configuring port 0 for the test is enough */
840 ret = rte_eth_dev_configure(port_id, nb_rx_queue,
841 nb_tx_queue, &port_conf);
843 printf("Cannot configure device: err=%d, port=%d\n",
847 ret = rte_eth_macaddr_get(port_id, &ports_eth_addr[port_id]);
849 printf("Cannot get mac address: err=%d, port=%d\n",
853 printf("Port %u ", port_id);
854 print_ethaddr("Address:", &ports_eth_addr[port_id]);
858 ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
859 SOCKET_ID_ANY, &tx_conf);
861 printf("rte_eth_tx_queue_setup: err=%d, port=%d\n",
866 ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, SOCKET_ID_ANY,
869 printf("rte_eth_rx_queue_setup: err=%d, port=%d\n",
/* Build the list of algorithm combinations exercised by the suite. */
873 test_ipsec_alg_list_populate();
879 inline_ipsec_testsuite_teardown(void)
885 RTE_ETH_FOREACH_DEV(portid) {
886 ret = rte_eth_dev_reset(portid);
888 printf("rte_eth_dev_reset: err=%s, port=%u\n",
889 rte_strerror(-ret), port_id);
/*
 * Outbound known-vector case: copy the test vector, disable IV generation
 * (so the vector's fixed IV is used) and run a single-packet outbound pass.
 * NOTE(review): extract is truncated — part of the condition guarding the
 * iv_gen_disable assignment is not visible here.
 */
894 test_ipsec_inline_proto_known_vec(const void *test_data)
896 struct ipsec_test_data td_outb;
897 struct ipsec_test_flags flags;
899 memset(&flags, 0, sizeof(flags));
901 memcpy(&td_outb, test_data, sizeof(td_outb));
904 td_outb.xform.chain.cipher.cipher.algo != RTE_CRYPTO_CIPHER_NULL) {
905 /* Disable IV gen to be able to test with known vectors */
906 td_outb.ipsec_xform.options.iv_gen_disable = 1;
909 return test_ipsec_inline_proto_process(&td_outb, NULL, 1,
/*
 * Inbound known-vector case: if the vector is an egress SA, derive the
 * matching inbound vector from it; otherwise use it directly.  Runs a
 * single-packet inbound pass.
 */
914 test_ipsec_inline_proto_known_vec_inb(const void *test_data)
916 const struct ipsec_test_data *td = test_data;
917 struct ipsec_test_flags flags;
918 struct ipsec_test_data td_inb;
920 memset(&flags, 0, sizeof(flags));
922 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
923 test_ipsec_td_in_from_out(td, &td_inb);
925 memcpy(&td_inb, td, sizeof(td_inb));
927 return test_ipsec_inline_proto_process(&td_inb, NULL, 1, false, &flags);
931 test_ipsec_inline_proto_display_list(const void *data __rte_unused)
933 struct ipsec_test_flags flags;
935 memset(&flags, 0, sizeof(flags));
937 flags.display_alg = true;
939 return test_ipsec_inline_proto_all(&flags);
/*
 * Unit-test suite definition: known-vector outbound and inbound cases for
 * AES-GCM / AES-CBC+HMAC / NULL+AES-XCBC over IPv4 and IPv6 tunnels,
 * followed by the combined algorithm-list run.  Each case uses the
 * per-test port setup/teardown pair.
 */
942 static struct unit_test_suite inline_ipsec_testsuite = {
943 .suite_name = "Inline IPsec Ethernet Device Unit Test Suite",
944 .setup = inline_ipsec_testsuite_setup,
945 .teardown = inline_ipsec_testsuite_teardown,
947 TEST_CASE_NAMED_WITH_DATA(
948 "Outbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
949 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
950 test_ipsec_inline_proto_known_vec, &pkt_aes_128_gcm),
951 TEST_CASE_NAMED_WITH_DATA(
952 "Outbound known vector (ESP tunnel mode IPv4 AES-GCM 192)",
953 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
954 test_ipsec_inline_proto_known_vec, &pkt_aes_192_gcm),
955 TEST_CASE_NAMED_WITH_DATA(
956 "Outbound known vector (ESP tunnel mode IPv4 AES-GCM 256)",
957 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
958 test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm),
959 TEST_CASE_NAMED_WITH_DATA(
960 "Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])",
961 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
962 test_ipsec_inline_proto_known_vec,
963 &pkt_aes_128_cbc_hmac_sha256),
964 TEST_CASE_NAMED_WITH_DATA(
965 "Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])",
966 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
967 test_ipsec_inline_proto_known_vec,
968 &pkt_aes_128_cbc_hmac_sha384),
969 TEST_CASE_NAMED_WITH_DATA(
970 "Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])",
971 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
972 test_ipsec_inline_proto_known_vec,
973 &pkt_aes_128_cbc_hmac_sha512),
974 TEST_CASE_NAMED_WITH_DATA(
975 "Outbound known vector (ESP tunnel mode IPv6 AES-GCM 128)",
976 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
977 test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm_v6),
978 TEST_CASE_NAMED_WITH_DATA(
979 "Outbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])",
980 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
981 test_ipsec_inline_proto_known_vec,
982 &pkt_aes_128_cbc_hmac_sha256_v6),
983 TEST_CASE_NAMED_WITH_DATA(
984 "Outbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])",
985 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
986 test_ipsec_inline_proto_known_vec,
988 TEST_CASE_NAMED_WITH_DATA(
989 "Inbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
990 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
991 test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_gcm),
992 TEST_CASE_NAMED_WITH_DATA(
993 "Inbound known vector (ESP tunnel mode IPv4 AES-GCM 192)",
994 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
995 test_ipsec_inline_proto_known_vec_inb, &pkt_aes_192_gcm),
996 TEST_CASE_NAMED_WITH_DATA(
997 "Inbound known vector (ESP tunnel mode IPv4 AES-GCM 256)",
998 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
999 test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm),
1000 TEST_CASE_NAMED_WITH_DATA(
1001 "Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128)",
1002 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1003 test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_cbc_null),
1004 TEST_CASE_NAMED_WITH_DATA(
1005 "Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])",
1006 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1007 test_ipsec_inline_proto_known_vec_inb,
1008 &pkt_aes_128_cbc_hmac_sha256),
1009 TEST_CASE_NAMED_WITH_DATA(
1010 "Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])",
1011 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1012 test_ipsec_inline_proto_known_vec_inb,
1013 &pkt_aes_128_cbc_hmac_sha384),
1014 TEST_CASE_NAMED_WITH_DATA(
1015 "Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])",
1016 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1017 test_ipsec_inline_proto_known_vec_inb,
1018 &pkt_aes_128_cbc_hmac_sha512),
1019 TEST_CASE_NAMED_WITH_DATA(
1020 "Inbound known vector (ESP tunnel mode IPv6 AES-GCM 128)",
1021 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1022 test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm_v6),
1023 TEST_CASE_NAMED_WITH_DATA(
1024 "Inbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])",
1025 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1026 test_ipsec_inline_proto_known_vec_inb,
1027 &pkt_aes_128_cbc_hmac_sha256_v6),
1028 TEST_CASE_NAMED_WITH_DATA(
1029 "Inbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])",
1030 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1031 test_ipsec_inline_proto_known_vec_inb,
1032 &pkt_null_aes_xcbc),
1035 "Combined test alg list",
1036 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1037 test_ipsec_inline_proto_display_list),
1041 TEST_CASES_END() /**< NULL terminate unit test array */
1047 test_inline_ipsec(void)
1049 return unit_test_suite_runner(&inline_ipsec_testsuite);
1052 #endif /* !RTE_EXEC_ENV_WINDOWS */
1054 REGISTER_TEST_COMMAND(inline_ipsec_autotest, test_inline_ipsec);