1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2022 Marvell.
9 #include <rte_ethdev.h>
10 #include <rte_malloc.h>
11 #include <rte_security.h>
14 #include "test_security_inline_proto_vectors.h"
16 #ifdef RTE_EXEC_ENV_WINDOWS
18 test_inline_ipsec(void)
20 printf("Inline ipsec not supported on Windows, skipping test\n");
/* NOTE(review): this dump carries stray per-line number prefixes and has
 * dropped lines (gaps in the embedded numbering); restore from upstream
 * before building. Comments below describe the visible declarations only.
 */
/* Test harness sizing: one port, loopback traffic. */
26 #define NB_ETHPORTS_USED 1
27 #define MEMPOOL_CACHE_SIZE 32
28 #define MAX_PKT_BURST 32
29 #define RTE_TEST_RX_DESC_DEFAULT 1024
30 #define RTE_TEST_TX_DESC_DEFAULT 1024
/* All-ones port mask used by check_all_ports_link_status(). */
31 #define RTE_PORT_ALL (~(uint16_t)0x0)
33 #define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
34 #define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
35 #define RX_WTHRESH 0 /**< Default values of RX write-back threshold reg. */
37 #define TX_PTHRESH 32 /**< Default values of TX prefetch threshold reg. */
38 #define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
39 #define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
41 #define MAX_TRAFFIC_BURST 2048
/* Known-answer IPsec test vectors defined in the vectors header. */
44 extern struct ipsec_test_data pkt_aes_128_gcm;
45 extern struct ipsec_test_data pkt_aes_192_gcm;
46 extern struct ipsec_test_data pkt_aes_256_gcm;
47 extern struct ipsec_test_data pkt_aes_128_gcm_frag;
48 extern struct ipsec_test_data pkt_aes_128_cbc_null;
49 extern struct ipsec_test_data pkt_null_aes_xcbc;
50 extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha384;
51 extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha512;
/* Shared pools: packet mbufs plus security session + private-data pools. */
53 static struct rte_mempool *mbufpool;
54 static struct rte_mempool *sess_pool;
55 static struct rte_mempool *sess_priv_pool;
56 /* ethernet addresses of ports */
57 static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
59 static struct rte_eth_conf port_conf = {
61 .mq_mode = RTE_ETH_MQ_RX_NONE,
63 .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM |
64 RTE_ETH_RX_OFFLOAD_SECURITY,
67 .mq_mode = RTE_ETH_MQ_TX_NONE,
68 .offloads = RTE_ETH_TX_OFFLOAD_SECURITY |
69 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
71 .lpbk_mode = 1, /* enable loopback */
74 static struct rte_eth_rxconf rx_conf = {
76 .pthresh = RX_PTHRESH,
77 .hthresh = RX_HTHRESH,
78 .wthresh = RX_WTHRESH,
83 static struct rte_eth_txconf tx_conf = {
85 .pthresh = TX_PTHRESH,
86 .hthresh = TX_HTHRESH,
87 .wthresh = TX_WTHRESH,
89 .tx_free_thresh = 32, /* Use PMD default values */
90 .tx_rs_thresh = 32, /* Use PMD default values */
/* First non-zero link speed observed by check_all_ports_link_status(). */
95 static uint64_t link_mbps;
/* Per-port default ESP->SECURITY flow rule installed by create_default_flow(). */
97 static struct rte_flow *default_flow[RTE_MAX_ETHPORTS];
99 /* Create Inline IPsec session */
/*
 * Build an RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL IPsec session on ethdev
 * @portid from test vector @sa, honouring @flags (tunnel header verify, DF,
 * DSCP overrides). On success stores the session in *sess, the port's
 * security context in *ctx and the capability ol_flags in *ol_flags.
 * NOTE(review): this dump has dropped lines (gaps in the embedded numbers:
 * declarations, braces, return statements, some constants). Do not hand-edit;
 * restore missing lines from upstream before building.
 */
101 create_inline_ipsec_session(struct ipsec_test_data *sa, uint16_t portid,
102 struct rte_security_session **sess, struct rte_security_ctx **ctx,
103 uint32_t *ol_flags, const struct ipsec_test_flags *flags,
104 struct rte_security_session_conf *sess_conf)
/* Fixed outer tunnel endpoints used for all tunnel-mode sessions. */
106 uint16_t src_v6[8] = {0x2607, 0xf8b0, 0x400c, 0x0c03, 0x0000, 0x0000,
108 uint16_t dst_v6[8] = {0x2001, 0x0470, 0xe5bf, 0xdead, 0x4957, 0x2174,
110 uint32_t src_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 2));
111 uint32_t dst_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 1));
112 struct rte_security_capability_idx sec_cap_idx;
113 const struct rte_security_capability *sec_cap;
114 enum rte_security_ipsec_sa_direction dir;
115 struct rte_security_ctx *sec_ctx;
/* Session conf starts from the vector's ipsec_xform verbatim. */
118 sess_conf->action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
119 sess_conf->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
120 sess_conf->ipsec = sa->ipsec_xform;
122 dir = sa->ipsec_xform.direction;
123 verify = flags->tunnel_hdr_verify;
/* For ingress header-verify tests, corrupt the expected outer address so
 * the PMD's tunnel-header check is exercised (mutation lines are among the
 * dropped ones — presumably src_v4/dst_v4 increments; confirm upstream). */
125 if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) && verify) {
126 if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR)
128 else if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR)
/* Tunnel mode: program outer IPv4/IPv6 endpoints and DF/DSCP overrides. */
132 if (sa->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
133 if (sa->ipsec_xform.tunnel.type ==
134 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
135 memcpy(&sess_conf->ipsec.tunnel.ipv4.src_ip, &src_v4,
137 memcpy(&sess_conf->ipsec.tunnel.ipv4.dst_ip, &dst_v4,
140 if (flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
141 sess_conf->ipsec.tunnel.ipv4.df = 0;
143 if (flags->df == TEST_IPSEC_SET_DF_1_INNER_0)
144 sess_conf->ipsec.tunnel.ipv4.df = 1;
146 if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
147 sess_conf->ipsec.tunnel.ipv4.dscp = 0;
149 if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0)
150 sess_conf->ipsec.tunnel.ipv4.dscp =
/* IPv6 tunnel branch (the `} else {` line is among the dropped ones). */
153 if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
154 sess_conf->ipsec.tunnel.ipv6.dscp = 0;
156 if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0)
157 sess_conf->ipsec.tunnel.ipv6.dscp =
160 memcpy(&sess_conf->ipsec.tunnel.ipv6.src_addr, &src_v6,
162 memcpy(&sess_conf->ipsec.tunnel.ipv6.dst_addr, &dst_v6,
167 /* Save SA as userdata for the security session. When
168 * the packet is received, this userdata will be
169 * retrieved using the metadata from the packet.
171 * The PMD is expected to set similar metadata for other
172 * operations, like rte_eth_event, which are tied to
173 * security session. In such cases, the userdata could
174 * be obtained to uniquely identify the security
175 * parameters denoted.
178 sess_conf->userdata = (void *) sa;
180 sec_ctx = (struct rte_security_ctx *)rte_eth_dev_get_sec_ctx(portid);
181 if (sec_ctx == NULL) {
182 printf("Ethernet device doesn't support security features.\n");
/* Look up the port's inline-protocol IPsec capability for this SA. */
186 sec_cap_idx.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
187 sec_cap_idx.protocol = RTE_SECURITY_PROTOCOL_IPSEC;
188 sec_cap_idx.ipsec.proto = sess_conf->ipsec.proto;
189 sec_cap_idx.ipsec.mode = sess_conf->ipsec.mode;
190 sec_cap_idx.ipsec.direction = sess_conf->ipsec.direction;
191 sec_cap = rte_security_capability_get(sec_ctx, &sec_cap_idx);
192 if (sec_cap == NULL) {
193 printf("No capabilities registered\n");
/* Salt applies to AEAD and AES-GMAC vectors only; clamp to field size. */
197 if (sa->aead || sa->aes_gmac)
198 memcpy(&sess_conf->ipsec.salt, sa->salt.data,
199 RTE_MIN(sizeof(sess_conf->ipsec.salt), sa->salt.len));
201 /* Copy cipher session parameters */
/* AEAD path: single xform; verify PMD supports the algorithm/key. */
203 rte_memcpy(sess_conf->crypto_xform, &sa->xform.aead,
204 sizeof(struct rte_crypto_sym_xform));
205 sess_conf->crypto_xform->aead.key.data = sa->key.data;
206 /* Verify crypto capabilities */
207 if (test_ipsec_crypto_caps_aead_verify(sec_cap,
208 sess_conf->crypto_xform) != 0) {
210 "Crypto capabilities not supported\n");
/* Chained path: egress is cipher->auth, ingress is auth->cipher. */
214 if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
215 rte_memcpy(&sess_conf->crypto_xform->cipher,
216 &sa->xform.chain.cipher.cipher,
217 sizeof(struct rte_crypto_cipher_xform));
219 rte_memcpy(&sess_conf->crypto_xform->next->auth,
220 &sa->xform.chain.auth.auth,
221 sizeof(struct rte_crypto_auth_xform));
222 sess_conf->crypto_xform->cipher.key.data =
224 sess_conf->crypto_xform->next->auth.key.data =
226 /* Verify crypto capabilities */
227 if (test_ipsec_crypto_caps_cipher_verify(sec_cap,
228 sess_conf->crypto_xform) != 0) {
230 "Cipher crypto capabilities not supported\n");
234 if (test_ipsec_crypto_caps_auth_verify(sec_cap,
235 sess_conf->crypto_xform->next) != 0) {
237 "Auth crypto capabilities not supported\n");
/* Ingress: same copies with auth first, cipher second. */
241 rte_memcpy(&sess_conf->crypto_xform->next->cipher,
242 &sa->xform.chain.cipher.cipher,
243 sizeof(struct rte_crypto_cipher_xform));
244 rte_memcpy(&sess_conf->crypto_xform->auth,
245 &sa->xform.chain.auth.auth,
246 sizeof(struct rte_crypto_auth_xform));
247 sess_conf->crypto_xform->auth.key.data =
249 sess_conf->crypto_xform->next->cipher.key.data =
252 /* Verify crypto capabilities */
253 if (test_ipsec_crypto_caps_cipher_verify(sec_cap,
254 sess_conf->crypto_xform->next) != 0) {
256 "Cipher crypto capabilities not supported\n");
260 if (test_ipsec_crypto_caps_auth_verify(sec_cap,
261 sess_conf->crypto_xform) != 0) {
263 "Auth crypto capabilities not supported\n");
/* Check the IPsec options themselves against the capability. */
269 if (test_ipsec_sec_caps_verify(&sess_conf->ipsec, sec_cap, false) != 0)
/* Known-vector egress tests disable IV generation; pass the fixed IV to
 * the PMD through the ETH_SEC_IV_OVR environment variable. */
272 if ((sa->ipsec_xform.direction ==
273 RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
274 (sa->ipsec_xform.options.iv_gen_disable == 1)) {
275 /* Set env variable when IV generation is disabled */
278 int iv_len = (sa->aead || sa->aes_gmac) ? 8 : 16;
280 for (; j < iv_len; j++)
281 len += snprintf(arr+len, sizeof(arr) - len,
282 "0x%x, ", sa->iv.data[j]);
283 setenv("ETH_SEC_IV_OVR", arr, 1);
286 *sess = rte_security_session_create(sec_ctx,
287 sess_conf, sess_pool, sess_priv_pool);
289 printf("SEC Session init failed.\n");
/* Hand the capability's offload flags back to the caller. */
293 *ol_flags = sec_cap->ol_flags;
299 /* Check the link status of all ports in up to 3s, and print them finally */
/*
 * Poll every port selected by @port_mask until all report link up or the
 * 3 s budget (30 x 100 ms) expires, then print each port's final status.
 * Also records the first observed non-zero link speed into link_mbps.
 * NOTE(review): several loop-bookkeeping lines (declarations of portid/ret,
 * continue/break statements, braces) are missing from this dump.
 */
301 check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
303 #define CHECK_INTERVAL 100 /* 100ms */
304 #define MAX_CHECK_TIME 30 /* 3s (30 * 100ms) in total */
306 uint8_t count, all_ports_up, print_flag = 0;
307 struct rte_eth_link link;
309 char link_status[RTE_ETH_LINK_MAX_STR_LEN];
311 printf("Checking link statuses...\n");
313 for (count = 0; count <= MAX_CHECK_TIME; count++) {
315 for (portid = 0; portid < port_num; portid++) {
/* Skip ports not selected by the caller's mask. */
316 if ((port_mask & (1 << portid)) == 0)
318 memset(&link, 0, sizeof(link));
319 ret = rte_eth_link_get_nowait(portid, &link);
323 printf("Port %u link get failed: %s\n",
324 portid, rte_strerror(-ret));
328 /* print link status if flag set */
329 if (print_flag == 1) {
/* Remember the first link speed seen; used for reporting only. */
330 if (link.link_status && link_mbps == 0)
331 link_mbps = link.link_speed;
333 rte_eth_link_to_str(link_status,
334 sizeof(link_status), &link);
335 printf("Port %d %s\n", portid, link_status);
338 /* clear all_ports_up flag if any link down */
339 if (link.link_status == RTE_ETH_LINK_DOWN) {
344 /* after finally printing all link status, get out */
/* Not all up yet: sleep one interval and retry. */
348 if (all_ports_up == 0) {
350 rte_delay_ms(CHECK_INTERVAL);
353 /* set the print_flag if all ports up or timeout */
354 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
360 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
362 char buf[RTE_ETHER_ADDR_FMT_SIZE];
363 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
364 printf("%s%s", name, buf);
368 copy_buf_to_pkt_segs(const uint8_t *buf, unsigned int len,
369 struct rte_mbuf *pkt, unsigned int offset)
371 unsigned int copied = 0;
372 unsigned int copy_len;
373 struct rte_mbuf *seg;
377 while (offset >= seg->data_len) {
378 offset -= seg->data_len;
381 copy_len = seg->data_len - offset;
382 seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
383 while (len > copy_len) {
384 rte_memcpy(seg_buf, buf + copied, (size_t) copy_len);
388 seg_buf = rte_pktmbuf_mtod(seg, void *);
390 rte_memcpy(seg_buf, buf + copied, (size_t) len);
393 static inline struct rte_mbuf *
394 init_packet(struct rte_mempool *mp, const uint8_t *data, unsigned int len)
396 struct rte_mbuf *pkt;
398 pkt = rte_pktmbuf_alloc(mp);
401 if (((data[0] & 0xF0) >> 4) == IPVERSION) {
402 rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN),
403 &dummy_ipv4_eth_hdr, RTE_ETHER_HDR_LEN);
404 pkt->l3_len = sizeof(struct rte_ipv4_hdr);
406 rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN),
407 &dummy_ipv6_eth_hdr, RTE_ETHER_HDR_LEN);
408 pkt->l3_len = sizeof(struct rte_ipv6_hdr);
410 pkt->l2_len = RTE_ETHER_HDR_LEN;
412 if (pkt->buf_len > (len + RTE_ETHER_HDR_LEN))
413 rte_memcpy(rte_pktmbuf_append(pkt, len), data, len);
415 copy_buf_to_pkt_segs(data, len, pkt, RTE_ETHER_HDR_LEN);
420 init_mempools(unsigned int nb_mbuf)
422 struct rte_security_ctx *sec_ctx;
423 uint16_t nb_sess = 512;
427 if (mbufpool == NULL) {
428 snprintf(s, sizeof(s), "mbuf_pool");
429 mbufpool = rte_pktmbuf_pool_create(s, nb_mbuf,
430 MEMPOOL_CACHE_SIZE, 0,
431 RTE_MBUF_DEFAULT_BUF_SIZE, SOCKET_ID_ANY);
432 if (mbufpool == NULL) {
433 printf("Cannot init mbuf pool\n");
436 printf("Allocated mbuf pool\n");
439 sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
440 if (sec_ctx == NULL) {
441 printf("Device does not support Security ctx\n");
444 sess_sz = rte_security_session_get_size(sec_ctx);
445 if (sess_pool == NULL) {
446 snprintf(s, sizeof(s), "sess_pool");
447 sess_pool = rte_mempool_create(s, nb_sess, sess_sz,
448 MEMPOOL_CACHE_SIZE, 0,
449 NULL, NULL, NULL, NULL,
451 if (sess_pool == NULL) {
452 printf("Cannot init sess pool\n");
455 printf("Allocated sess pool\n");
457 if (sess_priv_pool == NULL) {
458 snprintf(s, sizeof(s), "sess_priv_pool");
459 sess_priv_pool = rte_mempool_create(s, nb_sess, sess_sz,
460 MEMPOOL_CACHE_SIZE, 0,
461 NULL, NULL, NULL, NULL,
463 if (sess_priv_pool == NULL) {
464 printf("Cannot init sess_priv pool\n");
467 printf("Allocated sess_priv pool\n");
474 create_default_flow(uint16_t portid)
476 struct rte_flow_action action[2];
477 struct rte_flow_item pattern[2];
478 struct rte_flow_attr attr = {0};
479 struct rte_flow_error err;
480 struct rte_flow *flow;
483 /* Add the default rte_flow to enable SECURITY for all ESP packets */
485 pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP;
486 pattern[0].spec = NULL;
487 pattern[0].mask = NULL;
488 pattern[0].last = NULL;
489 pattern[1].type = RTE_FLOW_ITEM_TYPE_END;
491 action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
492 action[0].conf = NULL;
493 action[1].type = RTE_FLOW_ACTION_TYPE_END;
494 action[1].conf = NULL;
498 ret = rte_flow_validate(portid, &attr, pattern, action, &err);
500 printf("\nValidate flow failed, ret = %d\n", ret);
503 flow = rte_flow_create(portid, &attr, pattern, action, &err);
505 printf("\nDefault flow rule create failed\n");
509 default_flow[portid] = flow;
515 destroy_default_flow(uint16_t portid)
517 struct rte_flow_error err;
520 if (!default_flow[portid])
522 ret = rte_flow_destroy(portid, default_flow[portid], &err);
524 printf("\nDefault flow rule destroy failed\n");
527 default_flow[portid] = NULL;
/* Scratch burst arrays; allocated with rte_calloc() in testsuite setup. */
530 struct rte_mbuf **tx_pkts_burst;
531 struct rte_mbuf **rx_pkts_burst;
/*
 * Core test driver: create an inline IPsec session from @td, transmit
 * nb_pkts copies of the vector's input through the loopback port, receive
 * them back, and run post-processing/stats verification on each packet.
 * Optionally captures the processed result into @res_d. Cleans up the
 * default flow and the security session before returning.
 * NOTE(review): this dump has dropped lines (parameter lines, declarations
 * of ret/i/j/ol_flags/nb_pkts, error returns, do{ opener, braces). Restore
 * from upstream before building.
 */
534 test_ipsec_inline_proto_process(struct ipsec_test_data *td,
535 struct ipsec_test_data *res_d,
538 const struct ipsec_test_flags *flags)
540 struct rte_security_session_conf sess_conf = {0};
541 struct rte_crypto_sym_xform cipher = {0};
542 struct rte_crypto_sym_xform auth = {0};
543 struct rte_crypto_sym_xform aead = {0};
544 struct rte_security_session *ses;
545 struct rte_security_ctx *ctx;
546 int nb_rx = 0, nb_sent;
550 memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * nb_pkts);
/* AEAD vectors use the single aead xform... */
553 sess_conf.crypto_xform = &aead;
/* ...chained vectors use cipher->auth for egress, auth->cipher for ingress. */
555 if (td->ipsec_xform.direction ==
556 RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
557 sess_conf.crypto_xform = &cipher;
558 sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
559 sess_conf.crypto_xform->next = &auth;
560 sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
562 sess_conf.crypto_xform = &auth;
563 sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
564 sess_conf.crypto_xform->next = &cipher;
565 sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
569 /* Create Inline IPsec session. */
570 ret = create_inline_ipsec_session(td, port_id, &ses, &ctx,
571 &ol_flags, flags, &sess_conf);
/* Ingress needs the default ESP->SECURITY flow rule in place. */
575 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
576 ret = create_default_flow(port_id);
/* Build the TX burst from the vector's input text. */
580 for (i = 0; i < nb_pkts; i++) {
581 tx_pkts_burst[i] = init_packet(mbufpool, td->input_text.data,
583 if (tx_pkts_burst[i] == NULL) {
/* Alloc failure: free what was built so far and bail out. */
585 rte_pktmbuf_free(tx_pkts_burst[i]);
/* Apply per-flag packet mutations (bad checksum, TTL, etc.). */
590 if (test_ipsec_pkt_update(rte_pktmbuf_mtod_offset(tx_pkts_burst[i],
591 uint8_t *, RTE_ETHER_HDR_LEN), flags)) {
593 rte_pktmbuf_free(tx_pkts_burst[i]);
/* Egress: attach session metadata if the PMD requires it, and mark
 * the mbuf for TX security offload. */
598 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
599 if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
600 rte_security_set_pkt_metadata(ctx, ses,
601 tx_pkts_burst[i], NULL);
602 tx_pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
605 /* Send packet to ethdev for inline IPsec processing. */
606 nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_pkts);
607 if (nb_sent != nb_pkts) {
608 printf("\nUnable to TX %d packets", nb_pkts);
609 for ( ; nb_sent < nb_pkts; nb_sent++)
610 rte_pktmbuf_free(tx_pkts_burst[nb_sent]);
617 /* Receive back packet on loopback interface. */
/* Poll RX until all sent packets return or the retry budget runs out. */
620 nb_rx += rte_eth_rx_burst(port_id, 0, &rx_pkts_burst[nb_rx],
622 if (nb_rx >= nb_sent)
624 } while (j++ < 5 || nb_rx == 0);
626 if (nb_rx != nb_sent) {
627 printf("\nUnable to RX all %d packets", nb_sent);
629 rte_pktmbuf_free(rx_pkts_burst[nb_rx]);
/* Verify each received packet and the session statistics. */
634 for (i = 0; i < nb_rx; i++) {
/* Strip the dummy Ethernet header before content checks. */
635 rte_pktmbuf_adj(rx_pkts_burst[i], RTE_ETHER_HDR_LEN);
637 ret = test_ipsec_post_process(rx_pkts_burst[i], td,
638 res_d, silent, flags);
639 if (ret != TEST_SUCCESS) {
640 for ( ; i < nb_rx; i++)
641 rte_pktmbuf_free(rx_pkts_burst[i]);
645 ret = test_ipsec_stats_verify(ctx, ses, flags,
646 td->ipsec_xform.direction);
647 if (ret != TEST_SUCCESS) {
648 for ( ; i < nb_rx; i++)
649 rte_pktmbuf_free(rx_pkts_burst[i]);
653 rte_pktmbuf_free(rx_pkts_burst[i]);
654 rx_pkts_burst[i] = NULL;
658 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
659 destroy_default_flow(port_id);
661 /* Destroy session so that other cases can create the session again */
662 rte_security_session_destroy(ctx, ses);
669 ut_setup_inline_ipsec(void)
674 ret = rte_eth_dev_start(port_id);
676 printf("rte_eth_dev_start: err=%d, port=%d\n",
680 /* always enable promiscuous */
681 ret = rte_eth_promiscuous_enable(port_id);
683 printf("rte_eth_promiscuous_enable: err=%s, port=%d\n",
684 rte_strerror(-ret), port_id);
688 check_all_ports_link_status(1, RTE_PORT_ALL);
694 ut_teardown_inline_ipsec(void)
700 RTE_ETH_FOREACH_DEV(portid) {
701 ret = rte_eth_dev_stop(portid);
703 printf("rte_eth_dev_stop: err=%s, port=%u\n",
704 rte_strerror(-ret), portid);
/*
 * Suite-level setup: verify at least NB_ETHPORTS_USED port exists, create
 * the mempools, allocate the TX/RX burst arrays, configure port 0 with one
 * RX and one TX queue, print its MAC, and populate the algorithm list.
 * NOTE(review): many lines are missing from this dump (declarations of
 * nb_ports/ret/nb_rxd/nb_txd, error returns, rte_calloc() size arguments,
 * braces). Restore from upstream before building.
 */
709 inline_ipsec_testsuite_setup(void)
715 uint16_t nb_rx_queue = 1, nb_tx_queue = 1;
717 printf("Start inline IPsec test.\n");
719 nb_ports = rte_eth_dev_count_avail();
720 if (nb_ports < NB_ETHPORTS_USED) {
721 printf("At least %u port(s) used for test\n",
726 ret = init_mempools(NB_MBUF);
/* Burst arrays are allocated once and reused by every test case. */
730 if (tx_pkts_burst == NULL) {
731 tx_pkts_burst = (struct rte_mbuf **)rte_calloc("tx_buff",
734 RTE_CACHE_LINE_SIZE);
738 rx_pkts_burst = (struct rte_mbuf **)rte_calloc("rx_buff",
741 RTE_CACHE_LINE_SIZE);
746 printf("Generate %d packets\n", MAX_TRAFFIC_BURST);
748 nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
749 nb_txd = RTE_TEST_TX_DESC_DEFAULT;
751 /* configuring port 0 for the test is enough */
754 ret = rte_eth_dev_configure(port_id, nb_rx_queue,
755 nb_tx_queue, &port_conf);
757 printf("Cannot configure device: err=%d, port=%d\n",
761 ret = rte_eth_macaddr_get(port_id, &ports_eth_addr[port_id]);
763 printf("Cannot get mac address: err=%d, port=%d\n",
767 printf("Port %u ", port_id);
768 print_ethaddr("Address:", &ports_eth_addr[port_id]);
/* One TX and one RX queue on any socket, using the static confs above. */
772 ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
773 SOCKET_ID_ANY, &tx_conf);
775 printf("rte_eth_tx_queue_setup: err=%d, port=%d\n",
780 ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, SOCKET_ID_ANY,
783 printf("rte_eth_rx_queue_setup: err=%d, port=%d\n",
787 test_ipsec_alg_list_populate();
793 inline_ipsec_testsuite_teardown(void)
799 RTE_ETH_FOREACH_DEV(portid) {
800 ret = rte_eth_dev_reset(portid);
802 printf("rte_eth_dev_reset: err=%s, port=%u\n",
803 rte_strerror(-ret), port_id);
808 test_ipsec_inline_proto_known_vec(const void *test_data)
810 struct ipsec_test_data td_outb;
811 struct ipsec_test_flags flags;
813 memset(&flags, 0, sizeof(flags));
815 memcpy(&td_outb, test_data, sizeof(td_outb));
818 td_outb.xform.chain.cipher.cipher.algo != RTE_CRYPTO_CIPHER_NULL) {
819 /* Disable IV gen to be able to test with known vectors */
820 td_outb.ipsec_xform.options.iv_gen_disable = 1;
823 return test_ipsec_inline_proto_process(&td_outb, NULL, 1,
828 test_ipsec_inline_proto_known_vec_inb(const void *test_data)
830 const struct ipsec_test_data *td = test_data;
831 struct ipsec_test_flags flags;
832 struct ipsec_test_data td_inb;
834 memset(&flags, 0, sizeof(flags));
836 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
837 test_ipsec_td_in_from_out(td, &td_inb);
839 memcpy(&td_inb, td, sizeof(td_inb));
841 return test_ipsec_inline_proto_process(&td_inb, NULL, 1, false, &flags);
/*
 * Test-case table: outbound then inbound known-vector cases, each using
 * the shared ut_setup/ut_teardown pair.
 * NOTE(review): some lines are missing from this dump (the
 * `.unit_test_cases = {` opener, the data argument of at least two
 * AES-XCBC cases — presumably &pkt_null_aes_xcbc — and the closing
 * braces). Restore from upstream before building.
 */
845 static struct unit_test_suite inline_ipsec_testsuite = {
846 .suite_name = "Inline IPsec Ethernet Device Unit Test Suite",
847 .setup = inline_ipsec_testsuite_setup,
848 .teardown = inline_ipsec_testsuite_teardown,
850 TEST_CASE_NAMED_WITH_DATA(
851 "Outbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
852 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
853 test_ipsec_inline_proto_known_vec, &pkt_aes_128_gcm),
854 TEST_CASE_NAMED_WITH_DATA(
855 "Outbound known vector (ESP tunnel mode IPv4 AES-GCM 192)",
856 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
857 test_ipsec_inline_proto_known_vec, &pkt_aes_192_gcm),
858 TEST_CASE_NAMED_WITH_DATA(
859 "Outbound known vector (ESP tunnel mode IPv4 AES-GCM 256)",
860 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
861 test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm),
862 TEST_CASE_NAMED_WITH_DATA(
863 "Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])",
864 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
865 test_ipsec_inline_proto_known_vec,
866 &pkt_aes_128_cbc_hmac_sha256),
867 TEST_CASE_NAMED_WITH_DATA(
868 "Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])",
869 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
870 test_ipsec_inline_proto_known_vec,
871 &pkt_aes_128_cbc_hmac_sha384),
872 TEST_CASE_NAMED_WITH_DATA(
873 "Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])",
874 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
875 test_ipsec_inline_proto_known_vec,
876 &pkt_aes_128_cbc_hmac_sha512),
877 TEST_CASE_NAMED_WITH_DATA(
878 "Outbound known vector (ESP tunnel mode IPv6 AES-GCM 128)",
879 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
880 test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm_v6),
881 TEST_CASE_NAMED_WITH_DATA(
882 "Outbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])",
883 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
884 test_ipsec_inline_proto_known_vec,
885 &pkt_aes_128_cbc_hmac_sha256_v6),
886 TEST_CASE_NAMED_WITH_DATA(
887 "Outbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])",
888 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
889 test_ipsec_inline_proto_known_vec,
/* NOTE(review): data argument line dropped here (line 890). */
891 TEST_CASE_NAMED_WITH_DATA(
892 "Inbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
893 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
894 test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_gcm),
895 TEST_CASE_NAMED_WITH_DATA(
896 "Inbound known vector (ESP tunnel mode IPv4 AES-GCM 192)",
897 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
898 test_ipsec_inline_proto_known_vec_inb, &pkt_aes_192_gcm),
899 TEST_CASE_NAMED_WITH_DATA(
900 "Inbound known vector (ESP tunnel mode IPv4 AES-GCM 256)",
901 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
902 test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm),
903 TEST_CASE_NAMED_WITH_DATA(
904 "Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128)",
905 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
906 test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_cbc_null),
907 TEST_CASE_NAMED_WITH_DATA(
908 "Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])",
909 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
910 test_ipsec_inline_proto_known_vec_inb,
911 &pkt_aes_128_cbc_hmac_sha256),
912 TEST_CASE_NAMED_WITH_DATA(
913 "Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])",
914 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
915 test_ipsec_inline_proto_known_vec_inb,
916 &pkt_aes_128_cbc_hmac_sha384),
917 TEST_CASE_NAMED_WITH_DATA(
918 "Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])",
919 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
920 test_ipsec_inline_proto_known_vec_inb,
921 &pkt_aes_128_cbc_hmac_sha512),
922 TEST_CASE_NAMED_WITH_DATA(
923 "Inbound known vector (ESP tunnel mode IPv6 AES-GCM 128)",
924 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
925 test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm_v6),
926 TEST_CASE_NAMED_WITH_DATA(
927 "Inbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])",
928 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
929 test_ipsec_inline_proto_known_vec_inb,
930 &pkt_aes_128_cbc_hmac_sha256_v6),
931 TEST_CASE_NAMED_WITH_DATA(
932 "Inbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])",
933 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
934 test_ipsec_inline_proto_known_vec_inb,
/* NOTE(review): data argument line(s) dropped here (lines 935-938). */
939 TEST_CASES_END() /**< NULL terminate unit test array */
945 test_inline_ipsec(void)
947 return unit_test_suite_runner(&inline_ipsec_testsuite);
950 #endif /* !RTE_EXEC_ENV_WINDOWS */
952 REGISTER_TEST_COMMAND(inline_ipsec_autotest, test_inline_ipsec);