1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2022 Marvell.
9 #include <rte_ethdev.h>
10 #include <rte_malloc.h>
11 #include <rte_security.h>
14 #include "test_security_inline_proto_vectors.h"
16 #ifdef RTE_EXEC_ENV_WINDOWS
18 test_inline_ipsec(void)
20 printf("Inline ipsec not supported on Windows, skipping test\n");
/* Test-wide constants: port/queue sizing and traffic parameters. */
26 #define NB_ETHPORTS_USED 1
27 #define MEMPOOL_CACHE_SIZE 32
28 #define MAX_PKT_BURST 32
29 #define RTE_TEST_RX_DESC_DEFAULT 1024
30 #define RTE_TEST_TX_DESC_DEFAULT 1024
/* All-ones wildcard: selects every port in the link-status mask. */
31 #define RTE_PORT_ALL (~(uint16_t)0x0)
33 #define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
34 #define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
35 #define RX_WTHRESH 0 /**< Default values of RX write-back threshold reg. */
37 #define TX_PTHRESH 32 /**< Default values of TX prefetch threshold reg. */
38 #define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
39 #define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
41 #define MAX_TRAFFIC_BURST 2048
/* Number of SAs/bursts used by the multi-SA encap/decap reassembly cases. */
44 #define ENCAP_DECAP_BURST_SZ 33
/* IP reassembly timeout (ms) requested from the PMD via conf_set. */
45 #define APP_REASS_TIMEOUT 10
/* Known-answer IPsec test vectors, defined in the vectors header/file. */
47 extern struct ipsec_test_data pkt_aes_128_gcm;
48 extern struct ipsec_test_data pkt_aes_192_gcm;
49 extern struct ipsec_test_data pkt_aes_256_gcm;
50 extern struct ipsec_test_data pkt_aes_128_gcm_frag;
51 extern struct ipsec_test_data pkt_aes_128_cbc_null;
52 extern struct ipsec_test_data pkt_null_aes_xcbc;
53 extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha384;
54 extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha512;
/* Pools created lazily by init_mempools(); shared by all test cases. */
56 static struct rte_mempool *mbufpool;
57 static struct rte_mempool *sess_pool;
58 static struct rte_mempool *sess_priv_pool;
59 /* ethernet addresses of ports */
60 static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
/* Default port configuration: single RX/TX queue, no RSS.
 * RX enables checksum + inline security offloads; TX enables inline
 * security + mbuf fast-free. Loopback mode lets TX traffic be received
 * back on the same port for verification.
 */
62 static struct rte_eth_conf port_conf = {
64 .mq_mode = RTE_ETH_MQ_RX_NONE,
66 .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM |
67 RTE_ETH_RX_OFFLOAD_SECURITY,
70 .mq_mode = RTE_ETH_MQ_TX_NONE,
71 .offloads = RTE_ETH_TX_OFFLOAD_SECURITY |
72 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
74 .lpbk_mode = 1, /* enable loopback */
/* RX queue configuration using the default threshold registers above. */
77 static struct rte_eth_rxconf rx_conf = {
79 .pthresh = RX_PTHRESH,
80 .hthresh = RX_HTHRESH,
81 .wthresh = RX_WTHRESH,
/* TX queue configuration using the default threshold registers above. */
86 static struct rte_eth_txconf tx_conf = {
88 .pthresh = TX_PTHRESH,
89 .hthresh = TX_HTHRESH,
90 .wthresh = TX_WTHRESH,
92 .tx_free_thresh = 32, /* Use PMD default values */
93 .tx_rs_thresh = 32, /* Use PMD default values */
/* Link speed captured when the port first comes up (check_all_ports_link_status). */
98 static uint64_t link_mbps;
/* Offset of the IP-reassembly mbuf dynfield; -1 until looked up. */
100 static int ip_reassembly_dynfield_offset = -1;
/* Per-port default ESP->SECURITY flow rule created by create_default_flow(). */
102 static struct rte_flow *default_flow[RTE_MAX_ETHPORTS];
104 /* Create Inline IPsec session */
/*
 * Populate @sess_conf from the test SA @sa and create an inline-protocol
 * IPsec security session on @portid.
 *
 * Outputs: *sess (created session), *ctx (port security context),
 * *ol_flags (capability ol_flags needed by the TX path).
 * Returns 0 on success; non-zero/TEST status on failure (exact codes on
 * elided lines — listing has gaps).
 */
106 create_inline_ipsec_session(struct ipsec_test_data *sa, uint16_t portid,
107 struct rte_security_session **sess, struct rte_security_ctx **ctx,
108 uint32_t *ol_flags, const struct ipsec_test_flags *flags,
109 struct rte_security_session_conf *sess_conf)
/* Fixed tunnel endpoint addresses used for all tunnel-mode sessions. */
111 uint16_t src_v6[8] = {0x2607, 0xf8b0, 0x400c, 0x0c03, 0x0000, 0x0000,
113 uint16_t dst_v6[8] = {0x2001, 0x0470, 0xe5bf, 0xdead, 0x4957, 0x2174,
115 uint32_t src_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 2));
116 uint32_t dst_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 1));
117 struct rte_security_capability_idx sec_cap_idx;
118 const struct rte_security_capability *sec_cap;
119 enum rte_security_ipsec_sa_direction dir;
120 struct rte_security_ctx *sec_ctx;
/* Base session config: inline-protocol action over IPsec, SA xform copied. */
123 sess_conf->action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
124 sess_conf->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
125 sess_conf->ipsec = sa->ipsec_xform;
127 dir = sa->ipsec_xform.direction;
128 verify = flags->tunnel_hdr_verify;
/* Ingress-only: request tunnel header verification per test flags
 * (option assignments are on elided lines).
 */
130 if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) && verify) {
131 if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR)
133 else if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR)
/* Tunnel mode: fill outer IPv4/IPv6 addresses and apply DF/DSCP
 * overrides requested by the test flags.
 */
137 if (sa->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
138 if (sa->ipsec_xform.tunnel.type ==
139 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
140 memcpy(&sess_conf->ipsec.tunnel.ipv4.src_ip, &src_v4,
142 memcpy(&sess_conf->ipsec.tunnel.ipv4.dst_ip, &dst_v4,
145 if (flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
146 sess_conf->ipsec.tunnel.ipv4.df = 0;
148 if (flags->df == TEST_IPSEC_SET_DF_1_INNER_0)
149 sess_conf->ipsec.tunnel.ipv4.df = 1;
151 if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
152 sess_conf->ipsec.tunnel.ipv4.dscp = 0;
154 if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0)
155 sess_conf->ipsec.tunnel.ipv4.dscp =
/* IPv6 tunnel branch (else-arm opener is on an elided line). */
158 if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
159 sess_conf->ipsec.tunnel.ipv6.dscp = 0;
161 if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0)
162 sess_conf->ipsec.tunnel.ipv6.dscp =
165 memcpy(&sess_conf->ipsec.tunnel.ipv6.src_addr, &src_v6,
167 memcpy(&sess_conf->ipsec.tunnel.ipv6.dst_addr, &dst_v6,
172 /* Save SA as userdata for the security session. When
173 * the packet is received, this userdata will be
174 * retrieved using the metadata from the packet.
176 * The PMD is expected to set similar metadata for other
177 * operations, like rte_eth_event, which are tied to
178 * security session. In such cases, the userdata could
179 * be obtained to uniquely identify the security
180 * parameters denoted.
183 sess_conf->userdata = (void *) sa;
/* The port must expose a security context to support inline IPsec. */
185 sec_ctx = (struct rte_security_ctx *)rte_eth_dev_get_sec_ctx(portid);
186 if (sec_ctx == NULL) {
187 printf("Ethernet device doesn't support security features.\n");
/* Look up the device capability matching this action/protocol/mode/dir. */
191 sec_cap_idx.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
192 sec_cap_idx.protocol = RTE_SECURITY_PROTOCOL_IPSEC;
193 sec_cap_idx.ipsec.proto = sess_conf->ipsec.proto;
194 sec_cap_idx.ipsec.mode = sess_conf->ipsec.mode;
195 sec_cap_idx.ipsec.direction = sess_conf->ipsec.direction;
196 sec_cap = rte_security_capability_get(sec_ctx, &sec_cap_idx);
197 if (sec_cap == NULL) {
198 printf("No capabilities registered\n");
/* Salt is only meaningful for AEAD / AES-GMAC SAs. */
202 if (sa->aead || sa->aes_gmac)
203 memcpy(&sess_conf->ipsec.salt, sa->salt.data,
204 RTE_MIN(sizeof(sess_conf->ipsec.salt), sa->salt.len));
206 /* Copy cipher session parameters */
/* AEAD path: single xform; verify it against device crypto caps.
 * (The `if (sa->aead)` guard is on an elided line.)
 */
208 rte_memcpy(sess_conf->crypto_xform, &sa->xform.aead,
209 sizeof(struct rte_crypto_sym_xform));
210 sess_conf->crypto_xform->aead.key.data = sa->key.data;
211 /* Verify crypto capabilities */
212 if (test_ipsec_crypto_caps_aead_verify(sec_cap,
213 sess_conf->crypto_xform) != 0) {
215 "Crypto capabilities not supported\n");
/* Chained (cipher+auth) path: egress orders cipher->auth,
 * ingress orders auth->cipher; each xform verified separately.
 */
219 if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
220 rte_memcpy(&sess_conf->crypto_xform->cipher,
221 &sa->xform.chain.cipher.cipher,
222 sizeof(struct rte_crypto_cipher_xform));
224 rte_memcpy(&sess_conf->crypto_xform->next->auth,
225 &sa->xform.chain.auth.auth,
226 sizeof(struct rte_crypto_auth_xform));
227 sess_conf->crypto_xform->cipher.key.data =
229 sess_conf->crypto_xform->next->auth.key.data =
231 /* Verify crypto capabilities */
232 if (test_ipsec_crypto_caps_cipher_verify(sec_cap,
233 sess_conf->crypto_xform) != 0) {
235 "Cipher crypto capabilities not supported\n");
239 if (test_ipsec_crypto_caps_auth_verify(sec_cap,
240 sess_conf->crypto_xform->next) != 0) {
242 "Auth crypto capabilities not supported\n");
/* Ingress arm: auth first, cipher chained as ->next. */
246 rte_memcpy(&sess_conf->crypto_xform->next->cipher,
247 &sa->xform.chain.cipher.cipher,
248 sizeof(struct rte_crypto_cipher_xform));
249 rte_memcpy(&sess_conf->crypto_xform->auth,
250 &sa->xform.chain.auth.auth,
251 sizeof(struct rte_crypto_auth_xform));
252 sess_conf->crypto_xform->auth.key.data =
254 sess_conf->crypto_xform->next->cipher.key.data =
257 /* Verify crypto capabilities */
258 if (test_ipsec_crypto_caps_cipher_verify(sec_cap,
259 sess_conf->crypto_xform->next) != 0) {
261 "Cipher crypto capabilities not supported\n");
265 if (test_ipsec_crypto_caps_auth_verify(sec_cap,
266 sess_conf->crypto_xform) != 0) {
268 "Auth crypto capabilities not supported\n");
/* Finally check the IPsec options themselves against the capability. */
274 if (test_ipsec_sec_caps_verify(&sess_conf->ipsec, sec_cap, false) != 0)
277 if ((sa->ipsec_xform.direction ==
278 RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
279 (sa->ipsec_xform.options.iv_gen_disable == 1)) {
280 /* Set env variable when IV generation is disabled */
/* Known-vector IV is exported through ETH_SEC_IV_OVR so the PMD can
 * use it instead of generating a random IV (8B for AEAD/GMAC, else 16B).
 */
283 int iv_len = (sa->aead || sa->aes_gmac) ? 8 : 16;
285 for (; j < iv_len; j++)
286 len += snprintf(arr+len, sizeof(arr) - len,
287 "0x%x, ", sa->iv.data[j]);
288 setenv("ETH_SEC_IV_OVR", arr, 1);
291 *sess = rte_security_session_create(sec_ctx,
292 sess_conf, sess_pool, sess_priv_pool);
294 printf("SEC Session init failed.\n");
/* Hand the capability ol_flags back so TX can set metadata if needed. */
298 *ol_flags = sec_cap->ol_flags;
304 /* Check the link status of all ports in up to 3s, and print them finally */
/*
 * Poll link status of the ports selected by @port_mask every 100 ms, for
 * up to 3 s, printing the final status. Also records the first observed
 * link speed into the global link_mbps.
 */
306 check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
308 #define CHECK_INTERVAL 100 /* 100ms */
309 #define MAX_CHECK_TIME 30 /* 3s (30 * 100ms) in total */
311 uint8_t count, all_ports_up, print_flag = 0;
312 struct rte_eth_link link;
314 char link_status[RTE_ETH_LINK_MAX_STR_LEN];
316 printf("Checking link statuses...\n");
318 for (count = 0; count <= MAX_CHECK_TIME; count++) {
320 for (portid = 0; portid < port_num; portid++) {
/* Skip ports not selected by the mask. */
321 if ((port_mask & (1 << portid)) == 0)
323 memset(&link, 0, sizeof(link));
324 ret = rte_eth_link_get_nowait(portid, &link);
328 printf("Port %u link get failed: %s\n",
329 portid, rte_strerror(-ret));
333 /* print link status if flag set */
334 if (print_flag == 1) {
/* Remember the first non-zero link speed for rate checks. */
335 if (link.link_status && link_mbps == 0)
336 link_mbps = link.link_speed;
338 rte_eth_link_to_str(link_status,
339 sizeof(link_status), &link);
340 printf("Port %d %s\n", portid, link_status);
343 /* clear all_ports_up flag if any link down */
344 if (link.link_status == RTE_ETH_LINK_DOWN) {
349 /* after finally printing all link status, get out */
353 if (all_ports_up == 0) {
355 rte_delay_ms(CHECK_INTERVAL);
358 /* set the print_flag if all ports up or timeout */
359 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
/* Print @name followed by the formatted Ethernet address (no newline). */
365 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
367 char buf[RTE_ETHER_ADDR_FMT_SIZE];
368 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
369 printf("%s%s", name, buf);
/*
 * Copy @len bytes of @buf into a (possibly multi-segment) mbuf chain,
 * starting @offset bytes into the packet. Walks segments to find the
 * one containing @offset, then fills segment by segment.
 * Caller must ensure the chain has room for offset + len bytes.
 */
373 copy_buf_to_pkt_segs(const uint8_t *buf, unsigned int len,
374 struct rte_mbuf *pkt, unsigned int offset)
376 unsigned int copied = 0;
377 unsigned int copy_len;
378 struct rte_mbuf *seg;
/* Advance to the segment that contains the starting offset. */
382 while (offset >= seg->data_len) {
383 offset -= seg->data_len;
386 copy_len = seg->data_len - offset;
387 seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
/* Fill whole segments until the remaining data fits in one. */
388 while (len > copy_len) {
389 rte_memcpy(seg_buf, buf + copied, (size_t) copy_len);
393 seg_buf = rte_pktmbuf_mtod(seg, void *);
/* Final partial copy into the last segment. */
395 rte_memcpy(seg_buf, buf + copied, (size_t) len);
398 static inline struct rte_mbuf *
/*
 * Allocate an mbuf from @mp, prepend a dummy Ethernet header chosen by
 * the IP version of @data (v4 vs v6), then append the @len payload
 * bytes (segment-aware if the payload exceeds the first segment).
 * Sets l2_len/l3_len for offloads. Returns NULL on alloc failure
 * (check is on an elided line).
 */
399 init_packet(struct rte_mempool *mp, const uint8_t *data, unsigned int len)
401 struct rte_mbuf *pkt;
403 pkt = rte_pktmbuf_alloc(mp);
/* Top nibble of the first payload byte is the IP version. */
406 if (((data[0] & 0xF0) >> 4) == IPVERSION) {
407 rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN),
408 &dummy_ipv4_eth_hdr, RTE_ETHER_HDR_LEN);
409 pkt->l3_len = sizeof(struct rte_ipv4_hdr);
411 rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN),
412 &dummy_ipv6_eth_hdr, RTE_ETHER_HDR_LEN);
413 pkt->l3_len = sizeof(struct rte_ipv6_hdr);
415 pkt->l2_len = RTE_ETHER_HDR_LEN;
/* Single-segment fast path when the payload fits; else segment copy. */
417 if (pkt->buf_len > (len + RTE_ETHER_HDR_LEN))
418 rte_memcpy(rte_pktmbuf_append(pkt, len), data, len);
420 copy_buf_to_pkt_segs(data, len, pkt, RTE_ETHER_HDR_LEN);
/*
 * Lazily create the shared pools used by the test suite:
 * - mbufpool: nb_mbuf packet mbufs
 * - sess_pool / sess_priv_pool: 512 security session objects each, sized
 *   by the port's security context session size.
 * Idempotent: pools already created are left untouched.
 */
425 init_mempools(unsigned int nb_mbuf)
427 struct rte_security_ctx *sec_ctx;
428 uint16_t nb_sess = 512;
432 if (mbufpool == NULL) {
433 snprintf(s, sizeof(s), "mbuf_pool");
434 mbufpool = rte_pktmbuf_pool_create(s, nb_mbuf,
435 MEMPOOL_CACHE_SIZE, 0,
436 RTE_MBUF_DEFAULT_BUF_SIZE, SOCKET_ID_ANY);
437 if (mbufpool == NULL) {
438 printf("Cannot init mbuf pool\n");
441 printf("Allocated mbuf pool\n");
/* Session pools need the per-device session object size. */
444 sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
445 if (sec_ctx == NULL) {
446 printf("Device does not support Security ctx\n");
449 sess_sz = rte_security_session_get_size(sec_ctx);
450 if (sess_pool == NULL) {
451 snprintf(s, sizeof(s), "sess_pool");
452 sess_pool = rte_mempool_create(s, nb_sess, sess_sz,
453 MEMPOOL_CACHE_SIZE, 0,
454 NULL, NULL, NULL, NULL,
456 if (sess_pool == NULL) {
457 printf("Cannot init sess pool\n");
460 printf("Allocated sess pool\n");
462 if (sess_priv_pool == NULL) {
463 snprintf(s, sizeof(s), "sess_priv_pool");
464 sess_priv_pool = rte_mempool_create(s, nb_sess, sess_sz,
465 MEMPOOL_CACHE_SIZE, 0,
466 NULL, NULL, NULL, NULL,
468 if (sess_priv_pool == NULL) {
469 printf("Cannot init sess_priv pool\n");
472 printf("Allocated sess_priv pool\n");
/*
 * Install a catch-all rte_flow on @portid that steers every ESP packet
 * to the SECURITY action, so inline IPsec processing applies on RX.
 * The created rule is cached in default_flow[portid] for later destroy.
 */
479 create_default_flow(uint16_t portid)
481 struct rte_flow_action action[2];
482 struct rte_flow_item pattern[2];
483 struct rte_flow_attr attr = {0};
484 struct rte_flow_error err;
485 struct rte_flow *flow;
488 /* Add the default rte_flow to enable SECURITY for all ESP packets */
490 pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP;
491 pattern[0].spec = NULL;
492 pattern[0].mask = NULL;
493 pattern[0].last = NULL;
494 pattern[1].type = RTE_FLOW_ITEM_TYPE_END;
496 action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
497 action[0].conf = NULL;
498 action[1].type = RTE_FLOW_ACTION_TYPE_END;
499 action[1].conf = NULL;
/* Validate before creating so unsupported PMDs fail cleanly. */
503 ret = rte_flow_validate(portid, &attr, pattern, action, &err);
505 printf("\nValidate flow failed, ret = %d\n", ret);
508 flow = rte_flow_create(portid, &attr, pattern, action, &err);
510 printf("\nDefault flow rule create failed\n");
514 default_flow[portid] = flow;
/* Destroy the cached default ESP flow rule on @portid, if any. */
520 destroy_default_flow(uint16_t portid)
522 struct rte_flow_error err;
/* Nothing to do when no rule was installed. */
525 if (!default_flow[portid])
527 ret = rte_flow_destroy(portid, default_flow[portid], &err);
529 printf("\nDefault flow rule destroy failed\n");
/* Clear the cache so a later create starts fresh. */
532 default_flow[portid] = NULL;
/* TX/RX burst arrays, allocated once in inline_ipsec_testsuite_setup(). */
535 struct rte_mbuf **tx_pkts_burst;
536 struct rte_mbuf **rx_pkts_burst;
/*
 * Compare the payload of (possibly segmented) mbuf @m against @ref for
 * @tot_len bytes. On mismatch or missing data, hexdump/diagnose and
 * return failure (return statements are on elided lines).
 */
539 compare_pkt_data(struct rte_mbuf *m, uint8_t *ref, unsigned int tot_len)
542 unsigned int nb_segs = m->nb_segs;
543 unsigned int matched = 0;
544 struct rte_mbuf *save = m;
/* Clamp the compare length to the current segment. */
548 if (len > m->data_len)
551 if (memcmp(rte_pktmbuf_mtod(m, char *),
552 ref + matched, len)) {
553 printf("\n====Reassembly case failed: Data Mismatch");
554 rte_hexdump(stdout, "Reassembled",
555 rte_pktmbuf_mtod(m, char *),
557 rte_hexdump(stdout, "reference",
/* Ran out of segments before tot_len bytes were matched. */
569 printf("\n====Reassembly case failed: Data Missing %u",
571 printf("\n====nb_segs %u, tot_len %u", nb_segs, tot_len);
572 rte_pktmbuf_dump(stderr, save, -1);
/*
 * Return true when @mbuf carries the "IP reassembly incomplete" dynflag,
 * i.e. the PMD could not fully reassemble and left fragments chained via
 * the reassembly dynfield. The dynflag bit is looked up once and cached.
 */
579 is_ip_reassembly_incomplete(struct rte_mbuf *mbuf)
581 static uint64_t ip_reassembly_dynflag;
582 int ip_reassembly_dynflag_offset;
584 if (ip_reassembly_dynflag == 0) {
585 ip_reassembly_dynflag_offset = rte_mbuf_dynflag_lookup(
586 RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME, NULL);
/* Flag not registered: treat as "not incomplete". */
587 if (ip_reassembly_dynflag_offset < 0)
589 ip_reassembly_dynflag = RTE_BIT64(ip_reassembly_dynflag_offset);
592 return (mbuf->ol_flags & ip_reassembly_dynflag) != 0;
/*
 * Free @mbuf. If HW reassembly left it incomplete, also free the
 * fragments chained through the reassembly dynfield (next_frag); the
 * loop structure walking the chain is on elided lines.
 */
596 free_mbuf(struct rte_mbuf *mbuf)
598 rte_eth_ip_reassembly_dynfield_t dynfield;
/* Fully reassembled (or plain) packet: normal free. */
603 if (!is_ip_reassembly_incomplete(mbuf)) {
604 rte_pktmbuf_free(mbuf);
/* Without a registered dynfield we cannot reach the chained frags. */
606 if (ip_reassembly_dynfield_offset < 0)
/* Snapshot the dynfield before freeing, then advance to next frag. */
610 dynfield = *RTE_MBUF_DYNFIELD(mbuf,
611 ip_reassembly_dynfield_offset,
612 rte_eth_ip_reassembly_dynfield_t *);
613 rte_pktmbuf_free(mbuf);
614 mbuf = dynfield.next_frag;
/*
 * When HW IP reassembly is incomplete, walk the fragment chain hanging
 * off @mbuf's reassembly dynfield and compare each fragment's payload
 * against the corresponding entry in @vector->frags[] (Ethernet headers
 * stripped before comparing). Returns a TEST_* status (elided lines).
 */
621 get_and_verify_incomplete_frags(struct rte_mbuf *mbuf,
622 struct reassembly_vector *vector)
624 rte_eth_ip_reassembly_dynfield_t *dynfield[MAX_PKT_BURST];
627 * IP reassembly offload is incomplete, and fragments are listed in
628 * dynfield which can be reassembled in SW.
630 printf("\nHW IP Reassembly is not complete; attempt SW IP Reassembly,"
631 "\nMatching with original frags.");
/* Fragment chain is unreachable without the dynfield offset. */
633 if (ip_reassembly_dynfield_offset < 0)
/* First fragment is the head mbuf itself. */
636 printf("\ncomparing frag: %d", j);
637 /* Skip Ethernet header comparison */
638 rte_pktmbuf_adj(mbuf, RTE_ETHER_HDR_LEN);
639 ret = compare_pkt_data(mbuf, vector->frags[j]->data,
640 vector->frags[j]->len);
/* Second fragment comes from the head's dynfield next_frag. */
644 dynfield[j] = RTE_MBUF_DYNFIELD(mbuf, ip_reassembly_dynfield_offset,
645 rte_eth_ip_reassembly_dynfield_t *);
646 printf("\ncomparing frag: %d", j);
647 /* Skip Ethernet header comparison */
648 rte_pktmbuf_adj(dynfield[j]->next_frag, RTE_ETHER_HDR_LEN);
649 ret = compare_pkt_data(dynfield[j]->next_frag, vector->frags[j]->data,
650 vector->frags[j]->len);
/* Remaining fragments: follow next_frag while more frags are chained. */
654 while ((dynfield[j]->nb_frags > 1) &&
655 is_ip_reassembly_incomplete(dynfield[j]->next_frag)) {
657 dynfield[j] = RTE_MBUF_DYNFIELD(dynfield[j-1]->next_frag,
658 ip_reassembly_dynfield_offset,
659 rte_eth_ip_reassembly_dynfield_t *);
660 printf("\ncomparing frag: %d", j);
661 /* Skip Ethernet header comparison */
662 rte_pktmbuf_adj(dynfield[j]->next_frag, RTE_ETHER_HDR_LEN);
663 ret = compare_pkt_data(dynfield[j]->next_frag,
664 vector->frags[j]->data, vector->frags[j]->len);
/*
 * End-to-end inline IPsec + HW IP reassembly test:
 *  1. Reconfigure the port's reassembly timeout (port stopped/restarted).
 *  2. Build nb_frags fragments per SA (burst_sz SAs when vector->burst).
 *  3. Create egress sessions, TX all fragments through inline IPsec.
 *  4. Create ingress sessions + default ESP flow, RX on loopback.
 *  5. Verify each RX packet is either fully reassembled and matches
 *     vector->full_pkt, or its incomplete fragment chain matches the
 *     original fragments.
 * Cleanup destroys the flow, sessions and any leftover mbufs.
 */
672 test_ipsec_with_reassembly(struct reassembly_vector *vector,
673 const struct ipsec_test_flags *flags)
675 struct rte_security_session *out_ses[ENCAP_DECAP_BURST_SZ] = {0};
676 struct rte_security_session *in_ses[ENCAP_DECAP_BURST_SZ] = {0};
677 struct rte_eth_ip_reassembly_params reass_capa = {0};
678 struct rte_security_session_conf sess_conf_out = {0};
679 struct rte_security_session_conf sess_conf_in = {0};
680 unsigned int nb_tx, burst_sz, nb_sent = 0;
681 struct rte_crypto_sym_xform cipher_out = {0};
682 struct rte_crypto_sym_xform auth_out = {0};
683 struct rte_crypto_sym_xform aead_out = {0};
684 struct rte_crypto_sym_xform cipher_in = {0};
685 struct rte_crypto_sym_xform auth_in = {0};
686 struct rte_crypto_sym_xform aead_in = {0};
687 struct ipsec_test_data sa_data;
688 struct rte_security_ctx *ctx;
689 unsigned int i, nb_rx = 0, j;
/* One SA per burst entry when bursting, else a single SA. */
693 burst_sz = vector->burst ? ENCAP_DECAP_BURST_SZ : 1;
694 nb_tx = vector->nb_frags * burst_sz;
/* Reassembly conf can only be changed on a stopped port. */
696 rte_eth_dev_stop(port_id);
698 printf("rte_eth_dev_stop: err=%s, port=%u\n",
699 rte_strerror(-ret), port_id);
702 rte_eth_ip_reassembly_capability_get(port_id, &reass_capa);
/* Skip if the PMD cannot hold enough fragments for this vector. */
703 if (reass_capa.max_frags < vector->nb_frags)
705 if (reass_capa.timeout_ms > APP_REASS_TIMEOUT) {
706 reass_capa.timeout_ms = APP_REASS_TIMEOUT;
707 rte_eth_ip_reassembly_conf_set(port_id, &reass_capa);
710 ret = rte_eth_dev_start(port_id);
712 printf("rte_eth_dev_start: err=%d, port=%d\n",
717 memset(tx_pkts_burst, 0, sizeof(tx_pkts_burst[0]) * nb_tx);
718 memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * nb_tx);
/* Build every fragment for every SA burst entry. */
720 for (i = 0; i < nb_tx; i += vector->nb_frags) {
721 for (j = 0; j < vector->nb_frags; j++) {
722 tx_pkts_burst[i+j] = init_packet(mbufpool,
723 vector->frags[j]->data,
724 vector->frags[j]->len);
725 if (tx_pkts_burst[i+j] == NULL) {
727 printf("\n packed init failed\n");
/* Outbound (egress) session per SA, SPI incremented per entry. */
733 for (i = 0; i < burst_sz; i++) {
734 memcpy(&sa_data, vector->sa_data,
735 sizeof(struct ipsec_test_data));
736 /* Update SPI for every new SA */
737 sa_data.ipsec_xform.spi += i;
738 sa_data.ipsec_xform.direction =
739 RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
741 sess_conf_out.crypto_xform = &aead_out;
743 sess_conf_out.crypto_xform = &cipher_out;
744 sess_conf_out.crypto_xform->next = &auth_out;
747 /* Create Inline IPsec outbound session. */
748 ret = create_inline_ipsec_session(&sa_data, port_id,
749 &out_ses[i], &ctx, &ol_flags, flags,
752 printf("\nInline outbound session create failed\n");
/* Attach session metadata + TX security flag to every fragment;
 * j indexes the SA and advances once per nb_frags fragments.
 */
758 for (i = 0; i < nb_tx; i++) {
759 if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
760 rte_security_set_pkt_metadata(ctx,
761 out_ses[j], tx_pkts_burst[i], NULL);
762 tx_pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
764 /* Move to next SA after nb_frags */
765 if ((i + 1) % vector->nb_frags == 0)
/* Inbound (ingress) sessions mirror the outbound SAs. */
769 for (i = 0; i < burst_sz; i++) {
770 memcpy(&sa_data, vector->sa_data,
771 sizeof(struct ipsec_test_data));
772 /* Update SPI for every new SA */
773 sa_data.ipsec_xform.spi += i;
774 sa_data.ipsec_xform.direction =
775 RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
778 sess_conf_in.crypto_xform = &aead_in;
780 sess_conf_in.crypto_xform = &auth_in;
781 sess_conf_in.crypto_xform->next = &cipher_in;
783 /* Create Inline IPsec inbound session. */
784 ret = create_inline_ipsec_session(&sa_data, port_id, &in_ses[i],
785 &ctx, &ol_flags, flags, &sess_conf_in);
787 printf("\nInline inbound session create failed\n");
792 /* Retrieve reassembly dynfield offset if available */
793 if (ip_reassembly_dynfield_offset < 0 && vector->nb_frags > 1)
794 ip_reassembly_dynfield_offset = rte_mbuf_dynfield_lookup(
795 RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME, NULL);
798 ret = create_default_flow(port_id);
802 nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_tx);
803 if (nb_sent != nb_tx) {
805 printf("\nFailed to tx %u pkts", nb_tx);
811 /* Retry few times before giving up */
815 nb_rx += rte_eth_rx_burst(port_id, 0, &rx_pkts_burst[nb_rx],
821 } while (j < 5 || !nb_rx);
823 /* Check for minimum number of Rx packets expected */
824 if ((vector->nb_frags == 1 && nb_rx != nb_tx) ||
825 (vector->nb_frags > 1 && nb_rx < burst_sz)) {
826 printf("\nreceived less Rx pkts(%u) pkts\n", nb_rx);
/* Verify each RX packet: SW-compare incomplete chains, otherwise check
 * security offload status and compare against the full plaintext.
 */
831 for (i = 0; i < nb_rx; i++) {
832 if (vector->nb_frags > 1 &&
833 is_ip_reassembly_incomplete(rx_pkts_burst[i])) {
834 ret = get_and_verify_incomplete_frags(rx_pkts_burst[i],
836 if (ret != TEST_SUCCESS)
841 if (rx_pkts_burst[i]->ol_flags &
842 RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED ||
843 !(rx_pkts_burst[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD)) {
844 printf("\nsecurity offload failed\n");
849 if (vector->full_pkt->len + RTE_ETHER_HDR_LEN !=
850 rx_pkts_burst[i]->pkt_len) {
851 printf("\nreassembled/decrypted packet length mismatch\n");
855 rte_pktmbuf_adj(rx_pkts_burst[i], RTE_ETHER_HDR_LEN);
856 ret = compare_pkt_data(rx_pkts_burst[i],
857 vector->full_pkt->data,
858 vector->full_pkt->len);
859 if (ret != TEST_SUCCESS)
/* Cleanup: flow rule, sessions, unsent TX mbufs, received mbufs. */
864 destroy_default_flow(port_id);
866 /* Clear session data. */
867 for (i = 0; i < burst_sz; i++) {
869 rte_security_session_destroy(ctx, out_ses[i]);
871 rte_security_session_destroy(ctx, in_ses[i]);
874 for (i = nb_sent; i < nb_tx; i++)
875 free_mbuf(tx_pkts_burst[i]);
876 for (i = 0; i < nb_rx; i++)
877 free_mbuf(rx_pkts_burst[i]);
/*
 * Core single-SA inline IPsec round trip: create a session from @td,
 * build nb_pkts packets from td->input_text, TX them with inline
 * security offload (egress) or through the default ESP flow (ingress),
 * receive them back on the loopback port, and post-process/verify each.
 * On success the processed result may be written to @res_d for chained
 * outbound->inbound tests.
 */
882 test_ipsec_inline_proto_process(struct ipsec_test_data *td,
883 struct ipsec_test_data *res_d,
886 const struct ipsec_test_flags *flags)
888 struct rte_security_session_conf sess_conf = {0};
889 struct rte_crypto_sym_xform cipher = {0};
890 struct rte_crypto_sym_xform auth = {0};
891 struct rte_crypto_sym_xform aead = {0};
892 struct rte_security_session *ses;
893 struct rte_security_ctx *ctx;
894 int nb_rx = 0, nb_sent;
898 memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * nb_pkts);
/* AEAD uses a single xform; chained SAs order cipher/auth by direction. */
901 sess_conf.crypto_xform = &aead;
903 if (td->ipsec_xform.direction ==
904 RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
905 sess_conf.crypto_xform = &cipher;
906 sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
907 sess_conf.crypto_xform->next = &auth;
908 sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
910 sess_conf.crypto_xform = &auth;
911 sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
912 sess_conf.crypto_xform->next = &cipher;
913 sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
917 /* Create Inline IPsec session. */
918 ret = create_inline_ipsec_session(td, port_id, &ses, &ctx,
919 &ol_flags, flags, &sess_conf);
/* Ingress needs the default ESP->SECURITY flow to classify packets. */
923 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
924 ret = create_default_flow(port_id);
928 for (i = 0; i < nb_pkts; i++) {
929 tx_pkts_burst[i] = init_packet(mbufpool, td->input_text.data,
931 if (tx_pkts_burst[i] == NULL) {
933 rte_pktmbuf_free(tx_pkts_burst[i]);
/* Apply per-flag packet mutations (csum corruption etc.) past the
 * Ethernet header.
 */
938 if (test_ipsec_pkt_update(rte_pktmbuf_mtod_offset(tx_pkts_burst[i],
939 uint8_t *, RTE_ETHER_HDR_LEN), flags)) {
941 rte_pktmbuf_free(tx_pkts_burst[i]);
/* Egress: mark packets for inline security offload. */
946 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
947 if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
948 rte_security_set_pkt_metadata(ctx, ses,
949 tx_pkts_burst[i], NULL);
950 tx_pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
953 /* Send packet to ethdev for inline IPsec processing. */
954 nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_pkts);
955 if (nb_sent != nb_pkts) {
956 printf("\nUnable to TX %d packets", nb_pkts);
957 for ( ; nb_sent < nb_pkts; nb_sent++)
958 rte_pktmbuf_free(tx_pkts_burst[nb_sent]);
965 /* Receive back packet on loopback interface. */
968 nb_rx += rte_eth_rx_burst(port_id, 0, &rx_pkts_burst[nb_rx],
970 if (nb_rx >= nb_sent)
972 } while (j++ < 5 || nb_rx == 0);
974 if (nb_rx != nb_sent) {
975 printf("\nUnable to RX all %d packets", nb_sent);
977 rte_pktmbuf_free(rx_pkts_burst[nb_rx]);
/* Strip Ethernet header, then verify payload, flags and stats. */
982 for (i = 0; i < nb_rx; i++) {
983 rte_pktmbuf_adj(rx_pkts_burst[i], RTE_ETHER_HDR_LEN);
985 ret = test_ipsec_post_process(rx_pkts_burst[i], td,
986 res_d, silent, flags);
987 if (ret != TEST_SUCCESS) {
988 for ( ; i < nb_rx; i++)
989 rte_pktmbuf_free(rx_pkts_burst[i]);
993 ret = test_ipsec_stats_verify(ctx, ses, flags,
994 td->ipsec_xform.direction);
995 if (ret != TEST_SUCCESS) {
996 for ( ; i < nb_rx; i++)
997 rte_pktmbuf_free(rx_pkts_burst[i]);
1001 rte_pktmbuf_free(rx_pkts_burst[i]);
1002 rx_pkts_burst[i] = NULL;
1006 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
1007 destroy_default_flow(port_id);
1009 /* Destroy session so that other cases can create the session again */
1010 rte_security_session_destroy(ctx, ses);
/*
 * Run an outbound->inbound inline IPsec round trip for every algorithm
 * in alg_list, honoring @flags (skipping combinations the flags make
 * meaningless, e.g. ICV corruption with NULL auth). Returns SUCCESS
 * when at least one combination passed and none failed; SKIPPED when
 * nothing ran.
 */
1017 test_ipsec_inline_proto_all(const struct ipsec_test_flags *flags)
1019 struct ipsec_test_data td_outb;
1020 struct ipsec_test_data td_inb;
1021 unsigned int i, nb_pkts = 1, pass_cnt = 0, fail_cnt = 0;
/* Multi-packet runs are needed for IV-gen and SA-expiry cases. */
1024 if (flags->iv_gen || flags->sa_expiry_pkts_soft ||
1025 flags->sa_expiry_pkts_hard)
1026 nb_pkts = IPSEC_TEST_PACKETS_MAX;
1028 for (i = 0; i < RTE_DIM(alg_list); i++) {
1029 test_ipsec_td_prepare(alg_list[i].param1,
1031 flags, &td_outb, 1);
1033 if (!td_outb.aead) {
1034 enum rte_crypto_cipher_algorithm cipher_alg;
1035 enum rte_crypto_auth_algorithm auth_alg;
1037 cipher_alg = td_outb.xform.chain.cipher.cipher.algo;
1038 auth_alg = td_outb.xform.chain.auth.auth.algo;
/* AES-GMAC is only exercised with the NULL cipher. */
1040 if (td_outb.aes_gmac && cipher_alg != RTE_CRYPTO_CIPHER_NULL)
1043 /* ICV is not applicable for NULL auth */
1044 if (flags->icv_corrupt &&
1045 auth_alg == RTE_CRYPTO_AUTH_NULL)
1048 /* IV is not applicable for NULL cipher */
1049 if (flags->iv_gen &&
1050 cipher_alg == RTE_CRYPTO_CIPHER_NULL)
1054 if (flags->udp_encap)
1055 td_outb.ipsec_xform.options.udp_encap = 1;
/* Outbound pass; its output seeds the inbound vector. */
1057 ret = test_ipsec_inline_proto_process(&td_outb, &td_inb, nb_pkts,
1059 if (ret == TEST_SKIPPED)
1062 if (ret == TEST_FAILED) {
1063 printf("\n TEST FAILED");
1064 test_ipsec_display_alg(alg_list[i].param1,
1065 alg_list[i].param2);
/* Derive the inbound vector from the outbound result and verify. */
1070 test_ipsec_td_update(&td_inb, &td_outb, 1, flags);
1072 ret = test_ipsec_inline_proto_process(&td_inb, NULL, nb_pkts,
1074 if (ret == TEST_SKIPPED)
1077 if (ret == TEST_FAILED) {
1078 printf("\n TEST FAILED");
1079 test_ipsec_display_alg(alg_list[i].param1,
1080 alg_list[i].param2);
1085 if (flags->display_alg)
1086 test_ipsec_display_alg(alg_list[i].param1,
1087 alg_list[i].param2);
1092 printf("Tests passed: %d, failed: %d", pass_cnt, fail_cnt);
1096 return TEST_SUCCESS;
1098 return TEST_SKIPPED;
/* Per-test setup: start the port, enable promiscuous mode, wait for link. */
1103 ut_setup_inline_ipsec(void)
1108 ret = rte_eth_dev_start(port_id);
1110 printf("rte_eth_dev_start: err=%d, port=%d\n",
1114 /* always enable promiscuous */
1115 ret = rte_eth_promiscuous_enable(port_id);
1117 printf("rte_eth_promiscuous_enable: err=%s, port=%d\n",
1118 rte_strerror(-ret), port_id);
/* Block until the loopback link reports up (or timeout). */
1122 check_all_ports_link_status(1, RTE_PORT_ALL);
/* Per-test teardown: stop every port and reset its reassembly config. */
1128 ut_teardown_inline_ipsec(void)
1130 struct rte_eth_ip_reassembly_params reass_conf = {0};
1134 /* port tear down */
1135 RTE_ETH_FOREACH_DEV(portid) {
1136 ret = rte_eth_dev_stop(portid);
1138 printf("rte_eth_dev_stop: err=%s, port=%u\n",
1139 rte_strerror(-ret), portid);
1141 /* Clear reassembly configuration */
1142 rte_eth_ip_reassembly_conf_set(portid, &reass_conf);
/*
 * One-time suite setup: verify at least one port is available, create
 * mempools and TX/RX burst arrays, then configure port 0 with one RX
 * and one TX queue (loopback) and populate the algorithm list.
 */
1147 inline_ipsec_testsuite_setup(void)
1153 uint16_t nb_rx_queue = 1, nb_tx_queue = 1;
1155 printf("Start inline IPsec test.\n");
1157 nb_ports = rte_eth_dev_count_avail();
1158 if (nb_ports < NB_ETHPORTS_USED) {
1159 printf("At least %u port(s) used for test\n",
1161 return TEST_SKIPPED;
1164 ret = init_mempools(NB_MBUF);
/* Burst arrays are allocated once and reused across all cases. */
1168 if (tx_pkts_burst == NULL) {
1169 tx_pkts_burst = (struct rte_mbuf **)rte_calloc("tx_buff",
1172 RTE_CACHE_LINE_SIZE);
1176 rx_pkts_burst = (struct rte_mbuf **)rte_calloc("rx_buff",
1179 RTE_CACHE_LINE_SIZE);
1184 printf("Generate %d packets\n", MAX_TRAFFIC_BURST);
1186 nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
1187 nb_txd = RTE_TEST_TX_DESC_DEFAULT;
1189 /* configuring port 0 for the test is enough */
1191 /* port configure */
1192 ret = rte_eth_dev_configure(port_id, nb_rx_queue,
1193 nb_tx_queue, &port_conf);
1195 printf("Cannot configure device: err=%d, port=%d\n",
1199 ret = rte_eth_macaddr_get(port_id, &ports_eth_addr[port_id]);
1201 printf("Cannot get mac address: err=%d, port=%d\n",
1205 printf("Port %u ", port_id);
1206 print_ethaddr("Address:", &ports_eth_addr[port_id]);
1209 /* tx queue setup */
1210 ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
1211 SOCKET_ID_ANY, &tx_conf);
1213 printf("rte_eth_tx_queue_setup: err=%d, port=%d\n",
1217 /* rx queue steup */
1218 ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, SOCKET_ID_ANY,
1219 &rx_conf, mbufpool);
1221 printf("rte_eth_rx_queue_setup: err=%d, port=%d\n",
/* Build the list of algorithm combinations the suite iterates over. */
1225 test_ipsec_alg_list_populate();
/* One-time suite teardown: reset every ethdev port. */
1231 inline_ipsec_testsuite_teardown(void)
1236 /* port tear down */
1237 RTE_ETH_FOREACH_DEV(portid) {
1238 ret = rte_eth_dev_reset(portid);
1240 printf("rte_eth_dev_reset: err=%s, port=%u\n",
/* NOTE(review): this prints `port_id` (the global test port) while the
 * loop iterates `portid`; the failing port looks like it should be
 * `portid` (as in ut_teardown_inline_ipsec above) — verify and fix.
 */
1241 rte_strerror(-ret), port_id);
/*
 * Test-case entry: deep-copy the const reassembly vector (full packet
 * and all fragments) into locals, populate payloads, then run the
 * inline IPsec + reassembly flow on the copy.
 */
1246 test_inline_ip_reassembly(const void *testdata)
1248 struct reassembly_vector reassembly_td = {0};
1249 const struct reassembly_vector *td = testdata;
1250 struct ip_reassembly_test_packet full_pkt;
1251 struct ip_reassembly_test_packet frags[MAX_FRAGS];
1252 struct ipsec_test_flags flags = {0};
1255 reassembly_td.sa_data = td->sa_data;
1256 reassembly_td.nb_frags = td->nb_frags;
1257 reassembly_td.burst = td->burst;
1259 memcpy(&full_pkt, td->full_pkt,
1260 sizeof(struct ip_reassembly_test_packet));
1261 reassembly_td.full_pkt = &full_pkt;
1263 test_vector_payload_populate(reassembly_td.full_pkt, true);
1264 for (; i < reassembly_td.nb_frags; i++) {
1265 memcpy(&frags[i], td->frags[i],
1266 sizeof(struct ip_reassembly_test_packet));
1267 reassembly_td.frags[i] = &frags[i];
/* Only the first fragment carries the full header (true flag). */
1268 test_vector_payload_populate(reassembly_td.frags[i],
1269 (i == 0) ? true : false);
1272 return test_ipsec_with_reassembly(&reassembly_td, &flags);
/* Outbound known-answer test: run a single vector with IV generation
 * disabled (except for NULL cipher) so output matches the vector.
 */
1276 test_ipsec_inline_proto_known_vec(const void *test_data)
1278 struct ipsec_test_data td_outb;
1279 struct ipsec_test_flags flags;
1281 memset(&flags, 0, sizeof(flags));
1283 memcpy(&td_outb, test_data, sizeof(td_outb));
1286 td_outb.xform.chain.cipher.cipher.algo != RTE_CRYPTO_CIPHER_NULL) {
1287 /* Disable IV gen to be able to test with known vectors */
1288 td_outb.ipsec_xform.options.iv_gen_disable = 1;
1291 return test_ipsec_inline_proto_process(&td_outb, NULL, 1,
/* Inbound known-answer test: derive the inbound vector from an egress
 * vector if needed, then run it through the ingress path.
 */
1296 test_ipsec_inline_proto_known_vec_inb(const void *test_data)
1298 const struct ipsec_test_data *td = test_data;
1299 struct ipsec_test_flags flags;
1300 struct ipsec_test_data td_inb;
1302 memset(&flags, 0, sizeof(flags));
1304 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
1305 test_ipsec_td_in_from_out(td, &td_inb);
1307 memcpy(&td_inb, td, sizeof(td_inb));
1309 return test_ipsec_inline_proto_process(&td_inb, NULL, 1, false, &flags);
/* Run all algorithms, printing each algorithm combination as it runs. */
1313 test_ipsec_inline_proto_display_list(const void *data __rte_unused)
1315 struct ipsec_test_flags flags;
1317 memset(&flags, 0, sizeof(flags));
1319 flags.display_alg = true;
1321 return test_ipsec_inline_proto_all(&flags);
/* Run all algorithms with UDP encapsulation (NAT-T) enabled. */
1325 test_ipsec_inline_proto_udp_encap(const void *data __rte_unused)
1327 struct ipsec_test_flags flags;
1329 memset(&flags, 0, sizeof(flags));
1331 flags.udp_encap = true;
1333 return test_ipsec_inline_proto_all(&flags);
/* Run all algorithms with UDP encapsulation plus UDP port verification. */
1337 test_ipsec_inline_proto_udp_ports_verify(const void *data __rte_unused)
1339 struct ipsec_test_flags flags;
1341 memset(&flags, 0, sizeof(flags));
1343 flags.udp_encap = true;
1344 flags.udp_ports_verify = true;
1346 return test_ipsec_inline_proto_all(&flags);
/* Negative test: corrupt the ICV and expect authentication failure. */
1350 test_ipsec_inline_proto_err_icv_corrupt(const void *data __rte_unused)
1352 struct ipsec_test_flags flags;
1354 memset(&flags, 0, sizeof(flags));
1356 flags.icv_corrupt = true;
1358 return test_ipsec_inline_proto_all(&flags);
1362 test_ipsec_inline_proto_tunnel_dst_addr_verify(const void *data __rte_unused)
1364 struct ipsec_test_flags flags;
1366 memset(&flags, 0, sizeof(flags));
1368 flags.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR;
1370 return test_ipsec_inline_proto_all(&flags);
1374 test_ipsec_inline_proto_tunnel_src_dst_addr_verify(const void *data __rte_unused)
1376 struct ipsec_test_flags flags;
1378 memset(&flags, 0, sizeof(flags));
1380 flags.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR;
1382 return test_ipsec_inline_proto_all(&flags);
1386 test_ipsec_inline_proto_inner_ip_csum(const void *data __rte_unused)
1388 struct ipsec_test_flags flags;
1390 memset(&flags, 0, sizeof(flags));
1392 flags.ip_csum = true;
1394 return test_ipsec_inline_proto_all(&flags);
1398 test_ipsec_inline_proto_inner_l4_csum(const void *data __rte_unused)
1400 struct ipsec_test_flags flags;
1402 memset(&flags, 0, sizeof(flags));
1404 flags.l4_csum = true;
1406 return test_ipsec_inline_proto_all(&flags);
1410 test_ipsec_inline_proto_tunnel_v4_in_v4(const void *data __rte_unused)
1412 struct ipsec_test_flags flags;
1414 memset(&flags, 0, sizeof(flags));
1417 flags.tunnel_ipv6 = false;
1419 return test_ipsec_inline_proto_all(&flags);
1423 test_ipsec_inline_proto_tunnel_v6_in_v6(const void *data __rte_unused)
1425 struct ipsec_test_flags flags;
1427 memset(&flags, 0, sizeof(flags));
1430 flags.tunnel_ipv6 = true;
1432 return test_ipsec_inline_proto_all(&flags);
1436 test_ipsec_inline_proto_tunnel_v4_in_v6(const void *data __rte_unused)
1438 struct ipsec_test_flags flags;
1440 memset(&flags, 0, sizeof(flags));
1443 flags.tunnel_ipv6 = true;
1445 return test_ipsec_inline_proto_all(&flags);
1449 test_ipsec_inline_proto_tunnel_v6_in_v4(const void *data __rte_unused)
1451 struct ipsec_test_flags flags;
1453 memset(&flags, 0, sizeof(flags));
1456 flags.tunnel_ipv6 = false;
1458 return test_ipsec_inline_proto_all(&flags);
1462 test_ipsec_inline_proto_transport_v4(const void *data __rte_unused)
1464 struct ipsec_test_flags flags;
1466 memset(&flags, 0, sizeof(flags));
1469 flags.transport = true;
1471 return test_ipsec_inline_proto_all(&flags);
1475 test_ipsec_inline_proto_transport_l4_csum(const void *data __rte_unused)
1477 struct ipsec_test_flags flags = {
1482 return test_ipsec_inline_proto_all(&flags);
1486 test_ipsec_inline_proto_stats(const void *data __rte_unused)
1488 struct ipsec_test_flags flags;
1490 memset(&flags, 0, sizeof(flags));
1492 flags.stats_success = true;
1494 return test_ipsec_inline_proto_all(&flags);
1498 test_ipsec_inline_proto_pkt_fragment(const void *data __rte_unused)
1500 struct ipsec_test_flags flags;
1502 memset(&flags, 0, sizeof(flags));
1504 flags.fragment = true;
1506 return test_ipsec_inline_proto_all(&flags);
1511 test_ipsec_inline_proto_copy_df_inner_0(const void *data __rte_unused)
1513 struct ipsec_test_flags flags;
1515 memset(&flags, 0, sizeof(flags));
1517 flags.df = TEST_IPSEC_COPY_DF_INNER_0;
1519 return test_ipsec_inline_proto_all(&flags);
1523 test_ipsec_inline_proto_copy_df_inner_1(const void *data __rte_unused)
1525 struct ipsec_test_flags flags;
1527 memset(&flags, 0, sizeof(flags));
1529 flags.df = TEST_IPSEC_COPY_DF_INNER_1;
1531 return test_ipsec_inline_proto_all(&flags);
1535 test_ipsec_inline_proto_set_df_0_inner_1(const void *data __rte_unused)
1537 struct ipsec_test_flags flags;
1539 memset(&flags, 0, sizeof(flags));
1541 flags.df = TEST_IPSEC_SET_DF_0_INNER_1;
1543 return test_ipsec_inline_proto_all(&flags);
1547 test_ipsec_inline_proto_set_df_1_inner_0(const void *data __rte_unused)
1549 struct ipsec_test_flags flags;
1551 memset(&flags, 0, sizeof(flags));
1553 flags.df = TEST_IPSEC_SET_DF_1_INNER_0;
1555 return test_ipsec_inline_proto_all(&flags);
1559 test_ipsec_inline_proto_ipv4_copy_dscp_inner_0(const void *data __rte_unused)
1561 struct ipsec_test_flags flags;
1563 memset(&flags, 0, sizeof(flags));
1565 flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_0;
1567 return test_ipsec_inline_proto_all(&flags);
1571 test_ipsec_inline_proto_ipv4_copy_dscp_inner_1(const void *data __rte_unused)
1573 struct ipsec_test_flags flags;
1575 memset(&flags, 0, sizeof(flags));
1577 flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_1;
1579 return test_ipsec_inline_proto_all(&flags);
1583 test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1(const void *data __rte_unused)
1585 struct ipsec_test_flags flags;
1587 memset(&flags, 0, sizeof(flags));
1589 flags.dscp = TEST_IPSEC_SET_DSCP_0_INNER_1;
1591 return test_ipsec_inline_proto_all(&flags);
1595 test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0(const void *data __rte_unused)
1597 struct ipsec_test_flags flags;
1599 memset(&flags, 0, sizeof(flags));
1601 flags.dscp = TEST_IPSEC_SET_DSCP_1_INNER_0;
1603 return test_ipsec_inline_proto_all(&flags);
1607 test_ipsec_inline_proto_ipv6_copy_dscp_inner_0(const void *data __rte_unused)
1609 struct ipsec_test_flags flags;
1611 memset(&flags, 0, sizeof(flags));
1614 flags.tunnel_ipv6 = true;
1615 flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_0;
1617 return test_ipsec_inline_proto_all(&flags);
1621 test_ipsec_inline_proto_ipv6_copy_dscp_inner_1(const void *data __rte_unused)
1623 struct ipsec_test_flags flags;
1625 memset(&flags, 0, sizeof(flags));
1628 flags.tunnel_ipv6 = true;
1629 flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_1;
1631 return test_ipsec_inline_proto_all(&flags);
1635 test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1(const void *data __rte_unused)
1637 struct ipsec_test_flags flags;
1639 memset(&flags, 0, sizeof(flags));
1642 flags.tunnel_ipv6 = true;
1643 flags.dscp = TEST_IPSEC_SET_DSCP_0_INNER_1;
1645 return test_ipsec_inline_proto_all(&flags);
1649 test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0(const void *data __rte_unused)
1651 struct ipsec_test_flags flags;
1653 memset(&flags, 0, sizeof(flags));
1656 flags.tunnel_ipv6 = true;
1657 flags.dscp = TEST_IPSEC_SET_DSCP_1_INNER_0;
1659 return test_ipsec_inline_proto_all(&flags);
1663 test_ipsec_inline_proto_ipv4_ttl_decrement(const void *data __rte_unused)
1665 struct ipsec_test_flags flags = {
1666 .dec_ttl_or_hop_limit = true
1669 return test_ipsec_inline_proto_all(&flags);
1673 test_ipsec_inline_proto_ipv6_hop_limit_decrement(const void *data __rte_unused)
1675 struct ipsec_test_flags flags = {
1677 .dec_ttl_or_hop_limit = true
1680 return test_ipsec_inline_proto_all(&flags);
1684 test_ipsec_inline_proto_iv_gen(const void *data __rte_unused)
1686 struct ipsec_test_flags flags;
1688 memset(&flags, 0, sizeof(flags));
1690 flags.iv_gen = true;
1692 return test_ipsec_inline_proto_all(&flags);
1696 test_ipsec_inline_proto_known_vec_fragmented(const void *test_data)
1698 struct ipsec_test_data td_outb;
1699 struct ipsec_test_flags flags;
1701 memset(&flags, 0, sizeof(flags));
1702 flags.fragment = true;
1704 memcpy(&td_outb, test_data, sizeof(td_outb));
1706 /* Disable IV gen to be able to test with known vectors */
1707 td_outb.ipsec_xform.options.iv_gen_disable = 1;
1709 return test_ipsec_inline_proto_process(&td_outb, NULL, 1, false,
1712 static struct unit_test_suite inline_ipsec_testsuite = {
1713 .suite_name = "Inline IPsec Ethernet Device Unit Test Suite",
1714 .setup = inline_ipsec_testsuite_setup,
1715 .teardown = inline_ipsec_testsuite_teardown,
1716 .unit_test_cases = {
1717 TEST_CASE_NAMED_WITH_DATA(
1718 "Outbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
1719 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1720 test_ipsec_inline_proto_known_vec, &pkt_aes_128_gcm),
1721 TEST_CASE_NAMED_WITH_DATA(
1722 "Outbound known vector (ESP tunnel mode IPv4 AES-GCM 192)",
1723 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1724 test_ipsec_inline_proto_known_vec, &pkt_aes_192_gcm),
1725 TEST_CASE_NAMED_WITH_DATA(
1726 "Outbound known vector (ESP tunnel mode IPv4 AES-GCM 256)",
1727 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1728 test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm),
1729 TEST_CASE_NAMED_WITH_DATA(
1730 "Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])",
1731 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1732 test_ipsec_inline_proto_known_vec,
1733 &pkt_aes_128_cbc_hmac_sha256),
1734 TEST_CASE_NAMED_WITH_DATA(
1735 "Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])",
1736 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1737 test_ipsec_inline_proto_known_vec,
1738 &pkt_aes_128_cbc_hmac_sha384),
1739 TEST_CASE_NAMED_WITH_DATA(
1740 "Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])",
1741 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1742 test_ipsec_inline_proto_known_vec,
1743 &pkt_aes_128_cbc_hmac_sha512),
1744 TEST_CASE_NAMED_WITH_DATA(
1745 "Outbound known vector (ESP tunnel mode IPv6 AES-GCM 128)",
1746 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1747 test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm_v6),
1748 TEST_CASE_NAMED_WITH_DATA(
1749 "Outbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])",
1750 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1751 test_ipsec_inline_proto_known_vec,
1752 &pkt_aes_128_cbc_hmac_sha256_v6),
1753 TEST_CASE_NAMED_WITH_DATA(
1754 "Outbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])",
1755 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1756 test_ipsec_inline_proto_known_vec,
1757 &pkt_null_aes_xcbc),
1759 TEST_CASE_NAMED_WITH_DATA(
1760 "Outbound fragmented packet",
1761 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1762 test_ipsec_inline_proto_known_vec_fragmented,
1763 &pkt_aes_128_gcm_frag),
1765 TEST_CASE_NAMED_WITH_DATA(
1766 "Inbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
1767 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1768 test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_gcm),
1769 TEST_CASE_NAMED_WITH_DATA(
1770 "Inbound known vector (ESP tunnel mode IPv4 AES-GCM 192)",
1771 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1772 test_ipsec_inline_proto_known_vec_inb, &pkt_aes_192_gcm),
1773 TEST_CASE_NAMED_WITH_DATA(
1774 "Inbound known vector (ESP tunnel mode IPv4 AES-GCM 256)",
1775 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1776 test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm),
1777 TEST_CASE_NAMED_WITH_DATA(
1778 "Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128)",
1779 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1780 test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_cbc_null),
1781 TEST_CASE_NAMED_WITH_DATA(
1782 "Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])",
1783 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1784 test_ipsec_inline_proto_known_vec_inb,
1785 &pkt_aes_128_cbc_hmac_sha256),
1786 TEST_CASE_NAMED_WITH_DATA(
1787 "Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])",
1788 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1789 test_ipsec_inline_proto_known_vec_inb,
1790 &pkt_aes_128_cbc_hmac_sha384),
1791 TEST_CASE_NAMED_WITH_DATA(
1792 "Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])",
1793 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1794 test_ipsec_inline_proto_known_vec_inb,
1795 &pkt_aes_128_cbc_hmac_sha512),
1796 TEST_CASE_NAMED_WITH_DATA(
1797 "Inbound known vector (ESP tunnel mode IPv6 AES-GCM 128)",
1798 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1799 test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm_v6),
1800 TEST_CASE_NAMED_WITH_DATA(
1801 "Inbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])",
1802 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1803 test_ipsec_inline_proto_known_vec_inb,
1804 &pkt_aes_128_cbc_hmac_sha256_v6),
1805 TEST_CASE_NAMED_WITH_DATA(
1806 "Inbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])",
1807 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1808 test_ipsec_inline_proto_known_vec_inb,
1809 &pkt_null_aes_xcbc),
1812 "Combined test alg list",
1813 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1814 test_ipsec_inline_proto_display_list),
1817 "UDP encapsulation",
1818 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1819 test_ipsec_inline_proto_udp_encap),
1821 "UDP encapsulation ports verification test",
1822 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1823 test_ipsec_inline_proto_udp_ports_verify),
1825 "Negative test: ICV corruption",
1826 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1827 test_ipsec_inline_proto_err_icv_corrupt),
1829 "Tunnel dst addr verification",
1830 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1831 test_ipsec_inline_proto_tunnel_dst_addr_verify),
1833 "Tunnel src and dst addr verification",
1834 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1835 test_ipsec_inline_proto_tunnel_src_dst_addr_verify),
1837 "Inner IP checksum",
1838 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1839 test_ipsec_inline_proto_inner_ip_csum),
1841 "Inner L4 checksum",
1842 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1843 test_ipsec_inline_proto_inner_l4_csum),
1845 "Tunnel IPv4 in IPv4",
1846 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1847 test_ipsec_inline_proto_tunnel_v4_in_v4),
1849 "Tunnel IPv6 in IPv6",
1850 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1851 test_ipsec_inline_proto_tunnel_v6_in_v6),
1853 "Tunnel IPv4 in IPv6",
1854 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1855 test_ipsec_inline_proto_tunnel_v4_in_v6),
1857 "Tunnel IPv6 in IPv4",
1858 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1859 test_ipsec_inline_proto_tunnel_v6_in_v4),
1862 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1863 test_ipsec_inline_proto_transport_v4),
1865 "Transport l4 checksum",
1866 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1867 test_ipsec_inline_proto_transport_l4_csum),
1869 "Statistics: success",
1870 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1871 test_ipsec_inline_proto_stats),
1873 "Fragmented packet",
1874 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1875 test_ipsec_inline_proto_pkt_fragment),
1877 "Tunnel header copy DF (inner 0)",
1878 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1879 test_ipsec_inline_proto_copy_df_inner_0),
1881 "Tunnel header copy DF (inner 1)",
1882 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1883 test_ipsec_inline_proto_copy_df_inner_1),
1885 "Tunnel header set DF 0 (inner 1)",
1886 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1887 test_ipsec_inline_proto_set_df_0_inner_1),
1889 "Tunnel header set DF 1 (inner 0)",
1890 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1891 test_ipsec_inline_proto_set_df_1_inner_0),
1893 "Tunnel header IPv4 copy DSCP (inner 0)",
1894 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1895 test_ipsec_inline_proto_ipv4_copy_dscp_inner_0),
1897 "Tunnel header IPv4 copy DSCP (inner 1)",
1898 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1899 test_ipsec_inline_proto_ipv4_copy_dscp_inner_1),
1901 "Tunnel header IPv4 set DSCP 0 (inner 1)",
1902 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1903 test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1),
1905 "Tunnel header IPv4 set DSCP 1 (inner 0)",
1906 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1907 test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0),
1909 "Tunnel header IPv6 copy DSCP (inner 0)",
1910 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1911 test_ipsec_inline_proto_ipv6_copy_dscp_inner_0),
1913 "Tunnel header IPv6 copy DSCP (inner 1)",
1914 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1915 test_ipsec_inline_proto_ipv6_copy_dscp_inner_1),
1917 "Tunnel header IPv6 set DSCP 0 (inner 1)",
1918 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1919 test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1),
1921 "Tunnel header IPv6 set DSCP 1 (inner 0)",
1922 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1923 test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0),
1925 "Tunnel header IPv4 decrement inner TTL",
1926 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1927 test_ipsec_inline_proto_ipv4_ttl_decrement),
1929 "Tunnel header IPv6 decrement inner hop limit",
1930 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1931 test_ipsec_inline_proto_ipv6_hop_limit_decrement),
1934 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1935 test_ipsec_inline_proto_iv_gen),
1938 TEST_CASE_NAMED_WITH_DATA(
1939 "IPv4 Reassembly with 2 fragments",
1940 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1941 test_inline_ip_reassembly, &ipv4_2frag_vector),
1942 TEST_CASE_NAMED_WITH_DATA(
1943 "IPv6 Reassembly with 2 fragments",
1944 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1945 test_inline_ip_reassembly, &ipv6_2frag_vector),
1946 TEST_CASE_NAMED_WITH_DATA(
1947 "IPv4 Reassembly with 4 fragments",
1948 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1949 test_inline_ip_reassembly, &ipv4_4frag_vector),
1950 TEST_CASE_NAMED_WITH_DATA(
1951 "IPv6 Reassembly with 4 fragments",
1952 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1953 test_inline_ip_reassembly, &ipv6_4frag_vector),
1954 TEST_CASE_NAMED_WITH_DATA(
1955 "IPv4 Reassembly with 5 fragments",
1956 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1957 test_inline_ip_reassembly, &ipv4_5frag_vector),
1958 TEST_CASE_NAMED_WITH_DATA(
1959 "IPv6 Reassembly with 5 fragments",
1960 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1961 test_inline_ip_reassembly, &ipv6_5frag_vector),
1962 TEST_CASE_NAMED_WITH_DATA(
1963 "IPv4 Reassembly with incomplete fragments",
1964 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1965 test_inline_ip_reassembly, &ipv4_incomplete_vector),
1966 TEST_CASE_NAMED_WITH_DATA(
1967 "IPv4 Reassembly with overlapping fragments",
1968 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1969 test_inline_ip_reassembly, &ipv4_overlap_vector),
1970 TEST_CASE_NAMED_WITH_DATA(
1971 "IPv4 Reassembly with out of order fragments",
1972 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1973 test_inline_ip_reassembly, &ipv4_out_of_order_vector),
1974 TEST_CASE_NAMED_WITH_DATA(
1975 "IPv4 Reassembly with burst of 4 fragments",
1976 ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
1977 test_inline_ip_reassembly, &ipv4_4frag_burst_vector),
1979 TEST_CASES_END() /**< NULL terminate unit test array */
1985 test_inline_ipsec(void)
1987 return unit_test_suite_runner(&inline_ipsec_testsuite);
1990 #endif /* !RTE_EXEC_ENV_WINDOWS */
1992 REGISTER_TEST_COMMAND(inline_ipsec_autotest, test_inline_ipsec);