1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2022 Marvell.
9 #include <rte_ethdev.h>
10 #include <rte_malloc.h>
11 #include <rte_security.h>
14 #include "test_security_inline_proto_vectors.h"
/* Inline IPsec is exercised via ethdev loopback; the whole suite is stubbed
 * out on Windows. (NOTE(review): listing is truncated — the function header
 * and return of the stub are not visible here.)
 */
16 #ifdef RTE_EXEC_ENV_WINDOWS
18 test_inline_ipsec(void)
20 printf("Inline ipsec not supported on Windows, skipping test\n");
/* Test-wide sizing constants. */
26 #define NB_ETHPORTS_USED 1
27 #define MEMPOOL_CACHE_SIZE 32
28 #define MAX_PKT_BURST 32
29 #define RTE_TEST_RX_DESC_DEFAULT 1024
30 #define RTE_TEST_TX_DESC_DEFAULT 1024
/* All-ones mask: select every port in check_all_ports_link_status(). */
31 #define RTE_PORT_ALL (~(uint16_t)0x0)
33 #define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
34 #define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
35 #define RX_WTHRESH 0 /**< Default values of RX write-back threshold reg. */
37 #define TX_PTHRESH 32 /**< Default values of TX prefetch threshold reg. */
38 #define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
39 #define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
41 #define MAX_TRAFFIC_BURST 2048
/* Burst size used when a reassembly vector requests burst mode. */
44 #define ENCAP_DECAP_BURST_SZ 33
/* Reassembly timeout (ms) requested from the PMD in the reassembly test. */
45 #define APP_REASS_TIMEOUT 10
/* Known-answer IPsec vectors defined in the companion vectors header. */
47 extern struct ipsec_test_data pkt_aes_128_gcm;
48 extern struct ipsec_test_data pkt_aes_192_gcm;
49 extern struct ipsec_test_data pkt_aes_256_gcm;
50 extern struct ipsec_test_data pkt_aes_128_gcm_frag;
51 extern struct ipsec_test_data pkt_aes_128_cbc_null;
52 extern struct ipsec_test_data pkt_null_aes_xcbc;
53 extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha384;
54 extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha512;
/* Pools shared by all cases: packet mbufs plus security session objects. */
56 static struct rte_mempool *mbufpool;
57 static struct rte_mempool *sess_pool;
58 static struct rte_mempool *sess_priv_pool;
59 /* ethernet addresses of ports */
60 static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
/* Port configuration: RX checksum+security offloads, TX security offload,
 * and loopback mode so transmitted packets are received back on same port.
 */
62 static struct rte_eth_conf port_conf = {
64 .mq_mode = RTE_ETH_MQ_RX_NONE,
66 .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM |
67 RTE_ETH_RX_OFFLOAD_SECURITY,
70 .mq_mode = RTE_ETH_MQ_TX_NONE,
71 .offloads = RTE_ETH_TX_OFFLOAD_SECURITY |
72 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
74 .lpbk_mode = 1, /* enable loopback */
77 static struct rte_eth_rxconf rx_conf = {
79 .pthresh = RX_PTHRESH,
80 .hthresh = RX_HTHRESH,
81 .wthresh = RX_WTHRESH,
86 static struct rte_eth_txconf tx_conf = {
88 .pthresh = TX_PTHRESH,
89 .hthresh = TX_HTHRESH,
90 .wthresh = TX_WTHRESH,
92 .tx_free_thresh = 32, /* Use PMD default values */
93 .tx_rs_thresh = 32, /* Use PMD default values */
/* Link speed captured on first link-up; used for reporting only here. */
98 static uint64_t link_mbps;
/* Offset of the IP-reassembly dynfield in the mbuf; -1 until looked up. */
100 static int ip_reassembly_dynfield_offset = -1;
/* Per-port catch-all ESP->SECURITY flow installed for ingress tests. */
102 static struct rte_flow *default_flow[RTE_MAX_ETHPORTS];
104 /* Create Inline IPsec session */
/* Build a rte_security session for inline-protocol IPsec on @portid from the
 * test vector @sa: fills @sess_conf (xforms, tunnel addresses, salt, per-flag
 * DF/DSCP/flabel overrides), checks device security/crypto capabilities, and
 * returns the created session plus the device ol_flags via out-params.
 * NOTE(review): listing is truncated (gaps in embedded numbering) — several
 * else-arms, error returns and closing braces are not visible here.
 */
106 create_inline_ipsec_session(struct ipsec_test_data *sa, uint16_t portid,
107 struct rte_security_session **sess, struct rte_security_ctx **ctx,
108 uint32_t *ol_flags, const struct ipsec_test_flags *flags,
109 struct rte_security_session_conf *sess_conf)
/* Fixed tunnel endpoints used for every tunnel-mode vector. */
111 uint16_t src_v6[8] = {0x2607, 0xf8b0, 0x400c, 0x0c03, 0x0000, 0x0000,
113 uint16_t dst_v6[8] = {0x2001, 0x0470, 0xe5bf, 0xdead, 0x4957, 0x2174,
115 uint32_t src_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 2));
116 uint32_t dst_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 1));
117 struct rte_security_capability_idx sec_cap_idx;
118 const struct rte_security_capability *sec_cap;
119 enum rte_security_ipsec_sa_direction dir;
120 struct rte_security_ctx *sec_ctx;
/* Start from the vector's ipsec_xform, then apply per-test overrides. */
123 sess_conf->action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
124 sess_conf->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
125 sess_conf->ipsec = sa->ipsec_xform;
127 dir = sa->ipsec_xform.direction;
128 verify = flags->tunnel_hdr_verify;
/* Tunnel header verification applies to ingress SAs only. */
130 if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) && verify) {
131 if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR)
133 else if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR)
137 if (sa->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
138 if (sa->ipsec_xform.tunnel.type ==
139 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
140 memcpy(&sess_conf->ipsec.tunnel.ipv4.src_ip, &src_v4,
142 memcpy(&sess_conf->ipsec.tunnel.ipv4.dst_ip, &dst_v4,
/* DF / DSCP overrides: the SA copies the opposite of what the inner
 * header carries so the copy/set behavior can be verified on decap/encap.
 */
145 if (flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
146 sess_conf->ipsec.tunnel.ipv4.df = 0;
148 if (flags->df == TEST_IPSEC_SET_DF_1_INNER_0)
149 sess_conf->ipsec.tunnel.ipv4.df = 1;
151 if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
152 sess_conf->ipsec.tunnel.ipv4.dscp = 0;
154 if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0)
155 sess_conf->ipsec.tunnel.ipv4.dscp =
/* IPv6 tunnel: same DSCP treatment plus flow-label overrides. */
158 if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
159 sess_conf->ipsec.tunnel.ipv6.dscp = 0;
161 if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0)
162 sess_conf->ipsec.tunnel.ipv6.dscp =
165 if (flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1)
166 sess_conf->ipsec.tunnel.ipv6.flabel = 0;
168 if (flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0)
169 sess_conf->ipsec.tunnel.ipv6.flabel =
170 TEST_IPSEC_FLABEL_VAL;
172 memcpy(&sess_conf->ipsec.tunnel.ipv6.src_addr, &src_v6,
174 memcpy(&sess_conf->ipsec.tunnel.ipv6.dst_addr, &dst_v6,
179 /* Save SA as userdata for the security session. When
180 * the packet is received, this userdata will be
181 * retrieved using the metadata from the packet.
183 * The PMD is expected to set similar metadata for other
184 * operations, like rte_eth_event, which are tied to
185 * security session. In such cases, the userdata could
186 * be obtained to uniquely identify the security
187 * parameters denoted.
190 sess_conf->userdata = (void *) sa;
192 sec_ctx = (struct rte_security_ctx *)rte_eth_dev_get_sec_ctx(portid);
193 if (sec_ctx == NULL) {
194 printf("Ethernet device doesn't support security features.\n");
/* Look up the device capability matching this SA's proto/mode/direction. */
198 sec_cap_idx.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
199 sec_cap_idx.protocol = RTE_SECURITY_PROTOCOL_IPSEC;
200 sec_cap_idx.ipsec.proto = sess_conf->ipsec.proto;
201 sec_cap_idx.ipsec.mode = sess_conf->ipsec.mode;
202 sec_cap_idx.ipsec.direction = sess_conf->ipsec.direction;
203 sec_cap = rte_security_capability_get(sec_ctx, &sec_cap_idx);
204 if (sec_cap == NULL) {
205 printf("No capabilities registered\n");
/* Salt is only meaningful for AEAD / AES-GMAC vectors. */
209 if (sa->aead || sa->aes_gmac)
210 memcpy(&sess_conf->ipsec.salt, sa->salt.data,
211 RTE_MIN(sizeof(sess_conf->ipsec.salt), sa->salt.len));
213 /* Copy cipher session parameters */
/* AEAD path: single xform copied from the vector. */
215 rte_memcpy(sess_conf->crypto_xform, &sa->xform.aead,
216 sizeof(struct rte_crypto_sym_xform));
217 sess_conf->crypto_xform->aead.key.data = sa->key.data;
218 /* Verify crypto capabilities */
219 if (test_ipsec_crypto_caps_aead_verify(sec_cap,
220 sess_conf->crypto_xform) != 0) {
222 "Crypto capabilities not supported\n");
/* Chained path: egress orders cipher->auth, ingress auth->cipher. */
226 if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
227 rte_memcpy(&sess_conf->crypto_xform->cipher,
228 &sa->xform.chain.cipher.cipher,
229 sizeof(struct rte_crypto_cipher_xform));
231 rte_memcpy(&sess_conf->crypto_xform->next->auth,
232 &sa->xform.chain.auth.auth,
233 sizeof(struct rte_crypto_auth_xform));
234 sess_conf->crypto_xform->cipher.key.data =
236 sess_conf->crypto_xform->next->auth.key.data =
238 /* Verify crypto capabilities */
239 if (test_ipsec_crypto_caps_cipher_verify(sec_cap,
240 sess_conf->crypto_xform) != 0) {
242 "Cipher crypto capabilities not supported\n");
246 if (test_ipsec_crypto_caps_auth_verify(sec_cap,
247 sess_conf->crypto_xform->next) != 0) {
249 "Auth crypto capabilities not supported\n");
253 rte_memcpy(&sess_conf->crypto_xform->next->cipher,
254 &sa->xform.chain.cipher.cipher,
255 sizeof(struct rte_crypto_cipher_xform));
256 rte_memcpy(&sess_conf->crypto_xform->auth,
257 &sa->xform.chain.auth.auth,
258 sizeof(struct rte_crypto_auth_xform));
259 sess_conf->crypto_xform->auth.key.data =
261 sess_conf->crypto_xform->next->cipher.key.data =
264 /* Verify crypto capabilities */
265 if (test_ipsec_crypto_caps_cipher_verify(sec_cap,
266 sess_conf->crypto_xform->next) != 0) {
268 "Cipher crypto capabilities not supported\n");
272 if (test_ipsec_crypto_caps_auth_verify(sec_cap,
273 sess_conf->crypto_xform) != 0) {
275 "Auth crypto capabilities not supported\n");
281 if (test_ipsec_sec_caps_verify(&sess_conf->ipsec, sec_cap, false) != 0)
/* With IV generation disabled, publish the vector's IV bytes via an
 * environment variable so the PMD under test can use the fixed IV.
 */
284 if ((sa->ipsec_xform.direction ==
285 RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
286 (sa->ipsec_xform.options.iv_gen_disable == 1)) {
287 /* Set env variable when IV generation is disabled */
/* 8-byte IV for AEAD/GMAC, 16-byte otherwise (e.g. CBC). */
290 int iv_len = (sa->aead || sa->aes_gmac) ? 8 : 16;
292 for (; j < iv_len; j++)
293 len += snprintf(arr+len, sizeof(arr) - len,
294 "0x%x, ", sa->iv.data[j]);
295 setenv("ETH_SEC_IV_OVR", arr, 1);
298 *sess = rte_security_session_create(sec_ctx,
299 sess_conf, sess_pool, sess_priv_pool);
301 printf("SEC Session init failed.\n");
/* Hand back device offload flags (e.g. NEED_MDATA) to the caller. */
305 *ol_flags = sec_cap->ol_flags;
311 /* Check the link status of all ports in up to 3s, and print them finally */
/* Poll link state of every port selected by @port_mask (up to 3 s total);
 * also records the first observed link speed into the global link_mbps.
 * NOTE(review): listing is truncated — some loop braces/continues are not
 * visible in this view.
 */
313 check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
315 #define CHECK_INTERVAL 100 /* 100ms */
316 #define MAX_CHECK_TIME 30 /* 3s (30 * 100ms) in total */
318 uint8_t count, all_ports_up, print_flag = 0;
319 struct rte_eth_link link;
321 char link_status[RTE_ETH_LINK_MAX_STR_LEN];
323 printf("Checking link statuses...\n");
325 for (count = 0; count <= MAX_CHECK_TIME; count++) {
327 for (portid = 0; portid < port_num; portid++) {
328 if ((port_mask & (1 << portid)) == 0)
330 memset(&link, 0, sizeof(link));
331 ret = rte_eth_link_get_nowait(portid, &link);
335 printf("Port %u link get failed: %s\n",
336 portid, rte_strerror(-ret));
340 /* print link status if flag set */
341 if (print_flag == 1) {
/* Remember first non-zero speed for later reporting. */
342 if (link.link_status && link_mbps == 0)
343 link_mbps = link.link_speed;
345 rte_eth_link_to_str(link_status,
346 sizeof(link_status), &link);
347 printf("Port %d %s\n", portid, link_status);
350 /* clear all_ports_up flag if any link down */
351 if (link.link_status == RTE_ETH_LINK_DOWN) {
356 /* after finally printing all link status, get out */
360 if (all_ports_up == 0) {
362 rte_delay_ms(CHECK_INTERVAL);
365 /* set the print_flag if all ports up or timeout */
366 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
/* Print "<name><formatted MAC>" (no trailing newline) for @eth_addr. */
372 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
374 char buf[RTE_ETHER_ADDR_FMT_SIZE];
375 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
376 printf("%s%s", name, buf);
/* Copy @len bytes from @buf into a (possibly multi-segment) mbuf chain,
 * starting @offset bytes into the chain; walks segments as needed.
 * NOTE(review): listing is truncated — the segment-advance statements inside
 * the loops are not visible here.
 */
380 copy_buf_to_pkt_segs(const uint8_t *buf, unsigned int len,
381 struct rte_mbuf *pkt, unsigned int offset)
383 unsigned int copied = 0;
384 unsigned int copy_len;
385 struct rte_mbuf *seg;
/* Skip whole segments until @offset lands inside @seg. */
389 while (offset >= seg->data_len) {
390 offset -= seg->data_len;
393 copy_len = seg->data_len - offset;
394 seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
/* Fill segment-by-segment while more than one segment's worth remains. */
395 while (len > copy_len) {
396 rte_memcpy(seg_buf, buf + copied, (size_t) copy_len);
400 seg_buf = rte_pktmbuf_mtod(seg, void *);
/* Final partial copy into the last touched segment. */
402 rte_memcpy(seg_buf, buf + copied, (size_t) len);
405 static inline struct rte_mbuf *
/* Allocate an mbuf from @mp, prepend a dummy Ethernet header chosen by the
 * IP version of @data's first byte, then append the @len payload bytes
 * (spilling into segments via copy_buf_to_pkt_segs() if it doesn't fit).
 */
406 init_packet(struct rte_mempool *mp, const uint8_t *data, unsigned int len)
408 struct rte_mbuf *pkt;
410 pkt = rte_pktmbuf_alloc(mp);
/* High nibble of the first payload byte is the IP version field. */
413 if (((data[0] & 0xF0) >> 4) == IPVERSION) {
414 rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN),
415 &dummy_ipv4_eth_hdr, RTE_ETHER_HDR_LEN);
416 pkt->l3_len = sizeof(struct rte_ipv4_hdr);
418 rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN),
419 &dummy_ipv6_eth_hdr, RTE_ETHER_HDR_LEN);
420 pkt->l3_len = sizeof(struct rte_ipv6_hdr);
422 pkt->l2_len = RTE_ETHER_HDR_LEN;
/* Single-segment fast path when the buffer can hold header + payload. */
424 if (pkt->buf_len > (len + RTE_ETHER_HDR_LEN))
425 rte_memcpy(rte_pktmbuf_append(pkt, len), data, len);
427 copy_buf_to_pkt_segs(data, len, pkt, RTE_ETHER_HDR_LEN);
/* Lazily create the shared pools: @nb_mbuf packet mbufs plus session and
 * session-private pools sized from the device's security session size.
 * Existing pools are reused across testsuite setups.
 */
432 init_mempools(unsigned int nb_mbuf)
434 struct rte_security_ctx *sec_ctx;
435 uint16_t nb_sess = 512;
439 if (mbufpool == NULL) {
440 snprintf(s, sizeof(s), "mbuf_pool");
441 mbufpool = rte_pktmbuf_pool_create(s, nb_mbuf,
442 MEMPOOL_CACHE_SIZE, 0,
443 RTE_MBUF_DEFAULT_BUF_SIZE, SOCKET_ID_ANY);
444 if (mbufpool == NULL) {
445 printf("Cannot init mbuf pool\n");
448 printf("Allocated mbuf pool\n");
/* Session element size comes from the port's security context. */
451 sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
452 if (sec_ctx == NULL) {
453 printf("Device does not support Security ctx\n");
456 sess_sz = rte_security_session_get_size(sec_ctx);
457 if (sess_pool == NULL) {
458 snprintf(s, sizeof(s), "sess_pool");
459 sess_pool = rte_mempool_create(s, nb_sess, sess_sz,
460 MEMPOOL_CACHE_SIZE, 0,
461 NULL, NULL, NULL, NULL,
463 if (sess_pool == NULL) {
464 printf("Cannot init sess pool\n");
467 printf("Allocated sess pool\n");
469 if (sess_priv_pool == NULL) {
470 snprintf(s, sizeof(s), "sess_priv_pool");
471 sess_priv_pool = rte_mempool_create(s, nb_sess, sess_sz,
472 MEMPOOL_CACHE_SIZE, 0,
473 NULL, NULL, NULL, NULL,
475 if (sess_priv_pool == NULL) {
476 printf("Cannot init sess_priv pool\n");
479 printf("Allocated sess_priv pool\n");
/* Install a catch-all flow on @portid directing every ESP packet to the
 * SECURITY action, and remember it in default_flow[] for later destroy.
 */
486 create_default_flow(uint16_t portid)
488 struct rte_flow_action action[2];
489 struct rte_flow_item pattern[2];
490 struct rte_flow_attr attr = {0};
491 struct rte_flow_error err;
492 struct rte_flow *flow;
495 /* Add the default rte_flow to enable SECURITY for all ESP packets */
497 pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP;
498 pattern[0].spec = NULL;
499 pattern[0].mask = NULL;
500 pattern[0].last = NULL;
501 pattern[1].type = RTE_FLOW_ITEM_TYPE_END;
503 action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
504 action[0].conf = NULL;
505 action[1].type = RTE_FLOW_ACTION_TYPE_END;
506 action[1].conf = NULL;
/* Validate first so unsupported PMDs are reported before create. */
510 ret = rte_flow_validate(portid, &attr, pattern, action, &err);
512 printf("\nValidate flow failed, ret = %d\n", ret);
515 flow = rte_flow_create(portid, &attr, pattern, action, &err);
517 printf("\nDefault flow rule create failed\n");
521 default_flow[portid] = flow;
/* Tear down the catch-all ESP flow on @portid, if one was installed. */
527 destroy_default_flow(uint16_t portid)
529 struct rte_flow_error err;
/* Nothing to do when no default flow was created for this port. */
532 if (!default_flow[portid])
534 ret = rte_flow_destroy(portid, default_flow[portid], &err);
536 printf("\nDefault flow rule destroy failed\n");
/* Clear the slot so a later destroy is a no-op. */
539 default_flow[portid] = NULL;
/* Burst arrays shared by all cases; allocated in testsuite setup. */
542 struct rte_mbuf **tx_pkts_burst;
543 struct rte_mbuf **rx_pkts_burst;
/* Compare the payload of (possibly segmented) mbuf @m against @tot_len bytes
 * of @ref; dumps both sides on mismatch and the whole chain when data is
 * missing. NOTE(review): listing is truncated — the per-segment advance and
 * return statements are not visible here.
 */
546 compare_pkt_data(struct rte_mbuf *m, uint8_t *ref, unsigned int tot_len)
549 unsigned int nb_segs = m->nb_segs;
550 unsigned int matched = 0;
551 struct rte_mbuf *save = m;
/* Clamp the compare window to the current segment's data length. */
555 if (len > m->data_len)
558 if (memcmp(rte_pktmbuf_mtod(m, char *),
559 ref + matched, len)) {
560 printf("\n====Reassembly case failed: Data Mismatch");
561 rte_hexdump(stdout, "Reassembled",
562 rte_pktmbuf_mtod(m, char *),
564 rte_hexdump(stdout, "reference",
/* Ran out of segments before consuming @tot_len reference bytes. */
576 printf("\n====Reassembly case failed: Data Missing %u",
578 printf("\n====nb_segs %u, tot_len %u", nb_segs, tot_len);
579 rte_pktmbuf_dump(stderr, save, -1);
/* Return true when @mbuf carries the "IP reassembly incomplete" dynflag.
 * The flag bit is looked up once and cached in a function-local static.
 */
586 is_ip_reassembly_incomplete(struct rte_mbuf *mbuf)
588 static uint64_t ip_reassembly_dynflag;
589 int ip_reassembly_dynflag_offset;
591 if (ip_reassembly_dynflag == 0) {
592 ip_reassembly_dynflag_offset = rte_mbuf_dynflag_lookup(
593 RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME, NULL);
/* Flag not registered: treat as "not incomplete" (early return assumed). */
594 if (ip_reassembly_dynflag_offset < 0)
596 ip_reassembly_dynflag = RTE_BIT64(ip_reassembly_dynflag_offset);
599 return (mbuf->ol_flags & ip_reassembly_dynflag) != 0;
/* Free @mbuf; when HW reassembly left it incomplete, also walk the dynfield
 * next_frag chain and free every pending fragment.
 */
603 free_mbuf(struct rte_mbuf *mbuf)
605 rte_eth_ip_reassembly_dynfield_t dynfield;
610 if (!is_ip_reassembly_incomplete(mbuf)) {
611 rte_pktmbuf_free(mbuf);
/* Without a registered dynfield we cannot follow the frag chain. */
613 if (ip_reassembly_dynfield_offset < 0)
/* Copy the dynfield before freeing, then advance to the next fragment. */
617 dynfield = *RTE_MBUF_DYNFIELD(mbuf,
618 ip_reassembly_dynfield_offset,
619 rte_eth_ip_reassembly_dynfield_t *);
620 rte_pktmbuf_free(mbuf);
621 mbuf = dynfield.next_frag;
/* When HW IP reassembly is incomplete, walk the dynfield fragment chain of
 * @mbuf and compare each fragment (Ethernet header stripped) against the
 * expected fragments in @vector. NOTE(review): listing is truncated — the
 * early-error returns and loop increments are not visible here.
 */
628 get_and_verify_incomplete_frags(struct rte_mbuf *mbuf,
629 struct reassembly_vector *vector)
631 rte_eth_ip_reassembly_dynfield_t *dynfield[MAX_PKT_BURST];
634 * IP reassembly offload is incomplete, and fragments are listed in
635 * dynfield which can be reassembled in SW.
637 printf("\nHW IP Reassembly is not complete; attempt SW IP Reassembly,"
638 "\nMatching with original frags.");
640 if (ip_reassembly_dynfield_offset < 0)
/* First fragment is the head mbuf itself. */
643 printf("\ncomparing frag: %d", j);
644 /* Skip Ethernet header comparison */
645 rte_pktmbuf_adj(mbuf, RTE_ETHER_HDR_LEN);
646 ret = compare_pkt_data(mbuf, vector->frags[j]->data,
647 vector->frags[j]->len);
/* Second fragment comes from the head mbuf's dynfield. */
651 dynfield[j] = RTE_MBUF_DYNFIELD(mbuf, ip_reassembly_dynfield_offset,
652 rte_eth_ip_reassembly_dynfield_t *);
653 printf("\ncomparing frag: %d", j);
654 /* Skip Ethernet header comparison */
655 rte_pktmbuf_adj(dynfield[j]->next_frag, RTE_ETHER_HDR_LEN);
656 ret = compare_pkt_data(dynfield[j]->next_frag, vector->frags[j]->data,
657 vector->frags[j]->len);
/* Remaining fragments: follow next_frag while the chain continues. */
661 while ((dynfield[j]->nb_frags > 1) &&
662 is_ip_reassembly_incomplete(dynfield[j]->next_frag)) {
664 dynfield[j] = RTE_MBUF_DYNFIELD(dynfield[j-1]->next_frag,
665 ip_reassembly_dynfield_offset,
666 rte_eth_ip_reassembly_dynfield_t *);
667 printf("\ncomparing frag: %d", j);
668 /* Skip Ethernet header comparison */
669 rte_pktmbuf_adj(dynfield[j]->next_frag, RTE_ETHER_HDR_LEN);
670 ret = compare_pkt_data(dynfield[j]->next_frag,
671 vector->frags[j]->data, vector->frags[j]->len);
/* End-to-end IPsec + IP-reassembly test: encrypt @vector's fragments on
 * egress SAs (one SA per burst entry, SPIs offset by index), loop them back,
 * and verify the device reassembles + decrypts them into the full packet.
 * Falls back to fragment-by-fragment verification when HW reassembly is
 * incomplete. NOTE(review): listing is truncated — error paths, some
 * declarations and goto labels are not visible in this view.
 */
679 test_ipsec_with_reassembly(struct reassembly_vector *vector,
680 const struct ipsec_test_flags *flags)
682 struct rte_security_session *out_ses[ENCAP_DECAP_BURST_SZ] = {0};
683 struct rte_security_session *in_ses[ENCAP_DECAP_BURST_SZ] = {0};
684 struct rte_eth_ip_reassembly_params reass_capa = {0};
685 struct rte_security_session_conf sess_conf_out = {0};
686 struct rte_security_session_conf sess_conf_in = {0};
687 unsigned int nb_tx, burst_sz, nb_sent = 0;
688 struct rte_crypto_sym_xform cipher_out = {0};
689 struct rte_crypto_sym_xform auth_out = {0};
690 struct rte_crypto_sym_xform aead_out = {0};
691 struct rte_crypto_sym_xform cipher_in = {0};
692 struct rte_crypto_sym_xform auth_in = {0};
693 struct rte_crypto_sym_xform aead_in = {0};
694 struct ipsec_test_data sa_data;
695 struct rte_security_ctx *ctx;
696 unsigned int i, nb_rx = 0, j;
700 burst_sz = vector->burst ? ENCAP_DECAP_BURST_SZ : 1;
701 nb_tx = vector->nb_frags * burst_sz;
/* Reassembly config can only be changed while the port is stopped. */
703 rte_eth_dev_stop(port_id);
705 printf("rte_eth_dev_stop: err=%s, port=%u\n",
706 rte_strerror(-ret), port_id);
709 rte_eth_ip_reassembly_capability_get(port_id, &reass_capa);
710 if (reass_capa.max_frags < vector->nb_frags)
712 if (reass_capa.timeout_ms > APP_REASS_TIMEOUT) {
713 reass_capa.timeout_ms = APP_REASS_TIMEOUT;
714 rte_eth_ip_reassembly_conf_set(port_id, &reass_capa);
717 ret = rte_eth_dev_start(port_id);
719 printf("rte_eth_dev_start: err=%d, port=%d\n",
724 memset(tx_pkts_burst, 0, sizeof(tx_pkts_burst[0]) * nb_tx);
725 memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * nb_tx);
/* Build nb_tx plaintext fragment packets, vector->nb_frags per SA. */
727 for (i = 0; i < nb_tx; i += vector->nb_frags) {
728 for (j = 0; j < vector->nb_frags; j++) {
729 tx_pkts_burst[i+j] = init_packet(mbufpool,
730 vector->frags[j]->data,
731 vector->frags[j]->len);
732 if (tx_pkts_burst[i+j] == NULL) {
734 printf("\n packed init failed\n");
/* One egress SA per burst entry; SPI offset keeps SAs distinct. */
740 for (i = 0; i < burst_sz; i++) {
741 memcpy(&sa_data, vector->sa_data,
742 sizeof(struct ipsec_test_data));
743 /* Update SPI for every new SA */
744 sa_data.ipsec_xform.spi += i;
745 sa_data.ipsec_xform.direction =
746 RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
748 sess_conf_out.crypto_xform = &aead_out;
750 sess_conf_out.crypto_xform = &cipher_out;
751 sess_conf_out.crypto_xform->next = &auth_out;
754 /* Create Inline IPsec outbound session. */
755 ret = create_inline_ipsec_session(&sa_data, port_id,
756 &out_ses[i], &ctx, &ol_flags, flags,
759 printf("\nInline outbound session create failed\n");
/* Attach per-packet security metadata; j tracks which SA a packet
 * belongs to and advances every vector->nb_frags packets.
 */
765 for (i = 0; i < nb_tx; i++) {
766 if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
767 rte_security_set_pkt_metadata(ctx,
768 out_ses[j], tx_pkts_burst[i], NULL);
769 tx_pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
771 /* Move to next SA after nb_frags */
772 if ((i + 1) % vector->nb_frags == 0)
/* Matching ingress SAs (same SPIs) for decrypt + reassembly on Rx. */
776 for (i = 0; i < burst_sz; i++) {
777 memcpy(&sa_data, vector->sa_data,
778 sizeof(struct ipsec_test_data));
779 /* Update SPI for every new SA */
780 sa_data.ipsec_xform.spi += i;
781 sa_data.ipsec_xform.direction =
782 RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
785 sess_conf_in.crypto_xform = &aead_in;
787 sess_conf_in.crypto_xform = &auth_in;
788 sess_conf_in.crypto_xform->next = &cipher_in;
790 /* Create Inline IPsec inbound session. */
791 ret = create_inline_ipsec_session(&sa_data, port_id, &in_ses[i],
792 &ctx, &ol_flags, flags, &sess_conf_in);
794 printf("\nInline inbound session create failed\n");
799 /* Retrieve reassembly dynfield offset if available */
800 if (ip_reassembly_dynfield_offset < 0 && vector->nb_frags > 1)
801 ip_reassembly_dynfield_offset = rte_mbuf_dynfield_lookup(
802 RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME, NULL);
805 ret = create_default_flow(port_id);
809 nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_tx);
810 if (nb_sent != nb_tx) {
812 printf("\nFailed to tx %u pkts", nb_tx);
818 /* Retry few times before giving up */
822 nb_rx += rte_eth_rx_burst(port_id, 0, &rx_pkts_burst[nb_rx],
/* NOTE(review): the visible loop condition (j < 5 || !nb_rx) keeps
 * polling until at least one packet arrives — could spin indefinitely
 * if the device drops everything. Confirm against the full source.
 */
828 } while (j < 5 || !nb_rx);
830 /* Check for minimum number of Rx packets expected */
831 if ((vector->nb_frags == 1 && nb_rx != nb_tx) ||
832 (vector->nb_frags > 1 && nb_rx < burst_sz)) {
833 printf("\nreceived less Rx pkts(%u) pkts\n", nb_rx);
/* Verify each received packet: either an incomplete-reassembly chain or
 * a fully reassembled + decrypted packet matching the original payload.
 */
838 for (i = 0; i < nb_rx; i++) {
839 if (vector->nb_frags > 1 &&
840 is_ip_reassembly_incomplete(rx_pkts_burst[i])) {
841 ret = get_and_verify_incomplete_frags(rx_pkts_burst[i],
843 if (ret != TEST_SUCCESS)
848 if (rx_pkts_burst[i]->ol_flags &
849 RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED ||
850 !(rx_pkts_burst[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD)) {
851 printf("\nsecurity offload failed\n");
856 if (vector->full_pkt->len + RTE_ETHER_HDR_LEN !=
857 rx_pkts_burst[i]->pkt_len) {
858 printf("\nreassembled/decrypted packet length mismatch\n");
862 rte_pktmbuf_adj(rx_pkts_burst[i], RTE_ETHER_HDR_LEN);
863 ret = compare_pkt_data(rx_pkts_burst[i],
864 vector->full_pkt->data,
865 vector->full_pkt->len);
866 if (ret != TEST_SUCCESS)
/* Cleanup: flow, all sessions, and any unsent/received mbufs. */
871 destroy_default_flow(port_id);
873 /* Clear session data. */
874 for (i = 0; i < burst_sz; i++) {
876 rte_security_session_destroy(ctx, out_ses[i]);
878 rte_security_session_destroy(ctx, in_ses[i]);
881 for (i = nb_sent; i < nb_tx; i++)
882 free_mbuf(tx_pkts_burst[i]);
883 for (i = 0; i < nb_rx; i++)
884 free_mbuf(rx_pkts_burst[i]);
/* Core single-SA processing loop: create one inline IPsec session from @td,
 * send @nb_pkts copies of the input text through the loopback port, receive
 * them back, and post-process/verify each (optionally capturing results in
 * @res_d for the caller's inbound pass). NOTE(review): listing is truncated —
 * parameter lines (nb_pkts, silent), gotos and some frees are not visible.
 */
889 test_ipsec_inline_proto_process(struct ipsec_test_data *td,
890 struct ipsec_test_data *res_d,
893 const struct ipsec_test_flags *flags)
895 struct rte_security_session_conf sess_conf = {0};
896 struct rte_crypto_sym_xform cipher = {0};
897 struct rte_crypto_sym_xform auth = {0};
898 struct rte_crypto_sym_xform aead = {0};
899 struct rte_security_session *ses;
900 struct rte_security_ctx *ctx;
901 int nb_rx = 0, nb_sent;
905 memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * nb_pkts);
/* Chain order depends on direction: egress cipher->auth, else auth->cipher. */
908 sess_conf.crypto_xform = &aead;
910 if (td->ipsec_xform.direction ==
911 RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
912 sess_conf.crypto_xform = &cipher;
913 sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
914 sess_conf.crypto_xform->next = &auth;
915 sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
917 sess_conf.crypto_xform = &auth;
918 sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
919 sess_conf.crypto_xform->next = &cipher;
920 sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
924 /* Create Inline IPsec session. */
925 ret = create_inline_ipsec_session(td, port_id, &ses, &ctx,
926 &ol_flags, flags, &sess_conf);
/* Ingress needs the catch-all ESP flow so packets hit the SECURITY action. */
930 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
931 ret = create_default_flow(port_id);
935 for (i = 0; i < nb_pkts; i++) {
936 tx_pkts_burst[i] = init_packet(mbufpool, td->input_text.data,
938 if (tx_pkts_burst[i] == NULL) {
940 rte_pktmbuf_free(tx_pkts_burst[i]);
/* Apply per-flag packet mutations (corruption, header tweaks, ...). */
945 if (test_ipsec_pkt_update(rte_pktmbuf_mtod_offset(tx_pkts_burst[i],
946 uint8_t *, RTE_ETHER_HDR_LEN), flags)) {
948 rte_pktmbuf_free(tx_pkts_burst[i]);
953 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
954 if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
955 rte_security_set_pkt_metadata(ctx, ses,
956 tx_pkts_burst[i], NULL);
957 tx_pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
960 /* Send packet to ethdev for inline IPsec processing. */
961 nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_pkts);
962 if (nb_sent != nb_pkts) {
963 printf("\nUnable to TX %d packets", nb_pkts);
964 for ( ; nb_sent < nb_pkts; nb_sent++)
965 rte_pktmbuf_free(tx_pkts_burst[nb_sent]);
972 /* Receive back packet on loopback interface. */
975 nb_rx += rte_eth_rx_burst(port_id, 0, &rx_pkts_burst[nb_rx],
977 if (nb_rx >= nb_sent)
/* NOTE(review): visible condition (j++ < 5 || nb_rx == 0) keeps polling
 * until at least one packet arrives — potentially unbounded if the
 * device drops all traffic. Confirm against the full source.
 */
979 } while (j++ < 5 || nb_rx == 0);
981 if (nb_rx != nb_sent) {
982 printf("\nUnable to RX all %d packets", nb_sent);
984 rte_pktmbuf_free(rx_pkts_burst[nb_rx]);
/* Verify every received packet, then its session stats, freeing as we go. */
989 for (i = 0; i < nb_rx; i++) {
990 rte_pktmbuf_adj(rx_pkts_burst[i], RTE_ETHER_HDR_LEN);
992 ret = test_ipsec_post_process(rx_pkts_burst[i], td,
993 res_d, silent, flags);
994 if (ret != TEST_SUCCESS) {
995 for ( ; i < nb_rx; i++)
996 rte_pktmbuf_free(rx_pkts_burst[i]);
1000 ret = test_ipsec_stats_verify(ctx, ses, flags,
1001 td->ipsec_xform.direction);
1002 if (ret != TEST_SUCCESS) {
1003 for ( ; i < nb_rx; i++)
1004 rte_pktmbuf_free(rx_pkts_burst[i]);
1008 rte_pktmbuf_free(rx_pkts_burst[i]);
1009 rx_pkts_burst[i] = NULL;
1013 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
1014 destroy_default_flow(port_id);
1016 /* Destroy session so that other cases can create the session again */
1017 rte_security_session_destroy(ctx, ses);
/* Run the outbound-then-inbound round-trip for every algorithm in alg_list
 * under the given @flags, skipping combinations the flags make meaningless
 * (ICV with NULL auth, IV-gen with NULL cipher, GMAC with non-NULL cipher);
 * tallies pass/fail and returns SUCCESS only if something passed.
 * NOTE(review): listing is truncated — pass/fail counters' increments and
 * the final return selection are partially out of view.
 */
1024 test_ipsec_inline_proto_all(const struct ipsec_test_flags *flags)
1026 struct ipsec_test_data td_outb;
1027 struct ipsec_test_data td_inb;
1028 unsigned int i, nb_pkts = 1, pass_cnt = 0, fail_cnt = 0;
/* Expiry/IV-gen cases need a burst of packets rather than one. */
1031 if (flags->iv_gen || flags->sa_expiry_pkts_soft ||
1032 flags->sa_expiry_pkts_hard)
1033 nb_pkts = IPSEC_TEST_PACKETS_MAX;
1035 for (i = 0; i < RTE_DIM(alg_list); i++) {
1036 test_ipsec_td_prepare(alg_list[i].param1,
1038 flags, &td_outb, 1);
1040 if (!td_outb.aead) {
1041 enum rte_crypto_cipher_algorithm cipher_alg;
1042 enum rte_crypto_auth_algorithm auth_alg;
1044 cipher_alg = td_outb.xform.chain.cipher.cipher.algo;
1045 auth_alg = td_outb.xform.chain.auth.auth.algo;
1047 if (td_outb.aes_gmac && cipher_alg != RTE_CRYPTO_CIPHER_NULL)
1050 /* ICV is not applicable for NULL auth */
1051 if (flags->icv_corrupt &&
1052 auth_alg == RTE_CRYPTO_AUTH_NULL)
1055 /* IV is not applicable for NULL cipher */
1056 if (flags->iv_gen &&
1057 cipher_alg == RTE_CRYPTO_CIPHER_NULL)
1061 if (flags->udp_encap)
1062 td_outb.ipsec_xform.options.udp_encap = 1;
/* Outbound pass produces ciphertext captured into td_inb. */
1064 ret = test_ipsec_inline_proto_process(&td_outb, &td_inb, nb_pkts,
1066 if (ret == TEST_SKIPPED)
1069 if (ret == TEST_FAILED) {
1070 printf("\n TEST FAILED");
1071 test_ipsec_display_alg(alg_list[i].param1,
1072 alg_list[i].param2);
/* Derive the inbound vector from the outbound result, then verify it. */
1077 test_ipsec_td_update(&td_inb, &td_outb, 1, flags);
1079 ret = test_ipsec_inline_proto_process(&td_inb, NULL, nb_pkts,
1081 if (ret == TEST_SKIPPED)
1084 if (ret == TEST_FAILED) {
1085 printf("\n TEST FAILED");
1086 test_ipsec_display_alg(alg_list[i].param1,
1087 alg_list[i].param2);
1092 if (flags->display_alg)
1093 test_ipsec_display_alg(alg_list[i].param1,
1094 alg_list[i].param2);
1099 printf("Tests passed: %d, failed: %d", pass_cnt, fail_cnt);
1103 return TEST_SUCCESS;
1105 return TEST_SKIPPED;
/* ESN/antireplay variant of the processing loop: packets are sent one at a
 * time so the session's ESN value can be updated from each td[i] before
 * transmit (when flags->antireplay is set); each reply is verified against
 * its own vector. NOTE(review): listing is truncated — parameter lines
 * (nb_pkts, silent), some frees and gotos are not visible here.
 */
1109 test_ipsec_inline_proto_process_with_esn(struct ipsec_test_data td[],
1110 struct ipsec_test_data res_d[],
1113 const struct ipsec_test_flags *flags)
1115 struct rte_security_session_conf sess_conf = {0};
1116 struct ipsec_test_data *res_d_tmp = NULL;
1117 struct rte_crypto_sym_xform cipher = {0};
1118 struct rte_crypto_sym_xform auth = {0};
1119 struct rte_crypto_sym_xform aead = {0};
1120 struct rte_mbuf *rx_pkt = NULL;
1121 struct rte_mbuf *tx_pkt = NULL;
1123 struct rte_security_session *ses;
1124 struct rte_security_ctx *ctx;
/* Same direction-dependent xform chaining as the non-ESN path. */
1129 sess_conf.crypto_xform = &aead;
1131 if (td[0].ipsec_xform.direction ==
1132 RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1133 sess_conf.crypto_xform = &cipher;
1134 sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1135 sess_conf.crypto_xform->next = &auth;
1136 sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
1138 sess_conf.crypto_xform = &auth;
1139 sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
1140 sess_conf.crypto_xform->next = &cipher;
1141 sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1145 /* Create Inline IPsec session. */
1146 ret = create_inline_ipsec_session(&td[0], port_id, &ses, &ctx,
1147 &ol_flags, flags, &sess_conf);
1151 if (td[0].ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
1152 ret = create_default_flow(port_id);
1157 for (i = 0; i < nb_pkts; i++) {
1158 tx_pkt = init_packet(mbufpool, td[i].input_text.data,
1159 td[i].input_text.len);
1160 if (tx_pkt == NULL) {
1165 if (test_ipsec_pkt_update(rte_pktmbuf_mtod_offset(tx_pkt,
1166 uint8_t *, RTE_ETHER_HDR_LEN), flags)) {
1171 if (td[i].ipsec_xform.direction ==
1172 RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
/* Push this vector's ESN into the live session before sending. */
1173 if (flags->antireplay) {
1174 sess_conf.ipsec.esn.value =
1175 td[i].ipsec_xform.esn.value;
1176 ret = rte_security_session_update(ctx, ses,
1179 printf("Could not update ESN in session\n");
1180 rte_pktmbuf_free(tx_pkt);
1185 if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
1186 rte_security_set_pkt_metadata(ctx, ses,
1188 tx_pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
1190 /* Send packet to ethdev for inline IPsec processing. */
1191 nb_sent = rte_eth_tx_burst(port_id, 0, &tx_pkt, 1);
1193 printf("\nUnable to TX packets");
1194 rte_pktmbuf_free(tx_pkt);
1201 /* Receive back packet on loopback interface. */
/* NOTE(review): this wait has no visible timeout — it spins until a
 * packet arrives. Confirm against the full source.
 */
1204 nb_rx = rte_eth_rx_burst(port_id, 0, &rx_pkt, 1);
1205 } while (nb_rx == 0);
1207 rte_pktmbuf_adj(rx_pkt, RTE_ETHER_HDR_LEN);
/* Capture per-packet results when the caller provided res_d[]. */
1210 res_d_tmp = &res_d[i];
1212 ret = test_ipsec_post_process(rx_pkt, &td[i],
1213 res_d_tmp, silent, flags);
1214 if (ret != TEST_SUCCESS) {
1215 rte_pktmbuf_free(rx_pkt);
1219 ret = test_ipsec_stats_verify(ctx, ses, flags,
1220 td->ipsec_xform.direction);
1221 if (ret != TEST_SUCCESS) {
1222 rte_pktmbuf_free(rx_pkt);
1226 rte_pktmbuf_free(rx_pkt);
1232 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
1233 destroy_default_flow(port_id);
1235 /* Destroy session so that other cases can create the session again */
1236 rte_security_session_destroy(ctx, ses);
/* Per-test setup: start the port, enable promiscuous mode, and wait for
 * link-up on all ports.
 */
1243 ut_setup_inline_ipsec(void)
1248 ret = rte_eth_dev_start(port_id);
1250 printf("rte_eth_dev_start: err=%d, port=%d\n",
1254 /* always enable promiscuous */
1255 ret = rte_eth_promiscuous_enable(port_id);
1257 printf("rte_eth_promiscuous_enable: err=%s, port=%d\n",
1258 rte_strerror(-ret), port_id);
1262 check_all_ports_link_status(1, RTE_PORT_ALL);
/* Per-test teardown: stop every port and reset its IP-reassembly config to
 * defaults (zeroed params).
 */
1268 ut_teardown_inline_ipsec(void)
1270 struct rte_eth_ip_reassembly_params reass_conf = {0};
1274 /* port tear down */
1275 RTE_ETH_FOREACH_DEV(portid) {
1276 ret = rte_eth_dev_stop(portid);
1278 printf("rte_eth_dev_stop: err=%s, port=%u\n",
1279 rte_strerror(-ret), portid);
1281 /* Clear reassembly configuration */
1282 rte_eth_ip_reassembly_conf_set(portid, &reass_conf);
/* Suite-level setup: verify at least one port exists, create pools and
 * burst arrays, then configure one port with a single Rx/Tx queue pair and
 * populate the algorithm list. NOTE(review): listing is truncated — error
 * returns and some allocation checks are not visible here.
 */
1287 inline_ipsec_testsuite_setup(void)
1293 uint16_t nb_rx_queue = 1, nb_tx_queue = 1;
1295 printf("Start inline IPsec test.\n");
1297 nb_ports = rte_eth_dev_count_avail();
1298 if (nb_ports < NB_ETHPORTS_USED) {
1299 printf("At least %u port(s) used for test\n",
1301 return TEST_SKIPPED;
1304 ret = init_mempools(NB_MBUF);
1308 if (tx_pkts_burst == NULL) {
1309 tx_pkts_burst = (struct rte_mbuf **)rte_calloc("tx_buff",
1312 RTE_CACHE_LINE_SIZE);
1316 rx_pkts_burst = (struct rte_mbuf **)rte_calloc("rx_buff",
1319 RTE_CACHE_LINE_SIZE);
1324 printf("Generate %d packets\n", MAX_TRAFFIC_BURST);
1326 nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
1327 nb_txd = RTE_TEST_TX_DESC_DEFAULT;
1329 /* configuring port 0 for the test is enough */
1331 /* port configure */
1332 ret = rte_eth_dev_configure(port_id, nb_rx_queue,
1333 nb_tx_queue, &port_conf);
1335 printf("Cannot configure device: err=%d, port=%d\n",
1339 ret = rte_eth_macaddr_get(port_id, &ports_eth_addr[port_id]);
1341 printf("Cannot get mac address: err=%d, port=%d\n",
1345 printf("Port %u ", port_id);
1346 print_ethaddr("Address:", &ports_eth_addr[port_id]);
1349 /* tx queue setup */
1350 ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
1351 SOCKET_ID_ANY, &tx_conf);
1353 printf("rte_eth_tx_queue_setup: err=%d, port=%d\n",
1357 /* rx queue steup */
1358 ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, SOCKET_ID_ANY,
1359 &rx_conf, mbufpool);
1361 printf("rte_eth_rx_queue_setup: err=%d, port=%d\n",
1365 test_ipsec_alg_list_populate();
1371 inline_ipsec_testsuite_teardown(void)
1376 /* port tear down */
1377 RTE_ETH_FOREACH_DEV(portid) {
1378 ret = rte_eth_dev_reset(portid);
1380 printf("rte_eth_dev_reset: err=%s, port=%u\n",
1381 rte_strerror(-ret), port_id);
/*
 * Build a writable reassembly vector from const test data: deep-copy the
 * full packet and every fragment, populate payloads (only the first
 * fragment gets the full-payload flag), then run the inline reassembly
 * test with default flags.
 * NOTE(review): the return type, braces and the loop index declaration
 * (`i` is used below but never declared in this chunk) were dropped during
 * extraction; fragment kept verbatim.
 */
test_inline_ip_reassembly(const void *testdata)
	struct reassembly_vector reassembly_td = {0};
	const struct reassembly_vector *td = testdata;
	struct ip_reassembly_test_packet full_pkt;
	struct ip_reassembly_test_packet frags[MAX_FRAGS];
	struct ipsec_test_flags flags = {0};

	reassembly_td.sa_data = td->sa_data;
	reassembly_td.nb_frags = td->nb_frags;
	reassembly_td.burst = td->burst;

	memcpy(&full_pkt, td->full_pkt,
		sizeof(struct ip_reassembly_test_packet));
	reassembly_td.full_pkt = &full_pkt;

	test_vector_payload_populate(reassembly_td.full_pkt, true);
	for (; i < reassembly_td.nb_frags; i++) {
		memcpy(&frags[i], td->frags[i],
			sizeof(struct ip_reassembly_test_packet));
		reassembly_td.frags[i] = &frags[i];
		/* Only fragment 0 carries the leading payload marker */
		test_vector_payload_populate(reassembly_td.frags[i],
			(i == 0) ? true : false);

	return test_ipsec_with_reassembly(&reassembly_td, &flags);
/*
 * Outbound known-vector test: push one packet through the inline IPsec
 * egress path and compare against the expected vector.  IV generation is
 * disabled for non-NULL ciphers so the produced ciphertext is comparable
 * with the fixed vector.
 * NOTE(review): the `if (` opener of the condition below and the trailing
 * arguments of the process call were dropped during extraction; fragment
 * kept verbatim.
 */
test_ipsec_inline_proto_known_vec(const void *test_data)
	struct ipsec_test_data td_outb;
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	memcpy(&td_outb, test_data, sizeof(td_outb));

	    td_outb.xform.chain.cipher.cipher.algo != RTE_CRYPTO_CIPHER_NULL) {
		/* Disable IV gen to be able to test with known vectors */
		td_outb.ipsec_xform.options.iv_gen_disable = 1;

	return test_ipsec_inline_proto_process(&td_outb, NULL, 1,
1436 test_ipsec_inline_proto_known_vec_inb(const void *test_data)
1438 const struct ipsec_test_data *td = test_data;
1439 struct ipsec_test_flags flags;
1440 struct ipsec_test_data td_inb;
1442 memset(&flags, 0, sizeof(flags));
1444 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
1445 test_ipsec_td_in_from_out(td, &td_inb);
1447 memcpy(&td_inb, td, sizeof(td_inb));
1449 return test_ipsec_inline_proto_process(&td_inb, NULL, 1, false, &flags);
1453 test_ipsec_inline_proto_display_list(const void *data __rte_unused)
1455 struct ipsec_test_flags flags;
1457 memset(&flags, 0, sizeof(flags));
1459 flags.display_alg = true;
1461 return test_ipsec_inline_proto_all(&flags);
1465 test_ipsec_inline_proto_udp_encap(const void *data __rte_unused)
1467 struct ipsec_test_flags flags;
1469 memset(&flags, 0, sizeof(flags));
1471 flags.udp_encap = true;
1473 return test_ipsec_inline_proto_all(&flags);
1477 test_ipsec_inline_proto_udp_ports_verify(const void *data __rte_unused)
1479 struct ipsec_test_flags flags;
1481 memset(&flags, 0, sizeof(flags));
1483 flags.udp_encap = true;
1484 flags.udp_ports_verify = true;
1486 return test_ipsec_inline_proto_all(&flags);
1490 test_ipsec_inline_proto_err_icv_corrupt(const void *data __rte_unused)
1492 struct ipsec_test_flags flags;
1494 memset(&flags, 0, sizeof(flags));
1496 flags.icv_corrupt = true;
1498 return test_ipsec_inline_proto_all(&flags);
1502 test_ipsec_inline_proto_tunnel_dst_addr_verify(const void *data __rte_unused)
1504 struct ipsec_test_flags flags;
1506 memset(&flags, 0, sizeof(flags));
1508 flags.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR;
1510 return test_ipsec_inline_proto_all(&flags);
1514 test_ipsec_inline_proto_tunnel_src_dst_addr_verify(const void *data __rte_unused)
1516 struct ipsec_test_flags flags;
1518 memset(&flags, 0, sizeof(flags));
1520 flags.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR;
1522 return test_ipsec_inline_proto_all(&flags);
1526 test_ipsec_inline_proto_inner_ip_csum(const void *data __rte_unused)
1528 struct ipsec_test_flags flags;
1530 memset(&flags, 0, sizeof(flags));
1532 flags.ip_csum = true;
1534 return test_ipsec_inline_proto_all(&flags);
1538 test_ipsec_inline_proto_inner_l4_csum(const void *data __rte_unused)
1540 struct ipsec_test_flags flags;
1542 memset(&flags, 0, sizeof(flags));
1544 flags.l4_csum = true;
1546 return test_ipsec_inline_proto_all(&flags);
1550 test_ipsec_inline_proto_tunnel_v4_in_v4(const void *data __rte_unused)
1552 struct ipsec_test_flags flags;
1554 memset(&flags, 0, sizeof(flags));
1557 flags.tunnel_ipv6 = false;
1559 return test_ipsec_inline_proto_all(&flags);
1563 test_ipsec_inline_proto_tunnel_v6_in_v6(const void *data __rte_unused)
1565 struct ipsec_test_flags flags;
1567 memset(&flags, 0, sizeof(flags));
1570 flags.tunnel_ipv6 = true;
1572 return test_ipsec_inline_proto_all(&flags);
1576 test_ipsec_inline_proto_tunnel_v4_in_v6(const void *data __rte_unused)
1578 struct ipsec_test_flags flags;
1580 memset(&flags, 0, sizeof(flags));
1583 flags.tunnel_ipv6 = true;
1585 return test_ipsec_inline_proto_all(&flags);
1589 test_ipsec_inline_proto_tunnel_v6_in_v4(const void *data __rte_unused)
1591 struct ipsec_test_flags flags;
1593 memset(&flags, 0, sizeof(flags));
1596 flags.tunnel_ipv6 = false;
1598 return test_ipsec_inline_proto_all(&flags);
1602 test_ipsec_inline_proto_transport_v4(const void *data __rte_unused)
1604 struct ipsec_test_flags flags;
1606 memset(&flags, 0, sizeof(flags));
1609 flags.transport = true;
1611 return test_ipsec_inline_proto_all(&flags);
/*
 * Transport-mode run with inner L4 checksum offload enabled.
 * NOTE(review): the initializer field list was dropped during extraction
 * (upstream sets .l4_csum and .transport here); fragment kept verbatim.
 */
test_ipsec_inline_proto_transport_l4_csum(const void *data __rte_unused)
	struct ipsec_test_flags flags = {

	return test_ipsec_inline_proto_all(&flags);
1626 test_ipsec_inline_proto_stats(const void *data __rte_unused)
1628 struct ipsec_test_flags flags;
1630 memset(&flags, 0, sizeof(flags));
1632 flags.stats_success = true;
1634 return test_ipsec_inline_proto_all(&flags);
1638 test_ipsec_inline_proto_pkt_fragment(const void *data __rte_unused)
1640 struct ipsec_test_flags flags;
1642 memset(&flags, 0, sizeof(flags));
1644 flags.fragment = true;
1646 return test_ipsec_inline_proto_all(&flags);
1651 test_ipsec_inline_proto_copy_df_inner_0(const void *data __rte_unused)
1653 struct ipsec_test_flags flags;
1655 memset(&flags, 0, sizeof(flags));
1657 flags.df = TEST_IPSEC_COPY_DF_INNER_0;
1659 return test_ipsec_inline_proto_all(&flags);
1663 test_ipsec_inline_proto_copy_df_inner_1(const void *data __rte_unused)
1665 struct ipsec_test_flags flags;
1667 memset(&flags, 0, sizeof(flags));
1669 flags.df = TEST_IPSEC_COPY_DF_INNER_1;
1671 return test_ipsec_inline_proto_all(&flags);
1675 test_ipsec_inline_proto_set_df_0_inner_1(const void *data __rte_unused)
1677 struct ipsec_test_flags flags;
1679 memset(&flags, 0, sizeof(flags));
1681 flags.df = TEST_IPSEC_SET_DF_0_INNER_1;
1683 return test_ipsec_inline_proto_all(&flags);
1687 test_ipsec_inline_proto_set_df_1_inner_0(const void *data __rte_unused)
1689 struct ipsec_test_flags flags;
1691 memset(&flags, 0, sizeof(flags));
1693 flags.df = TEST_IPSEC_SET_DF_1_INNER_0;
1695 return test_ipsec_inline_proto_all(&flags);
1699 test_ipsec_inline_proto_ipv4_copy_dscp_inner_0(const void *data __rte_unused)
1701 struct ipsec_test_flags flags;
1703 memset(&flags, 0, sizeof(flags));
1705 flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_0;
1707 return test_ipsec_inline_proto_all(&flags);
1711 test_ipsec_inline_proto_ipv4_copy_dscp_inner_1(const void *data __rte_unused)
1713 struct ipsec_test_flags flags;
1715 memset(&flags, 0, sizeof(flags));
1717 flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_1;
1719 return test_ipsec_inline_proto_all(&flags);
1723 test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1(const void *data __rte_unused)
1725 struct ipsec_test_flags flags;
1727 memset(&flags, 0, sizeof(flags));
1729 flags.dscp = TEST_IPSEC_SET_DSCP_0_INNER_1;
1731 return test_ipsec_inline_proto_all(&flags);
1735 test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0(const void *data __rte_unused)
1737 struct ipsec_test_flags flags;
1739 memset(&flags, 0, sizeof(flags));
1741 flags.dscp = TEST_IPSEC_SET_DSCP_1_INNER_0;
1743 return test_ipsec_inline_proto_all(&flags);
1747 test_ipsec_inline_proto_ipv6_copy_dscp_inner_0(const void *data __rte_unused)
1749 struct ipsec_test_flags flags;
1751 memset(&flags, 0, sizeof(flags));
1754 flags.tunnel_ipv6 = true;
1755 flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_0;
1757 return test_ipsec_inline_proto_all(&flags);
1761 test_ipsec_inline_proto_ipv6_copy_dscp_inner_1(const void *data __rte_unused)
1763 struct ipsec_test_flags flags;
1765 memset(&flags, 0, sizeof(flags));
1768 flags.tunnel_ipv6 = true;
1769 flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_1;
1771 return test_ipsec_inline_proto_all(&flags);
1775 test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1(const void *data __rte_unused)
1777 struct ipsec_test_flags flags;
1779 memset(&flags, 0, sizeof(flags));
1782 flags.tunnel_ipv6 = true;
1783 flags.dscp = TEST_IPSEC_SET_DSCP_0_INNER_1;
1785 return test_ipsec_inline_proto_all(&flags);
1789 test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0(const void *data __rte_unused)
1791 struct ipsec_test_flags flags;
1793 memset(&flags, 0, sizeof(flags));
1796 flags.tunnel_ipv6 = true;
1797 flags.dscp = TEST_IPSEC_SET_DSCP_1_INNER_0;
1799 return test_ipsec_inline_proto_all(&flags);
1803 test_ipsec_inline_proto_ipv6_copy_flabel_inner_0(const void *data __rte_unused)
1805 struct ipsec_test_flags flags;
1807 memset(&flags, 0, sizeof(flags));
1810 flags.tunnel_ipv6 = true;
1811 flags.flabel = TEST_IPSEC_COPY_FLABEL_INNER_0;
1813 return test_ipsec_inline_proto_all(&flags);
1817 test_ipsec_inline_proto_ipv6_copy_flabel_inner_1(const void *data __rte_unused)
1819 struct ipsec_test_flags flags;
1821 memset(&flags, 0, sizeof(flags));
1824 flags.tunnel_ipv6 = true;
1825 flags.flabel = TEST_IPSEC_COPY_FLABEL_INNER_1;
1827 return test_ipsec_inline_proto_all(&flags);
1831 test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1(const void *data __rte_unused)
1833 struct ipsec_test_flags flags;
1835 memset(&flags, 0, sizeof(flags));
1838 flags.tunnel_ipv6 = true;
1839 flags.flabel = TEST_IPSEC_SET_FLABEL_0_INNER_1;
1841 return test_ipsec_inline_proto_all(&flags);
1845 test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0(const void *data __rte_unused)
1847 struct ipsec_test_flags flags;
1849 memset(&flags, 0, sizeof(flags));
1852 flags.tunnel_ipv6 = true;
1853 flags.flabel = TEST_IPSEC_SET_FLABEL_1_INNER_0;
1855 return test_ipsec_inline_proto_all(&flags);
1859 test_ipsec_inline_proto_ipv4_ttl_decrement(const void *data __rte_unused)
1861 struct ipsec_test_flags flags = {
1862 .dec_ttl_or_hop_limit = true
1865 return test_ipsec_inline_proto_all(&flags);
1869 test_ipsec_inline_proto_ipv6_hop_limit_decrement(const void *data __rte_unused)
1871 struct ipsec_test_flags flags = {
1873 .dec_ttl_or_hop_limit = true
1876 return test_ipsec_inline_proto_all(&flags);
1880 test_ipsec_inline_proto_iv_gen(const void *data __rte_unused)
1882 struct ipsec_test_flags flags;
1884 memset(&flags, 0, sizeof(flags));
1886 flags.iv_gen = true;
1888 return test_ipsec_inline_proto_all(&flags);
/*
 * Outbound known-vector test for a payload large enough to be fragmented
 * after encapsulation.  IV generation is disabled so the output matches
 * the fixed vector.
 * NOTE(review): the return type, braces and the trailing arguments of the
 * process call were dropped during extraction; fragment kept verbatim.
 */
test_ipsec_inline_proto_known_vec_fragmented(const void *test_data)
	struct ipsec_test_data td_outb;
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));
	flags.fragment = true;

	memcpy(&td_outb, test_data, sizeof(td_outb));

	/* Disable IV gen to be able to test with known vectors */
	td_outb.ipsec_xform.options.iv_gen_disable = 1;

	return test_ipsec_inline_proto_process(&td_outb, NULL, 1, false,
1910 test_ipsec_inline_pkt_replay(const void *test_data, const uint64_t esn[],
1911 bool replayed_pkt[], uint32_t nb_pkts, bool esn_en,
1914 struct ipsec_test_data td_outb[IPSEC_TEST_PACKETS_MAX];
1915 struct ipsec_test_data td_inb[IPSEC_TEST_PACKETS_MAX];
1916 struct ipsec_test_flags flags;
1917 uint32_t i, ret = 0;
1919 memset(&flags, 0, sizeof(flags));
1920 flags.antireplay = true;
1922 for (i = 0; i < nb_pkts; i++) {
1923 memcpy(&td_outb[i], test_data, sizeof(td_outb));
1924 td_outb[i].ipsec_xform.options.iv_gen_disable = 1;
1925 td_outb[i].ipsec_xform.replay_win_sz = winsz;
1926 td_outb[i].ipsec_xform.options.esn = esn_en;
1929 for (i = 0; i < nb_pkts; i++)
1930 td_outb[i].ipsec_xform.esn.value = esn[i];
1932 ret = test_ipsec_inline_proto_process_with_esn(td_outb, td_inb,
1933 nb_pkts, true, &flags);
1934 if (ret != TEST_SUCCESS)
1937 test_ipsec_td_update(td_inb, td_outb, nb_pkts, &flags);
1939 for (i = 0; i < nb_pkts; i++) {
1940 td_inb[i].ipsec_xform.options.esn = esn_en;
1941 /* Set antireplay flag for packets to be dropped */
1942 td_inb[i].ar_packet = replayed_pkt[i];
1945 ret = test_ipsec_inline_proto_process_with_esn(td_inb, NULL, nb_pkts,
/*
 * Non-ESN anti-replay scenario: drive 5 sequence numbers through a window
 * of size winsz; packets 3 and 5 (below-window and duplicate) must drop.
 * NOTE(review): the esn[] array declaration and the esn[0..2] assignments
 * were dropped during extraction; fragment kept verbatim.
 */
test_ipsec_inline_proto_pkt_antireplay(const void *test_data, uint64_t winsz)
	uint32_t nb_pkts = 5;
	bool replayed_pkt[5];

	/* 1. Advance the TOP of the window to WS * 2 */
	/* 2. Test sequence number within the new window(WS + 1) */
	/* 3. Test sequence number less than the window BOTTOM */
	/* 4. Test sequence number in the middle of the window */
	esn[3] = winsz + (winsz / 2);
	/* 5. Test replay of the packet in the middle of the window */
	esn[4] = winsz + (winsz / 2);

	replayed_pkt[0] = false;
	replayed_pkt[1] = false;
	replayed_pkt[2] = true;
	replayed_pkt[3] = false;
	replayed_pkt[4] = true;

	return test_ipsec_inline_pkt_replay(test_data, esn, replayed_pkt,
			nb_pkts, false, winsz);
/*
 * Anti-replay with each supported window size.
 * NOTE(review): return types and braces were dropped during extraction;
 * fragments kept verbatim.
 */
/* Anti-replay, 1024-entry window. */
test_ipsec_inline_proto_pkt_antireplay1024(const void *test_data)
	return test_ipsec_inline_proto_pkt_antireplay(test_data, 1024);

/* Anti-replay, 2048-entry window. */
test_ipsec_inline_proto_pkt_antireplay2048(const void *test_data)
	return test_ipsec_inline_proto_pkt_antireplay(test_data, 2048);

/* Anti-replay, 4096-entry window. */
test_ipsec_inline_proto_pkt_antireplay4096(const void *test_data)
	return test_ipsec_inline_proto_pkt_antireplay(test_data, 4096);
/*
 * ESN anti-replay scenario: walk the window across the 32-bit sequence
 * number rollover and verify only the two duplicates (packets 6 and 7)
 * are dropped.
 * NOTE(review): the esn[] array declaration and the tail of the final call
 * were dropped during extraction; fragment kept verbatim.
 */
test_ipsec_inline_proto_pkt_esn_antireplay(const void *test_data, uint64_t winsz)
	uint32_t nb_pkts = 7;
	bool replayed_pkt[7];

	/* Set the initial sequence number */
	esn[0] = (uint64_t)(0xFFFFFFFF - winsz);
	/* 1. Advance the TOP of the window to (1<<32 + WS/2) */
	esn[1] = (uint64_t)((1ULL << 32) + (winsz / 2));
	/* 2. Test sequence number within new window (1<<32 - WS/2 + 1) */
	esn[2] = (uint64_t)((1ULL << 32) - (winsz / 2) + 1);
	/* 3. Test with sequence number within window (1<<32 - 1) */
	esn[3] = (uint64_t)((1ULL << 32) - 1);
	/* 4. Test with sequence number within window (1<<32) */
	esn[4] = (uint64_t)(1ULL << 32);
	/* 5. Test with duplicate sequence number within
	 * new window (1<<32 - 1)
	 */
	esn[5] = (uint64_t)((1ULL << 32) - 1);
	/* 6. Test with duplicate sequence number within new window (1<<32) */
	esn[6] = (uint64_t)(1ULL << 32);

	replayed_pkt[0] = false;
	replayed_pkt[1] = false;
	replayed_pkt[2] = false;
	replayed_pkt[3] = false;
	replayed_pkt[4] = false;
	replayed_pkt[5] = true;
	replayed_pkt[6] = true;

	return test_ipsec_inline_pkt_replay(test_data, esn, replayed_pkt, nb_pkts,
/*
 * ESN anti-replay with each supported window size.
 * NOTE(review): return types and braces were dropped during extraction;
 * fragments kept verbatim.
 */
/* ESN anti-replay, 1024-entry window. */
test_ipsec_inline_proto_pkt_esn_antireplay1024(const void *test_data)
	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, 1024);

/* ESN anti-replay, 2048-entry window. */
test_ipsec_inline_proto_pkt_esn_antireplay2048(const void *test_data)
	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, 2048);

/* ESN anti-replay, 4096-entry window. */
test_ipsec_inline_proto_pkt_esn_antireplay4096(const void *test_data)
	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, 4096);
/*
 * Inline IPsec unit test suite table: outbound/inbound known vectors,
 * combined-mode algorithm sweeps, header-field handling, anti-replay and
 * IP reassembly cases.
 * NOTE(review): several TEST_CASE_NAMED_ST()/TEST_CASE_NAMED_WITH_DATA()
 * macro openers, some data arguments and the closing braces were dropped
 * during extraction; truncated entries are kept verbatim — verify against
 * the complete file before editing this table.
 */
static struct unit_test_suite inline_ipsec_testsuite = {
	.suite_name = "Inline IPsec Ethernet Device Unit Test Suite",
	.setup = inline_ipsec_testsuite_setup,
	.teardown = inline_ipsec_testsuite_teardown,
	.unit_test_cases = {
		/* Outbound (encrypt) known-vector cases */
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec, &pkt_aes_128_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 192)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec, &pkt_aes_192_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 256)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec,
			&pkt_aes_128_cbc_hmac_sha256),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec,
			&pkt_aes_128_cbc_hmac_sha384),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec,
			&pkt_aes_128_cbc_hmac_sha512),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv6 AES-GCM 128)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm_v6),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec,
			&pkt_aes_128_cbc_hmac_sha256_v6),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec,
			&pkt_null_aes_xcbc),

		TEST_CASE_NAMED_WITH_DATA(
			"Outbound fragmented packet",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_fragmented,
			&pkt_aes_128_gcm_frag),

		/* Inbound (decrypt/verify) known-vector cases */
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 192)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_192_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 256)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_cbc_null),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb,
			&pkt_aes_128_cbc_hmac_sha256),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb,
			&pkt_aes_128_cbc_hmac_sha384),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb,
			&pkt_aes_128_cbc_hmac_sha512),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv6 AES-GCM 128)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm_v6),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb,
			&pkt_aes_128_cbc_hmac_sha256_v6),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb,
			&pkt_null_aes_xcbc),

		/* NOTE(review): macro openers truncated from here on for the
		 * no-data cases; entries kept verbatim. */
			"Combined test alg list",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_display_list),
			"UDP encapsulation",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_udp_encap),
			"UDP encapsulation ports verification test",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_udp_ports_verify),
			"Negative test: ICV corruption",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_err_icv_corrupt),
			"Tunnel dst addr verification",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_tunnel_dst_addr_verify),
			"Tunnel src and dst addr verification",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_tunnel_src_dst_addr_verify),
			"Inner IP checksum",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_inner_ip_csum),
			"Inner L4 checksum",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_inner_l4_csum),
			"Tunnel IPv4 in IPv4",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_tunnel_v4_in_v4),
			"Tunnel IPv6 in IPv6",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_tunnel_v6_in_v6),
			"Tunnel IPv4 in IPv6",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_tunnel_v4_in_v6),
			"Tunnel IPv6 in IPv4",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_tunnel_v6_in_v4),
			/* NOTE(review): "Transport IPv4" title line truncated */
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_transport_v4),
			"Transport l4 checksum",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_transport_l4_csum),
			"Statistics: success",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_stats),
			"Fragmented packet",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_pkt_fragment),
			"Tunnel header copy DF (inner 0)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_copy_df_inner_0),
			"Tunnel header copy DF (inner 1)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_copy_df_inner_1),
			"Tunnel header set DF 0 (inner 1)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_set_df_0_inner_1),
			"Tunnel header set DF 1 (inner 0)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_set_df_1_inner_0),
			"Tunnel header IPv4 copy DSCP (inner 0)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv4_copy_dscp_inner_0),
			"Tunnel header IPv4 copy DSCP (inner 1)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv4_copy_dscp_inner_1),
			"Tunnel header IPv4 set DSCP 0 (inner 1)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1),
			"Tunnel header IPv4 set DSCP 1 (inner 0)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0),
			"Tunnel header IPv6 copy DSCP (inner 0)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv6_copy_dscp_inner_0),
			"Tunnel header IPv6 copy DSCP (inner 1)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv6_copy_dscp_inner_1),
			"Tunnel header IPv6 set DSCP 0 (inner 1)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1),
			"Tunnel header IPv6 set DSCP 1 (inner 0)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0),
			"Tunnel header IPv6 copy FLABEL (inner 0)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv6_copy_flabel_inner_0),
			"Tunnel header IPv6 copy FLABEL (inner 1)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv6_copy_flabel_inner_1),
			"Tunnel header IPv6 set FLABEL 0 (inner 1)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1),
			"Tunnel header IPv6 set FLABEL 1 (inner 0)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0),
			"Tunnel header IPv4 decrement inner TTL",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv4_ttl_decrement),
			"Tunnel header IPv6 decrement inner hop limit",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_ipv6_hop_limit_decrement),
			/* NOTE(review): "IV generation" title line truncated */
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_iv_gen),

		/* Anti-replay cases; data arguments truncated in this chunk */
		TEST_CASE_NAMED_WITH_DATA(
			"Antireplay with window size 1024",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_pkt_antireplay1024,
		TEST_CASE_NAMED_WITH_DATA(
			"Antireplay with window size 2048",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_pkt_antireplay2048,
		TEST_CASE_NAMED_WITH_DATA(
			"Antireplay with window size 4096",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_pkt_antireplay4096,
		TEST_CASE_NAMED_WITH_DATA(
			"ESN and Antireplay with window size 1024",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_pkt_esn_antireplay1024,
		TEST_CASE_NAMED_WITH_DATA(
			"ESN and Antireplay with window size 2048",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_pkt_esn_antireplay2048,
		TEST_CASE_NAMED_WITH_DATA(
			"ESN and Antireplay with window size 4096",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_pkt_esn_antireplay4096,

		/* Inline IP reassembly cases */
		TEST_CASE_NAMED_WITH_DATA(
			"IPv4 Reassembly with 2 fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv4_2frag_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv6 Reassembly with 2 fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv6_2frag_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv4 Reassembly with 4 fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv4_4frag_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv6 Reassembly with 4 fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv6_4frag_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv4 Reassembly with 5 fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv4_5frag_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv6 Reassembly with 5 fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv6_5frag_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv4 Reassembly with incomplete fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv4_incomplete_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv4 Reassembly with overlapping fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv4_overlap_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv4 Reassembly with out of order fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv4_out_of_order_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv4 Reassembly with burst of 4 fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv4_4frag_burst_vector),

		TEST_CASES_END() /**< NULL terminate unit test array */
/*
 * Entry point registered with the DPDK test framework: run the suite.
 * NOTE(review): return type and braces were dropped during extraction.
 */
test_inline_ipsec(void)
	return unit_test_suite_runner(&inline_ipsec_testsuite);
2380 #endif /* !RTE_EXEC_ENV_WINDOWS */
2382 REGISTER_TEST_COMMAND(inline_ipsec_autotest, test_inline_ipsec);