/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include <getopt.h>
#include <rte_common.h>
-#include <rte_common_vect.h>
+#include <rte_vect.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_udp.h>
#include <rte_string_fns.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_etheraddr.h>
+
#define APP_LOOKUP_EXACT_MATCH 0
#define APP_LOOKUP_LPM 1
#define DO_RFC_1812_CHECKS
#define MEMPOOL_CACHE_SIZE 256
-#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
-
/*
* This expression is used to calculate the number of mbufs needed depending on user input, taking
* into account memory for rx and tx hardware rings, cache per lcore and mtable per port per lcore.
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
/* ethernet addresses of ports */
+static uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
static __m128i val_eth[RTE_MAX_ETHPORTS];
}
#endif
+static inline void l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid,
+ struct lcore_conf *qconf) __attribute__((unused));
+
#if ((APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) && \
(ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
-static inline void l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qconf);
-#define MASK_ALL_PKTS 0xf
-#define EXECLUDE_1ST_PKT 0xe
-#define EXECLUDE_2ND_PKT 0xd
-#define EXECLUDE_3RD_PKT 0xb
-#define EXECLUDE_4TH_PKT 0x7
+#define MASK_ALL_PKTS 0xff
+#define EXCLUDE_1ST_PKT 0xfe
+#define EXCLUDE_2ND_PKT 0xfd
+#define EXCLUDE_3RD_PKT 0xfb
+#define EXCLUDE_4TH_PKT 0xf7
+#define EXCLUDE_5TH_PKT 0xef
+#define EXCLUDE_6TH_PKT 0xdf
+#define EXCLUDE_7TH_PKT 0xbf
+#define EXCLUDE_8TH_PKT 0x7f
static inline void
-simple_ipv4_fwd_4pkts(struct rte_mbuf* m[4], uint8_t portid, struct lcore_conf *qconf)
+simple_ipv4_fwd_8pkts(struct rte_mbuf *m[8], uint8_t portid, struct lcore_conf *qconf)
{
- struct ether_hdr *eth_hdr[4];
- struct ipv4_hdr *ipv4_hdr[4];
- void *d_addr_bytes[4];
- uint8_t dst_port[4];
- int32_t ret[4];
- union ipv4_5tuple_host key[4];
- __m128i data[4];
+ struct ether_hdr *eth_hdr[8];
+ struct ipv4_hdr *ipv4_hdr[8];
+ uint8_t dst_port[8];
+ int32_t ret[8];
+ union ipv4_5tuple_host key[8];
+ __m128i data[8];
eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *);
eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *);
eth_hdr[2] = rte_pktmbuf_mtod(m[2], struct ether_hdr *);
eth_hdr[3] = rte_pktmbuf_mtod(m[3], struct ether_hdr *);
+ eth_hdr[4] = rte_pktmbuf_mtod(m[4], struct ether_hdr *);
+ eth_hdr[5] = rte_pktmbuf_mtod(m[5], struct ether_hdr *);
+ eth_hdr[6] = rte_pktmbuf_mtod(m[6], struct ether_hdr *);
+ eth_hdr[7] = rte_pktmbuf_mtod(m[7], struct ether_hdr *);
/* Handle IPv4 headers.*/
- ipv4_hdr[0] = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m[0], unsigned char *) +
- sizeof(struct ether_hdr));
- ipv4_hdr[1] = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m[1], unsigned char *) +
- sizeof(struct ether_hdr));
- ipv4_hdr[2] = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m[2], unsigned char *) +
- sizeof(struct ether_hdr));
- ipv4_hdr[3] = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m[3], unsigned char *) +
- sizeof(struct ether_hdr));
+ ipv4_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+ ipv4_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+ ipv4_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+ ipv4_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+ ipv4_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+ ipv4_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+ ipv4_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+ ipv4_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
#ifdef DO_RFC_1812_CHECKS
/* Check to make sure the packet is valid (RFC1812) */
uint8_t valid_mask = MASK_ALL_PKTS;
if (is_valid_ipv4_pkt(ipv4_hdr[0], m[0]->pkt_len) < 0) {
rte_pktmbuf_free(m[0]);
- valid_mask &= EXECLUDE_1ST_PKT;
+ valid_mask &= EXCLUDE_1ST_PKT;
}
if (is_valid_ipv4_pkt(ipv4_hdr[1], m[1]->pkt_len) < 0) {
rte_pktmbuf_free(m[1]);
- valid_mask &= EXECLUDE_2ND_PKT;
+ valid_mask &= EXCLUDE_2ND_PKT;
}
if (is_valid_ipv4_pkt(ipv4_hdr[2], m[2]->pkt_len) < 0) {
rte_pktmbuf_free(m[2]);
- valid_mask &= EXECLUDE_3RD_PKT;
+ valid_mask &= EXCLUDE_3RD_PKT;
}
if (is_valid_ipv4_pkt(ipv4_hdr[3], m[3]->pkt_len) < 0) {
rte_pktmbuf_free(m[3]);
- valid_mask &= EXECLUDE_4TH_PKT;
+ valid_mask &= EXCLUDE_4TH_PKT;
+ }
+ if (is_valid_ipv4_pkt(ipv4_hdr[4], m[4]->pkt_len) < 0) {
+ rte_pktmbuf_free(m[4]);
+ valid_mask &= EXCLUDE_5TH_PKT;
+ }
+ if (is_valid_ipv4_pkt(ipv4_hdr[5], m[5]->pkt_len) < 0) {
+ rte_pktmbuf_free(m[5]);
+ valid_mask &= EXCLUDE_6TH_PKT;
+ }
+ if (is_valid_ipv4_pkt(ipv4_hdr[6], m[6]->pkt_len) < 0) {
+ rte_pktmbuf_free(m[6]);
+ valid_mask &= EXCLUDE_7TH_PKT;
+ }
+ if (is_valid_ipv4_pkt(ipv4_hdr[7], m[7]->pkt_len) < 0) {
+ rte_pktmbuf_free(m[7]);
+ valid_mask &= EXCLUDE_8TH_PKT;
}
if (unlikely(valid_mask != MASK_ALL_PKTS)) {
if (valid_mask == 0){
return;
} else {
uint8_t i = 0;
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < 8; i++) {
if ((0x1 << i) & valid_mask) {
l3fwd_simple_forward(m[i], portid, qconf);
}
}
#endif // End of #ifdef DO_RFC_1812_CHECKS
- data[0] = _mm_loadu_si128((__m128i*)(rte_pktmbuf_mtod(m[0], unsigned char *) +
- sizeof(struct ether_hdr) + offsetof(struct ipv4_hdr, time_to_live)));
- data[1] = _mm_loadu_si128((__m128i*)(rte_pktmbuf_mtod(m[1], unsigned char *) +
- sizeof(struct ether_hdr) + offsetof(struct ipv4_hdr, time_to_live)));
- data[2] = _mm_loadu_si128((__m128i*)(rte_pktmbuf_mtod(m[2], unsigned char *) +
- sizeof(struct ether_hdr) + offsetof(struct ipv4_hdr, time_to_live)));
- data[3] = _mm_loadu_si128((__m128i*)(rte_pktmbuf_mtod(m[3], unsigned char *) +
- sizeof(struct ether_hdr) + offsetof(struct ipv4_hdr, time_to_live)));
+ data[0] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[0], __m128i *,
+ sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, time_to_live)));
+ data[1] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[1], __m128i *,
+ sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, time_to_live)));
+ data[2] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[2], __m128i *,
+ sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, time_to_live)));
+ data[3] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[3], __m128i *,
+ sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, time_to_live)));
+ data[4] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[4], __m128i *,
+ sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, time_to_live)));
+ data[5] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[5], __m128i *,
+ sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, time_to_live)));
+ data[6] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[6], __m128i *,
+ sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, time_to_live)));
+ data[7] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[7], __m128i *,
+ sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, time_to_live)));
key[0].xmm = _mm_and_si128(data[0], mask0);
key[1].xmm = _mm_and_si128(data[1], mask0);
key[2].xmm = _mm_and_si128(data[2], mask0);
key[3].xmm = _mm_and_si128(data[3], mask0);
+ key[4].xmm = _mm_and_si128(data[4], mask0);
+ key[5].xmm = _mm_and_si128(data[5], mask0);
+ key[6].xmm = _mm_and_si128(data[6], mask0);
+ key[7].xmm = _mm_and_si128(data[7], mask0);
+
+ const void *key_array[8] = {&key[0], &key[1], &key[2], &key[3],
+ &key[4], &key[5], &key[6], &key[7]};
- const void *key_array[4] = {&key[0], &key[1], &key[2],&key[3]};
- rte_hash_lookup_multi(qconf->ipv4_lookup_struct, &key_array[0], 4, ret);
+ rte_hash_lookup_multi(qconf->ipv4_lookup_struct, &key_array[0], 8, ret);
dst_port[0] = (uint8_t) ((ret[0] < 0) ? portid : ipv4_l3fwd_out_if[ret[0]]);
dst_port[1] = (uint8_t) ((ret[1] < 0) ? portid : ipv4_l3fwd_out_if[ret[1]]);
dst_port[2] = (uint8_t) ((ret[2] < 0) ? portid : ipv4_l3fwd_out_if[ret[2]]);
dst_port[3] = (uint8_t) ((ret[3] < 0) ? portid : ipv4_l3fwd_out_if[ret[3]]);
+ dst_port[4] = (uint8_t) ((ret[4] < 0) ? portid : ipv4_l3fwd_out_if[ret[4]]);
+ dst_port[5] = (uint8_t) ((ret[5] < 0) ? portid : ipv4_l3fwd_out_if[ret[5]]);
+ dst_port[6] = (uint8_t) ((ret[6] < 0) ? portid : ipv4_l3fwd_out_if[ret[6]]);
+ dst_port[7] = (uint8_t) ((ret[7] < 0) ? portid : ipv4_l3fwd_out_if[ret[7]]);
if (dst_port[0] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[0]) == 0)
dst_port[0] = portid;
dst_port[2] = portid;
if (dst_port[3] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[3]) == 0)
dst_port[3] = portid;
-
- /* 02:00:00:00:00:xx */
- d_addr_bytes[0] = ð_hdr[0]->d_addr.addr_bytes[0];
- d_addr_bytes[1] = ð_hdr[1]->d_addr.addr_bytes[0];
- d_addr_bytes[2] = ð_hdr[2]->d_addr.addr_bytes[0];
- d_addr_bytes[3] = ð_hdr[3]->d_addr.addr_bytes[0];
- *((uint64_t *)d_addr_bytes[0]) = 0x000000000002 + ((uint64_t)dst_port[0] << 40);
- *((uint64_t *)d_addr_bytes[1]) = 0x000000000002 + ((uint64_t)dst_port[1] << 40);
- *((uint64_t *)d_addr_bytes[2]) = 0x000000000002 + ((uint64_t)dst_port[2] << 40);
- *((uint64_t *)d_addr_bytes[3]) = 0x000000000002 + ((uint64_t)dst_port[3] << 40);
+ if (dst_port[4] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[4]) == 0)
+ dst_port[4] = portid;
+ if (dst_port[5] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[5]) == 0)
+ dst_port[5] = portid;
+ if (dst_port[6] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[6]) == 0)
+ dst_port[6] = portid;
+ if (dst_port[7] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[7]) == 0)
+ dst_port[7] = portid;
#ifdef DO_RFC_1812_CHECKS
/* Update time to live and header checksum */
++(ipv4_hdr[1]->hdr_checksum);
++(ipv4_hdr[2]->hdr_checksum);
++(ipv4_hdr[3]->hdr_checksum);
+ --(ipv4_hdr[4]->time_to_live);
+ --(ipv4_hdr[5]->time_to_live);
+ --(ipv4_hdr[6]->time_to_live);
+ --(ipv4_hdr[7]->time_to_live);
+ ++(ipv4_hdr[4]->hdr_checksum);
+ ++(ipv4_hdr[5]->hdr_checksum);
+ ++(ipv4_hdr[6]->hdr_checksum);
+ ++(ipv4_hdr[7]->hdr_checksum);
#endif
+ /* dst addr */
+ *(uint64_t *)ð_hdr[0]->d_addr = dest_eth_addr[dst_port[0]];
+ *(uint64_t *)ð_hdr[1]->d_addr = dest_eth_addr[dst_port[1]];
+ *(uint64_t *)ð_hdr[2]->d_addr = dest_eth_addr[dst_port[2]];
+ *(uint64_t *)ð_hdr[3]->d_addr = dest_eth_addr[dst_port[3]];
+ *(uint64_t *)ð_hdr[4]->d_addr = dest_eth_addr[dst_port[4]];
+ *(uint64_t *)ð_hdr[5]->d_addr = dest_eth_addr[dst_port[5]];
+ *(uint64_t *)ð_hdr[6]->d_addr = dest_eth_addr[dst_port[6]];
+ *(uint64_t *)ð_hdr[7]->d_addr = dest_eth_addr[dst_port[7]];
+
/* src addr */
ether_addr_copy(&ports_eth_addr[dst_port[0]], ð_hdr[0]->s_addr);
ether_addr_copy(&ports_eth_addr[dst_port[1]], ð_hdr[1]->s_addr);
ether_addr_copy(&ports_eth_addr[dst_port[2]], ð_hdr[2]->s_addr);
ether_addr_copy(&ports_eth_addr[dst_port[3]], ð_hdr[3]->s_addr);
+ ether_addr_copy(&ports_eth_addr[dst_port[4]], ð_hdr[4]->s_addr);
+ ether_addr_copy(&ports_eth_addr[dst_port[5]], ð_hdr[5]->s_addr);
+ ether_addr_copy(&ports_eth_addr[dst_port[6]], ð_hdr[6]->s_addr);
+ ether_addr_copy(&ports_eth_addr[dst_port[7]], ð_hdr[7]->s_addr);
send_single_packet(m[0], (uint8_t)dst_port[0]);
send_single_packet(m[1], (uint8_t)dst_port[1]);
send_single_packet(m[2], (uint8_t)dst_port[2]);
send_single_packet(m[3], (uint8_t)dst_port[3]);
+ send_single_packet(m[4], (uint8_t)dst_port[4]);
+ send_single_packet(m[5], (uint8_t)dst_port[5]);
+ send_single_packet(m[6], (uint8_t)dst_port[6]);
+ send_single_packet(m[7], (uint8_t)dst_port[7]);
}
static inline void get_ipv6_5tuple(struct rte_mbuf* m0, __m128i mask0, __m128i mask1,
union ipv6_5tuple_host * key)
{
- __m128i tmpdata0 = _mm_loadu_si128((__m128i*)(rte_pktmbuf_mtod(m0, unsigned char *)
- + sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len)));
- __m128i tmpdata1 = _mm_loadu_si128((__m128i*)(rte_pktmbuf_mtod(m0, unsigned char *)
- + sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len)
- + sizeof(__m128i)));
- __m128i tmpdata2 = _mm_loadu_si128((__m128i*)(rte_pktmbuf_mtod(m0, unsigned char *)
- + sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len)
- + sizeof(__m128i) + sizeof(__m128i)));
+ __m128i tmpdata0 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0, __m128i *, sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len)));
+ __m128i tmpdata1 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0, __m128i *, sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len) + sizeof(__m128i)));
+ __m128i tmpdata2 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0, __m128i *, sizeof(struct ether_hdr) + offsetof(struct ipv6_hdr, payload_len) + sizeof(__m128i) + sizeof(__m128i)));
key->xmm[0] = _mm_and_si128(tmpdata0, mask0);
key->xmm[1] = tmpdata1;
key->xmm[2] = _mm_and_si128(tmpdata2, mask1);
}
static inline void
-simple_ipv6_fwd_4pkts(struct rte_mbuf* m[4], uint8_t portid, struct lcore_conf *qconf)
+simple_ipv6_fwd_8pkts(struct rte_mbuf *m[8], uint8_t portid, struct lcore_conf *qconf)
{
- struct ether_hdr *eth_hdr[4];
- __attribute__((unused)) struct ipv6_hdr *ipv6_hdr[4];
- void *d_addr_bytes[4];
- uint8_t dst_port[4];
- int32_t ret[4];
- union ipv6_5tuple_host key[4];
+ struct ether_hdr *eth_hdr[8];
+ __attribute__((unused)) struct ipv6_hdr *ipv6_hdr[8];
+ uint8_t dst_port[8];
+ int32_t ret[8];
+ union ipv6_5tuple_host key[8];
eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *);
eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *);
eth_hdr[2] = rte_pktmbuf_mtod(m[2], struct ether_hdr *);
eth_hdr[3] = rte_pktmbuf_mtod(m[3], struct ether_hdr *);
+ eth_hdr[4] = rte_pktmbuf_mtod(m[4], struct ether_hdr *);
+ eth_hdr[5] = rte_pktmbuf_mtod(m[5], struct ether_hdr *);
+ eth_hdr[6] = rte_pktmbuf_mtod(m[6], struct ether_hdr *);
+ eth_hdr[7] = rte_pktmbuf_mtod(m[7], struct ether_hdr *);
/* Handle IPv6 headers.*/
- ipv6_hdr[0] = (struct ipv6_hdr *)(rte_pktmbuf_mtod(m[0], unsigned char *) +
- sizeof(struct ether_hdr));
- ipv6_hdr[1] = (struct ipv6_hdr *)(rte_pktmbuf_mtod(m[1], unsigned char *) +
- sizeof(struct ether_hdr));
- ipv6_hdr[2] = (struct ipv6_hdr *)(rte_pktmbuf_mtod(m[2], unsigned char *) +
- sizeof(struct ether_hdr));
- ipv6_hdr[3] = (struct ipv6_hdr *)(rte_pktmbuf_mtod(m[3], unsigned char *) +
- sizeof(struct ether_hdr));
+ ipv6_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct ipv6_hdr *,
+ sizeof(struct ether_hdr));
+ ipv6_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct ipv6_hdr *,
+ sizeof(struct ether_hdr));
+ ipv6_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct ipv6_hdr *,
+ sizeof(struct ether_hdr));
+ ipv6_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct ipv6_hdr *,
+ sizeof(struct ether_hdr));
+ ipv6_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct ipv6_hdr *,
+ sizeof(struct ether_hdr));
+ ipv6_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct ipv6_hdr *,
+ sizeof(struct ether_hdr));
+ ipv6_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct ipv6_hdr *,
+ sizeof(struct ether_hdr));
+ ipv6_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct ipv6_hdr *,
+ sizeof(struct ether_hdr));
get_ipv6_5tuple(m[0], mask1, mask2, &key[0]);
get_ipv6_5tuple(m[1], mask1, mask2, &key[1]);
get_ipv6_5tuple(m[2], mask1, mask2, &key[2]);
get_ipv6_5tuple(m[3], mask1, mask2, &key[3]);
+ get_ipv6_5tuple(m[4], mask1, mask2, &key[4]);
+ get_ipv6_5tuple(m[5], mask1, mask2, &key[5]);
+ get_ipv6_5tuple(m[6], mask1, mask2, &key[6]);
+ get_ipv6_5tuple(m[7], mask1, mask2, &key[7]);
+
+ const void *key_array[8] = {&key[0], &key[1], &key[2], &key[3],
+ &key[4], &key[5], &key[6], &key[7]};
- const void *key_array[4] = {&key[0], &key[1], &key[2],&key[3]};
- rte_hash_lookup_multi(qconf->ipv6_lookup_struct, &key_array[0], 4, ret);
+ rte_hash_lookup_multi(qconf->ipv6_lookup_struct, &key_array[0], 8, ret);
- dst_port[0] = (uint8_t) ((ret[0] < 0)? portid:ipv6_l3fwd_out_if[ret[0]]);
- dst_port[1] = (uint8_t) ((ret[1] < 0)? portid:ipv6_l3fwd_out_if[ret[1]]);
- dst_port[2] = (uint8_t) ((ret[2] < 0)? portid:ipv6_l3fwd_out_if[ret[2]]);
- dst_port[3] = (uint8_t) ((ret[3] < 0)? portid:ipv6_l3fwd_out_if[ret[3]]);
+ dst_port[0] = (uint8_t) ((ret[0] < 0) ? portid:ipv6_l3fwd_out_if[ret[0]]);
+ dst_port[1] = (uint8_t) ((ret[1] < 0) ? portid:ipv6_l3fwd_out_if[ret[1]]);
+ dst_port[2] = (uint8_t) ((ret[2] < 0) ? portid:ipv6_l3fwd_out_if[ret[2]]);
+ dst_port[3] = (uint8_t) ((ret[3] < 0) ? portid:ipv6_l3fwd_out_if[ret[3]]);
+ dst_port[4] = (uint8_t) ((ret[4] < 0) ? portid:ipv6_l3fwd_out_if[ret[4]]);
+ dst_port[5] = (uint8_t) ((ret[5] < 0) ? portid:ipv6_l3fwd_out_if[ret[5]]);
+ dst_port[6] = (uint8_t) ((ret[6] < 0) ? portid:ipv6_l3fwd_out_if[ret[6]]);
+ dst_port[7] = (uint8_t) ((ret[7] < 0) ? portid:ipv6_l3fwd_out_if[ret[7]]);
if (dst_port[0] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[0]) == 0)
dst_port[0] = portid;
dst_port[2] = portid;
if (dst_port[3] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[3]) == 0)
dst_port[3] = portid;
-
- /* 02:00:00:00:00:xx */
- d_addr_bytes[0] = ð_hdr[0]->d_addr.addr_bytes[0];
- d_addr_bytes[1] = ð_hdr[1]->d_addr.addr_bytes[0];
- d_addr_bytes[2] = ð_hdr[2]->d_addr.addr_bytes[0];
- d_addr_bytes[3] = ð_hdr[3]->d_addr.addr_bytes[0];
- *((uint64_t *)d_addr_bytes[0]) = 0x000000000002 + ((uint64_t)dst_port[0] << 40);
- *((uint64_t *)d_addr_bytes[1]) = 0x000000000002 + ((uint64_t)dst_port[1] << 40);
- *((uint64_t *)d_addr_bytes[2]) = 0x000000000002 + ((uint64_t)dst_port[2] << 40);
- *((uint64_t *)d_addr_bytes[3]) = 0x000000000002 + ((uint64_t)dst_port[3] << 40);
+ if (dst_port[4] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[4]) == 0)
+ dst_port[4] = portid;
+ if (dst_port[5] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[5]) == 0)
+ dst_port[5] = portid;
+ if (dst_port[6] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[6]) == 0)
+ dst_port[6] = portid;
+ if (dst_port[7] >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port[7]) == 0)
+ dst_port[7] = portid;
+
+ /* dst addr */
+ *(uint64_t *)ð_hdr[0]->d_addr = dest_eth_addr[dst_port[0]];
+ *(uint64_t *)ð_hdr[1]->d_addr = dest_eth_addr[dst_port[1]];
+ *(uint64_t *)ð_hdr[2]->d_addr = dest_eth_addr[dst_port[2]];
+ *(uint64_t *)ð_hdr[3]->d_addr = dest_eth_addr[dst_port[3]];
+ *(uint64_t *)ð_hdr[4]->d_addr = dest_eth_addr[dst_port[4]];
+ *(uint64_t *)ð_hdr[5]->d_addr = dest_eth_addr[dst_port[5]];
+ *(uint64_t *)ð_hdr[6]->d_addr = dest_eth_addr[dst_port[6]];
+ *(uint64_t *)ð_hdr[7]->d_addr = dest_eth_addr[dst_port[7]];
/* src addr */
ether_addr_copy(&ports_eth_addr[dst_port[0]], ð_hdr[0]->s_addr);
ether_addr_copy(&ports_eth_addr[dst_port[1]], ð_hdr[1]->s_addr);
ether_addr_copy(&ports_eth_addr[dst_port[2]], ð_hdr[2]->s_addr);
ether_addr_copy(&ports_eth_addr[dst_port[3]], ð_hdr[3]->s_addr);
+ ether_addr_copy(&ports_eth_addr[dst_port[4]], ð_hdr[4]->s_addr);
+ ether_addr_copy(&ports_eth_addr[dst_port[5]], ð_hdr[5]->s_addr);
+ ether_addr_copy(&ports_eth_addr[dst_port[6]], ð_hdr[6]->s_addr);
+ ether_addr_copy(&ports_eth_addr[dst_port[7]], ð_hdr[7]->s_addr);
send_single_packet(m[0], (uint8_t)dst_port[0]);
send_single_packet(m[1], (uint8_t)dst_port[1]);
send_single_packet(m[2], (uint8_t)dst_port[2]);
send_single_packet(m[3], (uint8_t)dst_port[3]);
+ send_single_packet(m[4], (uint8_t)dst_port[4]);
+ send_single_packet(m[5], (uint8_t)dst_port[5]);
+ send_single_packet(m[6], (uint8_t)dst_port[6]);
+ send_single_packet(m[7], (uint8_t)dst_port[7]);
}
#endif /* APP_LOOKUP_METHOD */
{
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
- void *d_addr_bytes;
uint8_t dst_port;
eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+#ifdef RTE_NEXT_ABI
+ if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
+#else
if (m->ol_flags & PKT_RX_IPV4_HDR) {
+#endif
/* Handle IPv4 headers.*/
- ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m, unsigned char *) +
- sizeof(struct ether_hdr));
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
#ifdef DO_RFC_1812_CHECKS
/* Check to make sure the packet is valid (RFC1812) */
(enabled_port_mask & 1 << dst_port) == 0)
dst_port = portid;
- /* 02:00:00:00:00:xx */
- d_addr_bytes = ð_hdr->d_addr.addr_bytes[0];
- *((uint64_t *)d_addr_bytes) = ETHER_LOCAL_ADMIN_ADDR +
- ((uint64_t)dst_port << 40);
-
#ifdef DO_RFC_1812_CHECKS
/* Update time to live and header checksum */
--(ipv4_hdr->time_to_live);
++(ipv4_hdr->hdr_checksum);
#endif
+ /* dst addr */
+ *(uint64_t *)ð_hdr->d_addr = dest_eth_addr[dst_port];
/* src addr */
ether_addr_copy(&ports_eth_addr[dst_port], ð_hdr->s_addr);
send_single_packet(m, dst_port);
-
+#ifdef RTE_NEXT_ABI
+ } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
+#else
} else {
+#endif
/* Handle IPv6 headers.*/
struct ipv6_hdr *ipv6_hdr;
- ipv6_hdr = (struct ipv6_hdr *)(rte_pktmbuf_mtod(m, unsigned char *) +
- sizeof(struct ether_hdr));
+ ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
+ sizeof(struct ether_hdr));
dst_port = get_ipv6_dst_port(ipv6_hdr, portid, qconf->ipv6_lookup_struct);
if (dst_port >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port) == 0)
dst_port = portid;
- /* 02:00:00:00:00:xx */
- d_addr_bytes = ð_hdr->d_addr.addr_bytes[0];
- *((uint64_t *)d_addr_bytes) = ETHER_LOCAL_ADMIN_ADDR +
- ((uint64_t)dst_port << 40);
+ /* dst addr */
+ *(uint64_t *)ð_hdr->d_addr = dest_eth_addr[dst_port];
/* src addr */
ether_addr_copy(&ports_eth_addr[dst_port], ð_hdr->s_addr);
send_single_packet(m, dst_port);
+#ifdef RTE_NEXT_ABI
+ } else
+ /* Free the mbuf that contains non-IPV4/IPV6 packet */
+ rte_pktmbuf_free(m);
+#else
}
-
+#endif
}
#ifdef DO_RFC_1812_CHECKS
* to BAD_PORT value.
*/
static inline __attribute__((always_inline)) void
+#ifdef RTE_NEXT_ABI
+rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
+#else
rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t flags)
+#endif
{
uint8_t ihl;
+#ifdef RTE_NEXT_ABI
+ if (RTE_ETH_IS_IPV4_HDR(ptype)) {
+#else
if ((flags & PKT_RX_IPV4_HDR) != 0) {
-
+#endif
ihl = ipv4_hdr->version_ihl - IPV4_MIN_VER_IHL;
ipv4_hdr->time_to_live--;
struct ipv6_hdr *ipv6_hdr;
struct ether_hdr *eth_hdr;
+#ifdef RTE_NEXT_ABI
+ if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
+#else
if (pkt->ol_flags & PKT_RX_IPV4_HDR) {
+#endif
if (rte_lpm_lookup(qconf->ipv4_lookup_struct, dst_ipv4,
&next_hop) != 0)
next_hop = portid;
+#ifdef RTE_NEXT_ABI
+ } else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
+#else
} else if (pkt->ol_flags & PKT_RX_IPV6_HDR) {
+#endif
eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
if (rte_lpm6_lookup(qconf->ipv6_lookup_struct,
ve = val_eth[dp];
dst_port[0] = dp;
+#ifdef RTE_NEXT_ABI
+ rfc1812_process(ipv4_hdr, dst_port, pkt->packet_type);
+#else
rfc1812_process(ipv4_hdr, dst_port, pkt->ol_flags);
+#endif
te = _mm_blend_epi16(te, ve, MASK_ETH);
_mm_store_si128((__m128i *)eth_hdr, te);
}
+#ifdef RTE_NEXT_ABI
+/*
+ * Read packet_type and destination IPV4 addresses from 4 mbufs.
+ */
+static inline void
+processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
+ __m128i *dip,
+ uint32_t *ipv4_flag)
+{
+ struct ipv4_hdr *ipv4_hdr;
+ struct ether_hdr *eth_hdr;
+ uint32_t x0, x1, x2, x3;
+
+ eth_hdr = rte_pktmbuf_mtod(pkt[0], struct ether_hdr *);
+ ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ x0 = ipv4_hdr->dst_addr;
+ ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4;
+
+ eth_hdr = rte_pktmbuf_mtod(pkt[1], struct ether_hdr *);
+ ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ x1 = ipv4_hdr->dst_addr;
+ ipv4_flag[0] &= pkt[1]->packet_type;
+
+ eth_hdr = rte_pktmbuf_mtod(pkt[2], struct ether_hdr *);
+ ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ x2 = ipv4_hdr->dst_addr;
+ ipv4_flag[0] &= pkt[2]->packet_type;
+
+ eth_hdr = rte_pktmbuf_mtod(pkt[3], struct ether_hdr *);
+ ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ x3 = ipv4_hdr->dst_addr;
+ ipv4_flag[0] &= pkt[3]->packet_type;
+
+ dip[0] = _mm_set_epi32(x3, x2, x1, x0);
+}
+#else /* RTE_NEXT_ABI */
/*
* Read ol_flags and destination IPV4 addresses from 4 mbufs.
*/
dip[0] = _mm_set_epi32(x3, x2, x1, x0);
}
+#endif /* RTE_NEXT_ABI */
/*
* Lookup into LPM for destination port.
* If lookup fails, use incoming port (portid) as destination port.
*/
static inline void
+#ifdef RTE_NEXT_ABI
+processx4_step2(const struct lcore_conf *qconf,
+ __m128i dip,
+ uint32_t ipv4_flag,
+ uint8_t portid,
+ struct rte_mbuf *pkt[FWDSTEP],
+ uint16_t dprt[FWDSTEP])
+#else
processx4_step2(const struct lcore_conf *qconf, __m128i dip, uint32_t flag,
uint8_t portid, struct rte_mbuf *pkt[FWDSTEP], uint16_t dprt[FWDSTEP])
+#endif /* RTE_NEXT_ABI */
{
rte_xmm_t dst;
const __m128i bswap_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11,
dip = _mm_shuffle_epi8(dip, bswap_mask);
/* if all 4 packets are IPV4. */
+#ifdef RTE_NEXT_ABI
+ if (likely(ipv4_flag)) {
+#else
if (likely(flag != 0)) {
+#endif
rte_lpm_lookupx4(qconf->ipv4_lookup_struct, dip, dprt, portid);
} else {
- dst.m = dip;
+ dst.x = dip;
dprt[0] = get_dst_port(qconf, pkt[0], dst.u32[0], portid);
dprt[1] = get_dst_port(qconf, pkt[1], dst.u32[1], portid);
dprt[2] = get_dst_port(qconf, pkt[2], dst.u32[2], portid);
__m128i ve[FWDSTEP];
__m128i *p[FWDSTEP];
- p[0] = (rte_pktmbuf_mtod(pkt[0], __m128i *));
- p[1] = (rte_pktmbuf_mtod(pkt[1], __m128i *));
- p[2] = (rte_pktmbuf_mtod(pkt[2], __m128i *));
- p[3] = (rte_pktmbuf_mtod(pkt[3], __m128i *));
+ p[0] = rte_pktmbuf_mtod(pkt[0], __m128i *);
+ p[1] = rte_pktmbuf_mtod(pkt[1], __m128i *);
+ p[2] = rte_pktmbuf_mtod(pkt[2], __m128i *);
+ p[3] = rte_pktmbuf_mtod(pkt[3], __m128i *);
ve[0] = val_eth[dst_port[0]];
te[0] = _mm_load_si128(p[0]);
_mm_store_si128(p[2], te[2]);
_mm_store_si128(p[3], te[3]);
+#ifdef RTE_NEXT_ABI
+ rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[0] + 1),
+ &dst_port[0], pkt[0]->packet_type);
+ rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[1] + 1),
+ &dst_port[1], pkt[1]->packet_type);
+ rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[2] + 1),
+ &dst_port[2], pkt[2]->packet_type);
+ rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[3] + 1),
+ &dst_port[3], pkt[3]->packet_type);
+#else /* RTE_NEXT_ABI */
rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[0] + 1),
&dst_port[0], pkt[0]->ol_flags);
rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[1] + 1),
&dst_port[2], pkt[2]->ol_flags);
rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[3] + 1),
&dst_port[3], pkt[3]->ol_flags);
+#endif /* RTE_NEXT_ABI */
}
/*
uint16_t *lp;
uint16_t dst_port[MAX_PKT_BURST];
__m128i dip[MAX_PKT_BURST / FWDSTEP];
+#ifdef RTE_NEXT_ABI
+ uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
+#else
uint32_t flag[MAX_PKT_BURST / FWDSTEP];
+#endif
uint16_t pnum[MAX_PKT_BURST + 1];
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
{
/*
- * Send nb_rx - nb_rx%4 packets
- * in groups of 4.
+ * Send nb_rx - nb_rx%8 packets
+ * in groups of 8.
*/
- int32_t n = RTE_ALIGN_FLOOR(nb_rx, 4);
- for (j = 0; j < n ; j+=4) {
+ int32_t n = RTE_ALIGN_FLOOR(nb_rx, 8);
+ for (j = 0; j < n; j += 8) {
+#ifdef RTE_NEXT_ABI
+ uint32_t pkt_type =
+ pkts_burst[j]->packet_type &
+ pkts_burst[j+1]->packet_type &
+ pkts_burst[j+2]->packet_type &
+ pkts_burst[j+3]->packet_type &
+ pkts_burst[j+4]->packet_type &
+ pkts_burst[j+5]->packet_type &
+ pkts_burst[j+6]->packet_type &
+ pkts_burst[j+7]->packet_type;
+ if (pkt_type & RTE_PTYPE_L3_IPV4) {
+ simple_ipv4_fwd_8pkts(
+ &pkts_burst[j], portid, qconf);
+ } else if (pkt_type &
+ RTE_PTYPE_L3_IPV6) {
+#else /* RTE_NEXT_ABI */
uint32_t ol_flag = pkts_burst[j]->ol_flags
& pkts_burst[j+1]->ol_flags
& pkts_burst[j+2]->ol_flags
- & pkts_burst[j+3]->ol_flags;
+ & pkts_burst[j+3]->ol_flags
+ & pkts_burst[j+4]->ol_flags
+ & pkts_burst[j+5]->ol_flags
+ & pkts_burst[j+6]->ol_flags
+ & pkts_burst[j+7]->ol_flags;
if (ol_flag & PKT_RX_IPV4_HDR ) {
- simple_ipv4_fwd_4pkts(&pkts_burst[j],
+ simple_ipv4_fwd_8pkts(&pkts_burst[j],
portid, qconf);
} else if (ol_flag & PKT_RX_IPV6_HDR) {
+#endif /* RTE_NEXT_ABI */
- simple_ipv6_fwd_4pkts(&pkts_burst[j],
+ simple_ipv6_fwd_8pkts(&pkts_burst[j],
portid, qconf);
} else {
portid, qconf);
l3fwd_simple_forward(pkts_burst[j+3],
portid, qconf);
+ l3fwd_simple_forward(pkts_burst[j+4],
+ portid, qconf);
+ l3fwd_simple_forward(pkts_burst[j+5],
+ portid, qconf);
+ l3fwd_simple_forward(pkts_burst[j+6],
+ portid, qconf);
+ l3fwd_simple_forward(pkts_burst[j+7],
+ portid, qconf);
}
}
for (; j < nb_rx ; j++) {
for (j = 0; j != k; j += FWDSTEP) {
processx4_step1(&pkts_burst[j],
&dip[j / FWDSTEP],
+#ifdef RTE_NEXT_ABI
+ &ipv4_flag[j / FWDSTEP]);
+#else
&flag[j / FWDSTEP]);
+#endif
}
k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
for (j = 0; j != k; j += FWDSTEP) {
processx4_step2(qconf, dip[j / FWDSTEP],
+#ifdef RTE_NEXT_ABI
+ ipv4_flag[j / FWDSTEP], portid,
+#else
flag[j / FWDSTEP], portid,
+#endif
&pkts_burst[j], &dst_port[j]);
}
" -p PORTMASK: hexadecimal bitmask of ports to configure\n"
" -P : enable promiscuous mode\n"
" --config (port,queue,lcore): rx queues configuration\n"
+ " --eth-dest=X,MM:MM:MM:MM:MM:MM: optional, ethernet destination for port X\n"
" --no-numa: optional, disable numa awareness\n"
" --ipv6: optional, specify it if running ipv6 packets\n"
" --enable-jumbo: enable jumbo frame"
return 0;
}
+/*
+ * Parse one --eth-dest option of the form "X,MM:MM:MM:MM:MM:MM":
+ * port number X, then the destination MAC for that port.
+ * Stores the MAC into dest_eth_addr[X] and mirrors it into the low
+ * 64 bits of val_eth[X]. Exits the application on any parse error.
+ */
+static void
+parse_eth_dest(const char *optarg)
+{
+	unsigned long portid;
+	char *port_end;
+	uint8_t c, *dest, peer_addr[6];
+
+	errno = 0;
+	/* Parse into unsigned long and range-check BEFORE any narrowing
+	 * use: assigning strtoul() straight to a uint8_t would wrap
+	 * (e.g. 256 -> 0) and silently pass the check below. */
+	portid = strtoul(optarg, &port_end, 10);
+	if (errno != 0 || port_end == optarg || *port_end++ != ',')
+		rte_exit(EXIT_FAILURE,
+		"Invalid eth-dest: %s", optarg);
+	if (portid >= RTE_MAX_ETHPORTS)
+		rte_exit(EXIT_FAILURE,
+		"eth-dest: port %lu >= RTE_MAX_ETHPORTS(%d)\n",
+		portid, RTE_MAX_ETHPORTS);
+
+	if (cmdline_parse_etheraddr(NULL, port_end,
+		&peer_addr, sizeof(peer_addr)) < 0)
+		rte_exit(EXIT_FAILURE,
+		"Invalid ethernet address: %s\n",
+		port_end);
+	dest = (uint8_t *)&dest_eth_addr[portid];
+	for (c = 0; c < 6; c++)
+		dest[c] = peer_addr[c];
+	/* keep val_eth's destination half in sync for the vector TX path */
+	*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
+}
+
#define CMD_LINE_OPT_CONFIG "config"
+#define CMD_LINE_OPT_ETH_DEST "eth-dest"
#define CMD_LINE_OPT_NO_NUMA "no-numa"
#define CMD_LINE_OPT_IPV6 "ipv6"
#define CMD_LINE_OPT_ENABLE_JUMBO "enable-jumbo"
char *prgname = argv[0];
static struct option lgopts[] = {
{CMD_LINE_OPT_CONFIG, 1, 0, 0},
+ {CMD_LINE_OPT_ETH_DEST, 1, 0, 0},
{CMD_LINE_OPT_NO_NUMA, 0, 0, 0},
{CMD_LINE_OPT_IPV6, 0, 0, 0},
{CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, 0},
}
}
+ if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_ETH_DEST,
+ sizeof(CMD_LINE_OPT_ETH_DEST))) {
+ parse_eth_dest(optarg);
+ }
+
if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_NO_NUMA,
sizeof(CMD_LINE_OPT_NO_NUMA))) {
printf("numa is disabled \n");
struct rte_hash_parameters ipv4_l3fwd_hash_params = {
.name = NULL,
.entries = L3FWD_HASH_ENTRIES,
- .bucket_entries = 4,
.key_len = sizeof(union ipv4_5tuple_host),
.hash_func = ipv4_hash_crc,
.hash_func_init_val = 0,
struct rte_hash_parameters ipv6_l3fwd_hash_params = {
.name = NULL,
.entries = L3FWD_HASH_ENTRIES,
- .bucket_entries = 4,
.key_len = sizeof(union ipv6_5tuple_host),
.hash_func = ipv6_hash_crc,
.hash_func_init_val = 0,
if (pktmbuf_pool[socketid] == NULL) {
snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
pktmbuf_pool[socketid] =
- rte_mempool_create(s, nb_mbuf, MBUF_SIZE, MEMPOOL_CACHE_SIZE,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL,
- rte_pktmbuf_init, NULL,
- socketid, 0);
+ rte_pktmbuf_pool_create(s, nb_mbuf,
+ MEMPOOL_CACHE_SIZE, 0,
+ RTE_MBUF_DEFAULT_BUF_SIZE, socketid);
if (pktmbuf_pool[socketid] == NULL)
rte_exit(EXIT_FAILURE,
"Cannot init mbuf pool on socket %d\n", socketid);
argc -= ret;
argv += ret;
+ /* pre-init dst MACs for all ports to 02:00:00:00:00:xx */
+ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
+ dest_eth_addr[portid] = ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);
+ *(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
+ }
+
/* parse application arguments (after the EAL ones) */
ret = parse_args(argc, argv);
if (ret < 0)
rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
print_ethaddr(" Address:", &ports_eth_addr[portid]);
printf(", ");
+ print_ethaddr("Destination:",
+ (const struct ether_addr *)&dest_eth_addr[portid]);
+ printf(", ");
/*
- * prepare dst and src MACs for each port.
+ * prepare src MACs for each port.
*/
- *(uint64_t *)(val_eth + portid) =
- ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);
ether_addr_copy(&ports_eth_addr[portid],
(struct ether_addr *)(val_eth + portid) + 1);