 * Default byte size for the IPv4/IPv6 Maximum Transmission Unit (MTU).
 * This value includes the size of the IP header.
*/
-#define IPV4_MTU_DEFAULT ETHER_MTU
-#define IPV6_MTU_DEFAULT ETHER_MTU
+#define IPV4_MTU_DEFAULT RTE_ETHER_MTU
+#define IPV6_MTU_DEFAULT RTE_ETHER_MTU
+
+/*
+ * The overhead from the max frame size to the MTU.
+ * The worst case has to be assumed: Ethernet header, CRC and
+ * two stacked VLAN tags.
+ */
+#define MTU_OVERHEAD \
+ (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
+ 2 * sizeof(struct rte_vlan_hdr))
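For reference: with the standard Ethernet constants (14-byte header, 4-byte
CRC, 4-byte 802.1Q tag) this overhead evaluates to 14 + 4 + 2 * 4 = 26 bytes,
so a 9728-byte jumbo frame (assuming the usual JUMBO_FRAME_MAX_SIZE of 0x2600
in this example) maps to the 9702-byte MTU that rte_eth_dev_set_mtu() is
handed later in the patch. A compile-time sanity check could look like this
(a sketch, not part of the patch):

        #include <rte_ether.h>

        /* 14 (Ethernet header) + 4 (CRC) + 2 * 4 (two stacked VLAN tags) */
        _Static_assert(MTU_OVERHEAD == 26, "unexpected frame-to-MTU overhead");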
/*
 * Default payload in bytes for the IPv4/IPv6 packet.
*/
-#define IPV4_DEFAULT_PAYLOAD (IPV4_MTU_DEFAULT - sizeof(struct ipv4_hdr))
-#define IPV6_DEFAULT_PAYLOAD (IPV6_MTU_DEFAULT - sizeof(struct ipv6_hdr))
+#define IPV4_DEFAULT_PAYLOAD (IPV4_MTU_DEFAULT - sizeof(struct rte_ipv4_hdr))
+#define IPV6_DEFAULT_PAYLOAD (IPV6_MTU_DEFAULT - sizeof(struct rte_ipv6_hdr))
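With RTE_ETHER_MTU at the classic 1500 bytes, the 20-byte IPv4 header and the
40-byte fixed IPv6 header leave 1480 and 1460 bytes of payload respectively;
a sketch of the same arithmetic as a compile-time check (assuming no IP
options or extension headers):

        #include <rte_ip.h>

        _Static_assert(IPV4_DEFAULT_PAYLOAD == 1480, "IPv4 payload mismatch");
        _Static_assert(IPV6_DEFAULT_PAYLOAD == 1460, "IPv6 payload mismatch");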
/*
* Max number of fragments per packet expected - defined by config file.
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
/* ethernet addresses of ports */
-static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
+static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
#ifndef IPv4_BYTES
#define IPv4_BYTES_FMT "%" PRIu8 ".%" PRIu8 ".%" PRIu8 ".%" PRIu8
.rxmode = {
.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_CRC_STRIP),
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME),
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
};
struct l3fwd_ipv4_route l3fwd_ipv4_route_array[] = {
- {IPv4(100,10,0,0), 16, 0},
- {IPv4(100,20,0,0), 16, 1},
- {IPv4(100,30,0,0), 16, 2},
- {IPv4(100,40,0,0), 16, 3},
- {IPv4(100,50,0,0), 16, 4},
- {IPv4(100,60,0,0), 16, 5},
- {IPv4(100,70,0,0), 16, 6},
- {IPv4(100,80,0,0), 16, 7},
+ {RTE_IPV4(100,10,0,0), 16, 0},
+ {RTE_IPV4(100,20,0,0), 16, 1},
+ {RTE_IPV4(100,30,0,0), 16, 2},
+ {RTE_IPV4(100,40,0,0), 16, 3},
+ {RTE_IPV4(100,50,0,0), 16, 4},
+ {RTE_IPV4(100,60,0,0), 16, 5},
+ {RTE_IPV4(100,70,0,0), 16, 6},
+ {RTE_IPV4(100,80,0,0), 16, 7},
};
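RTE_IPV4() packs its four octets MSB-first into a host-byte-order uint32_t,
which is also why the lookup path converts the wire-order destination address
with rte_be_to_cpu_32() before querying the LPM table. A sketch of the packing
for the first route above:

        #include <rte_ip.h>

        /* 100.10.0.0 -> (100 << 24) | (10 << 16) == 0x640a0000 (host order) */
        _Static_assert(RTE_IPV4(100, 10, 0, 0) == 0x640a0000, "octet packing");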
/*
{
struct rx_queue *rxq;
uint32_t i, len, next_hop;
- uint8_t ipv6;
- uint16_t port_out;
+ uint16_t port_out, ether_type;
int32_t len2;
+ uint64_t ol_flags;
+ const struct rte_ether_hdr *eth;
- ipv6 = 0;
+ ol_flags = 0;
rxq = &qconf->rx_queue_list[queueid];
/* by default, send everything back to the source port */
port_out = port_in;
+ /* save ether type of the incoming packet */
+ eth = rte_pktmbuf_mtod(m, const struct rte_ether_hdr *);
+ ether_type = eth->ether_type;
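+ /* note: ether_type stays in network byte order and is written
+  * back unchanged when the Ethernet header is rebuilt below */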
+
/* Remove the Ethernet header and trailer from the input packet */
- rte_pktmbuf_adj(m, (uint16_t)sizeof(struct ether_hdr));
+ rte_pktmbuf_adj(m, (uint16_t)sizeof(struct rte_ether_hdr));
/* Build transmission burst */
len = qconf->tx_mbufs[port_out].len;
/* if this is an IPv4 packet */
if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
- struct ipv4_hdr *ip_hdr;
+ struct rte_ipv4_hdr *ip_hdr;
uint32_t ip_dst;
/* Read the lookup key (i.e. ip_dst) from the input packet */
- ip_hdr = rte_pktmbuf_mtod(m, struct ipv4_hdr *);
+ ip_hdr = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);
ip_dst = rte_be_to_cpu_32(ip_hdr->dst_addr);
/* Find destination port */
/* Free input packet */
rte_pktmbuf_free(m);
+ /* request HW to regenerate IPv4 cksum */
+ ol_flags |= (PKT_TX_IPV4 | PKT_TX_IP_CKSUM);
+
/* If we fail to fragment the packet */
if (unlikely (len2 < 0))
return;
}
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
/* if this is an IPv6 packet */
- struct ipv6_hdr *ip_hdr;
-
- ipv6 = 1;
+ struct rte_ipv6_hdr *ip_hdr;
/* Read the lookup key (i.e. ip_dst) from the input packet */
- ip_hdr = rte_pktmbuf_mtod(m, struct ipv6_hdr *);
+ ip_hdr = rte_pktmbuf_mtod(m, struct rte_ipv6_hdr *);
/* Find destination port */
if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr,
void *d_addr_bytes;
m = qconf->tx_mbufs[port_out].m_table[i];
- struct ether_hdr *eth_hdr = (struct ether_hdr *)
- rte_pktmbuf_prepend(m, (uint16_t)sizeof(struct ether_hdr));
+ struct rte_ether_hdr *eth_hdr = (struct rte_ether_hdr *)
+ rte_pktmbuf_prepend(m,
+ (uint16_t)sizeof(struct rte_ether_hdr));
if (eth_hdr == NULL) {
rte_panic("No headroom in mbuf.\n");
}
- m->l2_len = sizeof(struct ether_hdr);
+ m->ol_flags |= ol_flags;
+ m->l2_len = sizeof(struct rte_ether_hdr);
/* 02:00:00:00:00:xx */
d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
- *((uint64_t *)d_addr_bytes) = 0x000000000002 + ((uint64_t)port_out << 40);
+ *((uint64_t *)d_addr_bytes) = 0x000000000002 +
+ ((uint64_t)port_out << 40);
/* src addr */
- ether_addr_copy(&ports_eth_addr[port_out], &eth_hdr->s_addr);
- if (ipv6)
- eth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv6);
- else
- eth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv4);
+ rte_ether_addr_copy(&ports_eth_addr[port_out],
+ &eth_hdr->s_addr);
+ eth_hdr->ether_type = ether_type;
}
len += len2;
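The PKT_TX_IP_CKSUM flag requested above follows the usual DPDK contract for
IPv4 checksum offload: l2_len/l3_len must describe the header layout and the
checksum field must be cleared before the PMD sees the packet. A minimal
sketch of that preparation for one mbuf (a hypothetical helper, assuming a
plain Ethernet + IPv4 layout with no VLAN tag):

        #include <rte_ether.h>
        #include <rte_ip.h>
        #include <rte_mbuf.h>

        static inline void
        request_ipv4_cksum_offload(struct rte_mbuf *m)
        {
                struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod_offset(m,
                                struct rte_ipv4_hdr *,
                                sizeof(struct rte_ether_hdr));

                m->l2_len = sizeof(struct rte_ether_hdr);
                m->l3_len = (ip->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
                                RTE_IPV4_IHL_MULTIPLIER;
                ip->hdr_checksum = 0; /* HW recomputes it on transmit */
                m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
        }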
/* main processing loop */
static int
-main_loop(__attribute__((unused)) void *dummy)
+main_loop(__rte_unused void *dummy)
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
unsigned lcore_id;
}
static void
-print_ethaddr(const char *name, struct ether_addr *eth_addr)
+print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
{
- char buf[ETHER_ADDR_FMT_SIZE];
- ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
+ rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
printf("%s%s", name, buf);
}
/* Check the link status of all ports for up to 9s, then print the result */
static void
-check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
uint16_t portid;
uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
+ int ret;
printf("\nChecking link status");
fflush(stdout);
for (count = 0; count <= MAX_CHECK_TIME; count++) {
all_ports_up = 1;
- for (portid = 0; portid < port_num; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((port_mask & (1 << portid)) == 0)
continue;
memset(&link, 0, sizeof(link));
- rte_eth_link_get_nowait(portid, &link);
+ ret = rte_eth_link_get_nowait(portid, &link);
+ if (ret < 0) {
+ all_ports_up = 0;
+ if (print_flag == 1)
+ printf("Port %u link get failed: %s\n",
+ portid, rte_strerror(-ret));
+ continue;
+ }
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
"Port%d Link Up .Speed %u Mbps - %s\n",
portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
- ("full-duplex") : ("half-duplex\n"));
+ ("full-duplex") : ("half-duplex"));
else
printf("Port %d Link Down\n", portid);
continue;
}
}
-/* Check L3 packet type detection capablity of the NIC port */
+/* Check L3 packet type detection capability of the NIC port */
static int
check_ptype(int portid)
{
static inline void
parse_ptype(struct rte_mbuf *m)
{
- struct ether_hdr *eth_hdr;
+ struct rte_ether_hdr *eth_hdr;
uint32_t packet_type = RTE_PTYPE_UNKNOWN;
uint16_t ether_type;
- eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
ether_type = eth_hdr->ether_type;
- if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
+ if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
- else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6))
+ else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))
packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
m->packet_type = packet_type;
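parse_ptype() is the software fallback used when check_ptype() reports that
the NIC cannot classify L3 headers itself; it is hooked into the RX path via
rte_eth_add_rx_callback() further down. The wrapper is outside this hunk; a
sketch of what it presumably looks like, using the standard rte_rx_callback_fn
signature:

        static uint16_t
        cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused,
                        struct rte_mbuf *pkts[], uint16_t nb_pkts,
                        uint16_t max_pkts __rte_unused,
                        void *user_param __rte_unused)
        {
                uint16_t i;

                /* classify every packet of the burst in software */
                for (i = 0; i < nb_pkts; i++)
                        parse_ptype(pkts[i]);
                return nb_pkts;
        }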
struct rte_eth_txconf *txconf;
struct rx_queue *rxq;
int socket, ret;
- unsigned nb_ports;
+ uint16_t nb_ports;
uint16_t queueid = 0;
unsigned lcore_id = 0, rx_lcore_id = 0;
uint32_t n_tx_queue, nb_lcores;
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid arguments");
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No ports found!\n");
rte_exit(EXIT_FAILURE, "Non-existent ports in portmask!\n");
/* initialize all ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
struct rte_eth_conf local_port_conf = port_conf;
struct rte_eth_rxconf rxq_conf;
qconf = &lcore_queue_conf[rx_lcore_id];
/* limit the frame size to the maximum supported by the NIC */
- rte_eth_dev_info_get(portid, &dev_info);
+ ret = rte_eth_dev_info_get(portid, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Error during getting device (port %u) info: %s\n",
+ portid, strerror(-ret));
+
local_port_conf.rxmode.max_rx_pkt_len = RTE_MIN(
dev_info.max_rx_pktlen,
local_port_conf.rxmode.max_rx_pkt_len);
n_tx_queue = nb_lcores;
if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
n_tx_queue = MAX_TX_QUEUE_PER_PORT;
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
- local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue,
&local_port_conf);
if (ret < 0) {
ret, portid);
}
+ /* set the MTU so the largest received frame still fits max_rx_pkt_len */
+ ret = rte_eth_dev_set_mtu(portid,
+ local_port_conf.rxmode.max_rx_pkt_len - MTU_OVERHEAD);
+ if (ret < 0) {
+ printf("\n");
+ rte_exit(EXIT_FAILURE, "Set MTU failed: "
+ "err=%d, port=%d\n",
+ ret, portid);
+ }
+
ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
&nb_txd);
if (ret < 0) {
ret, portid);
}
- rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
+ ret = rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
+ if (ret < 0) {
+ printf("\n");
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_macaddr_get: err=%d, port=%d\n",
+ ret, portid);
+ }
+
print_ethaddr(" Address:", &ports_eth_addr[portid]);
printf("\n");
/* init one TX queue per (lcore, port) pair */
+ ret = rte_eth_dev_info_get(portid, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Error during getting device (port %u) info: %s\n",
+ portid, strerror(-ret));
+
queueid = 0;
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
if (rte_lcore_is_enabled(lcore_id) == 0)
continue;
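+ /* never set up more TX queues than the device was configured with */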
+ if (queueid >= dev_info.nb_tx_queues)
+ break;
+
socket = (int) rte_lcore_to_socket_id(lcore_id);
printf("txq=%u,%d ", lcore_id, queueid);
fflush(stdout);
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
socket, txconf);
printf("\n");
/* start ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((enabled_port_mask & (1 << portid)) == 0) {
continue;
}
rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n",
ret, portid);
- rte_eth_promiscuous_enable(portid);
+ ret = rte_eth_promiscuous_enable(portid);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_promiscuous_enable: err=%s, port=%d\n",
+ rte_strerror(-ret), portid);
if (check_ptype(portid) == 0) {
rte_eth_add_rx_callback(portid, 0, cb_parse_ptype, NULL);
if (init_routing_table() < 0)
rte_exit(EXIT_FAILURE, "Cannot init routing table\n");
- check_all_ports_link_status(nb_ports, enabled_port_mask);
+ check_all_ports_link_status(enabled_port_mask);
/* launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
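The usual epilogue (assumed here, since it falls outside this hunk) is to
block until every worker lcore has returned from main_loop():

        /* sketch: reap the worker lcores launched above */
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {
                if (rte_eal_wait_lcore(lcore_id) < 0)
                        return -1;
        }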