-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#include <stdio.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
-#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ip_frag.h>
-#include "main.h"
-
#define MAX_PKT_BURST 32
#define MAX_JUMBO_PKT_LEN 9600
-#define BUF_SIZE 2048
-#define MBUF_SIZE \
- (BUF_SIZE + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define BUF_SIZE RTE_MBUF_DEFAULT_DATAROOM
+#define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
#define NB_MBUF 8192
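+/* per-lcore object cache used by rte_pktmbuf_pool_create() below */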
+#define MEMPOOL_CACHE_SIZE 256
/* allow max jumbo frame 9.5 KB (0x2600 = 9728 bytes) */
#define JUMBO_FRAME_MAX_SIZE 0x2600
static uint32_t max_flow_num = DEF_FLOW_NUM;
static uint32_t max_flow_ttl = DEF_FLOW_TTL;
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
-#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
-#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
-#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
-#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
-
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
#define NB_SOCKETS 8
/*
* Configurable number of RX/TX ring descriptors
*/
-#define RTE_TEST_RX_DESC_DEFAULT 128
-#define RTE_TEST_TX_DESC_DEFAULT 512
+#define RTE_TEST_RX_DESC_DEFAULT 1024
+#define RTE_TEST_TX_DESC_DEFAULT 1024
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
struct rte_mempool *pool;
struct rte_lpm *lpm;
struct rte_lpm6 *lpm6;
- uint8_t portid;
+ uint16_t portid;
};
struct tx_lcore_stat {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
.split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 1, /**< IP checksum offload enabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .jumbo_frame = 1, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .ignore_offload_bitfield = 1,
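+ /* with ignore_offload_bitfield set, the legacy bitfield flags
+ * removed above are superseded by this offloads mask;
+ * DEV_RX_OFFLOAD_CHECKSUM covers IPv4/UDP/TCP Rx checksum. */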
+ .offloads = (DEV_RX_OFFLOAD_CHECKSUM |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_CRC_STRIP),
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6,
+ .rss_hf = ETH_RSS_IP,
},
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
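+ /* reassembled datagrams are chained mbufs, so the port must
+ * accept multi-segment Tx (DEV_TX_OFFLOAD_MULTI_SEGS). */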
+ .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_MULTI_SEGS),
},
};
-static const struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = RX_PTHRESH,
- .hthresh = RX_HTHRESH,
- .wthresh = RX_WTHRESH,
- },
- .rx_free_thresh = 32,
-};
-
-static const struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = TX_PTHRESH,
- .hthresh = TX_HTHRESH,
- .wthresh = TX_WTHRESH,
- },
- .tx_free_thresh = 0, /* Use PMD default values */
- .tx_rs_thresh = 0, /* Use PMD default values */
- .txq_flags = 0x0,
-};
-
/*
* IPv4 forwarding table
*/
static struct rte_lpm *socket_lpm[RTE_MAX_NUMA_NODES];
static struct rte_lpm6 *socket_lpm6[RTE_MAX_NUMA_NODES];
-#ifdef IPV6_FRAG_TBL_STAT
+#ifdef RTE_LIBRTE_IP_FRAG_TBL_STAT
#define TX_LCORE_STAT_UPDATE(s, f, v) ((s)->f += (v))
#else
#define TX_LCORE_STAT_UPDATE(s, f, v) do {} while (0)
-#endif /* IPV6_FRAG_TBL_STAT */
+#endif /* RTE_LIBRTE_IP_FRAG_TBL_STAT */
/*
 * If the number of queued packets reaches the given threshold,
 * send a burst of packets on the output interface.
*/
static inline uint32_t
-send_burst(struct lcore_queue_conf *qconf, uint32_t thresh, uint8_t port)
+send_burst(struct lcore_queue_conf *qconf, uint32_t thresh, uint16_t port)
{
uint32_t fill, len, k, n;
struct mbuf_table *txmb;
txmb->tail = 0;
}
- return (fill);
+ return fill;
}
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
-send_single_packet(struct rte_mbuf *m, uint8_t port)
+send_single_packet(struct rte_mbuf *m, uint16_t port)
{
uint32_t fill, lcore_id, len;
struct lcore_queue_conf *qconf;
if(++txmb->head == len)
txmb->head = 0;
- return (0);
+ return 0;
}
static inline void
-reassemble(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
+reassemble(struct rte_mbuf *m, uint16_t portid, uint32_t queue,
struct lcore_queue_conf *qconf, uint64_t tms)
{
struct ether_hdr *eth_hdr;
struct rte_ip_frag_death_row *dr;
struct rx_queue *rxq;
void *d_addr_bytes;
- uint8_t next_hop, dst_port;
+ uint32_t next_hop;
+ uint16_t dst_port;
rxq = &qconf->rx_queue_list[queue];
dst_port = portid;
/* if packet is IPv4 */
- if (m->ol_flags & (PKT_RX_IPV4_HDR)) {
+ if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
struct ipv4_hdr *ip_hdr;
uint32_t ip_dst;
dr = &qconf->death_row;
/* prepare mbuf: setup l2_len/l3_len. */
- m->pkt.vlan_macip.f.l2_len = sizeof(*eth_hdr);
- m->pkt.vlan_macip.f.l3_len = sizeof(*ip_hdr);
+ m->l2_len = sizeof(*eth_hdr);
+ m->l3_len = sizeof(*ip_hdr);
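+ /* rte_ipv4_frag_reassemble_packet() reads l2_len/l3_len
+ * from the mbuf, so they must be valid before the call. */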
/* process this fragment. */
mo = rte_ipv4_frag_reassemble_packet(tbl, dr, m, tms, ip_hdr);
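+ /* NULL means the datagram is still incomplete and the mbuf is
+ * now held in the table; non-NULL is the reassembled packet. */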
}
eth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv4);
- }
- /* if packet is IPv6 */
- else if (m->ol_flags & (PKT_RX_IPV6_HDR | PKT_RX_IPV6_HDR_EXT)) {
+ } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
+ /* if packet is IPv6 */
struct ipv6_extension_fragment *frag_hdr;
struct ipv6_hdr *ip_hdr;
dr = &qconf->death_row;
/* prepare mbuf: setup l2_len/l3_len. */
- m->pkt.vlan_macip.f.l2_len = sizeof(*eth_hdr);
- m->pkt.vlan_macip.f.l3_len = sizeof(*ip_hdr) + sizeof(*frag_hdr);
+ m->l2_len = sizeof(*eth_hdr);
+ m->l3_len = sizeof(*ip_hdr) + sizeof(*frag_hdr);
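+ /* for IPv6, l3_len spans the fixed header plus the
+ * fragment extension header that follows it. */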
mo = rte_ipv6_frag_reassemble_packet(tbl, dr, m, tms, ip_hdr, frag_hdr);
if (mo == NULL)
}
/* Find destination port */
- if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr, &next_hop) == 0 &&
+ if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr,
+ &next_hop) == 0 &&
(enabled_port_mask & 1 << next_hop) != 0) {
dst_port = next_hop;
}
unsigned lcore_id;
uint64_t diff_tsc, cur_tsc, prev_tsc;
int i, j, nb_rx;
- uint8_t portid;
+ uint16_t portid;
struct lcore_queue_conf *qconf;
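+ /* TSC cycles in ~BURST_TX_DRAIN_US microseconds, rounded up */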
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
for (i = 0; i < qconf->n_rx_queue; i++) {
portid = qconf->rx_queue_list[i].portid;
- RTE_LOG(INFO, IP_RSMBL, " -- lcoreid=%u portid=%hhu\n", lcore_id,
+ RTE_LOG(INFO, IP_RSMBL, " -- lcoreid=%u portid=%u\n", lcore_id,
portid);
}
errno = 0;
v = strtoul(str, &end, 10);
if (errno != 0 || *end != '\0')
- return (-EINVAL);
+ return -EINVAL;
if (v < min || v > max)
- return (-EINVAL);
+ return -EINVAL;
*val = (uint32_t)v;
- return (0);
+ return 0;
}
static int
errno = 0;
v = strtoul(str, &end, 10);
if (errno != 0)
- return (-EINVAL);
+ return -EINVAL;
if (*end != '\0') {
if (strncmp(frmt_sec, end, sizeof(frmt_sec)) == 0)
v *= MS_PER_S;
else if (strncmp(frmt_msec, end, sizeof (frmt_msec)) != 0)
- return (-EINVAL);
+ return -EINVAL;
}
if (v < min || v > max)
- return (-EINVAL);
+ return -EINVAL;
*val = (uint32_t)v;
- return (0);
+ return 0;
}
static int
optarg,
lgopts[option_index].name);
print_usage(prgname);
- return (ret);
+ return ret;
}
}
optarg,
lgopts[option_index].name);
print_usage(prgname);
- return (ret);
+ return ret;
}
}
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
static void
print_ethaddr(const char *name, const struct ether_addr *eth_addr)
{
- printf ("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
- eth_addr->addr_bytes[0],
- eth_addr->addr_bytes[1],
- eth_addr->addr_bytes[2],
- eth_addr->addr_bytes[3],
- eth_addr->addr_bytes[4],
- eth_addr->addr_bytes[5]);
+ char buf[ETHER_ADDR_FMT_SIZE];
+ ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+ printf("%s%s", name, buf);
}
/* Check the link status of all ports for up to 9 s, then print the final status */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == 0) {
+ if (link.link_status == ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
n = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST);
sz = sizeof (*mtb) + sizeof (mtb->m_table[0]) * n;
- if ((mtb = rte_zmalloc_socket(__func__, sz, CACHE_LINE_SIZE,
+ if ((mtb = rte_zmalloc_socket(__func__, sz, RTE_CACHE_LINE_SIZE,
socket)) == NULL) {
RTE_LOG(ERR, IP_RSMBL, "%s() for lcore: %u, port: %u "
"failed to allocate %zu bytes\n",
}
/*
- * At any given moment up to <max_flow_num * (MAX_FRAG_NUM - 1)>
+ * At any given moment up to <max_flow_num * (MAX_FRAG_NUM)>
* mbufs could be stored in the fragment table.
* Plus, each TX queue can hold up to <max_flow_num> packets.
*/
- nb_mbuf = 2 * RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST) * MAX_FRAG_NUM;
+ nb_mbuf = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST) * MAX_FRAG_NUM;
nb_mbuf *= (port_conf.rxmode.max_rx_pkt_len + BUF_SIZE - 1) / BUF_SIZE;
- nb_mbuf += RTE_TEST_RX_DESC_DEFAULT + RTE_TEST_TX_DESC_DEFAULT;
nb_mbuf *= 2; /* ipv4 and ipv6 */
+ nb_mbuf += nb_rxd + nb_txd;
nb_mbuf = RTE_MAX(nb_mbuf, (uint32_t)NB_MBUF);
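+ /* e.g. assuming the defaults DEF_FLOW_NUM = 0x1000 and
+ * MAX_FRAG_NUM = 4: 4096 flows * 4 frags * 5 mbufs per 9.5KB
+ * frame * 2 protocols + 2048 descriptors ~= 166K mbufs per queue */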
- rte_snprintf(buf, sizeof(buf), "mbuf_pool_%u_%u", lcore, queue);
+ snprintf(buf, sizeof(buf), "mbuf_pool_%u_%u", lcore, queue);
- if ((rxq->pool = rte_mempool_create(buf, nb_mbuf, MBUF_SIZE, 0,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
- socket, MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) == NULL) {
- RTE_LOG(ERR, IP_RSMBL, "mempool_create(%s) failed", buf);
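+ /* rte_pktmbuf_pool_create() bundles the pktmbuf ctor/init that
+ * rte_mempool_create() took explicitly; the per-lcore cache
+ * stands in for the old SP_PUT/SC_GET flags. */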
+ rxq->pool = rte_pktmbuf_pool_create(buf, nb_mbuf, MEMPOOL_CACHE_SIZE, 0,
+ MBUF_DATA_SIZE, socket);
+ if (rxq->pool == NULL) {
+ RTE_LOG(ERR, IP_RSMBL,
+ "rte_pktmbuf_pool_create(%s) failed", buf);
return -1;
}
char buf[PATH_MAX];
struct rte_lpm *lpm;
struct rte_lpm6 *lpm6;
+ struct rte_lpm_config lpm_config;
int socket;
unsigned lcore_id;
if (socket_lpm[socket] == NULL) {
RTE_LOG(INFO, IP_RSMBL, "Creating LPM table on socket %i\n", socket);
- rte_snprintf(buf, sizeof(buf), "IP_RSMBL_LPM_%i", socket);
+ snprintf(buf, sizeof(buf), "IP_RSMBL_LPM_%i", socket);
- lpm = rte_lpm_create(buf, socket, LPM_MAX_RULES, 0);
+ lpm_config.max_rules = LPM_MAX_RULES;
+ lpm_config.number_tbl8s = 256;
+ lpm_config.flags = 0;
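+ /* number_tbl8s bounds the rules with depth > 24; 256 is
+ * ample for the routes installed by init_routing_table() */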
+
+ lpm = rte_lpm_create(buf, socket, &lpm_config);
if (lpm == NULL) {
RTE_LOG(ERR, IP_RSMBL, "Cannot create LPM table\n");
return -1;
if (socket_lpm6[socket] == NULL) {
RTE_LOG(INFO, IP_RSMBL, "Creating LPM6 table on socket %i\n", socket);
- rte_snprintf(buf, sizeof(buf), "IP_RSMBL_LPM_%i", socket);
+ snprintf(buf, sizeof(buf), "IP_RSMBL_LPM_%i", socket);
- lpm6 = rte_lpm6_create("IP_RSMBL_LPM6", socket, &lpm6_config);
+ lpm6 = rte_lpm6_create(buf, socket, &lpm6_config);
if (lpm6 == NULL) {
RTE_LOG(ERR, IP_RSMBL, "Cannot create LPM6 table\n");
return -1;
qconf = &lcore_queue_conf[lcore];
for (i = 0; i < qconf->n_rx_queue; i++) {
- fprintf(stdout, " -- lcoreid=%u portid=%hhu "
+ fprintf(stdout, " -- lcoreid=%u portid=%u "
"frag tbl stat:\n",
lcore, qconf->rx_queue_list[i].portid);
rte_ip_frag_table_statistics_dump(stdout,
}
int
-MAIN(int argc, char **argv)
+main(int argc, char **argv)
{
struct lcore_queue_conf *qconf;
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_txconf *txconf;
struct rx_queue *rxq;
int ret, socket;
unsigned nb_ports;
uint16_t queueid;
unsigned lcore_id = 0, rx_lcore_id = 0;
uint32_t n_tx_queue, nb_lcores;
- uint8_t portid;
+ uint16_t portid;
/* init EAL */
ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid IP reassembly parameters\n");
- if (rte_eal_pci_probe() < 0)
- rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");
-
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
- else if (nb_ports == 0)
+ if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No ports found!\n");
nb_lcores = rte_lcore_count();
if (init_mem() < 0)
rte_panic("Cannot initialize memory structures!\n");
+ /* check if portmask has non-existent ports */
+ if (enabled_port_mask & ~(RTE_LEN2MASK(nb_ports, unsigned)))
+ rte_exit(EXIT_FAILURE, "Non-existent ports in portmask!\n");
+
/* initialize all ports */
for (portid = 0; portid < nb_ports; portid++) {
+ struct rte_eth_rxconf rxq_conf;
+ struct rte_eth_conf local_port_conf = port_conf;
+
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << portid)) == 0) {
printf("\nSkipping disabled port %d\n", portid);
qconf = &lcore_queue_conf[rx_lcore_id];
+ /* limit the frame size to the maximum supported by NIC */
+ rte_eth_dev_info_get(portid, &dev_info);
+ local_port_conf.rxmode.max_rx_pkt_len = RTE_MIN(
+ dev_info.max_rx_pktlen,
+ local_port_conf.rxmode.max_rx_pkt_len);
+
/* get the lcore_id for this port */
while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) {
qconf = &lcore_queue_conf[rx_lcore_id];
}
socket = rte_eth_dev_socket_id(portid);
if (socket == SOCKET_ID_ANY)
socket = 0;
rxq->portid = portid;
rxq->lpm = socket_lpm[socket];
rxq->lpm6 = socket_lpm6[socket];
+
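+ /* let the PMD clamp the requested ring sizes to its limits;
+ * nb_rxd/nb_txd are updated in place */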
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors: err=%d, port=%d\n",
+ ret, portid);
+
if (setup_queue_tbl(rxq, rx_lcore_id, queueid) < 0)
rte_exit(EXIT_FAILURE, "Failed to set up queue table\n");
qconf->n_rx_queue++;
n_tx_queue = nb_lcores;
if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
n_tx_queue = MAX_TX_QUEUE_PER_PORT;
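+ /* DEV_TX_OFFLOAD_MBUF_FAST_FREE assumes all mbufs freed on a
+ * queue come from one pool with refcnt == 1; request it only
+ * when the PMD advertises support */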
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ local_port_conf.txmode.offloads |=
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE;
ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue,
- &port_conf);
+ &local_port_conf);
if (ret < 0) {
printf("\n");
rte_exit(EXIT_FAILURE, "Cannot configure device: "
}
/* init one RX queue */
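+ /* start from the PMD's recommended defaults (replacing the
+ * hand-tuned rx_conf removed above) and apply our offloads */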
+ rxq_conf = dev_info.default_rxconf;
+ rxq_conf.offloads = local_port_conf.rxmode.offloads;
ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
- socket, &rx_conf,
+ socket, &rxq_conf,
rxq->pool);
if (ret < 0) {
printf("\n");
printf("txq=%u,%d,%d ", lcore_id, queueid, socket);
fflush(stdout);
+
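+ /* ETH_TXQ_FLAGS_IGNORE tells the PMD to honour the offloads
+ * field instead of the deprecated txq_flags bitmask */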
+ txconf = &dev_info.default_txconf;
+ txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
+ txconf->offloads = local_port_conf.txmode.offloads;
+
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
- socket, &tx_conf);
+ socket, txconf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
"port=%d\n", ret, portid);
if (init_routing_table() < 0)
rte_exit(EXIT_FAILURE, "Cannot init routing table\n");
- check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
+ check_all_ports_link_status(nb_ports, enabled_port_mask);
signal(SIGUSR1, signal_handler);
signal(SIGTERM, signal_handler);