#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
#define NB_MBUF (32 * 1024)
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
-#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
-#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
-#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
-#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
-
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
},
};
-static const struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = RX_PTHRESH,
- .hthresh = RX_HTHRESH,
- .wthresh = RX_WTHRESH,
- },
-};
-
-static const struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = TX_PTHRESH,
- .hthresh = TX_HTHRESH,
- .wthresh = TX_WTHRESH,
- },
- .tx_free_thresh = 0, /* Use PMD default values */
- .tx_rs_thresh = 0, /* Use PMD default values */
-};
-
static struct rte_mempool * pktmbuf_pool[RTE_MAX_NUMA_NODES];
struct lcore_conf {
printf("txq=%u,%d,%d ", lcoreid, queueid, socketid);
fflush(stdout);
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
- socketid, &tx_conf);
+ socketid,
+ NULL);
if (ret < 0)
rte_panic("rte_eth_tx_queue_setup: err=%d, "
"port=%d\n", ret, portid);
fflush(stdout);
ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
- socketid, &rx_conf, pktmbuf_pool[socketid]);
+ socketid,
+ NULL,
+ pktmbuf_pool[socketid]);
if (ret < 0)
rte_panic("rte_eth_rx_queue_setup: err=%d,"
"port=%d\n", ret, portid);
* controller's datasheet and supporting DPDK documentation for guidance
* on how these parameters should be set.
*/
-/* RX ring configuration */
-static const struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = 8, /* Ring prefetch threshold */
- .hthresh = 8, /* Ring host threshold */
- .wthresh = 4, /* Ring writeback threshold */
- },
- .rx_free_thresh = 0, /* Immediately free RX descriptors */
-};
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-/* TX ring configuration */
-static const struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = 36, /* Ring prefetch threshold */
- .hthresh = 0, /* Ring host threshold */
- .wthresh = 0, /* Ring writeback threshold */
- },
- .tx_free_thresh = 0, /* Use PMD default values */
- .tx_rs_thresh = 0, /* Use PMD default values */
-};
/* Options for configuring ethernet port */
static const struct rte_eth_conf port_conf = {
(unsigned)port, ret);
ret = rte_eth_rx_queue_setup(port, 0, NB_RXD, rte_eth_dev_socket_id(port),
- &rx_conf, pktmbuf_pool);
+ NULL,
+ pktmbuf_pool);
if (ret < 0)
FATAL_ERROR("Could not setup up RX queue for port%u (%d)",
(unsigned)port, ret);
ret = rte_eth_tx_queue_setup(port, 0, NB_TXD, rte_eth_dev_socket_id(port),
- &tx_conf);
+ NULL);
if (ret < 0)
FATAL_ERROR("Could not setup up TX queue for port%u (%d)",
(unsigned)port, ret);
#define NB_MBUF 8192
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
-#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
-#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
-#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
-#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
-
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
},
};
-static const struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = RX_PTHRESH,
- .hthresh = RX_HTHRESH,
- .wthresh = RX_WTHRESH,
- },
-};
-
-static const struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = TX_PTHRESH,
- .hthresh = TX_HTHRESH,
- .wthresh = TX_WTHRESH,
- },
- .tx_free_thresh = 0, /* Use PMD default values */
- .tx_rs_thresh = 0, /* Use PMD default values */
-};
-
/*
* IPv4 forwarding table
*/
MAIN(int argc, char **argv)
{
struct lcore_queue_conf *qconf;
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_txconf *txconf;
struct rx_queue *rxq;
int socket, ret;
unsigned nb_ports;
/* init one RX queue */
ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
- socket, &rx_conf,
+ socket, NULL,
socket_direct_pool[socket]);
if (ret < 0) {
printf("\n");
socket = (int) rte_lcore_to_socket_id(lcore_id);
printf("txq=%u,%d ", lcore_id, queueid);
fflush(stdout);
+
+ rte_eth_dev_info_get(portid, &dev_info);
+ txconf = &dev_info.default_txconf;
+ txconf->txq_flags = 0;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
- socket, &tx_conf);
+ socket, txconf);
if (ret < 0) {
printf("\n");
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
static uint32_t max_flow_num = DEF_FLOW_NUM;
static uint32_t max_flow_ttl = DEF_FLOW_TTL;
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
-#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
-#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
-#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
-#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
-
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
#define NB_SOCKETS 8
},
};
-static const struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = RX_PTHRESH,
- .hthresh = RX_HTHRESH,
- .wthresh = RX_WTHRESH,
- },
- .rx_free_thresh = 32,
-};
-
-static const struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = TX_PTHRESH,
- .hthresh = TX_HTHRESH,
- .wthresh = TX_WTHRESH,
- },
- .tx_free_thresh = 0, /* Use PMD default values */
- .tx_rs_thresh = 0, /* Use PMD default values */
- .txq_flags = 0x0,
-};
-
/*
* IPv4 forwarding table
*/
MAIN(int argc, char **argv)
{
struct lcore_queue_conf *qconf;
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_txconf *txconf;
struct rx_queue *rxq;
int ret, socket;
unsigned nb_ports;
/* init one RX queue */
ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
- socket, &rx_conf,
+ socket, NULL,
rxq->pool);
if (ret < 0) {
printf("\n");
printf("txq=%u,%d,%d ", lcore_id, queueid, socket);
fflush(stdout);
+
+ rte_eth_dev_info_get(portid, &dev_info);
+ txconf = &dev_info.default_txconf;
+ txconf->txq_flags = 0;
+
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
- socket, &tx_conf);
+ socket, txconf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
"port=%d\n", ret, portid);
/* allow max jumbo frame 9.5 KB */
#define JUMBO_FRAME_MAX_SIZE 0x2600
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
-#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
-#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
-#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
-#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
-
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
},
};
-static const struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = RX_PTHRESH,
- .hthresh = RX_HTHRESH,
- .wthresh = RX_WTHRESH,
- },
-};
-
-static const struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = TX_PTHRESH,
- .hthresh = TX_HTHRESH,
- .wthresh = TX_WTHRESH,
- },
- .tx_free_thresh = 0, /* Use PMD default values */
- .tx_rs_thresh = 0, /* Use PMD default values */
-};
-
static struct rte_mempool *packet_pool, *header_pool, *clone_pool;
MAIN(int argc, char **argv)
{
struct lcore_queue_conf *qconf;
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_txconf *txconf;
int ret;
uint16_t queueid;
unsigned lcore_id = 0, rx_lcore_id = 0;
printf("rxq=%hu ", queueid);
fflush(stdout);
ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
- rte_eth_dev_socket_id(portid), &rx_conf,
+ rte_eth_dev_socket_id(portid),
+ NULL,
packet_pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, port=%d\n",
continue;
printf("txq=%u,%hu ", lcore_id, queueid);
fflush(stdout);
+
+ rte_eth_dev_info_get(portid, &dev_info);
+ txconf = &dev_info.default_txconf;
+ txconf->txq_flags = 0;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
- rte_lcore_to_socket_id(lcore_id), &tx_conf);
+ rte_lcore_to_socket_id(lcore_id), txconf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
"port=%d\n", ret, portid);
static struct kni_port_params *kni_port_params_array[RTE_MAX_ETHPORTS];
-/* RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-/* RX ring configuration */
-static const struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = 8, /* Ring prefetch threshold */
- .hthresh = 8, /* Ring host threshold */
- .wthresh = 4, /* Ring writeback threshold */
- },
- .rx_free_thresh = 0, /* Immediately free RX descriptors */
-};
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-/* TX ring configuration */
-static const struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = 36, /* Ring prefetch threshold */
- .hthresh = 0, /* Ring host threshold */
- .wthresh = 0, /* Ring writeback threshold */
- },
- .tx_free_thresh = 0, /* Use PMD default values */
- .tx_rs_thresh = 0, /* Use PMD default values */
-};
/* Options for configuring ethernet port */
static struct rte_eth_conf port_conf = {
(unsigned)port, ret);
ret = rte_eth_rx_queue_setup(port, 0, NB_RXD,
- rte_eth_dev_socket_id(port), &rx_conf, pktmbuf_pool);
+ rte_eth_dev_socket_id(port), NULL, pktmbuf_pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Could not setup up RX queue for "
"port%u (%d)\n", (unsigned)port, ret);
ret = rte_eth_tx_queue_setup(port, 0, NB_TXD,
- rte_eth_dev_socket_id(port), &tx_conf);
+ rte_eth_dev_socket_id(port), NULL);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Could not setup up TX queue for "
"port%u (%d)\n", (unsigned)port, ret);
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
-#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
-#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
-#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
-#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
-
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
/* mask of enabled ports */
},
};
-static const struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = RX_PTHRESH,
- .hthresh = RX_HTHRESH,
- .wthresh = RX_WTHRESH,
- },
-};
-
-static const struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = TX_PTHRESH,
- .hthresh = TX_HTHRESH,
- .wthresh = TX_WTHRESH,
- },
- .tx_free_thresh = 0, /* Use PMD default values */
- .tx_rs_thresh = 0, /* Use PMD default values */
-};
-
#define METADATA_NAME "l2fwd_ivshmem"
#define CMDLINE_OPT_FWD_CONF "fwd-conf"
/* init one RX queue */
fflush(stdout);
ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
- rte_eth_dev_socket_id(portid), &rx_conf,
+ rte_eth_dev_socket_id(portid),
+ NULL,
l2fwd_ivshmem_pktmbuf_pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
/* init one TX queue on each port */
fflush(stdout);
ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
- rte_eth_dev_socket_id(portid), &tx_conf);
+ rte_eth_dev_socket_id(portid),
+ NULL);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
ret, (unsigned) portid);
#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
#define NB_MBUF 8192
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
-#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
-#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
-#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
-#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
-
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
},
};
-static const struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = RX_PTHRESH,
- .hthresh = RX_HTHRESH,
- .wthresh = RX_WTHRESH,
- },
-};
-
-static const struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = TX_PTHRESH,
- .hthresh = TX_HTHRESH,
- .wthresh = TX_WTHRESH,
- },
- .tx_free_thresh = 0, /* Use PMD default values */
- .tx_rs_thresh = 0, /* Use PMD default values */
- /*
- * As the example won't handle mult-segments and offload cases,
- * set the flag by default.
- */
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
-};
-
struct rte_mempool * l2fwd_pktmbuf_pool = NULL;
/* Per-port statistics struct */
/* init one RX queue */
fflush(stdout);
ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
- rte_eth_dev_socket_id(portid), &rx_conf,
+ rte_eth_dev_socket_id(portid),
+ NULL,
l2fwd_pktmbuf_pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
/* init one TX queue on each port */
fflush(stdout);
ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
- rte_eth_dev_socket_id(portid), &tx_conf);
+ rte_eth_dev_socket_id(portid),
+ NULL);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
ret, (unsigned) portid);
nb_lcores * MEMPOOL_CACHE_SIZE), \
(unsigned)8192)
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
-#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
-#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
-#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
-#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
-
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
},
};
-static const struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = RX_PTHRESH,
- .hthresh = RX_HTHRESH,
- .wthresh = RX_WTHRESH,
- },
- .rx_free_thresh = 32,
-};
-
-static const struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = TX_PTHRESH,
- .hthresh = TX_HTHRESH,
- .wthresh = TX_WTHRESH,
- },
- .tx_free_thresh = 0, /* Use PMD default values */
- .tx_rs_thresh = 0, /* Use PMD default values */
- .txq_flags = 0x0,
-};
-
static struct rte_mempool *pktmbuf_pool[NB_SOCKETS];
/***********************start of ACL part******************************/
MAIN(int argc, char **argv)
{
struct lcore_conf *qconf;
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_txconf *txconf;
int ret;
unsigned nb_ports;
uint16_t queueid;
printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
fflush(stdout);
+
+ rte_eth_dev_info_get(portid, &dev_info);
+ txconf = &dev_info.default_txconf;
+ if (port_conf.rxmode.jumbo_frame)
+ txconf->txq_flags = 0;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
- socketid, &tx_conf);
+ socketid, txconf);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_tx_queue_setup: err=%d, "
fflush(stdout);
ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
- socketid, &rx_conf,
+ socketid, NULL,
pktmbuf_pool[socketid]);
if (ret < 0)
rte_exit(EXIT_FAILURE,
nb_lcores*MEMPOOL_CACHE_SIZE), \
(unsigned)8192)
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
-#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
-#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
-#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
-#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
-
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
#define NB_SOCKETS 8
},
};
-static const struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = RX_PTHRESH,
- .hthresh = RX_HTHRESH,
- .wthresh = RX_WTHRESH,
- },
- .rx_free_thresh = 32,
-};
-
-static const struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = TX_PTHRESH,
- .hthresh = TX_HTHRESH,
- .wthresh = TX_WTHRESH,
- },
- .tx_free_thresh = 0, /* Use PMD default values */
- .tx_rs_thresh = 0, /* Use PMD default values */
- .txq_flags = 0x0,
-};
-
static struct rte_mempool * pktmbuf_pool[NB_SOCKETS];
MAIN(int argc, char **argv)
{
struct lcore_conf *qconf;
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_txconf *txconf;
int ret;
unsigned nb_ports;
uint16_t queueid;
printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
fflush(stdout);
+
+ rte_eth_dev_info_get(portid, &dev_info);
+ txconf = &dev_info.default_txconf;
+ if (port_conf.rxmode.jumbo_frame)
+ txconf->txq_flags = 0;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
- socketid, &tx_conf);
+ socketid, txconf);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_tx_queue_setup: err=%d, "
fflush(stdout);
ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
- socketid, &rx_conf, pktmbuf_pool[socketid]);
+ socketid, NULL,
+ pktmbuf_pool[socketid]);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_rx_queue_setup: err=%d, "
},
};
-static const struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = RX_PTHRESH,
- .hthresh = RX_HTHRESH,
- .wthresh = RX_WTHRESH,
- },
- .rx_free_thresh = 32,
-};
-
-static const struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = TX_PTHRESH,
- .hthresh = TX_HTHRESH,
- .wthresh = TX_WTHRESH,
- },
- .tx_free_thresh = 0, /* Use PMD default values */
- .tx_rs_thresh = 0, /* Use PMD default values */
- .txq_flags = (ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOVLANOFFL |
- ETH_TXQ_FLAGS_NOXSUMSCTP |
- ETH_TXQ_FLAGS_NOXSUMUDP |
- ETH_TXQ_FLAGS_NOXSUMTCP)
-};
-
static struct rte_mempool * pktmbuf_pool[NB_SOCKETS];
MAIN(int argc, char **argv)
{
struct lcore_conf *qconf;
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_txconf *txconf;
int ret;
unsigned nb_ports;
uint16_t queueid;
printf("txq=%d,%d,%d ", portid, 0, socketid);
fflush(stdout);
+
+ rte_eth_dev_info_get(portid, &dev_info);
+ txconf = &dev_info.default_txconf;
+ if (port_conf.rxmode.jumbo_frame)
+ txconf->txq_flags = 0;
ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
- socketid, &tx_conf);
+ socketid, txconf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
"port=%d\n", ret, portid);
fflush(stdout);
ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
- socketid, &rx_conf, pktmbuf_pool[socketid]);
+ socketid, NULL,
+ pktmbuf_pool[socketid]);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d,"
"port=%d\n", ret, portid);
nb_lcores*MEMPOOL_CACHE_SIZE), \
(unsigned)8192)
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
-#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
-#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
-#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
-#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
-
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
},
};
-static const struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = RX_PTHRESH,
- .hthresh = RX_HTHRESH,
- .wthresh = RX_WTHRESH,
- },
- .rx_free_thresh = 32,
-};
-
-static struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = TX_PTHRESH,
- .hthresh = TX_HTHRESH,
- .wthresh = TX_WTHRESH,
- },
- .tx_free_thresh = 0, /* Use PMD default values */
- .tx_rs_thresh = 0, /* Use PMD default values */
- .txq_flags = (ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOVLANOFFL |
- ETH_TXQ_FLAGS_NOXSUMSCTP |
- ETH_TXQ_FLAGS_NOXSUMUDP |
- ETH_TXQ_FLAGS_NOXSUMTCP)
-
-};
-
static struct rte_mempool * pktmbuf_pool[NB_SOCKETS];
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
printf("jumbo frame is enabled - disabling simple TX path\n");
port_conf.rxmode.jumbo_frame = 1;
- tx_conf.txq_flags = 0;
/* if no max-pkt-len set, use the default value ETHER_MAX_LEN */
if (0 == getopt_long(argc, argvopt, "", &lenopts, &option_index)) {
MAIN(int argc, char **argv)
{
struct lcore_conf *qconf;
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_txconf *txconf;
int ret;
unsigned nb_ports;
uint16_t queueid;
printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
fflush(stdout);
+
+ rte_eth_dev_info_get(portid, &dev_info);
+ txconf = &dev_info.default_txconf;
+ if (port_conf.rxmode.jumbo_frame)
+ txconf->txq_flags = 0;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
- socketid, &tx_conf);
+ socketid, txconf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
"port=%d\n", ret, portid);
fflush(stdout);
ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
- socketid, &rx_conf, pktmbuf_pool[socketid]);
+ socketid,
+ NULL,
+ pktmbuf_pool[socketid]);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d,"
"port=%d\n", ret, portid);
#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
#define NB_MBUF 8192
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
-#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
-#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
-#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
-#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
-
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
},
};
-static const struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = RX_PTHRESH,
- .hthresh = RX_HTHRESH,
- .wthresh = RX_WTHRESH,
- },
-};
-
-static const struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = TX_PTHRESH,
- .hthresh = TX_HTHRESH,
- .wthresh = TX_WTHRESH,
- },
- .tx_free_thresh = 0, /* Use PMD default values */
- .tx_rs_thresh = 0, /* Use PMD default values */
-};
-
struct rte_mempool * lsi_pktmbuf_pool = NULL;
/* Per-port statistics struct */
/* init one RX queue */
fflush(stdout);
ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
- rte_eth_dev_socket_id(portid), &rx_conf,
+ rte_eth_dev_socket_id(portid),
+ NULL,
lsi_pktmbuf_pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d, port=%u\n",
/* init one TX queue logical core on each port */
fflush(stdout);
ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
- rte_eth_dev_socket_id(portid), &tx_conf);
+ rte_eth_dev_socket_id(portid),
+ NULL);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d,port=%u\n",
ret, (unsigned) portid);
},
};
-static struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = APP_DEFAULT_NIC_RX_PTHRESH,
- .hthresh = APP_DEFAULT_NIC_RX_HTHRESH,
- .wthresh = APP_DEFAULT_NIC_RX_WTHRESH,
- },
- .rx_free_thresh = APP_DEFAULT_NIC_RX_FREE_THRESH,
- .rx_drop_en = APP_DEFAULT_NIC_RX_DROP_EN,
-};
-
-static struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = APP_DEFAULT_NIC_TX_PTHRESH,
- .hthresh = APP_DEFAULT_NIC_TX_HTHRESH,
- .wthresh = APP_DEFAULT_NIC_TX_WTHRESH,
- },
- .tx_free_thresh = APP_DEFAULT_NIC_TX_FREE_THRESH,
- .tx_rs_thresh = APP_DEFAULT_NIC_TX_RS_THRESH,
-};
-
static void
app_assign_worker_ids(void)
{
queue,
(uint16_t) app.nic_rx_ring_size,
socket,
- &rx_conf,
+ NULL,
pool);
if (ret < 0) {
rte_panic("Cannot init RX queue %u for port %u (%d)\n",
0,
(uint16_t) app.nic_tx_ring_size,
socket,
- &tx_conf);
+ NULL);
if (ret < 0) {
rte_panic("Cannot init TX queue 0 for port %d (%d)\n",
port,
#define NO_FLAGS 0
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-/* Default configuration for rx and tx thresholds etc. */
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-#define MP_DEFAULT_PTHRESH 36
-#define MP_DEFAULT_RX_HTHRESH 8
-#define MP_DEFAULT_TX_HTHRESH 0
-#define MP_DEFAULT_WTHRESH 0
-
-static const struct rte_eth_rxconf rx_conf_default = {
- .rx_thresh = {
- .pthresh = MP_DEFAULT_PTHRESH,
- .hthresh = MP_DEFAULT_RX_HTHRESH,
- .wthresh = MP_DEFAULT_WTHRESH,
- },
-};
-
-static const struct rte_eth_txconf tx_conf_default = {
- .tx_thresh = {
- .pthresh = MP_DEFAULT_PTHRESH,
- .hthresh = MP_DEFAULT_TX_HTHRESH,
- .wthresh = MP_DEFAULT_WTHRESH,
- },
- .tx_free_thresh = 0, /* Use PMD default values */
- .tx_rs_thresh = 0, /* Use PMD default values */
-};
-
/* The mbuf pool for packet rx */
struct rte_mempool *pktmbuf_pool;
for (q = 0; q < rx_rings; q++) {
retval = rte_eth_rx_queue_setup(port_num, q, rx_ring_size,
- rte_eth_dev_socket_id(port_num), &rx_conf_default, pktmbuf_pool);
+ rte_eth_dev_socket_id(port_num),
+ NULL, pktmbuf_pool);
if (retval < 0) return retval;
}
for ( q = 0; q < tx_rings; q ++ ) {
retval = rte_eth_tx_queue_setup(port_num, q, tx_ring_size,
- rte_eth_dev_socket_id(port_num), &tx_conf_default);
+ rte_eth_dev_socket_id(port_num),
+ NULL);
if (retval < 0) return retval;
}
CMD_STOP,
};
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
-#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
-#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
-#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
-#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
-
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
},
};
-static const struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = RX_PTHRESH,
- .hthresh = RX_HTHRESH,
- .wthresh = RX_WTHRESH,
- },
-};
-
-static const struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = TX_PTHRESH,
- .hthresh = TX_HTHRESH,
- .wthresh = TX_WTHRESH,
- },
- .tx_free_thresh = 0, /* Use PMD default values */
- .tx_rs_thresh = 0, /* Use PMD default values */
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
-};
-
static struct rte_mempool * l2fwd_pktmbuf_pool[RTE_MAX_ETHPORTS];
/* Per-port statistics struct */
/* init one RX queue */
fflush(stdout);
ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
- rte_eth_dev_socket_id(portid), &rx_conf,
+ rte_eth_dev_socket_id(portid),
+ NULL,
l2fwd_pktmbuf_pool[portid]);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
/* init one TX queue on each port */
fflush(stdout);
ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
- rte_eth_dev_socket_id(portid), &tx_conf);
+ rte_eth_dev_socket_id(portid),
+ NULL);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
ret, (unsigned) portid);
#define PARAM_PROC_ID "proc-id"
#define PARAM_NUM_PROCS "num-procs"
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-/* Default configuration for rx and tx thresholds etc. */
-static const struct rte_eth_rxconf rx_conf_default = {
- .rx_thresh = {
- .pthresh = 8,
- .hthresh = 8,
- .wthresh = 4,
- },
-};
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-static const struct rte_eth_txconf tx_conf_default = {
- .tx_thresh = {
- .pthresh = 36,
- .hthresh = 0,
- .wthresh = 0,
- },
- .tx_free_thresh = 0, /* Use PMD default values */
- .tx_rs_thresh = 0, /* Use PMD default values */
-};
-
/* for each lcore, record the elements of the ports array to use */
struct lcore_ports{
unsigned start_port;
for (q = 0; q < rx_rings; q ++) {
retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
- rte_eth_dev_socket_id(port), &rx_conf_default,
+ rte_eth_dev_socket_id(port),
+ NULL,
mbuf_pool);
if (retval < 0)
return retval;
for (q = 0; q < tx_rings; q ++) {
retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
- rte_eth_dev_socket_id(port), &tx_conf_default);
+ rte_eth_dev_socket_id(port),
+ NULL);
if (retval < 0)
return retval;
}
},
};
-struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = 36,
- .hthresh = 0,
- .wthresh = 0,
- },
- .tx_free_thresh = 0,
- .tx_rs_thresh = 0,
- .txq_flags = (ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOVLANOFFL |
- ETH_TXQ_FLAGS_NOXSUMSCTP |
- ETH_TXQ_FLAGS_NOXSUMUDP |
- ETH_TXQ_FLAGS_NOXSUMTCP)
-};
-
-struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = 8,
- .hthresh = 8,
- .wthresh = 4,
- },
-};
-
#define MAX_QUEUE_NUM 1
#define RX_QUEUE_NUM 1
#define TX_QUEUE_NUM 1
struct rte_netmap_port_conf port_conf = {
	.eth_conf = &eth_conf,
- .tx_conf = &tx_conf,
- .rx_conf = &rx_conf,
.socket_id = SOCKET_ID_ANY,
.nr_tx_rings = TX_QUEUE_NUM,
.nr_rx_rings = RX_QUEUE_NUM,
for (i = 0; i < conf->nr_tx_rings; i++) {
ret = rte_eth_tx_queue_setup(portid, i, tx_slots,
- conf->socket_id, conf->tx_conf);
+ conf->socket_id, NULL);
if (ret < 0) {
RTE_LOG(ERR, USER1,
}
ret = rte_eth_rx_queue_setup(portid, i, rx_slots,
- conf->socket_id, conf->rx_conf, conf->pool);
+ conf->socket_id, NULL, conf->pool);
if (ret < 0) {
RTE_LOG(ERR, USER1,
/* copy config to the private storage. */
ports[portid].eth_conf = conf->eth_conf[0];
- ports[portid].rx_conf = conf->rx_conf[0];
- ports[portid].tx_conf = conf->tx_conf[0];
ports[portid].pool = conf->pool;
ports[portid].socket_id = conf->socket_id;
ports[portid].nr_tx_rings = conf->nr_tx_rings;
struct rte_netmap_port_conf {
struct rte_eth_conf *eth_conf;
- struct rte_eth_txconf *tx_conf;
- struct rte_eth_rxconf *rx_conf;
struct rte_mempool *pool;
int32_t socket_id;
uint16_t nr_tx_rings;
},
};
-static const struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = 8, /* RX prefetch threshold reg */
- .hthresh = 8, /* RX host threshold reg */
- .wthresh = 4, /* RX write-back threshold reg */
- },
- .rx_free_thresh = 32,
-};
-
-static const struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = 36, /* TX prefetch threshold reg */
- .hthresh = 0, /* TX host threshold reg */
- .wthresh = 0, /* TX write-back threshold reg */
- },
- .tx_free_thresh = 0,
- .tx_rs_thresh = 0,
- .txq_flags = 0x0,
-};
-
#define NIC_RX_QUEUE_DESC 128
#define NIC_TX_QUEUE_DESC 512
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_rx, ret);
- ret = rte_eth_rx_queue_setup(port_rx, NIC_RX_QUEUE, NIC_RX_QUEUE_DESC, rte_eth_dev_socket_id(port_rx), &rx_conf, pool);
+ ret = rte_eth_rx_queue_setup(port_rx, NIC_RX_QUEUE, NIC_RX_QUEUE_DESC,
+ rte_eth_dev_socket_id(port_rx),
+ NULL, pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_rx, ret);
- ret = rte_eth_tx_queue_setup(port_rx, NIC_TX_QUEUE, NIC_TX_QUEUE_DESC, rte_eth_dev_socket_id(port_rx), &tx_conf);
+ ret = rte_eth_tx_queue_setup(port_rx, NIC_TX_QUEUE, NIC_TX_QUEUE_DESC,
+ rte_eth_dev_socket_id(port_rx),
+ NULL);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d TX queue setup error (%d)\n", port_rx, ret);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_tx, ret);
- ret = rte_eth_rx_queue_setup(port_tx, NIC_RX_QUEUE, NIC_RX_QUEUE_DESC, rte_eth_dev_socket_id(port_tx), &rx_conf, pool);
+ ret = rte_eth_rx_queue_setup(port_tx, NIC_RX_QUEUE, NIC_RX_QUEUE_DESC,
+ rte_eth_dev_socket_id(port_tx),
+ NULL, pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_tx, ret);
- ret = rte_eth_tx_queue_setup(port_tx, NIC_TX_QUEUE, NIC_TX_QUEUE_DESC, rte_eth_dev_socket_id(port_tx), &tx_conf);
+ ret = rte_eth_tx_queue_setup(port_tx, NIC_TX_QUEUE, NIC_TX_QUEUE_DESC,
+ rte_eth_dev_socket_id(port_tx),
+ NULL);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d TX queue setup error (%d)\n", port_tx, ret);
},
};
-static const struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = 8,
- .hthresh = 8,
- .wthresh = 4,
- },
-};
-
-static const struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = 36,
- .hthresh = 0,
- .wthresh = 0,
- },
- .tx_free_thresh = 0,
- .tx_rs_thresh = 0,
-};
-
static struct rte_eth_fc_conf fc_conf = {
.mode = RTE_FC_TX_PAUSE,
.high_water = 80 * 510 / 100,
/* Initialize the port's RX queue */
ret = rte_eth_rx_queue_setup(port_id, 0, RX_DESC_PER_QUEUE,
- rte_eth_dev_socket_id(port_id), &rx_conf,
- mbuf_pool);
+ rte_eth_dev_socket_id(port_id),
+ NULL,
+ mbuf_pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Failed to setup RX queue on "
"port %u (error %d)\n", (unsigned) port_id, ret);
/* Initialize the port's TX queue */
ret = rte_eth_tx_queue_setup(port_id, 0, TX_DESC_PER_QUEUE,
- rte_eth_dev_socket_id(port_id), &tx_conf);
+ rte_eth_dev_socket_id(port_id),
+ NULL);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Failed to setup TX queue on "
"port %u (error %d)\n", (unsigned) port_id, ret);
/* Enable stats. */
static uint32_t enable_stats = 0;
-/* Default configuration for rx and tx thresholds etc. */
-static const struct rte_eth_rxconf rx_conf_default = {
- .rx_thresh = {
- .pthresh = RX_PTHRESH,
- .hthresh = RX_HTHRESH,
- .wthresh = RX_WTHRESH,
- },
- .rx_drop_en = 1,
-};
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe/igb PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-static const struct rte_eth_txconf tx_conf_default = {
- .tx_thresh = {
- .pthresh = TX_PTHRESH,
- .hthresh = TX_HTHRESH,
- .wthresh = TX_WTHRESH,
- },
- .tx_free_thresh = 0, /* Use PMD default values */
- .tx_rs_thresh = 0, /* Use PMD default values */
-};
-
/* empty vmdq configuration structure. Filled in programatically */
static const struct rte_eth_conf vmdq_conf_default = {
.rxmode = {
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_dev_info dev_info;
+ struct rte_eth_rxconf *rxconf;
struct rte_eth_conf port_conf;
uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
const uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT, tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
if (retval != 0)
return retval;
+ rte_eth_dev_info_get(port, &dev_info);
+ rxconf = &dev_info.default_rxconf;
+ rxconf->rx_drop_en = 1;
/* Setup the queues. */
for (q = 0; q < rx_rings; q ++) {
retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
- rte_eth_dev_socket_id(port), &rx_conf_default,
+ rte_eth_dev_socket_id(port), rxconf,
mbuf_pool);
if (retval < 0)
return retval;
}
for (q = 0; q < tx_rings; q ++) {
retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
- rte_eth_dev_socket_id(port), &tx_conf_default);
+ rte_eth_dev_socket_id(port),
+ NULL);
if (retval < 0)
return retval;
}
#define MBUF_CACHE_SIZE 64
#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
-#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
-#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
-#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
-#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
-
#define MAX_PKT_BURST 32
/*
static uint32_t num_queues = 8;
static uint32_t num_pools = 8;
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-/* Default configuration for rx and tx thresholds etc. */
-static const struct rte_eth_rxconf rx_conf_default = {
- .rx_thresh = {
- .pthresh = RX_PTHRESH,
- .hthresh = RX_HTHRESH,
- .wthresh = RX_WTHRESH,
- },
- .rx_drop_en = 1,
-};
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe/igb PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-static const struct rte_eth_txconf tx_conf_default = {
- .tx_thresh = {
- .pthresh = TX_PTHRESH,
- .hthresh = TX_HTHRESH,
- .wthresh = TX_WTHRESH,
- },
- .tx_free_thresh = 0, /* Use PMD default values */
- .tx_rs_thresh = 0, /* Use PMD default values */
-};
-
/* empty vmdq configuration structure. Filled in programatically */
static const struct rte_eth_conf vmdq_conf_default = {
.rxmode = {
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_dev_info dev_info;
+ struct rte_eth_rxconf *rxconf;
struct rte_eth_conf port_conf;
uint16_t rxRings, txRings = (uint16_t)rte_lcore_count();
const uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT, txRingSize = RTE_TEST_TX_DESC_DEFAULT;
if (retval != 0)
return retval;
+ rte_eth_dev_info_get(port, &dev_info);
+ rxconf = &dev_info.default_rxconf;
+ rxconf->rx_drop_en = 1;
for (q = 0; q < rxRings; q ++) {
retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
- rte_eth_dev_socket_id(port), &rx_conf_default,
- mbuf_pool);
+ rte_eth_dev_socket_id(port),
+ rxconf,
+ mbuf_pool);
if (retval < 0)
return retval;
}
for (q = 0; q < txRings; q ++) {
retval = rte_eth_tx_queue_setup(port, q, txRingSize,
- rte_eth_dev_socket_id(port), &tx_conf_default);
+ rte_eth_dev_socket_id(port),
+ NULL);
if (retval < 0)
return retval;
}
/* number of pools (if user does not specify any, 16 by default */
static enum rte_eth_nb_pools num_pools = ETH_16_POOLS;
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-/* Default configuration for rx and tx thresholds etc. */
-static const struct rte_eth_rxconf rx_conf_default = {
- .rx_thresh = {
- .pthresh = 8,
- .hthresh = 8,
- .wthresh = 4,
- },
-};
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-static const struct rte_eth_txconf tx_conf_default = {
- .tx_thresh = {
- .pthresh = 36,
- .hthresh = 0,
- .wthresh = 0,
- },
- .tx_free_thresh = 0, /* Use PMD default values */
- .tx_rs_thresh = 0, /* Use PMD default values */
-};
-
/* empty vmdq+dcb configuration structure. Filled in programatically */
static const struct rte_eth_conf vmdq_dcb_conf_default = {
.rxmode = {
for (q = 0; q < rxRings; q ++) {
retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
- rte_eth_dev_socket_id(port), &rx_conf_default,
+ rte_eth_dev_socket_id(port),
+ NULL,
mbuf_pool);
if (retval < 0)
return retval;
for (q = 0; q < txRings; q ++) {
retval = rte_eth_tx_queue_setup(port, q, txRingSize,
- rte_eth_dev_socket_id(port), &tx_conf_default);
+ rte_eth_dev_socket_id(port),
+ NULL);
if (retval < 0)
return retval;
}
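
The hunks above all apply the same change: drop each example's static rx_conf/tx_conf tables of hardcoded 82599/ixgbe thresholds and let the PMD supply its own defaults, either by passing NULL to rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup() or by starting from dev_info.default_txconf/default_rxconf and overriding only the field the application cares about. A minimal sketch of that pattern follows, assuming the same DPDK-era ethdev API used in the hunks; setup_queues() and its parameters are illustrative and not part of the patch.

    /*
     * Sketch only: query the PMD defaults instead of hardcoding thresholds.
     * The helper name, port id, descriptor counts and jumbo_frame flag are
     * illustrative; the ethdev calls are the ones used throughout the patch.
     */
    #include <rte_ethdev.h>
    #include <rte_mempool.h>

    static int
    setup_queues(uint8_t portid, uint16_t nb_rxd, uint16_t nb_txd,
                 struct rte_mempool *pool, int jumbo_frame)
    {
            struct rte_eth_dev_info dev_info;
            struct rte_eth_txconf *txconf;
            int ret;

            /* RX: a NULL rx_conf selects the PMD's default RX settings. */
            ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
                            rte_eth_dev_socket_id(portid), NULL, pool);
            if (ret < 0)
                    return ret;

            /*
             * TX: start from the defaults advertised by the driver and only
             * override what is needed, e.g. clear txq_flags so multi-segment
             * mbufs are allowed when jumbo frames are enabled.
             */
            rte_eth_dev_info_get(portid, &dev_info);
            txconf = &dev_info.default_txconf;
            if (jumbo_frame)
                    txconf->txq_flags = 0;

            return rte_eth_tx_queue_setup(portid, 0, nb_txd,
                            rte_eth_dev_socket_id(portid), txconf);
    }

Compared with the removed per-example tables, this keeps the examples portable: drivers other than ixgbe get thresholds tuned for their own hardware instead of values optimized for the Intel 82599.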