slave_port_init(uint8_t portid, struct rte_mempool *mbuf_pool)
{
int retval;
+ uint16_t nb_rxd = RTE_RX_DESC_DEFAULT;
+ uint16_t nb_txd = RTE_TX_DESC_DEFAULT;
if (portid >= rte_eth_dev_count())
rte_exit(EXIT_FAILURE, "Invalid port\n");
rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
portid, retval);
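+ /* Adjust the requested descriptor counts to the limits the device supports. */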
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
+ if (retval != 0)
+ rte_exit(EXIT_FAILURE, "port %u: rte_eth_dev_adjust_nb_rx_tx_desc "
+ "failed (res=%d)\n", portid, retval);
+
/* RX setup */
- retval = rte_eth_rx_queue_setup(portid, 0, RTE_RX_DESC_DEFAULT,
+ retval = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
rte_eth_dev_socket_id(portid), NULL,
mbuf_pool);
if (retval < 0)
portid, retval);
/* TX setup */
- retval = rte_eth_tx_queue_setup(portid, 0, RTE_TX_DESC_DEFAULT,
+ retval = rte_eth_tx_queue_setup(portid, 0, nb_txd,
rte_eth_dev_socket_id(portid), NULL);
if (retval < 0)
{
int retval;
uint8_t i;
+ uint16_t nb_rxd = RTE_RX_DESC_DEFAULT;
+ uint16_t nb_txd = RTE_TX_DESC_DEFAULT;
retval = rte_eth_bond_create("bond0", BONDING_MODE_ALB,
0 /*SOCKET_ID_ANY*/);
rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
BOND_PORT, retval);
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(BOND_PORT, &nb_rxd, &nb_txd);
+ if (retval != 0)
+ rte_exit(EXIT_FAILURE, "port %u: rte_eth_dev_adjust_nb_rx_tx_desc "
+ "failed (res=%d)\n", BOND_PORT, retval);
+
/* RX setup */
- retval = rte_eth_rx_queue_setup(BOND_PORT, 0, RTE_RX_DESC_DEFAULT,
+ retval = rte_eth_rx_queue_setup(BOND_PORT, 0, nb_rxd,
rte_eth_dev_socket_id(BOND_PORT), NULL,
mbuf_pool);
if (retval < 0)
BOND_PORT, retval);
/* TX setup */
- retval = rte_eth_tx_queue_setup(BOND_PORT, 0, RTE_TX_DESC_DEFAULT,
+ retval = rte_eth_tx_queue_setup(BOND_PORT, 0, nb_txd,
rte_eth_dev_socket_id(BOND_PORT), NULL);
if (retval < 0)
const uint16_t rxRings = 1, txRings = rte_lcore_count() - 1;
int retval;
uint16_t q;
+ uint16_t nb_rxd = RX_RING_SIZE;
+ uint16_t nb_txd = TX_RING_SIZE;
if (port >= rte_eth_dev_count())
return -1;
if (retval != 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
+ if (retval != 0)
+ return retval;
+
for (q = 0; q < rxRings; q++) {
- retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
+ retval = rte_eth_rx_queue_setup(port, q, nb_rxd,
rte_eth_dev_socket_id(port),
NULL, mbuf_pool);
if (retval < 0)
}
for (q = 0; q < txRings; q++) {
- retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
+ retval = rte_eth_tx_queue_setup(port, q, nb_txd,
rte_eth_dev_socket_id(port),
NULL);
if (retval < 0)
struct rte_eth_conf cfg_port;
struct rte_eth_dev_info dev_info;
char str_name[16];
+ uint16_t nb_rxd = PORT_RX_QUEUE_SIZE;
+ uint16_t nb_txd = PORT_TX_QUEUE_SIZE;
memset(&cfg_port, 0, sizeof(cfg_port));
cfg_port.txmode.mq_mode = ETH_MQ_TX_NONE;
if (rte_eth_dev_configure(idx_port, 1, 1, &cfg_port) < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_dev_configure failed");
+ if (rte_eth_dev_adjust_nb_rx_tx_desc(idx_port, &nb_rxd,
+ &nb_txd) < 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_dev_adjust_nb_rx_tx_desc failed");
if (rte_eth_rx_queue_setup(
- idx_port, 0, PORT_RX_QUEUE_SIZE,
+ idx_port, 0, nb_rxd,
rte_eth_dev_socket_id(idx_port), NULL,
ptr_port->pkt_pool) < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_rx_queue_setup failed"
);
if (rte_eth_tx_queue_setup(
- idx_port, 0, PORT_TX_QUEUE_SIZE,
+ idx_port, 0, nb_txd,
rte_eth_dev_socket_id(idx_port), NULL) < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_tx_queue_setup failed"
init_port(uint8_t port)
{
int ret;
+ uint16_t nb_rxd = NB_RXD;
+ uint16_t nb_txd = NB_TXD;
/* Initialise device and RX/TX queues */
PRINT_INFO("Initialising port %u ...", (unsigned)port);
FATAL_ERROR("Could not configure port%u (%d)",
(unsigned)port, ret);
- ret = rte_eth_rx_queue_setup(port, 0, NB_RXD, rte_eth_dev_socket_id(port),
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
+ if (ret < 0)
+ FATAL_ERROR("Could not adjust number of descriptors for port%u (%d)",
+ (unsigned)port, ret);
+
+ ret = rte_eth_rx_queue_setup(port, 0, nb_rxd,
+ rte_eth_dev_socket_id(port),
NULL,
pktmbuf_pool);
if (ret < 0)
FATAL_ERROR("Could not setup up RX queue for port%u (%d)",
(unsigned)port, ret);
- ret = rte_eth_tx_queue_setup(port, 0, NB_TXD, rte_eth_dev_socket_id(port),
+ ret = rte_eth_tx_queue_setup(port, 0, nb_txd,
+ rte_eth_dev_socket_id(port),
NULL);
if (ret < 0)
FATAL_ERROR("Could not setup up TX queue for port%u (%d)",
ret, portid);
}
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0) {
+ printf("\n");
+ rte_exit(EXIT_FAILURE, "Cannot adjust number of "
+ "descriptors: err=%d, port=%d\n", ret, portid);
+ }
+
/* init one RX queue */
ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
socket, NULL,
struct app_pktq_hwq_in_params *p_rxq =
&app->hwq_in_params[j];
uint32_t rxq_link_id, rxq_queue_id;
+ uint16_t nb_rxd = p_rxq->size;
sscanf(p_rxq->name, "RXQ%" PRIu32 ".%" PRIu32,
&rxq_link_id, &rxq_queue_id);
if (rxq_link_id != link_id)
continue;
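+ /* Only the Rx count is adjusted here; NULL skips the Tx side. */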
+ status = rte_eth_dev_adjust_nb_rx_tx_desc(
+ p_link->pmd_id,
+ &nb_rxd,
+ NULL);
+ if (status < 0)
+ rte_panic("%s (%" PRIu32 "): "
+ "%s adjust number of Rx descriptors "
+ "error (%" PRId32 ")\n",
+ p_link->name,
+ p_link->pmd_id,
+ p_rxq->name,
+ status);
+
status = rte_eth_rx_queue_setup(
p_link->pmd_id,
rxq_queue_id,
- p_rxq->size,
+ nb_rxd,
app_get_cpu_socket_id(p_link->pmd_id),
&p_rxq->conf,
app->mempool[p_rxq->mempool_id]);
struct app_pktq_hwq_out_params *p_txq =
&app->hwq_out_params[j];
uint32_t txq_link_id, txq_queue_id;
+ uint16_t nb_txd = p_txq->size;
sscanf(p_txq->name, "TXQ%" PRIu32 ".%" PRIu32,
&txq_link_id, &txq_queue_id);
if (txq_link_id != link_id)
continue;
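+ /* Only the Tx count is adjusted here; NULL skips the Rx side. */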
+ status = rte_eth_dev_adjust_nb_rx_tx_desc(
+ p_link->pmd_id,
+ NULL,
+ &nb_txd);
+ if (status < 0)
+ rte_panic("%s (%" PRIu32 "): "
+ "%s adjust number of Tx descriptors "
+ "error (%" PRId32 ")\n",
+ p_link->name,
+ p_link->pmd_id,
+ p_txq->name,
+ status);
+
status = rte_eth_tx_queue_setup(
p_link->pmd_id,
txq_queue_id,
- p_txq->size,
+ nb_txd,
app_get_cpu_socket_id(p_link->pmd_id),
&p_txq->conf);
if (status < 0)
nb_mbuf = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST) * MAX_FRAG_NUM;
nb_mbuf *= (port_conf.rxmode.max_rx_pkt_len + BUF_SIZE - 1) / BUF_SIZE;
nb_mbuf *= 2; /* ipv4 and ipv6 */
- nb_mbuf += RTE_TEST_RX_DESC_DEFAULT + RTE_TEST_TX_DESC_DEFAULT;
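+ /* Size the pool from the adjusted descriptor counts. */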
+ nb_mbuf += nb_rxd + nb_txd;
nb_mbuf = RTE_MAX(nb_mbuf, (uint32_t)NB_MBUF);
rxq->portid = portid;
rxq->lpm = socket_lpm[socket];
rxq->lpm6 = socket_lpm6[socket];
+
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors: err=%d, port=%d\n",
+ ret, portid);
+
if (setup_queue_tbl(rxq, rx_lcore_id, queueid) < 0)
rte_exit(EXIT_FAILURE, "Failed to set up queue table\n");
qconf->n_rx_queue++;
rte_exit(EXIT_FAILURE, "Cannot configure device: "
"err=%d, port=%d\n", ret, portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
+ "err=%d, port=%d\n", ret, portid);
+
/* init one TX queue per lcore */
tx_queueid = 0;
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n",
ret, portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors: err=%d, port=%d\n",
+ ret, portid);
+
rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
print_ethaddr(" Address:", &ports_eth_addr[portid]);
printf(", ");
init_port(uint8_t port)
{
int ret;
+ uint16_t nb_rxd = NB_RXD;
+ uint16_t nb_txd = NB_TXD;
/* Initialise device and RX/TX queues */
RTE_LOG(INFO, APP, "Initialising port %u ...\n", (unsigned)port);
rte_exit(EXIT_FAILURE, "Could not configure port%u (%d)\n",
(unsigned)port, ret);
- ret = rte_eth_rx_queue_setup(port, 0, NB_RXD,
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Could not adjust number of descriptors "
+ "for port%u (%d)\n", (unsigned)port, ret);
+
+ ret = rte_eth_rx_queue_setup(port, 0, nb_rxd,
rte_eth_dev_socket_id(port), NULL, pktmbuf_pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Could not setup up RX queue for "
"port%u (%d)\n", (unsigned)port, ret);
- ret = rte_eth_tx_queue_setup(port, 0, NB_TXD,
+ ret = rte_eth_tx_queue_setup(port, 0, nb_txd,
rte_eth_dev_socket_id(port), NULL);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Could not setup up TX queue for "
const uint16_t rx_rings = 1, tx_rings = 1;
int retval;
uint16_t q;
+ uint16_t nb_rxd = RX_RING_SIZE;
+ uint16_t nb_txd = TX_RING_SIZE;
if (port >= rte_eth_dev_count())
return -1;
if (retval != 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
+ if (retval != 0)
+ return retval;
+
/* Allocate and set up 1 RX queue per Ethernet port. */
for (q = 0; q < rx_rings; q++) {
- retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
+ retval = rte_eth_rx_queue_setup(port, q, nb_rxd,
rte_eth_dev_socket_id(port), NULL, mbuf_pool);
if (retval < 0)
return retval;
/* Allocate and set up 1 TX queue per Ethernet port. */
for (q = 0; q < tx_rings; q++) {
- retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
+ retval = rte_eth_tx_queue_setup(port, q, nb_txd,
rte_eth_dev_socket_id(port), NULL);
if (retval < 0)
return retval;
return -1;
}
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (retval < 0) {
+ printf("Cannot adjust number of descriptors: err=%d, port=%u\n",
+ retval, (unsigned) portid);
+ return -1;
+ }
+
/* init one RX queue */
fflush(stdout);
retval = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
ret, (unsigned) portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors: err=%d, port=%u\n",
+ ret, (unsigned) portid);
+
rte_eth_macaddr_get(portid, &l2fwd_ports_eth_addr[portid]);
/* init one RX queue */
"Cannot configure device: err=%d, port=%u\n",
ret, (unsigned) portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors: err=%d, port=%u\n",
+ ret, (unsigned) portid);
+
rte_eth_macaddr_get(portid, &l2fwd_ports_eth_addr[portid]);
/* init one RX queue */
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
ret, (unsigned) portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors: err=%d, port=%u\n",
+ ret, (unsigned) portid);
+
rte_eth_macaddr_get(portid,&l2fwd_ports_eth_addr[portid]);
/* init one RX queue */
*/
#define NB_MBUF RTE_MAX(\
- (nb_ports * nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT + \
- nb_ports * nb_lcores * MAX_PKT_BURST + \
- nb_ports * n_tx_queue * RTE_TEST_TX_DESC_DEFAULT + \
- nb_lcores * MEMPOOL_CACHE_SIZE), \
+ (nb_ports * nb_rx_queue * nb_rxd + \
+ nb_ports * nb_lcores * MAX_PKT_BURST + \
+ nb_ports * n_tx_queue * nb_txd + \
+ nb_lcores * MEMPOOL_CACHE_SIZE), \
(unsigned)8192)
#define MAX_PKT_BURST 32
"Cannot configure device: err=%d, port=%d\n",
ret, portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_dev_adjust_nb_rx_tx_desc: err=%d, port=%d\n",
+ ret, portid);
+
rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
print_ethaddr(" Address:", &ports_eth_addr[portid]);
printf(", ");
*/
#define NB_MBUF RTE_MAX ( \
- (nb_ports*nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT + \
+ (nb_ports*nb_rx_queue*nb_rxd + \
nb_ports*nb_lcores*MAX_PKT_BURST + \
- nb_ports*n_tx_queue*RTE_TEST_TX_DESC_DEFAULT + \
+ nb_ports*n_tx_queue*nb_txd + \
nb_lcores*MEMPOOL_CACHE_SIZE), \
(unsigned)8192)
rte_exit(EXIT_FAILURE, "Cannot configure device: "
"err=%d, port=%d\n", ret, portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors: err=%d, port=%d\n",
+ ret, portid);
+
rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
print_ethaddr(" Address:", &ports_eth_addr[portid]);
printf(", ");
* RTE_MAX is used to ensure that NB_MBUF never goes below a minimum value of 8192
*/
-#define NB_MBUF RTE_MAX ( \
- (nb_ports*nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT + \
- nb_ports*nb_lcores*MAX_PKT_BURST + \
- nb_ports*n_tx_queue*RTE_TEST_TX_DESC_DEFAULT + \
- nb_lcores*MEMPOOL_CACHE_SIZE), \
+#define NB_MBUF RTE_MAX ( \
+ (nb_ports*nb_rx_queue*nb_rxd + \
+ nb_ports*nb_lcores*MAX_PKT_BURST + \
+ nb_ports*n_tx_queue*nb_txd + \
+ nb_lcores*MEMPOOL_CACHE_SIZE), \
(unsigned)8192)
/*
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n",
ret, portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors: err=%d, port=%d\n",
+ ret, portid);
+
rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
print_ethaddr(" Address:", &ports_eth_addr[portid]);
printf(", ");
* value of 8192
*/
#define NB_MBUF RTE_MAX( \
- (nb_ports*nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT + \
- nb_ports*nb_lcores*MAX_PKT_BURST + \
- nb_ports*n_tx_queue*RTE_TEST_TX_DESC_DEFAULT + \
- nb_lcores*MEMPOOL_CACHE_SIZE), \
+ (nb_ports*nb_rx_queue*nb_rxd + \
+ nb_ports*nb_lcores*MAX_PKT_BURST + \
+ nb_ports*n_tx_queue*nb_txd + \
+ nb_lcores*MEMPOOL_CACHE_SIZE), \
(unsigned)8192)
/* Parse the argument given in the command line of the application */
"Cannot configure device: err=%d, port=%d\n",
ret, portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors: err=%d, "
+ "port=%d\n", ret, portid);
+
rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
print_ethaddr(" Address:", &ports_eth_addr[portid]);
printf(", ");
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
ret, (unsigned) portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_dev_adjust_nb_rx_tx_desc: err=%d, port=%u\n",
+ ret, (unsigned) portid);
+
/* register lsi interrupt callback, need to be after
* rte_eth_dev_configure(). if (intr_conf.lsc == 0), no
* lsc interrupt will be present, and below callback to
/* Init NIC ports and queues, then start the ports */
for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
struct rte_mempool *pool;
+ uint16_t nic_rx_ring_size;
+ uint16_t nic_tx_ring_size;
n_rx_queues = app_get_nic_rx_queues_per_port(port);
n_tx_queues = app.nic_tx_port_mask[port];
}
rte_eth_promiscuous_enable(port);
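+ /* Adjust the configured ring sizes and store the results back. */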
+ nic_rx_ring_size = app.nic_rx_ring_size;
+ nic_tx_ring_size = app.nic_tx_ring_size;
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(
+ port, &nic_rx_ring_size, &nic_tx_ring_size);
+ if (ret < 0) {
+ rte_panic("Cannot adjust number of descriptors for port %u (%d)\n",
+ (unsigned) port, ret);
+ }
+ app.nic_rx_ring_size = nic_rx_ring_size;
+ app.nic_tx_ring_size = nic_tx_ring_size;
+
/* Init RX queues */
for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
if (app.nic_rx_queue_mask[port][queue] == 0) {
}
};
const uint16_t rx_rings = 1, tx_rings = num_clients;
- const uint16_t rx_ring_size = RTE_MP_RX_DESC_DEFAULT;
- const uint16_t tx_ring_size = RTE_MP_TX_DESC_DEFAULT;
+ uint16_t rx_ring_size = RTE_MP_RX_DESC_DEFAULT;
+ uint16_t tx_ring_size = RTE_MP_TX_DESC_DEFAULT;
uint16_t q;
int retval;
&port_conf)) != 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port_num, &rx_ring_size,
+ &tx_ring_size);
+ if (retval != 0)
+ return retval;
+
for (q = 0; q < rx_rings; q++) {
retval = rte_eth_rx_queue_setup(port_num, q, rx_ring_size,
rte_eth_dev_socket_id(port_num),
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
ret, (unsigned) portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_dev_adjust_nb_rx_tx_desc: err=%d, port=%u\n",
+ ret, (unsigned) portid);
+
rte_eth_macaddr_get(portid,&l2fwd_ports_eth_addr[portid]);
/* init one RX queue */
struct rte_eth_dev_info info;
int retval;
uint16_t q;
+ uint16_t nb_rxd = RX_RING_SIZE;
+ uint16_t nb_txd = TX_RING_SIZE;
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
return 0;
if (retval < 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
+ if (retval < 0)
+ return retval;
+
for (q = 0; q < rx_rings; q ++) {
- retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
+ retval = rte_eth_rx_queue_setup(port, q, nb_rxd,
rte_eth_dev_socket_id(port),
&info.default_rxconf,
mbuf_pool);
}
for (q = 0; q < tx_rings; q ++) {
- retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
+ retval = rte_eth_tx_queue_setup(port, q, nb_txd,
rte_eth_dev_socket_id(port),
NULL);
if (retval < 0)
return ret;
}
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &rx_slots, &tx_slots);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1,
+ "Could not adjust number of descriptors for port %hhu\n",
+ portid);
+ return ret;
+ }
+
for (i = 0; i < conf->nr_tx_rings; i++) {
ret = rte_eth_tx_queue_setup(portid, i, tx_slots,
conf->socket_id, NULL);
const uint8_t nb_ports = rte_eth_dev_count();
int ret;
uint16_t q;
+ uint16_t nb_rxd = RX_DESC_PER_QUEUE;
+ uint16_t nb_txd = TX_DESC_PER_QUEUE;
if (port_id > nb_ports)
return -1;
if (ret != 0)
return ret;
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
+ if (ret != 0)
+ return ret;
+
for (q = 0; q < rxRings; q++) {
- ret = rte_eth_rx_queue_setup(port_id, q, RX_DESC_PER_QUEUE,
+ ret = rte_eth_rx_queue_setup(port_id, q, nb_rxd,
rte_eth_dev_socket_id(port_id), NULL,
mbuf_pool);
if (ret < 0)
}
for (q = 0; q < txRings; q++) {
- ret = rte_eth_tx_queue_setup(port_id, q, TX_DESC_PER_QUEUE,
+ ret = rte_eth_tx_queue_setup(port_id, q, nb_txd,
rte_eth_dev_socket_id(port_id), NULL);
if (ret < 0)
return ret;
*/
#define NB_MBUF RTE_MAX(\
- (nb_ports*nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT + \
- nb_ports*nb_lcores*MAX_PKT_BURST + \
- nb_ports*n_tx_queue*RTE_TEST_TX_DESC_DEFAULT + \
- nb_lcores*MEMPOOL_CACHE_SIZE), \
+ (nb_ports*nb_rx_queue*nb_rxd + \
+ nb_ports*nb_lcores*MAX_PKT_BURST + \
+ nb_ports*n_tx_queue*nb_txd + \
+ nb_lcores*MEMPOOL_CACHE_SIZE), \
(unsigned)8192)
#define MAX_PKT_BURST 32
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n",
ret, portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_dev_adjust_nb_rx_tx_desc: err=%d, port=%d\n",
+ ret, portid);
+
rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
print_ethaddr(" Address:", &ports_eth_addr[portid]);
printf(", ");
const uint16_t tx_rings = 1;
int retval;
uint16_t q;
+ uint16_t nb_rxd = RX_RING_SIZE;
+ uint16_t nb_txd = TX_RING_SIZE;
if (port >= rte_eth_dev_count())
return -1;
if (retval != 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
+ if (retval != 0)
+ return retval;
+
/* Allocate and set up 1 RX queue per Ethernet port. */
for (q = 0; q < rx_rings; q++) {
- retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
+ retval = rte_eth_rx_queue_setup(port, q, nb_rxd,
rte_eth_dev_socket_id(port), NULL, mbuf_pool);
if (retval < 0)
txconf = &dev_info.default_txconf;
txconf->txq_flags = 0;
- retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
+ retval = rte_eth_tx_queue_setup(port, q, nb_txd,
rte_eth_dev_socket_id(port), txconf);
if (retval < 0)
return retval;
main(int argc, char **argv)
{
uint32_t lcore_id;
+ uint16_t nb_rxd = NIC_RX_QUEUE_DESC;
+ uint16_t nb_txd = NIC_TX_QUEUE_DESC;
int ret;
/* EAL init */
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_rx, ret);
- ret = rte_eth_rx_queue_setup(port_rx, NIC_RX_QUEUE, NIC_RX_QUEUE_DESC,
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_rx, &nb_rxd, &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Port %d adjust number of descriptors error (%d)\n",
+ port_rx, ret);
+
+ ret = rte_eth_rx_queue_setup(port_rx, NIC_RX_QUEUE, nb_rxd,
rte_eth_dev_socket_id(port_rx),
NULL, pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_rx, ret);
- ret = rte_eth_tx_queue_setup(port_rx, NIC_TX_QUEUE, NIC_TX_QUEUE_DESC,
+ ret = rte_eth_tx_queue_setup(port_rx, NIC_TX_QUEUE, nb_txd,
rte_eth_dev_socket_id(port_rx),
NULL);
if (ret < 0)
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_tx, ret);
- ret = rte_eth_rx_queue_setup(port_tx, NIC_RX_QUEUE, NIC_RX_QUEUE_DESC,
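+ /* Reset to the defaults; the earlier adjust call may have changed them. */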
+ nb_rxd = NIC_RX_QUEUE_DESC;
+ nb_txd = NIC_TX_QUEUE_DESC;
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_tx, &nb_rxd, &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Port %d adjust number of descriptors error (%d)\n",
+ port_tx, ret);
+
+ ret = rte_eth_rx_queue_setup(port_tx, NIC_RX_QUEUE, nb_rxd,
rte_eth_dev_socket_id(port_tx),
NULL, pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_tx, ret);
- ret = rte_eth_tx_queue_setup(port_tx, NIC_TX_QUEUE, NIC_TX_QUEUE_DESC,
+ ret = rte_eth_tx_queue_setup(port_tx, NIC_TX_QUEUE, nb_txd,
rte_eth_dev_socket_id(port_tx),
NULL);
if (ret < 0)
struct rte_eth_link link;
struct rte_eth_rxconf rx_conf;
struct rte_eth_txconf tx_conf;
+ uint16_t rx_size;
+ uint16_t tx_size;
/* check if port already initialized (multistream configuration) */
if (app_inited_port_mask & (1u << portid))
rte_exit(EXIT_FAILURE, "Cannot configure device: "
"err=%d, port=%"PRIu8"\n", ret, portid);
+ rx_size = ring_conf.rx_size;
+ tx_size = ring_conf.tx_size;
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &rx_size, &tx_size);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "rte_eth_dev_adjust_nb_rx_tx_desc: "
+ "err=%d, port=%"PRIu8"\n", ret, portid);
+ ring_conf.rx_size = rx_size;
+ ring_conf.tx_size = tx_size;
+
/* init one RX queue */
fflush(stdout);
ret = rte_eth_rx_queue_setup(portid, 0, (uint16_t)ring_conf.rx_size,
void configure_eth_port(uint8_t port_id)
{
int ret;
+ uint16_t nb_rxd = RX_DESC_PER_QUEUE;
+ uint16_t nb_txd = TX_DESC_PER_QUEUE;
rte_eth_dev_stop(port_id);
rte_exit(EXIT_FAILURE, "Cannot configure port %u (error %d)\n",
(unsigned int) port_id, ret);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors for port %u (error %d)\n",
+ (unsigned int) port_id, ret);
+
/* Initialize the port's RX queue */
- ret = rte_eth_rx_queue_setup(port_id, 0, RX_DESC_PER_QUEUE,
+ ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
rte_eth_dev_socket_id(port_id),
NULL,
mbuf_pool);
(unsigned int) port_id, ret);
/* Initialize the port's TX queue */
- ret = rte_eth_tx_queue_setup(port_id, 0, TX_DESC_PER_QUEUE,
+ ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
rte_eth_dev_socket_id(port_id),
NULL);
if (ret < 0)
{
struct rte_eth_conf port_conf = port_conf_default;
const uint16_t rx_rings = 1, tx_rings = 1;
+ uint16_t nb_rxd = RX_RING_SIZE;
+ uint16_t nb_txd = TX_RING_SIZE;
int retval;
uint16_t q;
if (retval != 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
+ if (retval != 0)
+ return retval;
+
for (q = 0; q < rx_rings; q++) {
- retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
+ retval = rte_eth_rx_queue_setup(port, q, nb_rxd,
rte_eth_dev_socket_id(port), NULL, mbuf_pool);
if (retval < 0)
return retval;
}
for (q = 0; q < tx_rings; q++) {
- retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
+ retval = rte_eth_tx_queue_setup(port, q, nb_txd,
rte_eth_dev_socket_id(port), NULL);
if (retval < 0)
return retval;
}
};
const uint16_t rx_rings = 1, tx_rings = num_nodes;
- const uint16_t rx_ring_size = RTE_MP_RX_DESC_DEFAULT;
- const uint16_t tx_ring_size = RTE_MP_TX_DESC_DEFAULT;
+ uint16_t rx_ring_size = RTE_MP_RX_DESC_DEFAULT;
+ uint16_t tx_ring_size = RTE_MP_TX_DESC_DEFAULT;
uint16_t q;
int retval;
if (retval != 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port_num, &rx_ring_size,
+ &tx_ring_size);
+ if (retval != 0)
+ return retval;
+
for (q = 0; q < rx_rings; q++) {
retval = rte_eth_rx_queue_setup(port_num, q, rx_ring_size,
rte_eth_dev_socket_id(port_num),
{
struct rte_eth_conf port_conf = port_conf_default;
const uint16_t rx_rings = 1, tx_rings = 1;
+ uint16_t nb_rxd = RX_RING_SIZE;
+ uint16_t nb_txd = TX_RING_SIZE;
int retval;
uint16_t q;
if (retval != 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
+ if (retval != 0)
+ return retval;
+
/* Allocate and set up 1 RX queue per Ethernet port. */
for (q = 0; q < rx_rings; q++) {
- retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
+ retval = rte_eth_rx_queue_setup(port, q, nb_rxd,
rte_eth_dev_socket_id(port), NULL, mbuf_pool);
if (retval < 0)
return retval;
/* Allocate and set up 1 TX queue per Ethernet port. */
for (q = 0; q < tx_rings; q++) {
- retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
+ retval = rte_eth_tx_queue_setup(port, q, nb_txd,
rte_eth_dev_socket_id(port), NULL);
if (retval < 0)
return retval;
uint16_t q;
struct rte_eth_dev_info dev_info;
uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
- const uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
- const uint16_t tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
+ uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
+ uint16_t tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
struct rte_eth_udp_tunnel tunnel_udp;
struct rte_eth_rxconf *rxconf;
struct rte_eth_txconf *txconf;
if (retval != 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
+ &tx_ring_size);
+ if (retval != 0)
+ return retval;
+
/* Setup the queues. */
for (q = 0; q < rx_rings; q++) {
retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
return retval;
}
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
+ &tx_ring_size);
+ if (retval != 0) {
+ RTE_LOG(ERR, VHOST_PORT, "Failed to adjust number of descriptors "
+ "for port %u: %s.\n", port, strerror(-retval));
+ return retval;
+ }
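+ /* The mbuf pool was sized for the default ring size, so a larger
+ * adjusted value cannot be satisfied. */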
+ if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT) {
+ RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size "
+ "for Rx queues on port %u.\n", port);
+ return -1;
+ }
+
/* Setup the queues. */
for (q = 0; q < rx_rings; q ++) {
retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
struct rte_eth_rxconf *rxconf;
struct rte_eth_conf port_conf;
uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
- const uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT, tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
+ uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
+ uint16_t tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
int retval;
uint16_t q;
if (retval != 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
+ &tx_ring_size);
+ if (retval != 0)
+ return retval;
+ if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT ||
+ tx_ring_size > RTE_TEST_TX_DESC_DEFAULT) {
+ RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size for "
+ "port %u.\n", port);
+ return -1;
+ }
+
rte_eth_dev_info_get(port, &dev_info);
rxconf = &dev_info.default_rxconf;
rxconf->rx_drop_en = 1;
struct rte_eth_rxconf *rxconf;
struct rte_eth_conf port_conf;
uint16_t rxRings, txRings;
- const uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT, txRingSize = RTE_TEST_TX_DESC_DEFAULT;
+ uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
+ uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
int retval;
uint16_t q;
uint16_t queues_per_pool;
if (retval != 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
+ &txRingSize);
+ if (retval != 0)
+ return retval;
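+ /* Neither adjusted size may exceed the defaults the mbuf pool
+ * was sized from. */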
+ if (RTE_MAX(rxRingSize, txRingSize) > RTE_MAX(RTE_TEST_RX_DESC_DEFAULT,
+ RTE_TEST_TX_DESC_DEFAULT)) {
+ printf("Mbuf pool has an insufficient size for port %u.\n",
+ port);
+ return -1;
+ }
+
rte_eth_dev_info_get(port, &dev_info);
rxconf = &dev_info.default_rxconf;
rxconf->rx_drop_en = 1;
{
struct rte_eth_dev_info dev_info;
struct rte_eth_conf port_conf = {0};
- const uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
- const uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
+ uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
+ uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
int retval;
uint16_t q;
uint16_t queues_per_pool;
if (retval != 0)
return retval;
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
+ &txRingSize);
+ if (retval != 0)
+ return retval;
+ if (RTE_MAX(rxRingSize, txRingSize) >
+ RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, RTE_TEST_TX_DESC_DEFAULT)) {
+ printf("Mbuf pool has an insufficient size for port %u.\n",
+ port);
+ return -1;
+ }
+
for (q = 0; q < num_queues; q++) {
retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
rte_eth_dev_socket_id(port),