static struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_NONE,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
+ .mq_mode = RTE_ETH_MQ_RX_NONE,
.split_hdr_size = 0,
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
.lpbk_mode = 1, /* enable loopback */
};
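The ETH_MQ_* flags were renamed to RTE_ETH_MQ_* in DPDK 21.11, and rxmode.max_rx_pkt_len was removed in the same release: the maximum frame size now follows rxmode.mtu, so the field is dropped here rather than renamed. A minimal sketch of setting an explicit MTU under the new API; the port id and MTU value are illustrative, not from this patch:

    uint16_t port_id = 0;                 /* illustrative port id */
    struct rte_eth_conf conf = port_conf;

    /* DPDK >= 21.11: frame size is derived from rxmode.mtu instead of
     * max_rx_pkt_len; leaving it unset keeps the 1500-byte default. */
    conf.rxmode.mtu = 1500;

    if (rte_eth_dev_configure(port_id, 1, 1, &conf) != 0)
        rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);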
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
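For context, the RTE_ETH_LINK_DOWN hunk sits inside the usual link-status polling helper; below is a condensed sketch of the enclosing loop, assuming the common check_all_ports_link_status() pattern rather than code shown in this patch:

    struct rte_eth_link link;
    uint8_t all_ports_up = 1;
    uint16_t portid;

    RTE_ETH_FOREACH_DEV(portid) {
        memset(&link, 0, sizeof(link));
        if (rte_eth_link_get_nowait(portid, &link) < 0) {
            all_ports_up = 0;
            continue;               /* the "continue;" quoted above */
        }
        /* clear all_ports_up flag if any link down */
        if (link.link_status == RTE_ETH_LINK_DOWN) {
            all_ports_up = 0;
            break;
        }
    }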
static int
exec_burst(uint32_t flags, int lcore)
{
- unsigned i, portid, nb_tx = 0;
+ unsigned int portid, nb_tx = 0;
struct lcore_conf *conf;
uint32_t pkt_per_port;
- int num, idx = 0;
+ int num, i, idx = 0;
int diff_tsc;
conf = &lcore_conf[lcore];
rte_atomic64_set(&start, 1);
/* start xmit */
+ i = 0;
while (num) {
nb_tx = RTE_MIN(MAX_PKT_BURST, num);
- for (i = 0; i < conf->nb_ports; i++) {
- portid = conf->portlist[i];
- nb_tx = rte_eth_tx_burst(portid, 0,
- &tx_burst[idx], nb_tx);
- idx += nb_tx;
- num -= nb_tx;
- }
-
+ portid = conf->portlist[i];
+ nb_tx = rte_eth_tx_burst(portid, 0, &tx_burst[idx], nb_tx);
+ idx += nb_tx;
+ num -= nb_tx;
+ i = (i >= conf->nb_ports - 1) ? 0 : (i + 1);
}
sleep(5);
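The old loop reused nb_tx across an inner per-port pass, so each while-iteration could advance idx and decrement num by up to nb_ports * MAX_PKT_BURST, letting idx run past the end of tx_burst once num was nearly exhausted. The rewrite transmits on one port per iteration and wraps i round-robin; if rte_eth_tx_burst() returns short, num stays positive and the next iteration simply retries on the following port. A stand-alone trace of the new walk; the two-port, 48-packet budget is illustrative:

    /* RTE_MIN() comes from rte_common.h */
    unsigned int i = 0, idx = 0, nb_ports = 2;
    int num = 48, nb_tx;

    while (num) {
        nb_tx = RTE_MIN(32, num);   /* MAX_PKT_BURST stand-in */
        /* iteration 1: 32 pkts via port[0]; iteration 2: 16 via port[1] */
        idx += nb_tx;
        num -= nb_tx;
        i = (i >= nb_ports - 1) ? 0 : (i + 1);
    }
    /* idx == 48 on exit: the index never walks past tx_burst */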
/* bulk alloc rx, full-featured tx */
tx_conf.tx_rs_thresh = 32;
tx_conf.tx_free_thresh = 32;
- port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
return 0;
} else if (!strcmp(mode, "hybrid")) {
/* bulk alloc rx, vector tx */
tx_conf.tx_rs_thresh = 32;
tx_conf.tx_free_thresh = 32;
- port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
return 0;
} else if (!strcmp(mode, "full")) {
/* full feature rx,tx pair */
tx_conf.tx_rs_thresh = 32;
tx_conf.tx_free_thresh = 32;
- port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
+ port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
return 0;
}
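The offload flags requested by these mode branches should be validated against the device before configuration; a hedged sketch of that check, with port_id illustrative and rte_eth_dev_info_get() per the stable ethdev API:

    struct rte_eth_dev_info dev_info;

    if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
        rte_exit(EXIT_FAILURE, "cannot get info for port %u\n", port_id);

    /* RTE_ETH_RX_OFFLOAD_CHECKSUM bundles the IPv4/UDP/TCP Rx checksum
     * flags; mask off anything the device cannot do before configure. */
    if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) !=
            port_conf.rxmode.offloads)
        port_conf.rxmode.offloads &= dev_info.rx_offload_capa;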