/*
* Configurable value of RX free threshold.
*/
-uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
+uint16_t rx_free_thresh = 32; /* Refill RX descriptors once every 32 packets.
+ This setting is needed for ixgbe to enable bulk alloc or vector
+ receive functionality. */
/*
* Configurable value of RX drop enable.
mb_ctor_arg->seg_buf_offset);
mb->buf_len = mb_ctor_arg->seg_buf_size;
mb->ol_flags = 0;
- mb->data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
mb->nb_segs = 1;
- mb->vlan_macip.data = 0;
+ mb->tx_offload = 0;
+ mb->vlan_tci = 0;
mb->hash.rss = 0;
}
mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
mbuf_seg_size);
mb_ctor_arg.seg_buf_offset =
- (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
+ (uint16_t) RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
/* Configuration of logical cores. */
fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
sizeof(struct fwd_lcore *) * nb_lcores,
- CACHE_LINE_SIZE);
+ RTE_CACHE_LINE_SIZE);
if (fwd_lcores == NULL) {
rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
"failed\n", nb_lcores);
for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
sizeof(struct fwd_lcore),
- CACHE_LINE_SIZE);
+ RTE_CACHE_LINE_SIZE);
if (fwd_lcores[lc_id] == NULL) {
rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
"failed\n");
/* Configuration of Ethernet ports. */
ports = rte_zmalloc("testpmd: ports",
sizeof(struct rte_port) * nb_ports,
- CACHE_LINE_SIZE);
+ RTE_CACHE_LINE_SIZE);
if (ports == NULL) {
rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
"failed\n", nb_ports);
void
-reconfig(portid_t new_port_id)
+reconfig(portid_t new_port_id, unsigned socket_id)
{
struct rte_port *port;
/* Reconfiguration of Ethernet ports. */
ports = rte_realloc(ports,
sizeof(struct rte_port) * nb_ports,
- CACHE_LINE_SIZE);
+ RTE_CACHE_LINE_SIZE);
if (ports == NULL) {
rte_exit(EXIT_FAILURE, "rte_realloc(%d struct rte_port) failed\n",
nb_ports);
/* set flag to initialize port/queue */
port->need_reconfig = 1;
port->need_reconfig_queues = 1;
+ port->socket_id = socket_id;
init_port_config();
}
/* init new */
nb_fwd_streams = nb_fwd_streams_new;
fwd_streams = rte_zmalloc("testpmd: fwd_streams",
- sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
+ sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
if (fwd_streams == NULL)
rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
"failed\n", nb_fwd_streams);
for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
- sizeof(struct fwd_stream), CACHE_LINE_SIZE);
+ sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
if (fwd_streams[sm_id] == NULL)
rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
" failed\n");
return 0;
}
-#ifdef RTE_EXEC_ENV_BAREMETAL
-#define main _main
-#endif
-
int
main(int argc, char** argv)
{
nb_ports = (portid_t) rte_eth_dev_count();
if (nb_ports == 0)
- rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
- "check that "
- "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
- "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
- "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
- "configuration file\n");
+ rte_exit(EXIT_FAILURE, "No probed ethernet device\n");
set_def_fwd_config();
if (nb_lcores == 0)