/*
* Configurable value of RX free threshold.
*/
-uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
+uint16_t rx_free_thresh = 32; /* Refill RX descriptors once every 32 packets.
+ This setting is needed for ixgbe to enable bulk alloc or vector
+ receive functionality. */
/*
* Configurable value of RX drop enable.
mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
mb = (struct rte_mbuf *) raw_mbuf;
- mb->type = RTE_MBUF_PKT;
mb->pool = mp;
mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
mb_ctor_arg->seg_buf_offset);
mb->buf_len = mb_ctor_arg->seg_buf_size;
- mb->type = RTE_MBUF_PKT;
mb->ol_flags = 0;
- mb->pkt.data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
- mb->pkt.nb_segs = 1;
- mb->pkt.vlan_macip.data = 0;
- mb->pkt.hash.rss = 0;
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
+ mb->nb_segs = 1;
+ mb->tx_offload = 0;
+ mb->vlan_tci = 0;
+ mb->hash.rss = 0;
}
static void
* Records which Mbuf pool to use by each logical core, if needed.
*/
for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
- mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
+ mbp = mbuf_pool_find(
+ rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
+
if (mbp == NULL)
mbp = mbuf_pool_find(0);
fwd_lcores[lc_id]->mbp = mbp;
void
-reconfig(portid_t new_port_id)
+reconfig(portid_t new_port_id, unsigned socket_id)
{
struct rte_port *port;
/* set flag to initialize port/queue */
port->need_reconfig = 1;
port->need_reconfig_queues = 1;
+ port->socket_id = socket_id;
init_port_config();
}
nb_ports = (portid_t) rte_eth_dev_count();
if (nb_ports == 0)
- rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
- "check that "
- "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
- "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
- "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
- "configuration file\n");
+ rte_exit(EXIT_FAILURE, "No probed ethernet device\n");
set_def_fwd_config();
if (nb_lcores == 0)