#ifdef RTE_LIBRTE_IEEE1588
&ieee1588_fwd_engine,
#endif
+ &shared_rxq_engine,
NULL,
};
*/
uint8_t record_burst_stats;
+/*
+ * Number of ports per shared Rx queue group, 0 to disable.
+ */
+uint32_t rxq_share;
+
unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];
if (ret != 0)
rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
- ret = update_jumbo_frame_offload(pid, 0);
- if (ret != 0)
- fprintf(stderr,
- "Updating jumbo frame offload failed for port %u\n",
- pid);
-
if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
port->dev_conf.txmode.offloads &=
~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
fwd_config_setup();
+ pkt_fwd_config_display(&cur_fwd_config);
+ if (!pkt_fwd_shared_rxq_check())
+ return;
+
port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
if (port_fwd_begin != NULL) {
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
if(!no_flush_rx)
flush_fwd_rx_queues();
- pkt_fwd_config_display(&cur_fwd_config);
rxtx_config_display();
fwd_stats_reset();
if (is_proc_primary()) {
port_flow_flush(pi);
+ port_flex_item_flush(pi);
rte_eth_dev_close(pi);
}
}
static void
-rxtx_port_config(struct rte_port *port)
+rxtx_port_config(portid_t pid)
{
uint16_t qid;
uint64_t offloads;
+ struct rte_port *port = &ports[pid];
for (qid = 0; qid < nb_rxq; qid++) {
offloads = port->rx_conf[qid].offloads;
port->rx_conf[qid] = port->dev_info.default_rxconf;
+
+ if (rxq_share > 0 &&
+ (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
+ /* Non-zero share group to enable RxQ share. */
+ port->rx_conf[qid].share_group = pid / rxq_share + 1;
+ port->rx_conf[qid].share_qid = qid; /* Equal mapping. */
+ }
+
if (offloads != 0)
port->rx_conf[qid].offloads = offloads;
}
/*
- * Helper function to arrange max_rx_pktlen value and JUMBO_FRAME offload,
- * MTU is also aligned.
+ * Helper function to set the MTU from a given maximum Rx frame size.
*
* port->dev_info should be set before calling this function.
*
- * if 'max_rx_pktlen' is zero, it is set to current device value, "MTU +
- * ETH_OVERHEAD". This is useful to update flags but not MTU value.
- *
* return 0 on success, negative on error
*/
int
-update_jumbo_frame_offload(portid_t portid, uint32_t max_rx_pktlen)
+update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
{
struct rte_port *port = &ports[portid];
uint32_t eth_overhead;
- uint64_t rx_offloads;
uint16_t mtu, new_mtu;
- bool on;
eth_overhead = get_eth_overhead(&port->dev_info);
return -1;
}
- if (max_rx_pktlen == 0)
- max_rx_pktlen = mtu + eth_overhead;
-
- rx_offloads = port->dev_conf.rxmode.offloads;
new_mtu = max_rx_pktlen - eth_overhead;
- if (new_mtu <= RTE_ETHER_MTU) {
- rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
- on = false;
- } else {
- if ((port->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
- fprintf(stderr,
- "Frame size (%u) is not supported by port %u\n",
- max_rx_pktlen, portid);
- return -1;
- }
- rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- on = true;
- }
-
- if (rx_offloads != port->dev_conf.rxmode.offloads) {
- uint16_t qid;
-
- port->dev_conf.rxmode.offloads = rx_offloads;
-
- /* Apply JUMBO_FRAME offload configuration to Rx queue(s) */
- for (qid = 0; qid < port->dev_info.nb_rx_queues; qid++) {
- if (on)
- port->rx_conf[qid].offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- else
- port->rx_conf[qid].offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
- }
- }
-
if (mtu == new_mtu)
return 0;
}
}
- rxtx_port_config(port);
+ rxtx_port_config(pid);
ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
if (ret != 0)
memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
- rxtx_port_config(rte_port);
+ rxtx_port_config(pid);
/* VLAN filter */
rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
for (i = 0; i < RTE_DIM(vlan_tags); i++)
rte_stats_bitrate_reg(bitrate_data);
}
#endif
-
#ifdef RTE_LIB_CMDLINE
if (strlen(cmdline_filename) != 0)
cmdline_read_from_file(cmdline_filename);