*/
unsigned int num_procs = 1;
+/*
+ * Negotiate delivery of Rx metadata (flow FLAG/MARK marks and tunnel IDs)
+ * with the PMD on the given port.
+ *
+ * Negotiation is carried out once, by the primary process only; secondary
+ * processes return immediately.  The full feature set is requested and the
+ * PMD rewrites the mask to the subset it can actually deliver (presumably —
+ * the post-call bit checks rely on this; see rte_eth_rx_metadata_negotiate()
+ * docs).  Each feature the PMD dropped is reported at DEBUG level.  A port
+ * whose driver does not implement negotiation (-ENOTSUP) is accepted as-is;
+ * any other error is fatal.
+ */
+static void
+eth_rx_metadata_negotiate_mp(uint16_t port_id)
+{
+ uint64_t rx_meta_features = 0;
+ int ret;
+
+ /* Only the primary process negotiates with the PMD. */
+ if (!is_proc_primary())
+ return;
+
+ /* Request every Rx metadata feature testpmd can make use of. */
+ rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
+ rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
+ rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;
+
+ ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
+ if (ret == 0) {
+ /* Log any requested feature the PMD declined to deliver. */
+ if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
+ TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
+ port_id);
+ }
+
+ if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
+ TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
+ port_id);
+ }
+
+ if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
+ TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
+ port_id);
+ }
+ } else if (ret != -ENOTSUP) {
+ /* -ENOTSUP means the driver has no negotiation hook: not an error. */
+ rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
+ port_id, rte_strerror(-ret));
+ }
+}
+
static int
eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
const struct rte_eth_conf *dev_conf)
int ret;
int i;
+ eth_rx_metadata_negotiate_mp(pid);
+
port->dev_conf.txmode = tx_mode;
port->dev_conf.rxmode = rx_mode;
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
- port_fwd_begin_t port_fwd_begin;
unsigned int i;
unsigned int lc_id;
int diag;
- port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
- if (port_fwd_begin != NULL) {
- for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
- (*port_fwd_begin)(fwd_ports_ids[i]);
- }
for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
lc_id = fwd_lcores_cpuids[i];
if ((interactive == 0) || (lc_id != rte_lcore_id())) {
fprintf(stderr, "Packet forwarding already started\n");
return;
}
- test_done = 0;
fwd_config_setup();
+ port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
+ if (port_fwd_begin != NULL) {
+ for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
+ if (port_fwd_begin(fwd_ports_ids[i])) {
+ fprintf(stderr,
+ "Packet forwarding is not ready\n");
+ return;
+ }
+ }
+ }
+
+ if (with_tx_first) {
+ port_fwd_begin = tx_only_engine.port_fwd_begin;
+ if (port_fwd_begin != NULL) {
+ for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
+ if (port_fwd_begin(fwd_ports_ids[i])) {
+ fprintf(stderr,
+ "Packet forwarding is not ready\n");
+ return;
+ }
+ }
+ }
+ }
+
+ test_done = 0;
+
if(!no_flush_rx)
flush_fwd_rx_queues();
fwd_stats_reset();
if (with_tx_first) {
- port_fwd_begin = tx_only_engine.port_fwd_begin;
- if (port_fwd_begin != NULL) {
- for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
- (*port_fwd_begin)(fwd_ports_ids[i]);
- }
while (with_tx_first--) {
launch_packet_forwarding(
run_one_txonly_burst_on_core);
}
rte_port = &ports[pid];
- memset(&port_conf, 0, sizeof(struct rte_eth_conf));
-
- port_conf.rxmode = rte_port->dev_conf.rxmode;
- port_conf.txmode = rte_port->dev_conf.txmode;
+ /* retain the original device configuration. */
+ memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));
/*set configuration of DCB in vt mode and DCB in non-vt mode*/
retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);