{
struct cmd_config_rx_mode_flag *res = parsed_result;
portid_t pid;
+ int k;
if (!all_ports_stopped()) {
printf("Please stop all ports first\n");
return;
}
port->dev_conf.rxmode.offloads = rx_offloads;
+ /* Apply Rx offloads configuration */
+ for (k = 0; k < port->dev_info.max_rx_queues; k++)
+ port->rx_conf[k].offloads =
+ port->dev_conf.rxmode.offloads;
}
init_port_config();
}
}
+/* Propagate the port-level Tx offload configuration to every per-queue
+ * Tx config, so queue setup picks up the current txmode offloads.
+ */
+static void
+cmd_config_queue_tx_offloads(struct rte_port *port)
+{
+	int k;
+
+	/* Apply queue tx offloads configuration.
+	 * tx_conf[] is indexed per Tx queue, so the bound must be
+	 * max_tx_queues (not max_rx_queues).
+	 */
+	for (k = 0; k < port->dev_info.max_tx_queues; k++)
+		port->tx_conf[k].offloads =
+			port->dev_conf.txmode.offloads;
+}
+
static void
cmd_csum_parsed(void *parsed_result,
__attribute__((unused)) struct cmdline *cl,
ports[res->port_id].dev_conf.txmode.offloads &=
(~csum_offloads);
}
+ cmd_config_queue_tx_offloads(&ports[res->port_id]);
}
csum_show(res->port_id);
printf("TSO segment size for non-tunneled packets is %d\n",
ports[res->port_id].tso_segsz);
}
+ cmd_config_queue_tx_offloads(&ports[res->port_id]);
/* display warnings if configuration is not supported by the NIC */
rte_eth_dev_info_get(res->port_id, &dev_info);
"if outer L3 is IPv4; not necessary for IPv6\n");
}
+ cmd_config_queue_tx_offloads(&ports[res->port_id]);
cmd_reconfig_device_queue(res->port_id, 1, 1);
}