uint64_t rx_offloads = port->dev_conf.rxmode.offloads;
if (!strcmp(res->name, "max-pkt-len")) {
- if (res->value < ETHER_MIN_LEN) {
+ if (res->value < RTE_ETHER_MIN_LEN) {
printf("max-pkt-len can not be less than %d\n",
- ETHER_MIN_LEN);
+ RTE_ETHER_MIN_LEN);
return;
}
if (res->value == port->dev_conf.rxmode.max_rx_pkt_len)
return;
port->dev_conf.rxmode.max_rx_pkt_len = res->value;
- if (res->value > ETHER_MAX_LEN)
+ if (res->value > RTE_ETHER_MAX_LEN)
rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
{
struct cmd_config_mtu_result *res = parsed_result;
- if (res->value < ETHER_MIN_LEN) {
- printf("mtu cannot be less than %d\n", ETHER_MIN_LEN);
+ if (res->value < RTE_ETHER_MIN_LEN) {
+ printf("mtu cannot be less than %d\n", RTE_ETHER_MIN_LEN);
return;
}
port_mtu_set(res->port_id, res->value);
{
struct cmd_config_rx_mode_flag *res = parsed_result;
portid_t pid;
+ int k;
if (!all_ports_stopped()) {
printf("Please stop all ports first\n");
return;
}
port->dev_conf.rxmode.offloads = rx_offloads;
+ /* Apply Rx offloads configuration */
+ for (k = 0; k < port->dev_info.max_rx_queues; k++)
+ port->rx_conf[k].offloads =
+ port->dev_conf.rxmode.offloads;
}
init_port_config();
}
}
+/* Propagate the port-level Tx offload configuration to every per-queue
+ * Tx config, so queues set up after this command inherit the new offloads.
+ */
+static void
+cmd_config_queue_tx_offloads(struct rte_port *port)
+{
+	int k;
+
+	/* Apply queue tx offloads configuration */
+	/* Bound must be max_tx_queues: we are writing tx_conf[], and Rx and
+	 * Tx queue counts can differ on a device.
+	 */
+	for (k = 0; k < port->dev_info.max_tx_queues; k++)
+		port->tx_conf[k].offloads =
+			port->dev_conf.txmode.offloads;
+}
+
+
static void
cmd_csum_parsed(void *parsed_result,
__attribute__((unused)) struct cmdline *cl,
ports[res->port_id].dev_conf.txmode.offloads &=
(~csum_offloads);
}
+ cmd_config_queue_tx_offloads(&ports[res->port_id]);
}
csum_show(res->port_id);
printf("TSO segment size for non-tunneled packets is %d\n",
ports[res->port_id].tso_segsz);
}
+ cmd_config_queue_tx_offloads(&ports[res->port_id]);
/* display warnings if configuration is not supported by the NIC */
rte_eth_dev_info_get(res->port_id, &dev_info);
"if outer L3 is IPv4; not necessary for IPv6\n");
}
+ cmd_config_queue_tx_offloads(&ports[res->port_id]);
cmd_reconfig_device_queue(res->port_id, 1, 1);
}
memset(&filter, 0, sizeof(struct rte_eth_mac_filter));
- rte_memcpy(&filter.mac_addr, &res->address, ETHER_ADDR_LEN);
+ rte_memcpy(&filter.mac_addr, &res->address, RTE_ETHER_ADDR_LEN);
/* set VF MAC filter */
filter.is_vf = 1;
memset(&tunnel_filter_conf, 0, sizeof(tunnel_filter_conf));
- ether_addr_copy(&res->outer_mac, &tunnel_filter_conf.outer_mac);
- ether_addr_copy(&res->inner_mac, &tunnel_filter_conf.inner_mac);
+ rte_ether_addr_copy(&res->outer_mac, &tunnel_filter_conf.outer_mac);
+ rte_ether_addr_copy(&res->inner_mac, &tunnel_filter_conf.inner_mac);
tunnel_filter_conf.inner_vlan = res->inner_vlan;
if (res->ip_value.family == AF_INET) {
return;
for (i = 0; i < nb_item; i++) {
- if (vlan_list[i] > ETHER_MAX_VLAN_ID) {
+ if (vlan_list[i] > RTE_ETHER_MAX_VLAN_ID) {
printf("Invalid vlan_id: must be < 4096\n");
return;
}
" when protocol is TCP.\n");
return;
}
- if (res->tcp_flags_value > TCP_FLAG_ALL) {
+ if (res->tcp_flags_value > RTE_NTUPLE_TCP_FLAGS_MASK) {
printf("invalid TCP flags.\n");
return;
}
" when protocol is TCP.\n");
return;
}
- if (res->tcp_flags_value > TCP_FLAG_ALL) {
+ if (res->tcp_flags_value > RTE_NTUPLE_TCP_FLAGS_MASK) {
printf("invalid TCP flags.\n");
return;
}
{
struct cmd_mcast_addr_result *res = parsed_result;
- if (!is_multicast_ether_addr(&res->mc_addr)) {
+ if (!rte_is_multicast_ether_addr(&res->mc_addr)) {
printf("Invalid multicast addr %02X:%02X:%02X:%02X:%02X:%02X\n",
res->mc_addr.addr_bytes[0], res->mc_addr.addr_bytes[1],
res->mc_addr.addr_bytes[2], res->mc_addr.addr_bytes[3],
if (vxlan_encap_conf.select_vlan)
vxlan_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);
rte_memcpy(vxlan_encap_conf.eth_src, res->eth_src.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
rte_memcpy(vxlan_encap_conf.eth_dst, res->eth_dst.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
}
cmdline_parse_inst_t cmd_set_vxlan = {
if (nvgre_encap_conf.select_vlan)
nvgre_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);
rte_memcpy(nvgre_encap_conf.eth_src, res->eth_src.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
rte_memcpy(nvgre_encap_conf.eth_dst, res->eth_dst.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
}
cmdline_parse_inst_t cmd_set_nvgre = {
if (l2_encap_conf.select_vlan)
l2_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);
rte_memcpy(l2_encap_conf.eth_src, res->eth_src.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
rte_memcpy(l2_encap_conf.eth_dst, res->eth_dst.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
}
cmdline_parse_inst_t cmd_set_l2_encap = {
if (mplsogre_encap_conf.select_vlan)
mplsogre_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);
rte_memcpy(mplsogre_encap_conf.eth_src, res->eth_src.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
rte_memcpy(mplsogre_encap_conf.eth_dst, res->eth_dst.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
}
cmdline_parse_inst_t cmd_set_mplsogre_encap = {
if (mplsoudp_encap_conf.select_vlan)
mplsoudp_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);
rte_memcpy(mplsoudp_encap_conf.eth_src, res->eth_src.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
rte_memcpy(mplsoudp_encap_conf.eth_dst, res->eth_dst.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
}
cmdline_parse_inst_t cmd_set_mplsoudp_encap = {