testpmd was doing old BSD lint-style casts of the result of rte_memcpy
to (void). These casts are unnecessary.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Reviewed-by: Kirill Rybalchenko <kirill.rybalchenko@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
memset(&filter, 0, sizeof(struct rte_eth_mac_filter));
memset(&filter, 0, sizeof(struct rte_eth_mac_filter));
- (void)rte_memcpy(&filter.mac_addr, &res->address, ETHER_ADDR_LEN);
+ rte_memcpy(&filter.mac_addr, &res->address, ETHER_ADDR_LEN);
/* set VF MAC filter */
filter.is_vf = 1;
/* set VF MAC filter */
filter.is_vf = 1;
memset(&filter, 0, sizeof(filter));
if (!strcmp(res->mac, "mac_addr")) {
filter.flags |= RTE_ETHTYPE_FLAGS_MAC;
memset(&filter, 0, sizeof(filter));
if (!strcmp(res->mac, "mac_addr")) {
filter.flags |= RTE_ETHTYPE_FLAGS_MAC;
- (void)rte_memcpy(&filter.mac_addr, &res->mac_addr,
+ rte_memcpy(&filter.mac_addr, &res->mac_addr,
sizeof(struct ether_addr));
}
if (!strcmp(res->drop, "drop"))
sizeof(struct ether_addr));
}
if (!strcmp(res->drop, "drop"))
#define IPV6_ADDR_TO_ARRAY(ip_addr, ip) \
do { \
if ((ip_addr).family == AF_INET6) \
#define IPV6_ADDR_TO_ARRAY(ip_addr, ip) \
do { \
if ((ip_addr).family == AF_INET6) \
- (void)rte_memcpy(&(ip), \
&((ip_addr).addr.ipv6), \
sizeof(struct in6_addr)); \
else { \
&((ip_addr).addr.ipv6), \
sizeof(struct in6_addr)); \
else { \
}
if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
}
if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
- (void)rte_memcpy(&entry.input.flow.mac_vlan_flow.mac_addr,
+ rte_memcpy(&entry.input.flow.mac_vlan_flow.mac_addr,
&res->mac_addr,
sizeof(struct ether_addr));
if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
&res->mac_addr,
sizeof(struct ether_addr));
if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
- (void)rte_memcpy(&entry.input.flow.tunnel_flow.mac_addr,
+ rte_memcpy(&entry.input.flow.tunnel_flow.mac_addr,
&res->mac_addr,
sizeof(struct ether_addr));
entry.input.flow.tunnel_flow.tunnel_type =
&res->mac_addr,
sizeof(struct ether_addr));
entry.input.flow.tunnel_flow.tunnel_type =
rte_cpu_to_be_32(res->tunnel_id_value);
}
rte_cpu_to_be_32(res->tunnel_id_value);
}
- (void)rte_memcpy(entry.input.flow_ext.flexbytes,
+ rte_memcpy(entry.input.flow_ext.flexbytes,
flexbytes,
RTE_ETH_FDIR_MAX_FLEXLEN);
flexbytes,
RTE_ETH_FDIR_MAX_FLEXLEN);
memset(&port->dev_conf.fdir_conf.flex_conf.flex_mask[i],
0, sizeof(struct rte_eth_fdir_flex_mask));
port->dev_conf.fdir_conf.flex_conf.nb_flexmasks = 1;
memset(&port->dev_conf.fdir_conf.flex_conf.flex_mask[i],
0, sizeof(struct rte_eth_fdir_flex_mask));
port->dev_conf.fdir_conf.flex_conf.nb_flexmasks = 1;
- (void)rte_memcpy(&port->dev_conf.fdir_conf.flex_conf.flex_mask[0],
+ rte_memcpy(&port->dev_conf.fdir_conf.flex_conf.flex_mask[0],
&flex_mask,
sizeof(struct rte_eth_fdir_flex_mask));
cmd_reconfig_device_queue(res->port_id, 1, 1);
&flex_mask,
sizeof(struct rte_eth_fdir_flex_mask));
cmd_reconfig_device_queue(res->port_id, 1, 1);
- (void)rte_memcpy(&flex_conf->flex_mask[idx],
+ rte_memcpy(&flex_conf->flex_mask[idx],
cfg,
sizeof(struct rte_eth_fdir_flex_mask));
}
cfg,
sizeof(struct rte_eth_fdir_flex_mask));
}
- (void)rte_memcpy(&flex_conf->flex_set[idx],
+ rte_memcpy(&flex_conf->flex_set[idx],
cfg,
sizeof(struct rte_eth_flex_payload_cfg));
cfg,
sizeof(struct rte_eth_flex_payload_cfg));
* Set the numbers of RX & TX queues to 0, so
* the RX & TX queues will not be setup.
*/
* Set the numbers of RX & TX queues to 0, so
* the RX & TX queues will not be setup.
*/
- (void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
+ rte_eth_dev_configure(pid, 0, 0, &port_conf);
rte_eth_dev_info_get(pid, &rte_port->dev_info);
rte_eth_dev_info_get(pid, &rte_port->dev_info);