#include "failsafe_private.h"
-static struct rte_eth_dev_info default_infos = {
- /* Max possible number of elements */
- .max_rx_pktlen = UINT32_MAX,
- .max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
- .max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
- .max_mac_addrs = FAILSAFE_MAX_ETHADDR,
- .max_hash_mac_addrs = UINT32_MAX,
- .max_vfs = UINT16_MAX,
- .max_vmdq_pools = UINT16_MAX,
- .rx_desc_lim = {
- .nb_max = UINT16_MAX,
- .nb_min = 0,
- .nb_align = 1,
- .nb_seg_max = UINT16_MAX,
- .nb_mtu_seg_max = UINT16_MAX,
- },
- .tx_desc_lim = {
- .nb_max = UINT16_MAX,
- .nb_min = 0,
- .nb_align = 1,
- .nb_seg_max = UINT16_MAX,
- .nb_mtu_seg_max = UINT16_MAX,
- },
- /*
- * Set of capabilities that can be verified upon
- * configuring a sub-device.
- */
- .rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_TCP_LRO |
- DEV_RX_OFFLOAD_QINQ_STRIP |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_MACSEC_STRIP |
- DEV_RX_OFFLOAD_HEADER_SPLIT |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_TIMESTAMP |
- DEV_RX_OFFLOAD_SECURITY,
- .rx_queue_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_TCP_LRO |
- DEV_RX_OFFLOAD_QINQ_STRIP |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_MACSEC_STRIP |
- DEV_RX_OFFLOAD_HEADER_SPLIT |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_TIMESTAMP |
- DEV_RX_OFFLOAD_SECURITY,
- .tx_offload_capa =
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO,
- .flow_type_rss_offloads =
- ETH_RSS_IP |
- ETH_RSS_UDP |
- ETH_RSS_TCP,
-};
-
static int
fs_dev_configure(struct rte_eth_dev *dev)
{

static void
fs_set_queues_state_start(struct rte_eth_dev *dev)
{
struct rxq *rxq;
+ struct txq *txq;
uint16_t i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- if (!rxq->info.conf.rx_deferred_start)
+ if (rxq != NULL && !rxq->info.conf.rx_deferred_start)
dev->data->rx_queue_state[i] =
RTE_ETH_QUEUE_STATE_STARTED;
}
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (txq != NULL && !txq->info.conf.tx_deferred_start)
+ dev->data->tx_queue_state[i] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ }
}

static void
fs_set_queues_state_stop(struct rte_eth_dev *dev)
{
uint16_t i;
for (i = 0; i < dev->data->nb_rx_queues; i++)
- dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+ if (dev->data->rx_queues[i] != NULL)
+ dev->data->rx_queue_state[i] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ if (dev->data->tx_queues[i] != NULL)
+ dev->data->tx_queue_state[i] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
}
return 0;
}
+static int
+fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+ int err = 0;
+ bool failure = true;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ uint16_t port_id = ETH(sdev)->data->port_id;
+
+ ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id);
+ ret = fs_err(sdev, ret);
+ if (ret) {
+ ERROR("Tx queue stop failed for subdevice %d", i);
+ err = ret;
+ } else {
+ failure = false;
+ }
+ }
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ fs_unlock(dev, 0);
+ /* Return 0 in case of at least one successful queue stop */
+ return (failure) ? err : 0;
+}
+
+static int
+fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ uint16_t port_id = ETH(sdev)->data->port_id;
+
+ ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id);
+ ret = fs_err(sdev, ret);
+ if (ret) {
+ ERROR("Tx queue start failed for subdevice %d", i);
+ fs_tx_queue_stop(dev, tx_queue_id);
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ }
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ fs_unlock(dev, 0);
+ return 0;
+}
+
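/*
 * Illustration only, not part of the patch: with deferred start an
 * application sets tx_deferred_start at queue setup time and starts the
 * queue explicitly once traffic is ready. A minimal sketch using standard
 * ethdev calls, assuming an already configured port `port_id`:
 *
 *     struct rte_eth_txconf txq_conf = { .tx_deferred_start = 1 };
 *
 *     rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &txq_conf);
 *     rte_eth_dev_start(port_id);              (queue 0 stays stopped)
 *     rte_eth_dev_tx_queue_start(port_id, 0);  (started on demand)
 */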
static void
fs_rx_queue_release(void *queue)
{
if (queue == NULL)
return;
rxq = queue;
- dev = rxq->priv->dev;
+ dev = &rte_eth_devices[rxq->priv->data->port_id];
fs_lock(dev, 0);
if (rxq->event_fd > 0)
close(rxq->event_fd);

static void
fs_tx_queue_release(void *queue)
{
if (queue == NULL)
return;
txq = queue;
- dev = txq->priv->dev;
+ dev = &rte_eth_devices[txq->priv->data->port_id];
fs_lock(dev, 0);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
if (ETH(sdev)->data->tx_queues != NULL &&

static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t tx_queue_id,
uint16_t nb_tx_desc,
unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
struct sub_device *sdev;
struct txq *txq;
uint8_t i;
int ret;
+ fs_lock(dev, 0);
if (tx_conf->tx_deferred_start) {
- ERROR("Tx queue deferred start is not supported");
- return -EINVAL;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
+ if (SUBOPS(sdev, tx_queue_start) == NULL) {
+ ERROR("Tx queue deferred start is not "
+ "supported for subdevice %d", i);
+ fs_unlock(dev, 0);
+ return -EINVAL;
+ }
+ }
}
-
- fs_lock(dev, 0);
txq = dev->data->tx_queues[tx_queue_id];
if (txq != NULL) {
fs_tx_queue_release(txq);
dev->data->nb_tx_queues = 0;
}
-static void
+static int
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
struct sub_device *sdev;
uint8_t i;
+ int ret = 0;
fs_lock(dev, 0);
- FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
- rte_eth_promiscuous_enable(PORT_ID(sdev));
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
+ ret = fs_err(sdev, ret);
+ if (ret != 0) {
+ ERROR("Promiscuous mode enable failed for subdevice %d",
+ PORT_ID(sdev));
+ break;
+ }
+ }
+ if (ret != 0) {
+ /* Rollback in the case of failure */
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
+ ret = fs_err(sdev, ret);
+ if (ret != 0)
+ ERROR("Promiscuous mode disable during rollback failed for subdevice %d",
+ PORT_ID(sdev));
+ }
+ }
fs_unlock(dev, 0);
+
+ return ret;
}
-static void
+static int
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
struct sub_device *sdev;
uint8_t i;
+ int ret = 0;
fs_lock(dev, 0);
- FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
- rte_eth_promiscuous_disable(PORT_ID(sdev));
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
+ ret = fs_err(sdev, ret);
+ if (ret != 0) {
+ ERROR("Promiscuous mode disable failed for subdevice %d",
+ PORT_ID(sdev));
+ break;
+ }
+ }
+ if (ret != 0) {
+ /* Rollback in the case of failure */
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
+ ret = fs_err(sdev, ret);
+ if (ret != 0)
+ ERROR("Promiscuous mode enable during rollback failed for subdevice %d",
+ PORT_ID(sdev));
+ }
+ }
fs_unlock(dev, 0);
+
+ return ret;
}
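/*
 * Illustration only: now that these callbacks return a status, a failure
 * on any sub-device propagates to the caller of the fail-safe port, e.g.
 * with a hypothetical port id fs_port_id:
 *
 *     if (rte_eth_promiscuous_enable(fs_port_id) != 0)
 *         (all sub-devices were rolled back to non-promiscuous)
 */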
return 0;
}
-static void
+static int
fs_stats_reset(struct rte_eth_dev *dev)
{
struct sub_device *sdev;
uint8_t i;
+ int ret;
fs_lock(dev, 0);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
- rte_eth_stats_reset(PORT_ID(sdev));
+ ret = rte_eth_stats_reset(PORT_ID(sdev));
+ if (ret) {
+ if (!fs_err(sdev, ret))
+ continue;
+
+ ERROR("Operation rte_eth_stats_reset failed for sub_device %d with error %d",
+ i, ret);
+ fs_unlock(dev, 0);
+ return ret;
+ }
memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
}
memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
fs_unlock(dev, 0);
+
+ return 0;
+}
+
+static void
+fs_dev_merge_desc_lim(struct rte_eth_desc_lim *to,
+ const struct rte_eth_desc_lim *from)
+{
+ to->nb_max = RTE_MIN(to->nb_max, from->nb_max);
+ to->nb_min = RTE_MAX(to->nb_min, from->nb_min);
+ to->nb_align = RTE_MAX(to->nb_align, from->nb_align);
+
+ to->nb_seg_max = RTE_MIN(to->nb_seg_max, from->nb_seg_max);
+ to->nb_mtu_seg_max = RTE_MIN(to->nb_mtu_seg_max, from->nb_mtu_seg_max);
+}
+
+/*
+ * Merge the information from sub-devices.
+ *
+ * The reported values must be the common subset of all sub-devices.
+ */
+static void
+fs_dev_merge_info(struct rte_eth_dev_info *info,
+ const struct rte_eth_dev_info *sinfo)
+{
+ info->max_rx_pktlen = RTE_MIN(info->max_rx_pktlen, sinfo->max_rx_pktlen);
+ info->max_rx_queues = RTE_MIN(info->max_rx_queues, sinfo->max_rx_queues);
+ info->max_tx_queues = RTE_MIN(info->max_tx_queues, sinfo->max_tx_queues);
+ info->max_mac_addrs = RTE_MIN(info->max_mac_addrs, sinfo->max_mac_addrs);
+ info->max_hash_mac_addrs = RTE_MIN(info->max_hash_mac_addrs,
+ sinfo->max_hash_mac_addrs);
+ info->max_vmdq_pools = RTE_MIN(info->max_vmdq_pools, sinfo->max_vmdq_pools);
+ info->max_vfs = RTE_MIN(info->max_vfs, sinfo->max_vfs);
+
+ fs_dev_merge_desc_lim(&info->rx_desc_lim, &sinfo->rx_desc_lim);
+ fs_dev_merge_desc_lim(&info->tx_desc_lim, &sinfo->tx_desc_lim);
+
+ info->rx_offload_capa &= sinfo->rx_offload_capa;
+ info->tx_offload_capa &= sinfo->tx_offload_capa;
+ info->rx_queue_offload_capa &= sinfo->rx_queue_offload_capa;
+ info->tx_queue_offload_capa &= sinfo->tx_queue_offload_capa;
+ info->flow_type_rss_offloads &= sinfo->flow_type_rss_offloads;
}
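/*
 * Worked example, illustration only: two sub-devices reporting
 * rx_desc_lim.nb_max of 4096 and 2048 merge to 2048 (RTE_MIN), while
 * nb_align values of 8 and 16 merge to 16 (RTE_MAX), so the strictest
 * limit always wins. Offload capabilities are ANDed: a bit such as
 * DEV_RX_OFFLOAD_TCP_LRO survives only if every sub-device advertises it.
 */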
/**
 * Uses a logical AND of RX capabilities among
 * all sub_devices and the default capabilities.
* Uses a logical AND of TX capabilities among
* the active probed sub_device and the default capabilities.
+ * Uses a logical AND of device capabilities among
+ * all sub_devices and the default capabilities.
*
*/
-static void
+static int
fs_dev_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *infos)
{
struct sub_device *sdev;
uint8_t i;
+ int ret;
- sdev = TX_SUBDEV(dev);
- if (sdev == NULL) {
- DEBUG("No probed device, using default infos");
- rte_memcpy(&PRIV(dev)->infos, &default_infos,
- sizeof(default_infos));
- } else {
- uint64_t rx_offload_capa;
- uint64_t rxq_offload_capa;
- uint64_t rss_hf_offload_capa;
-
- rx_offload_capa = default_infos.rx_offload_capa;
- rxq_offload_capa = default_infos.rx_queue_offload_capa;
- rss_hf_offload_capa = default_infos.flow_type_rss_offloads;
- FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
- rte_eth_dev_info_get(PORT_ID(sdev),
- &PRIV(dev)->infos);
- rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
- rxq_offload_capa &=
- PRIV(dev)->infos.rx_queue_offload_capa;
- rss_hf_offload_capa &=
- PRIV(dev)->infos.flow_type_rss_offloads;
- }
- sdev = TX_SUBDEV(dev);
- rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
- PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
- PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa;
- PRIV(dev)->infos.flow_type_rss_offloads = rss_hf_offload_capa;
- PRIV(dev)->infos.tx_offload_capa &=
- default_infos.tx_offload_capa;
- PRIV(dev)->infos.tx_queue_offload_capa &=
- default_infos.tx_queue_offload_capa;
+ /* Use maximum upper bounds by default */
+ infos->max_rx_pktlen = UINT32_MAX;
+ infos->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
+ infos->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
+ infos->max_mac_addrs = FAILSAFE_MAX_ETHADDR;
+ infos->max_hash_mac_addrs = UINT32_MAX;
+ infos->max_vfs = UINT16_MAX;
+ infos->max_vmdq_pools = UINT16_MAX;
+
+ /*
+ * Set of capabilities that can be verified upon
+ * configuring a sub-device.
+ */
+ infos->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_LRO |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_MACSEC_STRIP |
+ DEV_RX_OFFLOAD_HEADER_SPLIT |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_TIMESTAMP |
+ DEV_RX_OFFLOAD_SECURITY;
+
+ infos->rx_queue_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_LRO |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_MACSEC_STRIP |
+ DEV_RX_OFFLOAD_HEADER_SPLIT |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_TIMESTAMP |
+ DEV_RX_OFFLOAD_SECURITY;
+
+ infos->tx_offload_capa =
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+
+ infos->flow_type_rss_offloads =
+ ETH_RSS_IP |
+ ETH_RSS_UDP |
+ ETH_RSS_TCP;
+ infos->dev_capa =
+ RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
+ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
+ struct rte_eth_dev_info sub_info;
+
+ ret = rte_eth_dev_info_get(PORT_ID(sdev), &sub_info);
+ ret = fs_err(sdev, ret);
+ if (ret != 0)
+ return ret;
+
+ fs_dev_merge_info(infos, &sub_info);
}
- rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
+
+ return 0;
}
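/*
 * Illustration only: an application querying the fail-safe port now sees
 * the merged worst-case values, e.g. with a valid port id port_id:
 *
 *     struct rte_eth_dev_info info;
 *
 *     if (rte_eth_dev_info_get(port_id, &info) == 0 &&
 *         (info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) != 0)
 *         (TSO is offered by every probed sub-device)
 */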
static int
fs_mac_addr_add(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr,
+ struct rte_ether_addr *mac_addr,
uint32_t index,
uint32_t vmdq)
{
}
static int
-fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+fs_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
struct sub_device *sdev;
uint8_t i;
return 0;
}
+static int
+fs_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+ void *mcast_addrs;
+
+ fs_lock(dev, 0);
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
+ mc_addr_set, nb_mc_addr);
+ if (ret != 0) {
+ ERROR("Operation rte_eth_dev_set_mc_addr_list failed for sub_device %d with error %d",
+ i, ret);
+ goto rollback;
+ }
+ }
+
+ mcast_addrs = rte_realloc(PRIV(dev)->mcast_addrs,
+ nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]), 0);
+ if (mcast_addrs == NULL && nb_mc_addr > 0) {
+ ret = -ENOMEM;
+ goto rollback;
+ }
+ rte_memcpy(mcast_addrs, mc_addr_set,
+ nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]));
+ PRIV(dev)->nb_mcast_addr = nb_mc_addr;
+ PRIV(dev)->mcast_addrs = mcast_addrs;
+
+ fs_unlock(dev, 0);
+ return 0;
+
+rollback:
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ int rc = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
+ PRIV(dev)->mcast_addrs, PRIV(dev)->nb_mcast_addr);
+ if (rc != 0) {
+ ERROR("Multicast MAC address list rollback for sub_device %d failed with error %d",
+ i, rc);
+ }
+ }
+
+ fs_unlock(dev, 0);
+ return ret;
+}
+
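/*
 * Illustration only: the copy kept in PRIV(dev)->mcast_addrs is what makes
 * the rollback above possible. A call such as (hypothetical fs_port_id)
 *
 *     struct rte_ether_addr mc[2] = { ... };
 *
 *     rte_eth_dev_set_mc_addr_list(fs_port_id, mc, 2);
 *
 * either applies the list to every active sub-device or leaves all of them
 * with the previously stored list.
 */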
static int
fs_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
}
static int
-fs_filter_ctrl(struct rte_eth_dev *dev,
+fs_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
enum rte_filter_type type,
enum rte_filter_op op,
void *arg)
{
- struct sub_device *sdev;
- uint8_t i;
- int ret;
-
if (type == RTE_ETH_FILTER_GENERIC &&
op == RTE_ETH_FILTER_GET) {
*(const void **)arg = &fs_flow_ops;
return 0;
}
- fs_lock(dev, 0);
- FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
- DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
- ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
- if ((ret = fs_err(sdev, ret))) {
- ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
- " with error %d", i, ret);
- fs_unlock(dev, 0);
- return ret;
- }
- }
- fs_unlock(dev, 0);
- return 0;
+ return -ENOTSUP;
}
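/*
 * Illustration only: the one remaining use of this callback is the generic
 * flow ops lookup that the ethdev layer performs on behalf of rte_flow,
 * roughly:
 *
 *     const struct rte_flow_ops *ops;
 *
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *                             RTE_ETH_FILTER_GET, &ops);
 *
 * Any legacy filter type now gets -ENOTSUP.
 */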
const struct eth_dev_ops failsafe_ops = {
.vlan_filter_set = fs_vlan_filter_set,
.rx_queue_start = fs_rx_queue_start,
.rx_queue_stop = fs_rx_queue_stop,
+ .tx_queue_start = fs_tx_queue_start,
+ .tx_queue_stop = fs_tx_queue_stop,
.rx_queue_setup = fs_rx_queue_setup,
.tx_queue_setup = fs_tx_queue_setup,
.rx_queue_release = fs_rx_queue_release,
.mac_addr_remove = fs_mac_addr_remove,
.mac_addr_add = fs_mac_addr_add,
.mac_addr_set = fs_mac_addr_set,
+ .set_mc_addr_list = fs_set_mc_addr_list,
.rss_hash_update = fs_rss_hash_update,
.filter_ctrl = fs_filter_ctrl,
};