X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Ffailsafe%2Ffailsafe_ether.c;h=2b748bd8b426f78afeeb4fa7d07ca3361df2fde6;hb=1af745211344622b50ffd7d67a618dcdce85fef8;hp=643f3d6959967f54d9d9767e431d87464d2ce957;hpb=fac0ae546e5f205bf08f9f0f530ccd0c28271c6e;p=dpdk.git

diff --git a/drivers/net/failsafe/failsafe_ether.c b/drivers/net/failsafe/failsafe_ether.c
index 643f3d6959..2b748bd8b4 100644
--- a/drivers/net/failsafe/failsafe_ether.c
+++ b/drivers/net/failsafe/failsafe_ether.c
@@ -1,34 +1,6 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright 2017 6WIND S.A.
- *   Copyright 2017 Mellanox.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of 6WIND S.A. nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
  */
 
 #include
@@ -154,9 +126,13 @@ fs_eth_dev_conf_apply(struct rte_eth_dev *dev,
 	if (dev->data->promiscuous != edev->data->promiscuous) {
 		DEBUG("Configuring promiscuous");
 		if (dev->data->promiscuous)
-			rte_eth_promiscuous_enable(PORT_ID(sdev));
+			ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
 		else
-			rte_eth_promiscuous_disable(PORT_ID(sdev));
+			ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
+		if (ret != 0) {
+			ERROR("Failed to apply promiscuous mode");
+			return ret;
+		}
 	} else {
 		DEBUG("promiscuous already set");
 	}
@@ -164,9 +140,13 @@ fs_eth_dev_conf_apply(struct rte_eth_dev *dev,
 	if (dev->data->all_multicast != edev->data->all_multicast) {
 		DEBUG("Configuring all_multicast");
 		if (dev->data->all_multicast)
-			rte_eth_allmulticast_enable(PORT_ID(sdev));
+			ret = rte_eth_allmulticast_enable(PORT_ID(sdev));
 		else
-			rte_eth_allmulticast_disable(PORT_ID(sdev));
+			ret = rte_eth_allmulticast_disable(PORT_ID(sdev));
+		if (ret != 0) {
+			ERROR("Failed to apply allmulticast mode");
+			return ret;
+		}
 	} else {
 		DEBUG("all_multicast already set");
 	}
@@ -194,19 +174,37 @@ fs_eth_dev_conf_apply(struct rte_eth_dev *dev,
 		DEBUG("Configure additional MAC address%s",
 			(PRIV(dev)->nb_mac_addr > 2 ? "es" : ""));
 	for (i = 1; i < PRIV(dev)->nb_mac_addr; i++) {
-		struct ether_addr *ea;
+		struct rte_ether_addr *ea;
 
 		ea = &dev->data->mac_addrs[i];
 		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), ea,
 				PRIV(dev)->mac_addr_pool[i]);
 		if (ret) {
-			char ea_fmt[ETHER_ADDR_FMT_SIZE];
+			char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
 
-			ether_format_addr(ea_fmt, ETHER_ADDR_FMT_SIZE, ea);
+			rte_ether_format_addr(ea_fmt,
+					RTE_ETHER_ADDR_FMT_SIZE, ea);
 			ERROR("Adding MAC address %s failed", ea_fmt);
 			return ret;
 		}
 	}
+	/*
+	 * Propagate multicast MAC addresses to sub-devices,
+	 * if non zero number of addresses is set.
+	 * The condition is required to avoid breakage of failsafe
+	 * for sub-devices which do not support the operation
+	 * if the feature is really not used.
+	 */
+	if (PRIV(dev)->nb_mcast_addr > 0) {
+		DEBUG("Configuring multicast MAC addresses");
+		ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
+						   PRIV(dev)->mcast_addrs,
+						   PRIV(dev)->nb_mcast_addr);
+		if (ret) {
+			ERROR("Failed to apply multicast MAC addresses");
+			return ret;
+		}
+	}
 	/* VLAN filter */
 	vfc1 = &dev->data->vlan_filter_conf;
 	vfc2 = &edev->data->vlan_filter_conf;
@@ -258,9 +256,9 @@ fs_eth_dev_conf_apply(struct rte_eth_dev *dev,
 		DEBUG("Creating flow #%" PRIu32, i++);
 		flow->flows[SUB_ID(sdev)] =
 			rte_flow_create(PORT_ID(sdev),
-					&flow->fd->attr,
-					flow->fd->items,
-					flow->fd->actions,
+					flow->rule.attr,
+					flow->rule.pattern,
+					flow->rule.actions,
 					&ferror);
 		ret = rte_errno;
 		if (ret)
@@ -288,13 +286,13 @@ fs_dev_remove(struct sub_device *sdev)
 		sdev->state = DEV_ACTIVE;
 		/* fallthrough */
 	case DEV_ACTIVE:
+		failsafe_eth_dev_unregister_callbacks(sdev);
 		rte_eth_dev_close(PORT_ID(sdev));
 		sdev->state = DEV_PROBED;
 		/* fallthrough */
 	case DEV_PROBED:
-		ret = rte_eal_hotplug_remove(sdev->bus->name,
-					sdev->dev->name);
-		if (ret) {
+		ret = rte_dev_remove(sdev->dev);
+		if (ret < 0) {
 			ERROR("Bus detach failed for sub_device %u",
 			      SUB_ID(sdev));
 		} else {
@@ -305,10 +303,12 @@ fs_dev_remove(struct sub_device *sdev)
 	case DEV_PARSED:
 	case DEV_UNDEFINED:
 		sdev->state = DEV_UNDEFINED;
+		sdev->sdev_port_id = RTE_MAX_ETHPORTS;
 		/* the end */
 		break;
 	}
-	failsafe_hotplug_alarm_install(sdev->fs_dev);
+	sdev->remove = 0;
+	failsafe_hotplug_alarm_install(fs_dev(sdev));
 }
 
 static void
@@ -328,8 +328,9 @@ fs_dev_stats_save(struct sub_device *sdev)
 		WARN("Using latest snapshot taken before %"PRIu64" seconds.\n",
 		     (rte_rdtsc() - timestamp) / rte_get_tsc_hz());
 	}
-	failsafe_stats_increment(&PRIV(sdev->fs_dev)->stats_accumulator,
-			err ? &sdev->stats_snapshot.stats : &stats);
+	failsafe_stats_increment
+		(&PRIV(fs_dev(sdev))->stats_accumulator,
+		err ? &sdev->stats_snapshot.stats : &stats);
 	memset(&sdev->stats_snapshot, 0, sizeof(sdev->stats_snapshot));
 }
 
@@ -347,6 +348,35 @@ fs_rxtx_clean(struct sub_device *sdev)
 	return 1;
 }
 
+void
+failsafe_eth_dev_unregister_callbacks(struct sub_device *sdev)
+{
+	int ret;
+
+	if (sdev == NULL)
+		return;
+	if (sdev->rmv_callback) {
+		ret = rte_eth_dev_callback_unregister(PORT_ID(sdev),
+						RTE_ETH_EVENT_INTR_RMV,
+						failsafe_eth_rmv_event_callback,
+						sdev);
+		if (ret)
+			WARN("Failed to unregister RMV callback for sub_device"
+			     " %d", SUB_ID(sdev));
+		sdev->rmv_callback = 0;
+	}
+	if (sdev->lsc_callback) {
+		ret = rte_eth_dev_callback_unregister(PORT_ID(sdev),
+						RTE_ETH_EVENT_INTR_LSC,
+						failsafe_eth_lsc_event_callback,
+						sdev);
+		if (ret)
+			WARN("Failed to unregister LSC callback for sub_device"
+			     " %d", SUB_ID(sdev));
+		sdev->lsc_callback = 0;
+	}
+}
+
 void
 failsafe_dev_remove(struct rte_eth_dev *dev)
 {
@@ -355,9 +385,94 @@ failsafe_dev_remove(struct rte_eth_dev *dev)
 
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
 		if (sdev->remove && fs_rxtx_clean(sdev)) {
+			if (fs_lock(dev, 1) != 0)
+				return;
 			fs_dev_stats_save(sdev);
 			fs_dev_remove(sdev);
+			fs_unlock(dev, 1);
+		}
+}
+
+static int
+failsafe_eth_dev_rx_queues_sync(struct rte_eth_dev *dev)
+{
+	struct rxq *rxq;
+	int ret;
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+
+		if (rxq->info.conf.rx_deferred_start &&
+		    dev->data->rx_queue_state[i] ==
+						RTE_ETH_QUEUE_STATE_STARTED) {
+			/*
+			 * The subdevice Rx queue does not launch on device
+			 * start if deferred start flag is set. It needs to be
+			 * started manually in case an appropriate failsafe Rx
+			 * queue has been started earlier.
+			 */
+			ret = dev->dev_ops->rx_queue_start(dev, i);
+			if (ret) {
+				ERROR("Could not synchronize Rx queue %d", i);
+				return ret;
+			}
+		} else if (dev->data->rx_queue_state[i] ==
+					RTE_ETH_QUEUE_STATE_STOPPED) {
+			/*
+			 * The subdevice Rx queue needs to be stopped manually
+			 * in case an appropriate failsafe Rx queue has been
+			 * stopped earlier.
+			 */
+			ret = dev->dev_ops->rx_queue_stop(dev, i);
+			if (ret) {
+				ERROR("Could not synchronize Rx queue %d", i);
+				return ret;
+			}
 		}
+	}
+	return 0;
+}
+
+static int
+failsafe_eth_dev_tx_queues_sync(struct rte_eth_dev *dev)
+{
+	struct txq *txq;
+	int ret;
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+
+		if (txq->info.conf.tx_deferred_start &&
+		    dev->data->tx_queue_state[i] ==
+						RTE_ETH_QUEUE_STATE_STARTED) {
+			/*
+			 * The subdevice Tx queue does not launch on device
+			 * start if deferred start flag is set. It needs to be
+			 * started manually in case an appropriate failsafe Tx
+			 * queue has been started earlier.
+			 */
+			ret = dev->dev_ops->tx_queue_start(dev, i);
+			if (ret) {
+				ERROR("Could not synchronize Tx queue %d", i);
+				return ret;
+			}
+		} else if (dev->data->tx_queue_state[i] ==
+					RTE_ETH_QUEUE_STATE_STOPPED) {
+			/*
+			 * The subdevice Tx queue needs to be stopped manually
+			 * in case an appropriate failsafe Tx queue has been
+			 * stopped earlier.
+			 */
+			ret = dev->dev_ops->tx_queue_stop(dev, i);
+			if (ret) {
+				ERROR("Could not synchronize Tx queue %d", i);
+				return ret;
+			}
+		}
+	}
+	return 0;
 }
 
 int
@@ -416,6 +531,12 @@ failsafe_eth_dev_state_sync(struct rte_eth_dev *dev)
 	if (PRIV(dev)->state < DEV_STARTED)
 		return 0;
 	ret = dev->dev_ops->dev_start(dev);
+	if (ret)
+		goto err_remove;
+	ret = failsafe_eth_dev_rx_queues_sync(dev);
+	if (ret)
+		goto err_remove;
+	ret = failsafe_eth_dev_tx_queues_sync(dev);
 	if (ret)
 		goto err_remove;
 	return 0;
@@ -456,15 +577,17 @@ failsafe_eth_rmv_event_callback(uint16_t port_id __rte_unused,
 {
 	struct sub_device *sdev = cb_arg;
 
+	fs_lock(fs_dev(sdev), 0);
 	/* Switch as soon as possible tx_dev. */
-	fs_switch_dev(sdev->fs_dev, sdev);
+	fs_switch_dev(fs_dev(sdev), sdev);
 	/* Use safe bursts in any case. */
-	set_burst_fn(sdev->fs_dev, 1);
+	failsafe_set_burst_fn(fs_dev(sdev), 1);
 	/*
 	 * Async removal, the sub-PMD will try to unregister
 	 * the callback at the source of the current thread context.
 	 */
 	sdev->remove = 1;
+	fs_unlock(fs_dev(sdev), 0);
 	return 0;
 }
 
@@ -485,3 +608,31 @@ failsafe_eth_lsc_event_callback(uint16_t port_id __rte_unused,
 	else
 		return 0;
 }
+
+/* Take sub-device ownership before it becomes exposed to the application. */
+int
+failsafe_eth_new_event_callback(uint16_t port_id,
+				enum rte_eth_event_type event __rte_unused,
+				void *cb_arg, void *out __rte_unused)
+{
+	struct rte_eth_dev *fs_dev = cb_arg;
+	struct sub_device *sdev;
+	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+	uint8_t i;
+
+	FOREACH_SUBDEV_STATE(sdev, i, fs_dev, DEV_PARSED) {
+		if (sdev->state >= DEV_PROBED)
+			continue;
+		if (dev->device == NULL) {
+			WARN("Trying to probe malformed device %s.\n",
+			     sdev->devargs.name);
+			continue;
+		}
+		if (strcmp(sdev->devargs.name, dev->device->name) != 0)
+			continue;
+		rte_eth_dev_owner_set(port_id, &PRIV(fs_dev)->my_owner);
+		/* The actual owner will be checked after the port probing. */
+		break;
+	}
+	return 0;
+}
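The first two hunks in fs_eth_dev_conf_apply() adapt to rte_eth_promiscuous_enable()/_disable() and rte_eth_allmulticast_enable()/_disable() returning an int status instead of void. A minimal application-side sketch of the same check-the-return-value pattern is shown below; the apply_promiscuous() helper name, the port id and the printf() logging are illustrative, not part of the patch.

#include <stdint.h>
#include <stdio.h>

#include <rte_ethdev.h>

/* Apply promiscuous mode and check the status code, as the hunks above do. */
static int
apply_promiscuous(uint16_t port_id, int enable)
{
	int ret;

	if (enable)
		ret = rte_eth_promiscuous_enable(port_id);
	else
		ret = rte_eth_promiscuous_disable(port_id);
	if (ret != 0)
		printf("port %u: promiscuous %s failed: %d\n",
		       port_id, enable ? "enable" : "disable", ret);
	return ret;
}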
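The new failsafe_eth_dev_unregister_callbacks() helper tears down the RMV and LSC callbacks registered on a sub-device before the port is closed in fs_dev_remove(). A hedged sketch of the matching register/unregister pairing through the public ethdev API follows; the rmv_event_handler() body, the track_removal() wrapper and the ctx argument are placeholders, while rte_eth_dev_callback_register()/_unregister() and RTE_ETH_EVENT_INTR_RMV come from the patch.

#include <stdint.h>

#include <rte_ethdev.h>

/* Placeholder handler with the rte_eth_dev_cb_fn signature. */
static int
rmv_event_handler(uint16_t port_id, enum rte_eth_event_type event,
		  void *cb_arg, void *ret_param)
{
	(void)port_id;
	(void)event;
	(void)cb_arg;
	(void)ret_param;
	/* The failsafe PMD only flags the sub-device for async removal here. */
	return 0;
}

static int
track_removal(uint16_t port_id, void *ctx)
{
	int ret;

	ret = rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RMV,
					    rmv_event_handler, ctx);
	if (ret != 0)
		return ret;
	/* ...and later, before closing the port, undo the registration. */
	return rte_eth_dev_callback_unregister(port_id, RTE_ETH_EVENT_INTR_RMV,
					       rmv_event_handler, ctx);
}

Unregistering before rte_eth_dev_close(), as the patch does in fs_dev_remove(), avoids the callback firing against a port that is already being released.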
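failsafe_eth_dev_rx_queues_sync() and failsafe_eth_dev_tx_queues_sync() exist because a queue configured with the deferred-start flag is skipped by device start, so a sub-device plugged in later can end up with a different queue state than the fail-safe port. The sketch below shows the application-level counterpart of that behaviour, assuming a port that is otherwise fully configured; the helper names, descriptor count and mempool argument are illustrative.

#include <stdint.h>

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Configure one Rx queue so that rte_eth_dev_start() leaves it stopped. */
static int
setup_deferred_rx_queue(uint16_t port_id, uint16_t queue_id,
			uint16_t nb_desc, struct rte_mempool *mp)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rx_conf;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;
	rx_conf = dev_info.default_rxconf;
	rx_conf.rx_deferred_start = 1;
	return rte_eth_rx_queue_setup(port_id, queue_id, nb_desc,
				      rte_eth_dev_socket_id(port_id),
				      &rx_conf, mp);
}

/* After rte_eth_dev_start(), a deferred queue must be started explicitly. */
static int
start_deferred_rx_queue(uint16_t port_id, uint16_t queue_id)
{
	return rte_eth_dev_rx_queue_start(port_id, queue_id);
}

The driver-internal loops above do the equivalent through the sub-device's rx_queue_start/rx_queue_stop and tx_queue_start/tx_queue_stop ops, so the sub-device ends up in the same per-queue state as the fail-safe port.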
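failsafe_eth_new_event_callback() claims ownership of a sub-device port before applications can iterate over it. The following sketch reduces that idea to the public ownership API under stated assumptions: the claim_port() helper and the "example-owner" name are made up for the example, and a fresh owner id is allocated here, whereas the patch reuses the driver's pre-allocated PRIV(fs_dev)->my_owner with rte_eth_dev_owner_set().

#include <stdint.h>
#include <string.h>

#include <rte_ethdev.h>
#include <rte_string_fns.h>

/* Claim a port so that it is hidden from default RTE_ETH_FOREACH_DEV() iteration. */
static int
claim_port(uint16_t port_id)
{
	struct rte_eth_dev_owner owner;
	int ret;

	memset(&owner, 0, sizeof(owner));
	ret = rte_eth_dev_owner_new(&owner.id);
	if (ret != 0)
		return ret;
	rte_strlcpy(owner.name, "example-owner", sizeof(owner.name));
	return rte_eth_dev_owner_set(port_id, &owner);
}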