X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Ffailsafe%2Ffailsafe_ether.c;h=93deacd1341d5f27d4535fdd43a7b9746d7ab347;hb=23bdcedcd8caa0d268b615df3bdb08411f97856e;hp=4c6e938d653ed54634887649aa2d7db7e022ad14;hpb=009c327c886432018e23ecb88c25513d69a73661;p=dpdk.git

diff --git a/drivers/net/failsafe/failsafe_ether.c b/drivers/net/failsafe/failsafe_ether.c
index 4c6e938d65..93deacd134 100644
--- a/drivers/net/failsafe/failsafe_ether.c
+++ b/drivers/net/failsafe/failsafe_ether.c
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
  */
 
 #include <unistd.h>
@@ -126,9 +126,13 @@ fs_eth_dev_conf_apply(struct rte_eth_dev *dev,
 	if (dev->data->promiscuous != edev->data->promiscuous) {
 		DEBUG("Configuring promiscuous");
 		if (dev->data->promiscuous)
-			rte_eth_promiscuous_enable(PORT_ID(sdev));
+			ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
 		else
-			rte_eth_promiscuous_disable(PORT_ID(sdev));
+			ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
+		if (ret != 0) {
+			ERROR("Failed to apply promiscuous mode");
+			return ret;
+		}
 	} else {
 		DEBUG("promiscuous already set");
 	}
@@ -136,9 +140,13 @@ fs_eth_dev_conf_apply(struct rte_eth_dev *dev,
 	if (dev->data->all_multicast != edev->data->all_multicast) {
 		DEBUG("Configuring all_multicast");
 		if (dev->data->all_multicast)
-			rte_eth_allmulticast_enable(PORT_ID(sdev));
+			ret = rte_eth_allmulticast_enable(PORT_ID(sdev));
 		else
-			rte_eth_allmulticast_disable(PORT_ID(sdev));
+			ret = rte_eth_allmulticast_disable(PORT_ID(sdev));
+		if (ret != 0) {
+			ERROR("Failed to apply allmulticast mode");
+			return ret;
+		}
 	} else {
 		DEBUG("all_multicast already set");
 	}
@@ -166,19 +174,37 @@ fs_eth_dev_conf_apply(struct rte_eth_dev *dev,
 		DEBUG("Configure additional MAC address%s",
 			(PRIV(dev)->nb_mac_addr > 2 ? "es" : ""));
 	for (i = 1; i < PRIV(dev)->nb_mac_addr; i++) {
-		struct ether_addr *ea;
+		struct rte_ether_addr *ea;
 
 		ea = &dev->data->mac_addrs[i];
 		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), ea,
 				PRIV(dev)->mac_addr_pool[i]);
 		if (ret) {
-			char ea_fmt[ETHER_ADDR_FMT_SIZE];
+			char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
 
-			ether_format_addr(ea_fmt, ETHER_ADDR_FMT_SIZE, ea);
+			rte_ether_format_addr(ea_fmt,
+					RTE_ETHER_ADDR_FMT_SIZE, ea);
 			ERROR("Adding MAC address %s failed", ea_fmt);
 			return ret;
 		}
 	}
+	/*
+	 * Propagate multicast MAC addresses to sub-devices,
+	 * if non zero number of addresses is set.
+	 * The condition is required to avoid breakage of failsafe
+	 * for sub-devices which do not support the operation
+	 * if the feature is really not used.
+	 */
+	if (PRIV(dev)->nb_mcast_addr > 0) {
+		DEBUG("Configuring multicast MAC addresses");
+		ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
+						   PRIV(dev)->mcast_addrs,
+						   PRIV(dev)->nb_mcast_addr);
+		if (ret) {
+			ERROR("Failed to apply multicast MAC addresses");
+			return ret;
+		}
+	}
 	/* VLAN filter */
 	vfc1 = &dev->data->vlan_filter_conf;
 	vfc2 = &edev->data->vlan_filter_conf;
@@ -230,9 +256,9 @@ fs_eth_dev_conf_apply(struct rte_eth_dev *dev,
 		DEBUG("Creating flow #%" PRIu32, i++);
 		flow->flows[SUB_ID(sdev)] =
 			rte_flow_create(PORT_ID(sdev),
-					&flow->fd->attr,
-					flow->fd->items,
-					flow->fd->actions,
+					flow->rule.attr,
+					flow->rule.pattern,
+					flow->rule.actions,
 					&ferror);
 		ret = rte_errno;
 		if (ret)
@@ -260,13 +286,13 @@ fs_dev_remove(struct sub_device *sdev)
 		sdev->state = DEV_ACTIVE;
 		/* fallthrough */
 	case DEV_ACTIVE:
+		failsafe_eth_dev_unregister_callbacks(sdev);
 		rte_eth_dev_close(PORT_ID(sdev));
 		sdev->state = DEV_PROBED;
 		/* fallthrough */
 	case DEV_PROBED:
-		ret = rte_eal_hotplug_remove(sdev->bus->name,
-					     sdev->dev->name);
-		if (ret) {
+		ret = rte_dev_remove(sdev->dev);
+		if (ret < 0) {
 			ERROR("Bus detach failed for sub_device %u",
 			      SUB_ID(sdev));
 		} else {
@@ -277,10 +303,12 @@ fs_dev_remove(struct sub_device *sdev)
 	case DEV_PARSED:
 	case DEV_UNDEFINED:
 		sdev->state = DEV_UNDEFINED;
+		sdev->sdev_port_id = RTE_MAX_ETHPORTS;
 		/* the end */
 		break;
 	}
-	failsafe_hotplug_alarm_install(sdev->fs_dev);
+	sdev->remove = 0;
+	failsafe_hotplug_alarm_install(fs_dev(sdev));
 }
 
 static void
@@ -300,8 +328,9 @@ fs_dev_stats_save(struct sub_device *sdev)
 			WARN("Using latest snapshot taken before %"PRIu64" seconds.\n",
 				(rte_rdtsc() - timestamp) / rte_get_tsc_hz());
 	}
-	failsafe_stats_increment(&PRIV(sdev->fs_dev)->stats_accumulator,
-			err ? &sdev->stats_snapshot.stats : &stats);
+	failsafe_stats_increment
+		(&PRIV(fs_dev(sdev))->stats_accumulator,
+		err ? &sdev->stats_snapshot.stats : &stats);
 	memset(&sdev->stats_snapshot, 0, sizeof(sdev->stats_snapshot));
 }
 
@@ -319,6 +348,35 @@ fs_rxtx_clean(struct sub_device *sdev)
 	return 1;
 }
 
+void
+failsafe_eth_dev_unregister_callbacks(struct sub_device *sdev)
+{
+	int ret;
+
+	if (sdev == NULL)
+		return;
+	if (sdev->rmv_callback) {
+		ret = rte_eth_dev_callback_unregister(PORT_ID(sdev),
+						RTE_ETH_EVENT_INTR_RMV,
+						failsafe_eth_rmv_event_callback,
+						sdev);
+		if (ret)
+			WARN("Failed to unregister RMV callback for sub_device"
+			     " %d", SUB_ID(sdev));
+		sdev->rmv_callback = 0;
+	}
+	if (sdev->lsc_callback) {
+		ret = rte_eth_dev_callback_unregister(PORT_ID(sdev),
+						RTE_ETH_EVENT_INTR_LSC,
+						failsafe_eth_lsc_event_callback,
+						sdev);
+		if (ret)
+			WARN("Failed to unregister LSC callback for sub_device"
+			     " %d", SUB_ID(sdev));
+		sdev->lsc_callback = 0;
+	}
+}
+
 void
 failsafe_dev_remove(struct rte_eth_dev *dev)
 {
@@ -327,11 +385,96 @@ failsafe_dev_remove(struct rte_eth_dev *dev)
 
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
 		if (sdev->remove && fs_rxtx_clean(sdev)) {
+			if (fs_lock(dev, 1) != 0)
+				return;
 			fs_dev_stats_save(sdev);
 			fs_dev_remove(sdev);
+			fs_unlock(dev, 1);
 		}
 }
 
+static int
+failsafe_eth_dev_rx_queues_sync(struct rte_eth_dev *dev)
+{
+	struct rxq *rxq;
+	int ret;
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+
+		if (rxq->info.conf.rx_deferred_start &&
+		    dev->data->rx_queue_state[i] ==
+						RTE_ETH_QUEUE_STATE_STARTED) {
+			/*
+			 * The subdevice Rx queue does not launch on device
+			 * start if deferred start flag is set. It needs to be
+			 * started manually in case an appropriate failsafe Rx
+			 * queue has been started earlier.
+			 */
+			ret = dev->dev_ops->rx_queue_start(dev, i);
+			if (ret) {
+				ERROR("Could not synchronize Rx queue %d", i);
+				return ret;
+			}
+		} else if (dev->data->rx_queue_state[i] ==
+						RTE_ETH_QUEUE_STATE_STOPPED) {
+			/*
+			 * The subdevice Rx queue needs to be stopped manually
+			 * in case an appropriate failsafe Rx queue has been
+			 * stopped earlier.
+			 */
+			ret = dev->dev_ops->rx_queue_stop(dev, i);
+			if (ret) {
+				ERROR("Could not synchronize Rx queue %d", i);
+				return ret;
+			}
+		}
+	}
+	return 0;
+}
+
+static int
+failsafe_eth_dev_tx_queues_sync(struct rte_eth_dev *dev)
+{
+	struct txq *txq;
+	int ret;
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+
+		if (txq->info.conf.tx_deferred_start &&
+		    dev->data->tx_queue_state[i] ==
+						RTE_ETH_QUEUE_STATE_STARTED) {
+			/*
+			 * The subdevice Tx queue does not launch on device
+			 * start if deferred start flag is set. It needs to be
+			 * started manually in case an appropriate failsafe Tx
+			 * queue has been started earlier.
+			 */
+			ret = dev->dev_ops->tx_queue_start(dev, i);
+			if (ret) {
+				ERROR("Could not synchronize Tx queue %d", i);
+				return ret;
+			}
+		} else if (dev->data->tx_queue_state[i] ==
+						RTE_ETH_QUEUE_STATE_STOPPED) {
+			/*
+			 * The subdevice Tx queue needs to be stopped manually
+			 * in case an appropriate failsafe Tx queue has been
+			 * stopped earlier.
+			 */
+			ret = dev->dev_ops->tx_queue_stop(dev, i);
+			if (ret) {
+				ERROR("Could not synchronize Tx queue %d", i);
+				return ret;
+			}
+		}
+	}
+	return 0;
+}
+
 int
 failsafe_eth_dev_state_sync(struct rte_eth_dev *dev)
 {
@@ -388,6 +531,12 @@ failsafe_eth_dev_state_sync(struct rte_eth_dev *dev)
 	if (PRIV(dev)->state < DEV_STARTED)
 		return 0;
 	ret = dev->dev_ops->dev_start(dev);
+	if (ret)
+		goto err_remove;
+	ret = failsafe_eth_dev_rx_queues_sync(dev);
+	if (ret)
+		goto err_remove;
+	ret = failsafe_eth_dev_tx_queues_sync(dev);
 	if (ret)
 		goto err_remove;
 	return 0;
@@ -428,15 +577,17 @@ failsafe_eth_rmv_event_callback(uint16_t port_id __rte_unused,
 {
 	struct sub_device *sdev = cb_arg;
 
+	fs_lock(fs_dev(sdev), 0);
 	/* Switch as soon as possible tx_dev. */
-	fs_switch_dev(sdev->fs_dev, sdev);
+	fs_switch_dev(fs_dev(sdev), sdev);
 	/* Use safe bursts in any case. */
-	set_burst_fn(sdev->fs_dev, 1);
+	failsafe_set_burst_fn(fs_dev(sdev), 1);
 	/*
 	 * Async removal, the sub-PMD will try to unregister
 	 * the callback at the source of the current thread context.
 	 */
 	sdev->remove = 1;
+	fs_unlock(fs_dev(sdev), 0);
 	return 0;
 }
 
@@ -457,3 +608,26 @@ failsafe_eth_lsc_event_callback(uint16_t port_id __rte_unused,
 	else
 		return 0;
 }
+
+/* Take sub-device ownership before it becomes exposed to the application. */
+int
+failsafe_eth_new_event_callback(uint16_t port_id,
+				enum rte_eth_event_type event __rte_unused,
+				void *cb_arg, void *out __rte_unused)
+{
+	struct rte_eth_dev *fs_dev = cb_arg;
+	struct sub_device *sdev;
+	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+	uint8_t i;
+
+	FOREACH_SUBDEV_STATE(sdev, i, fs_dev, DEV_PARSED) {
+		if (sdev->state >= DEV_PROBED)
+			continue;
+		if (strcmp(sdev->devargs.name, dev->device->name) != 0)
+			continue;
+		rte_eth_dev_owner_set(port_id, &PRIV(fs_dev)->my_owner);
+		/* The actual owner will be checked after the port probing. */
+		break;
+	}
+	return 0;
+}
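Note: the Rx/Tx queue synchronization hunks above reduce to one small decision rule. A queue flagged deferred_start is skipped by dev_start(), so it must be started by hand whenever the corresponding failsafe queue is already started; conversely, any queue the failsafe port has stopped must be stopped on the sub-device as well. The standalone sketch below models just that rule in plain C; it is illustrative only, and none of its names (queue_state, sync_action) are DPDK API.

/*
 * Standalone model of the queue-state sync rule used by
 * failsafe_eth_dev_rx_queues_sync()/failsafe_eth_dev_tx_queues_sync()
 * in the diff above. Illustrative names; not DPDK API.
 */
#include <stdio.h>

enum queue_state { QUEUE_STOPPED, QUEUE_STARTED };

struct queue {
	int deferred_start;		/* set: dev_start() skips auto-start */
	enum queue_state fs_state;	/* state recorded by the failsafe port */
};

/* Returns +1 to start the sub-device queue, -1 to stop it, 0 to leave it. */
static int
sync_action(const struct queue *q)
{
	if (q->deferred_start && q->fs_state == QUEUE_STARTED)
		return 1;	/* dev_start() skipped it; catch up manually */
	if (q->fs_state == QUEUE_STOPPED)
		return -1;	/* failsafe queue stopped earlier; mirror it */
	return 0;		/* states already consistent */
}

int
main(void)
{
	const struct queue cases[] = {
		{ 1, QUEUE_STARTED },	/* deferred + started -> start (+1) */
		{ 0, QUEUE_STOPPED },	/* stopped earlier -> stop (-1) */
		{ 0, QUEUE_STARTED },	/* started normally -> no-op (0) */
	};
	unsigned int i;

	for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("case %u: action %+d\n", i, sync_action(&cases[i]));
	return 0;
}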