/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>

#include "failsafe_private.h"

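/*
 * Apply the fail-safe port configuration to each sub-device.
 * RMV and LSC interrupts are only requested from sub-devices that
 * advertise them; a failed callback registration is downgraded to a
 * warning, since sub-device removal is still caught by the periodic
 * hot-plug scan elsewhere in the driver.
 */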
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED &&
		    !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE))
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			fs_unlock(dev, 0);
			return ret;
		}
		if (rmv_interrupt && sdev->rmv_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->rmv_callback = 1;
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt && sdev->lsc_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->lsc_callback = 1;
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	fs_unlock(dev, 0);
	return 0;
}

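/*
 * Mark every configured queue as started, except those created with
 * the deferred-start flag, which remain stopped until started
 * explicitly through the queue start operations below.
 */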
static void
fs_set_queues_state_start(struct rte_eth_dev *dev)
{
	struct rxq *rxq;
	struct txq *txq;
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq != NULL && !rxq->info.conf.rx_deferred_start)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq != NULL && !txq->info.conf.tx_deferred_start)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
}

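/*
 * Start all active sub-devices and install the Rx interrupt proxy.
 * A genuine error from a sub-device aborts the whole start; an error
 * caused by a removed sub-device (fs_err) only skips that sub-device.
 */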
static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	ret = failsafe_rx_intr_install(dev);
	if (ret) {
		fs_unlock(dev, 0);
		return ret;
	}
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			fs_unlock(dev, 0);
			return ret;
		}
		ret = failsafe_rx_intr_install_subdevice(sdev);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			rte_eth_dev_stop(PORT_ID(sdev));
			fs_unlock(dev, 0);
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED) {
		PRIV(dev)->state = DEV_STARTED;
		fs_set_queues_state_start(dev);
	}
	fs_switch_dev(dev, NULL);
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_stop(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		if (dev->data->rx_queues[i] != NULL)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		if (dev->data->tx_queues[i] != NULL)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
}

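/*
 * Stop every started sub-device and uninstall the Rx interrupt proxy,
 * dropping the fail-safe state back below DEV_STARTED.
 */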
static void
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		rte_eth_dev_stop(PORT_ID(sdev));
		failsafe_rx_intr_uninstall_subdevice(sdev);
		sdev->state = DEV_STARTED - 1;
	}
	failsafe_rx_intr_uninstall(dev);
	fs_set_queues_state_stop(dev);
	fs_unlock(dev, 0);
}

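/*
 * Link up/down requests are propagated to every active sub-device and
 * abort on the first genuine (non-removal) error.
 */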
static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static void fs_dev_free_queues(struct rte_eth_dev *dev);

static void
fs_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		failsafe_eth_dev_unregister_callbacks(sdev);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	fs_dev_free_queues(dev);
	fs_unlock(dev, 0);
}

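/*
 * Per-queue start/stop: the operation is applied to the same queue id
 * on every active sub-device before the fail-safe queue state itself
 * is updated.
 */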
static int
fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_stop(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_start(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue start failed for subdevice %d", i);
			fs_rx_queue_stop(dev, rx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue start failed for subdevice %d", i);
			fs_tx_queue_stop(dev, tx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

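/*
 * Release a fail-safe Rx queue: close its eventfd, release the
 * corresponding queue on every active sub-device, then free the
 * wrapper itself.
 */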
static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq;

	if (queue == NULL)
		return;
	rxq = queue;
	dev = &rte_eth_devices[rxq->priv->data->port_id];
	fs_lock(dev, 0);
	if (rxq->event_fd > 0)
		close(rxq->event_fd);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->rx_queues != NULL &&
		    ETH(sdev)->data->rx_queues[rxq->qid] != NULL) {
			SUBOPS(sdev, rx_queue_release)
				(ETH(sdev)->data->rx_queues[rxq->qid]);
		}
	}
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
	fs_unlock(dev, 0);
}

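/*
 * Note on the rxq allocation in fs_rx_queue_setup below: struct rxq
 * (see failsafe_private.h for the authoritative definition) ends with
 * a flexible refcnt[] array, one rte_atomic64_t per possible
 * sub-device, hence the sizeof(*rxq) +
 * sizeof(rte_atomic64_t) * subs_tail allocation size.
 */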
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	/*
	 * FIXME: Add a proper interface in rte_eal_interrupts for
	 * allocating eventfd as an interrupt vector.
	 * For the time being, fake as if we are using MSIX interrupts,
	 * this will cause rte_intr_efd_enable to allocate an eventfd for us.
	 */
	struct rte_intr_handle intr_handle = {
		.type = RTE_INTR_HANDLE_VFIO_MSIX,
		.efds = { -1, },
	};
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	if (rx_conf->rx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, rx_queue_start) == NULL) {
				ERROR("Rx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
	ret = rte_intr_efd_enable(&intr_handle, 1);
	if (ret < 0) {
		fs_unlock(dev, 0);
		return ret;
	}
	rxq->event_fd = intr_handle.efds[0];
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	fs_unlock(dev, 0);
	return ret;
}

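/*
 * Rx interrupt enable/disable goes through the fail-safe event proxy:
 * events coming from the sub-device queues are forwarded to the single
 * per-queue eventfd exposed to the application.
 */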
static int
fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int rc = 0;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	/* Fail if proxy service is not running. */
	if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
		ERROR("failsafe interrupt services are not running");
		rc = -EAGAIN;
		goto unlock;
	}
	rxq->enable_events = 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint64_t u64;
	uint8_t i;
	int rc = 0;
	int ret;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq->enable_events = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	/* Clear pending events */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

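/*
 * Tx counterpart of fs_rx_queue_release; Tx queues have no eventfd
 * to close.
 */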
static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq;

	if (queue == NULL)
		return;
	txq = queue;
	dev = &rte_eth_devices[txq->priv->data->port_id];
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->tx_queues != NULL &&
		    ETH(sdev)->data->tx_queues[txq->qid] != NULL) {
			SUBOPS(sdev, tx_queue_release)
				(ETH(sdev)->data->tx_queues[txq->qid]);
		}
	}
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
	fs_unlock(dev, 0);
}

static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	if (tx_conf->tx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, tx_queue_start) == NULL) {
				ERROR("Tx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_txq:
	fs_tx_queue_release(txq);
	fs_unlock(dev, 0);
	return ret;
}

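/* Called on close: release every Rx and Tx queue of the fail-safe port. */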
static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

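/*
 * Promiscuous and all-multicast settings are mirrored to every active
 * sub-device; sub-devices plugged in later are re-synchronized by the
 * hot-plug logic elsewhere in the driver.
 */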
static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_enable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_disable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_enable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_disable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			fs_unlock(dev, 0);
			return 0;
		}
	}
	fs_unlock(dev, 0);
	return -1;
}

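/*
 * Reported stats are the accumulator (counters inherited from removed
 * sub-devices) plus a fresh snapshot from each active sub-device. On a
 * failed snapshot the previous one is restored, keeping the sum
 * monotonic.
 */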
static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			fs_unlock(dev, 0);
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		rte_eth_stats_reset(PORT_ID(sdev));
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
	fs_unlock(dev, 0);
}

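/*
 * Descriptor limits merge pessimistically: lowest maxima, highest
 * minima and alignment constraints among sub-devices.
 */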
static void
fs_dev_merge_desc_lim(struct rte_eth_desc_lim *to,
		      const struct rte_eth_desc_lim *from)
{
	to->nb_max = RTE_MIN(to->nb_max, from->nb_max);
	to->nb_min = RTE_MAX(to->nb_min, from->nb_min);
	to->nb_align = RTE_MAX(to->nb_align, from->nb_align);

	to->nb_seg_max = RTE_MIN(to->nb_seg_max, from->nb_seg_max);
	to->nb_mtu_seg_max = RTE_MIN(to->nb_mtu_seg_max, from->nb_mtu_seg_max);
}

/*
 * Merge the information from sub-devices.
 *
 * The reported values must be the common subset of all sub-devices:
 * for example, the reported max_rx_queues is the minimum over all
 * sub-devices.
 */
static void
fs_dev_merge_info(struct rte_eth_dev_info *info,
		  const struct rte_eth_dev_info *sinfo)
{
	info->max_rx_pktlen = RTE_MIN(info->max_rx_pktlen, sinfo->max_rx_pktlen);
	info->max_rx_queues = RTE_MIN(info->max_rx_queues, sinfo->max_rx_queues);
	info->max_tx_queues = RTE_MIN(info->max_tx_queues, sinfo->max_tx_queues);
	info->max_mac_addrs = RTE_MIN(info->max_mac_addrs, sinfo->max_mac_addrs);
	info->max_hash_mac_addrs = RTE_MIN(info->max_hash_mac_addrs,
					   sinfo->max_hash_mac_addrs);
	info->max_vmdq_pools = RTE_MIN(info->max_vmdq_pools, sinfo->max_vmdq_pools);
	info->max_vfs = RTE_MIN(info->max_vfs, sinfo->max_vfs);

	fs_dev_merge_desc_lim(&info->rx_desc_lim, &sinfo->rx_desc_lim);
	fs_dev_merge_desc_lim(&info->tx_desc_lim, &sinfo->tx_desc_lim);

	info->rx_offload_capa &= sinfo->rx_offload_capa;
	info->tx_offload_capa &= sinfo->tx_offload_capa;
	info->rx_queue_offload_capa &= sinfo->rx_queue_offload_capa;
	info->tx_queue_offload_capa &= sinfo->tx_queue_offload_capa;
	info->flow_type_rss_offloads &= sinfo->flow_type_rss_offloads;
}

/*
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limit capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Use the values of the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Use a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Use a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 *      Use a logical AND of device capabilities among
 *      all sub_devices and the default capabilities.
 */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;

	/* Use maximum upper bounds by default */
	infos->max_rx_pktlen = UINT32_MAX;
	infos->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_mac_addrs = FAILSAFE_MAX_ETHADDR;
	infos->max_hash_mac_addrs = UINT32_MAX;
	infos->max_vfs = UINT16_MAX;
	infos->max_vmdq_pools = UINT16_MAX;

	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	infos->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY;

	infos->rx_queue_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY;

	infos->tx_offload_capa =
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO;

	infos->flow_type_rss_offloads =
		ETH_RSS_IP |
		ETH_RSS_UDP |
		ETH_RSS_TCP;
	infos->dev_capa =
		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
		struct rte_eth_dev_info sub_info;

		rte_eth_dev_info_get(PORT_ID(sdev), &sub_info);

		fs_dev_merge_info(infos, &sub_info);
	}
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;
	const uint32_t *ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = NULL;
		goto unlock;
	}
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) {
		ret = NULL;
		goto unlock;
	}
	/*
	 * The API does not permit a clean AND of all ptypes; it is also
	 * incomplete by design, and we do not really need the best
	 * possible value in this context.
	 * We just return the ptypes of the device with the highest
	 * priority, usually the PREFERRED device.
	 */
	ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	int ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = 0;
		goto unlock;
	}
	if (SUBOPS(sdev, flow_ctrl_get) == NULL) {
		ret = -ENOTSUP;
		goto unlock;
	}
	ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
	fs_unlock(dev, 0);
}

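/*
 * Unicast MAC addresses are replayed on every active sub-device and
 * recorded in mac_addr_pool so that they can be reapplied when a
 * sub-device comes back after a removal.
 */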
static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

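/*
 * The multicast list is first applied to every active sub-device; the
 * fail-safe copy is updated only if all of them accept it, otherwise
 * the previous list is rolled back on each sub-device.
 */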
static int
fs_set_mc_addr_list(struct rte_eth_dev *dev,
		    struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	void *mcast_addrs;

	fs_lock(dev, 0);

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
						   mc_addr_set, nb_mc_addr);
		if (ret != 0) {
			ERROR("Operation rte_eth_dev_set_mc_addr_list failed for sub_device %d with error %d",
			      i, ret);
			goto rollback;
		}
	}

	mcast_addrs = rte_realloc(PRIV(dev)->mcast_addrs,
		nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]), 0);
	if (mcast_addrs == NULL && nb_mc_addr > 0) {
		ret = -ENOMEM;
		goto rollback;
	}
	rte_memcpy(mcast_addrs, mc_addr_set,
		   nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]));
	PRIV(dev)->nb_mcast_addr = nb_mc_addr;
	PRIV(dev)->mcast_addrs = mcast_addrs;
	fs_unlock(dev, 0);

	return 0;

rollback:
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		int rc = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
			PRIV(dev)->mcast_addrs, PRIV(dev)->nb_mcast_addr);
		if (rc != 0) {
			ERROR("Multicast MAC address list rollback for sub_device %d failed with error %d",
			      i, rc);
		}
	}
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_rss_hash_update(struct rte_eth_dev *dev,
		   struct rte_eth_rss_conf *rss_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_rss_hash_update"
			      " failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	return -ENOTSUP;
}

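/*
 * Fail-safe dev_ops table, plugged into each fail-safe ethdev
 * instance; every handler fans the operation out to the sub-devices
 * as above.
 */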
const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = fs_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_start = fs_rx_queue_start,
	.rx_queue_stop = fs_rx_queue_stop,
	.tx_queue_start = fs_tx_queue_start,
	.tx_queue_stop = fs_tx_queue_stop,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.rx_queue_intr_enable = fs_rx_intr_enable,
	.rx_queue_intr_disable = fs_rx_intr_disable,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.set_mc_addr_list = fs_set_mc_addr_list,
	.rss_hash_update = fs_rss_hash_update,
	.filter_ctrl = fs_filter_ctrl,
};