/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>

#include "failsafe_private.h"

static struct rte_eth_dev_info default_infos = {
	/* Max possible number of elements */
	.max_rx_pktlen = UINT32_MAX,
	.max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_mac_addrs = FAILSAFE_MAX_ETHADDR,
	.max_hash_mac_addrs = UINT32_MAX,
	.max_vfs = UINT16_MAX,
	.max_vmdq_pools = UINT16_MAX,
	.rx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	.tx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	.rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.rx_queue_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.tx_offload_capa =
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO,
	.flow_type_rss_offloads =
		ETH_RSS_IP |
		ETH_RSS_UDP |
		ETH_RSS_TCP,
	.dev_capa =
		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP,
};

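/*
 * Configure each sub-device with the fail-safe configuration.
 * RMV (removal) and LSC (link state change) interrupt settings are
 * propagated only to sub-devices that advertise the matching
 * capability; errors on a removed sub-device are tolerated through
 * fs_err().
 */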
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED &&
		    !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE))
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			fs_unlock(dev, 0);
			return ret;
		}
		if (rmv_interrupt && sdev->rmv_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->rmv_callback = 1;
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt && sdev->lsc_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->lsc_callback = 1;
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_start(struct rte_eth_dev *dev)
{
	struct rxq *rxq;
	struct txq *txq;
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq != NULL && !rxq->info.conf.rx_deferred_start)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq != NULL && !txq->info.conf.tx_deferred_start)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
}

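/*
 * Start every active sub-device and install the Rx interrupt proxy.
 * On the first successful start, queue states are marked started
 * (except deferred-start queues) and the active sub-device is
 * elected through fs_switch_dev().
 */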
static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	ret = failsafe_rx_intr_install(dev);
	if (ret) {
		fs_unlock(dev, 0);
		return ret;
	}
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			fs_unlock(dev, 0);
			return ret;
		}
		ret = failsafe_rx_intr_install_subdevice(sdev);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			rte_eth_dev_stop(PORT_ID(sdev));
			fs_unlock(dev, 0);
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED) {
		PRIV(dev)->state = DEV_STARTED;
		fs_set_queues_state_start(dev);
	}
	fs_switch_dev(dev, NULL);
	fs_unlock(dev, 0);
	return 0;
}

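/*
 * Stop path: sub-device and fail-safe states are rolled back to just
 * below DEV_STARTED, Rx interrupts are uninstalled and every queue
 * is marked stopped.
 */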
static void
fs_set_queues_state_stop(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		if (dev->data->rx_queues[i] != NULL)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		if (dev->data->tx_queues[i] != NULL)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
}

static void
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		rte_eth_dev_stop(PORT_ID(sdev));
		failsafe_rx_intr_uninstall_subdevice(sdev);
		sdev->state = DEV_STARTED - 1;
	}
	failsafe_rx_intr_uninstall(dev);
	fs_set_queues_state_stop(dev);
	fs_unlock(dev, 0);
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static void fs_dev_free_queues(struct rte_eth_dev *dev);
static void
fs_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		failsafe_eth_dev_unregister_callbacks(sdev);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	fs_dev_free_queues(dev);
	fs_unlock(dev, 0);
}

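/*
 * Per-queue start/stop handlers below apply the operation to every
 * active sub-device. For stop, failures are collected but the call
 * still reports success if at least one sub-device queue was
 * stopped.
 */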
static int
fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_stop(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_start(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue start failed for subdevice %d", i);
			fs_rx_queue_stop(dev, rx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue start failed for subdevice %d", i);
			fs_tx_queue_stop(dev, tx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq;

	if (queue == NULL)
		return;
	rxq = queue;
	dev = rxq->priv->dev;
	fs_lock(dev, 0);
	if (rxq->event_fd > 0)
		close(rxq->event_fd);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->rx_queues != NULL &&
		    ETH(sdev)->data->rx_queues[rxq->qid] != NULL) {
			SUBOPS(sdev, rx_queue_release)
				(ETH(sdev)->data->rx_queues[rxq->qid]);
		}
	}
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
	fs_unlock(dev, 0);
}

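/*
 * A fail-safe Rx queue is a shadow object holding the configuration,
 * per-sub-device reference counters and one eventfd; the actual
 * queues are created on each active sub-device with the same
 * parameters.
 */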
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	/*
	 * FIXME: Add a proper interface in rte_eal_interrupts for
	 * allocating eventfd as an interrupt vector.
	 * For the time being, fake as if we are using MSIX interrupts,
	 * this will cause rte_intr_efd_enable to allocate an eventfd for us.
	 */
	struct rte_intr_handle intr_handle = {
		.type = RTE_INTR_HANDLE_VFIO_MSIX,
		.efds = { -1, },
	};
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	if (rx_conf->rx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, rx_queue_start) == NULL) {
				ERROR("Rx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
	ret = rte_intr_efd_enable(&intr_handle, 1);
	if (ret < 0) {
		fs_unlock(dev, 0);
		return ret;
	}
	rxq->event_fd = intr_handle.efds[0];
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	fs_unlock(dev, 0);
	return ret;
}

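/*
 * Rx interrupt enable/disable rely on the fail-safe interrupt proxy
 * service (PRIV(dev)->rxp): sub-device events are forwarded to the
 * queue's eventfd, which applications poll through the usual ethdev
 * Rx interrupt API.
 */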
static int
fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int rc = 0;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	/* Fail if proxy service is not running. */
	if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
		ERROR("failsafe interrupt services are not running");
		rc = -EAGAIN;
		goto unlock;
	}
	rxq->enable_events = 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint64_t u64;
	uint8_t i;
	int rc = 0;
	int ret;

	fs_lock(dev, 0);
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq->enable_events = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	/* Clear pending events */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq;

	if (queue == NULL)
		return;
	txq = queue;
	dev = txq->priv->dev;
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->tx_queues != NULL &&
		    ETH(sdev)->data->tx_queues[txq->qid] != NULL) {
			SUBOPS(sdev, tx_queue_release)
				(ETH(sdev)->data->tx_queues[txq->qid]);
		}
	}
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
	fs_unlock(dev, 0);
}

static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	if (tx_conf->tx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, tx_queue_start) == NULL) {
				ERROR("Tx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_txq:
	fs_tx_queue_release(txq);
	fs_unlock(dev, 0);
	return ret;
}

static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_enable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_disable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_enable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_disable(PORT_ID(sdev));
	fs_unlock(dev, 0);
}

static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			fs_unlock(dev, 0);
			return 0;
		}
	}
	fs_unlock(dev, 0);
	return -1;
}

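/*
 * Stats are reported as an accumulator maintained by the driver plus
 * a fresh snapshot from each active sub-device; on a tolerated error
 * the previous snapshot is restored and still counted.
 */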
static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			fs_unlock(dev, 0);
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		rte_eth_stats_reset(PORT_ID(sdev));
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
	fs_unlock(dev, 0);
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 */
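/*
 * Example of the RX capability rule above (illustrative values only):
 * if sub_device 0 reports (VLAN_STRIP | IPV4_CKSUM) and sub_device 1
 * reports only IPV4_CKSUM, the fail-safe reports IPV4_CKSUM, further
 * masked by the default_infos capabilities.
 */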
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		DEBUG("No probed device, using default infos");
		rte_memcpy(&PRIV(dev)->infos, &default_infos,
			   sizeof(default_infos));
	} else {
		uint64_t rx_offload_capa;
		uint64_t rxq_offload_capa;
		uint64_t rss_hf_offload_capa;

		rx_offload_capa = default_infos.rx_offload_capa;
		rxq_offload_capa = default_infos.rx_queue_offload_capa;
		rss_hf_offload_capa = default_infos.flow_type_rss_offloads;
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			rte_eth_dev_info_get(PORT_ID(sdev),
					&PRIV(dev)->infos);
			rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
			rxq_offload_capa &=
					PRIV(dev)->infos.rx_queue_offload_capa;
			rss_hf_offload_capa &=
					PRIV(dev)->infos.flow_type_rss_offloads;
		}
		sdev = TX_SUBDEV(dev);
		rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
		PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
		PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa;
		PRIV(dev)->infos.flow_type_rss_offloads = rss_hf_offload_capa;
		PRIV(dev)->infos.tx_offload_capa &=
					default_infos.tx_offload_capa;
		PRIV(dev)->infos.tx_queue_offload_capa &=
					default_infos.tx_queue_offload_capa;
	}
	rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;
	const uint32_t *ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = NULL;
		goto unlock;
	}
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) {
		ret = NULL;
		goto unlock;
	}
	/*
	 * The API does not permit to do a clean AND of all ptypes.
	 * It is also incomplete by design and we do not really care
	 * to have a best possible value in this context.
	 * We just return the ptypes of the device of highest
	 * priority, usually the PREFERRED device.
	 */
	ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	int ret;

	fs_lock(dev, 0);
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = 0;
		goto unlock;
	}
	if (SUBOPS(sdev, flow_ctrl_get) == NULL) {
		ret = -ENOTSUP;
		goto unlock;
	}
	ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	fs_lock(dev, 0);
	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
	fs_unlock(dev, 0);
}

static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_rss_hash_update"
				" failed for sub_device %d with error %d",
				i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_filter_ctrl(struct rte_eth_dev *dev,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	fs_lock(dev, 0);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
		ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = fs_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_start = fs_rx_queue_start,
	.rx_queue_stop = fs_rx_queue_stop,
	.tx_queue_start = fs_tx_queue_start,
	.tx_queue_stop = fs_tx_queue_stop,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.rx_queue_intr_enable = fs_rx_intr_enable,
	.rx_queue_intr_disable = fs_rx_intr_disable,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.rss_hash_update = fs_rss_hash_update,
	.filter_ctrl = fs_filter_ctrl,
};
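
/*
 * Usage sketch (illustrative, not part of the driver): a fail-safe
 * port is usually created from EAL arguments, e.g.
 *     --vdev 'net_failsafe0,dev(0000:84:00.0),dev(net_tap0,iface=fsafe0)'
 * or programmatically with rte_vdev_init() from rte_bus_vdev.h. The
 * PCI address and TAP sub-device below are placeholders.
 *
 *	if (rte_vdev_init("net_failsafe0",
 *			  "dev(0000:84:00.0),dev(net_tap0,iface=fsafe0)") < 0)
 *		rte_exit(EXIT_FAILURE, "Cannot create fail-safe device\n");
 */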