/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>

#include "failsafe_private.h"
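
/*
 * Default capability set, advertised while no sub-device is probed.
 * fs_dev_infos_get() starts from these values and narrows them with the
 * capabilities of each probed sub-device (see the rules documented
 * above fs_dev_infos_get()).
 */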
static struct rte_eth_dev_info default_infos = {
	/* Max possible number of elements */
	.max_rx_pktlen = UINT32_MAX,
	.max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_mac_addrs = FAILSAFE_MAX_ETHADDR,
	.max_hash_mac_addrs = UINT32_MAX,
	.max_vfs = UINT16_MAX,
	.max_vmdq_pools = UINT16_MAX,
	.rx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	.tx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	.rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_CRC_STRIP |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.rx_queue_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_CRC_STRIP |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.tx_offload_capa =
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO,
	.flow_type_rss_offloads = 0x0,
};
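
/*
 * fs_dev_configure() applies the fail-safe port configuration to every
 * sub-device, enabling RMV and LSC interrupts only where the sub-device
 * advertises them. An error reported by a sub-device aborts the whole
 * configuration, unless fs_err() identifies it as a removal in
 * progress, in which case that sub-device is simply skipped (it will be
 * reconfigured when it comes back).
 */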
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint64_t supp_tx_offloads;
	uint64_t tx_offloads;
	uint8_t i;
	int ret;

	supp_tx_offloads = PRIV(dev)->infos.tx_offload_capa;
	tx_offloads = dev->data->dev_conf.txmode.offloads;
	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
		rte_errno = ENOTSUP;
		ERROR("Some Tx offloads are not supported, "
		      "requested 0x%" PRIx64 " supported 0x%" PRIx64,
		      tx_offloads, supp_tx_offloads);
		return -rte_errno;
	}
	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED &&
		    !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE))
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			return ret;
		}
		if (rmv_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	return 0;
}
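
/*
 * fs_dev_start() installs the Rx interrupt proxy, then starts every
 * active sub-device; it finally calls fs_switch_dev() so that the burst
 * functions point at the best available sub-device.
 */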
static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = failsafe_rx_intr_install(dev);
	if (ret)
		return ret;
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			return ret;
		}
		ret = failsafe_rx_intr_install_subdevice(sdev);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			rte_eth_dev_stop(PORT_ID(sdev));
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED)
		PRIV(dev)->state = DEV_STARTED;
	fs_switch_dev(dev, NULL);
	return 0;
}
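
/*
 * fs_dev_stop() demotes the fail-safe state to one step below
 * DEV_STARTED (i.e. DEV_ACTIVE) before stopping the sub-devices,
 * presumably so that the hotplug alarm does not race to restart them
 * mid-stop.
 */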
static void
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		rte_eth_dev_stop(PORT_ID(sdev));
		failsafe_rx_intr_uninstall_subdevice(sdev);
		sdev->state = DEV_STARTED - 1;
	}
	failsafe_rx_intr_uninstall(dev);
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static void fs_dev_free_queues(struct rte_eth_dev *dev);
static void
fs_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	fs_dev_free_queues(dev);
}
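
/*
 * An Rx queue offload set is valid when it is a subset of the combined
 * port and queue capabilities, and when it does not flip any port-level
 * offload relative to the port configuration (the XOR test below).
 */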
static bool
fs_rxq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
{
	uint64_t port_offloads;
	uint64_t queue_supp_offloads;
	uint64_t port_supp_offloads;

	port_offloads = dev->data->dev_conf.rxmode.offloads;
	queue_supp_offloads = PRIV(dev)->infos.rx_queue_offload_capa;
	port_supp_offloads = PRIV(dev)->infos.rx_offload_capa;
	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
	     offloads)
		return false;
	/* Verify we have no conflict with port offloads */
	if ((port_offloads ^ offloads) & port_supp_offloads)
		return false;
	return true;
}

static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq;

	if (queue == NULL)
		return;
	rxq = queue;
	dev = rxq->priv->dev;
	if (rxq->event_fd > 0)
		close(rxq->event_fd);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, rx_queue_release)
			(ETH(sdev)->data->rx_queues[rxq->qid]);
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
}
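
/*
 * The fail-safe Rx queue is a shadow object: it stores the application
 * parameters, keeps one reference counter per potential sub-device and
 * owns an eventfd used to wake the application whenever a sub-device
 * signals an Rx interrupt. Real queues with identical parameters are
 * then created on each active sub-device.
 */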
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	/*
	 * FIXME: Add a proper interface in rte_eal_interrupts for
	 * allocating eventfd as an interrupt vector.
	 * For the time being, fake as if we are using MSIX interrupts,
	 * this will cause rte_intr_efd_enable to allocate an eventfd for us.
	 */
	struct rte_intr_handle intr_handle = {
		.type = RTE_INTR_HANDLE_VFIO_MSIX,
		.efds = { -1, },
	};
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	/* Verify application offloads are valid for our port and queue. */
	if (fs_rxq_offloads_valid(dev, rx_conf->offloads) == false) {
		rte_errno = ENOTSUP;
		ERROR("Rx queue offloads 0x%" PRIx64
		      " don't match port offloads 0x%" PRIx64
		      " or supported offloads 0x%" PRIx64,
		      rx_conf->offloads,
		      dev->data->dev_conf.rxmode.offloads,
		      PRIV(dev)->infos.rx_offload_capa |
		      PRIV(dev)->infos.rx_queue_offload_capa);
		return -rte_errno;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
	ret = rte_intr_efd_enable(&intr_handle, 1);
	if (ret < 0) {
		rte_errno = -ret;
		return -rte_errno;
	}
	rxq->event_fd = intr_handle.efds[0];
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	return ret;
}
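
/*
 * Rx interrupt control is delegated to the event proxy service, which
 * forwards sub-device events to the queue's eventfd; enabling
 * interrupts therefore requires that service to be running.
 */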
static int
fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int rc = 0;

	if (idx >= dev->data->nb_rx_queues) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	/* Fail if proxy service is not running. */
	if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
		ERROR("failsafe interrupt services are not running");
		rte_errno = EAGAIN;
		return -rte_errno;
	}
	rxq->enable_events = 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	return rc;
}

static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint64_t u64;
	uint8_t i;
	int rc = 0;
	int ret;

	if (idx >= dev->data->nb_rx_queues) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq->enable_events = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	/* Clear pending events */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
	return rc;
}
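
/* Tx counterpart of fs_rxq_offloads_valid(), checked against txmode. */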
static bool
fs_txq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
{
	uint64_t port_offloads;
	uint64_t queue_supp_offloads;
	uint64_t port_supp_offloads;

	port_offloads = dev->data->dev_conf.txmode.offloads;
	queue_supp_offloads = PRIV(dev)->infos.tx_queue_offload_capa;
	port_supp_offloads = PRIV(dev)->infos.tx_offload_capa;
	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
	     offloads)
		return false;
	/* Verify we have no conflict with port offloads */
	if ((port_offloads ^ offloads) & port_supp_offloads)
		return false;
	return true;
}

static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq;

	if (queue == NULL)
		return;
	txq = queue;
	dev = txq->priv->dev;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, tx_queue_release)
			(ETH(sdev)->data->tx_queues[txq->qid]);
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
}

static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	/*
	 * Don't verify queue offloads for applications which
	 * use the old API.
	 */
	if (tx_conf != NULL &&
	    (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
	    fs_txq_offloads_valid(dev, tx_conf->offloads) == false) {
		rte_errno = ENOTSUP;
		ERROR("Tx queue offloads 0x%" PRIx64
		      " don't match port offloads 0x%" PRIx64
		      " or supported offloads 0x%" PRIx64,
		      tx_conf->offloads,
		      dev->data->dev_conf.txmode.offloads,
		      PRIV(dev)->infos.tx_offload_capa |
		      PRIV(dev)->infos.tx_queue_offload_capa);
		return -rte_errno;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	return 0;
free_txq:
	fs_tx_queue_release(txq);
	return ret;
}
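
/* Release every Rx and Tx queue; called on device close. */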
static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_enable(PORT_ID(sdev));
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_disable(PORT_ID(sdev));
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_enable(PORT_ID(sdev));
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_disable(PORT_ID(sdev));
}

static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			return 0;
		}
	}
	return -1;
}
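
/*
 * fs_stats_get() returns the accumulator (counters inherited from
 * removed sub-devices) plus the live counters of every active
 * sub-device. A per-sub-device snapshot is kept so that a sub-device
 * failing mid-read falls back to its last known values rather than
 * corrupting the totals.
 */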
static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	return 0;
}

static void
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		rte_eth_stats_reset(PORT_ID(sdev));
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		DEBUG("No probed device, using default infos");
		rte_memcpy(&PRIV(dev)->infos, &default_infos,
			   sizeof(default_infos));
	} else {
		uint64_t rx_offload_capa;
		uint64_t rxq_offload_capa;

		rx_offload_capa = default_infos.rx_offload_capa;
		rxq_offload_capa = default_infos.rx_queue_offload_capa;
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			rte_eth_dev_info_get(PORT_ID(sdev),
					&PRIV(dev)->infos);
			rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
			rxq_offload_capa &=
					PRIV(dev)->infos.rx_queue_offload_capa;
		}
		sdev = TX_SUBDEV(dev);
		rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
		PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
		PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa;
		PRIV(dev)->infos.tx_offload_capa &=
					default_infos.tx_offload_capa;
		PRIV(dev)->infos.tx_queue_offload_capa &=
					default_infos.tx_queue_offload_capa;
		PRIV(dev)->infos.flow_type_rss_offloads &=
					default_infos.flow_type_rss_offloads;
	}
	rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;
	const uint32_t *ret;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return NULL;
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL)
		return NULL;
	/*
	 * The API does not permit a clean AND of all ptypes; it is also
	 * incomplete by design, and we do not really care about having
	 * the best possible value in this context.
	 * We just return the ptypes of the device of highest priority,
	 * usually the PREFERRED device.
	 */
	ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev);
	return ret;
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	int ret;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return 0;
	if (SUBOPS(sdev, flow_ctrl_get) == NULL)
		return -ENOTSUP;
	ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
	return ret;
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
}
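
/*
 * mac_addr_pool records the VMDq pool associated with each MAC index,
 * so the fail-safe can replay the MAC address configuration on a
 * sub-device that is plugged back in.
 */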
static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	return 0;
}

static void
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d",
			      i, ret);
			return;
		}
	}
}

static int
fs_filter_ctrl(struct rte_eth_dev *dev,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
		ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = fs_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.rx_queue_intr_enable = fs_rx_intr_enable,
	.rx_queue_intr_disable = fs_rx_intr_disable,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.filter_ctrl = fs_filter_ctrl,
};