/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>

#include "failsafe_private.h"
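/*
 * Default infos reported while no sub-device has been probed.
 * Numeric limits are set to their maximum so they never constrain a
 * future configuration; capabilities are limited to what the fail-safe
 * PMD itself can verify (see the fs_dev_infos_get rules further below).
 */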
static struct rte_eth_dev_info default_infos = {
	/* Max possible number of elements */
	.max_rx_pktlen = UINT32_MAX,
	.max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_mac_addrs = FAILSAFE_MAX_ETHADDR,
	.max_hash_mac_addrs = UINT32_MAX,
	.max_vfs = UINT16_MAX,
	.max_vmdq_pools = UINT16_MAX,
	.rx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	.tx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	.rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_CRC_STRIP |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.rx_queue_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_CRC_STRIP |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.tx_offload_capa =
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO,
	.flow_type_rss_offloads =
		ETH_RSS_IP |
		ETH_RSS_UDP |
		ETH_RSS_TCP,
};

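/*
 * Check and propagate the top-level configuration to each probed
 * sub-device; RMV and LSC interrupts are requested from a sub-device
 * only when its dev_flags advertise support for them.
 */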
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED &&
		    !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE))
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			return ret;
		}
		if (rmv_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     i);
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     i);
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	return 0;
}

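/*
 * Install the Rx interrupt proxy, then start every active sub-device.
 * The fail-safe itself only reaches DEV_STARTED once the loop completes,
 * at which point the preferred sub-device is elected by fs_switch_dev().
 */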
static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = failsafe_rx_intr_install(dev);
	if (ret)
		return ret;
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			return ret;
		}
		ret = failsafe_rx_intr_install_subdevice(sdev);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			rte_eth_dev_stop(PORT_ID(sdev));
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED)
		PRIV(dev)->state = DEV_STARTED;
	fs_switch_dev(dev, NULL);
	return 0;
}

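/*
 * Device states are ordered (DEV_PROBED < DEV_ACTIVE < DEV_STARTED) as the
 * comparisons throughout this file rely on, so "DEV_STARTED - 1" below
 * steps a started device back to the previous state (DEV_ACTIVE).
 */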
static void
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		rte_eth_dev_stop(PORT_ID(sdev));
		failsafe_rx_intr_uninstall_subdevice(sdev);
		sdev->state = DEV_STARTED - 1;
	}
	failsafe_rx_intr_uninstall(dev);
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static void fs_dev_free_queues(struct rte_eth_dev *dev);

static void
fs_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	fs_dev_free_queues(dev);
}

static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq;

	if (queue == NULL)
		return;
	rxq = queue;
	dev = rxq->priv->dev;
	if (rxq->event_fd > 0)
		close(rxq->event_fd);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, rx_queue_release)
			(ETH(sdev)->data->rx_queues[rxq->qid]);
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
}

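/*
 * Each queue object is over-allocated past sizeof(*rxq) with one
 * rte_atomic64_t reference counter per potential sub-device (subs_tail
 * entries); fs_rx_queue_setup initializes them all before publishing the
 * queue in dev->data->rx_queues.
 */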
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	/*
	 * FIXME: Add a proper interface in rte_eal_interrupts for
	 * allocating eventfd as an interrupt vector.
	 * For the time being, fake as if we are using MSIX interrupts,
	 * this will cause rte_intr_efd_enable to allocate an eventfd for us.
	 */
	struct rte_intr_handle intr_handle = {
		.type = RTE_INTR_HANDLE_VFIO_MSIX,
		.efds = { -1, },
	};
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
	ret = rte_intr_efd_enable(&intr_handle, 1);
	if (ret < 0) {
		rte_free(rxq);
		return ret;
	}
	rxq->event_fd = intr_handle.efds[0];
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	return ret;
}

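/*
 * Rx interrupt requests fan out through the event proxy service: enabling
 * a queue arms every active sub-device, and errors coming from a removed
 * sub-device are filtered out by fs_err().
 */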
static int
fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int rc = 0;

	if (idx >= dev->data->nb_rx_queues)
		return -EINVAL;
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0)
		return -EINVAL;
	/* Fail if the proxy service is not running. */
	if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
		ERROR("failsafe interrupt services are not running");
		return -EAGAIN;
	}
	rxq->enable_events = 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	return rc;
}

static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint64_t u64;
	uint8_t i;
	int ret;
	int rc = 0;

	if (idx >= dev->data->nb_rx_queues)
		return -EINVAL;
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0)
		return -EINVAL;
	rxq->enable_events = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	/* Clear pending events. */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
	return rc;
}

static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq;

	if (queue == NULL)
		return;
	txq = queue;
	dev = txq->priv->dev;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, tx_queue_release)
			(ETH(sdev)->data->tx_queues[txq->qid]);
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
}

static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	return 0;
free_txq:
	fs_tx_queue_release(txq);
	return ret;
}

static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

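/*
 * The four mode toggles below are pure broadcasts: the request is relayed
 * to every active sub-device and, the ethdev calls being void here, no
 * per-sub-device error is reported back.
 */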
static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_enable(PORT_ID(sdev));
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_disable(PORT_ID(sdev));
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_enable(PORT_ID(sdev));
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_disable(PORT_ID(sdev));
}

static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			return 0;
		}
	}
	return -1;
}

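/*
 * Stats are aggregated across sub-devices: the accumulator keeps counters
 * of sub-devices that have gone away, while each live sub-device
 * contributes a timestamped snapshot taken below; on a transient read
 * failure the previous snapshot is restored from the backup copy.
 */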
static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	return 0;
}

static void
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		rte_eth_stats_reset(PORT_ID(sdev));
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 */
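/*
 * For instance, if one probed sub-device advertises
 * DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_TCP_LRO while another only
 * advertises DEV_RX_OFFLOAD_VLAN_STRIP, the fail-safe reports
 * DEV_RX_OFFLOAD_VLAN_STRIP: the AND over all probed sub-devices,
 * further masked by default_infos.
 */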
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		DEBUG("No probed device, using default infos");
		rte_memcpy(&PRIV(dev)->infos, &default_infos,
			   sizeof(default_infos));
	} else {
		uint64_t rx_offload_capa;
		uint64_t rxq_offload_capa;
		uint64_t rss_hf_offload_capa;

		rx_offload_capa = default_infos.rx_offload_capa;
		rxq_offload_capa = default_infos.rx_queue_offload_capa;
		rss_hf_offload_capa = default_infos.flow_type_rss_offloads;
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			rte_eth_dev_info_get(PORT_ID(sdev),
					     &PRIV(dev)->infos);
			rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
			rxq_offload_capa &=
					PRIV(dev)->infos.rx_queue_offload_capa;
			rss_hf_offload_capa &=
					PRIV(dev)->infos.flow_type_rss_offloads;
		}
		sdev = TX_SUBDEV(dev);
		rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
		PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
		PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa;
		PRIV(dev)->infos.flow_type_rss_offloads = rss_hf_offload_capa;
		PRIV(dev)->infos.tx_offload_capa &=
					default_infos.tx_offload_capa;
		PRIV(dev)->infos.tx_queue_offload_capa &=
					default_infos.tx_queue_offload_capa;
	}
	rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;
	const uint32_t *ret;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return NULL;
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes. */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL)
		return NULL;
	/*
	 * The API does not permit a clean AND of all ptypes.
	 * It is also incomplete by design, and we do not really care
	 * to have the best possible value in this context.
	 * We just return the ptypes of the device of highest
	 * priority, usually the PREFERRED device.
	 */
	ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev);
	return ret;
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	int ret;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return 0;
	if (SUBOPS(sdev, flow_ctrl_get) == NULL)
		return -ENOTSUP;
	ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
	return ret;
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
}

static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	return 0;
}

static int
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_filter_ctrl(struct rte_eth_dev *dev,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
		ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

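/*
 * Generic ethdev ops table for the fail-safe PMD: every entry dispatches
 * the corresponding ethdev call to the sub-devices, following the
 * broadcast and error-reporting patterns implemented above.
 */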
const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = fs_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.rx_queue_intr_enable = fs_rx_intr_enable,
	.rx_queue_intr_disable = fs_rx_intr_disable,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.filter_ctrl = fs_filter_ctrl,
};