/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdint.h>
#include <unistd.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>

#include "failsafe_private.h"

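/*
 * Device information reported while no sub-device has been probed.
 * Numeric limits are the maximum possible values, so that they never
 * restrict a configuration applied later, and the offload capabilities
 * are limited to those the fail-safe PMD can verify by itself.
 */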
static struct rte_eth_dev_info default_infos = {
	/* Max possible number of elements */
	.max_rx_pktlen = UINT32_MAX,
	.max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_mac_addrs = FAILSAFE_MAX_ETHADDR,
	.max_hash_mac_addrs = UINT32_MAX,
	.max_vfs = UINT16_MAX,
	.max_vmdq_pools = UINT16_MAX,
	.rx_desc_lim = {
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	.tx_desc_lim = {
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	.rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_CRC_STRIP |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.rx_queue_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_CRC_STRIP |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.tx_offload_capa =
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO,
	.flow_type_rss_offloads =
		ETH_RSS_IP |
		ETH_RSS_UDP |
		ETH_RSS_TCP,
};

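/*
 * Apply the fail-safe port configuration to each sub-device that is
 * probed, or active while the hotplug alarm lock is not held, and
 * propagate the RMV/LSC interrupt settings supported by each of them.
 */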
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED &&
		    !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE))
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					    dev->data->nb_rx_queues,
					    dev->data->nb_tx_queues,
					    &dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			return ret;
		}
		if (rmv_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	return 0;
}

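/*
 * Start every active sub-device and install the Rx interrupt proxies,
 * then elect the sub-device used for Tx via fs_switch_dev().
 */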
static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = failsafe_rx_intr_install(dev);
	if (ret)
		return ret;
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			return ret;
		}
		ret = failsafe_rx_intr_install_subdevice(sdev);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			rte_eth_dev_stop(PORT_ID(sdev));
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED)
		PRIV(dev)->state = DEV_STARTED;
	fs_switch_dev(dev, NULL);
	return 0;
}

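/* Stop all started sub-devices and uninstall the Rx interrupt proxies. */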
static void
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		rte_eth_dev_stop(PORT_ID(sdev));
		failsafe_rx_intr_uninstall_subdevice(sdev);
		sdev->state = DEV_STARTED - 1;
	}
	failsafe_rx_intr_uninstall(dev);
}

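/* Forward link up/down requests to every active sub-device. */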
static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static void fs_dev_free_queues(struct rte_eth_dev *dev);

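/*
 * Close the fail-safe port: cancel the hotplug alarm, stop the port if it
 * is still started, close every active sub-device and release all queues.
 */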
static void
fs_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	fs_dev_free_queues(dev);
}

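/*
 * Release an Rx queue: close its eventfd, release the matching queue on
 * each active sub-device and free the fail-safe queue object.
 */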
static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq;

	if (queue == NULL)
		return;
	rxq = queue;
	dev = rxq->priv->dev;
	if (rxq->event_fd > 0)
		close(rxq->event_fd);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, rx_queue_release)
			(ETH(sdev)->data->rx_queues[rxq->qid]);
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
}

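/*
 * Set up an Rx queue on the fail-safe port and mirror it on every active
 * sub-device. The queue holds one reference counter per potential
 * sub-device and an eventfd used to proxy Rx interrupts.
 */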
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	/*
	 * FIXME: Add a proper interface in rte_eal_interrupts for
	 * allocating eventfd as an interrupt vector.
	 * For the time being, fake as if we are using MSIX interrupts,
	 * this will cause rte_intr_efd_enable to allocate an eventfd for us.
	 */
	struct rte_intr_handle intr_handle = {
		.type = RTE_INTR_HANDLE_VFIO_MSIX,
		.efds = { -1, },
	};
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
	ret = rte_intr_efd_enable(&intr_handle, 1);
	if (ret < 0)
		return ret;
	rxq->event_fd = intr_handle.efds[0];
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	return ret;
}

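/*
 * Rx interrupt enable/disable: events from the sub-devices are relayed to
 * the queue eventfd by the fail-safe Rx interrupt proxy service.
 */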
static int
fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int rc = 0;

	if (idx >= dev->data->nb_rx_queues) {
		return -EINVAL;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		return -EINVAL;
	}
	/* Fail if proxy service is not running. */
	if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
		ERROR("failsafe interrupt services are not running");
		return -EAGAIN;
	}
	rxq->enable_events = 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	return rc;
}

static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint64_t u64;
	uint8_t i;
	int ret;
	int rc = 0;

	if (idx >= dev->data->nb_rx_queues) {
		return -EINVAL;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		return -EINVAL;
	}
	rxq->enable_events = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	/* Clear pending events */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
	return rc;
}

static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq;

	if (queue == NULL)
		return;
	txq = queue;
	dev = txq->priv->dev;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, tx_queue_release)
			(ETH(sdev)->data->tx_queues[txq->qid]);
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
}

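/*
 * Set up a Tx queue on the fail-safe port and mirror it on every active
 * sub-device, following the same scheme as for Rx queues.
 */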
static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	return 0;
free_txq:
	fs_tx_queue_release(txq);
	return ret;
}

static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_enable(PORT_ID(sdev));
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_disable(PORT_ID(sdev));
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_enable(PORT_ID(sdev));
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_disable(PORT_ID(sdev));
}

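/*
 * Refresh the link status of every active sub-device and report the link
 * of the current Tx sub-device as the link of the fail-safe port.
 */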
static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			return 0;
		}
	}
	return -1;
}

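/*
 * Statistics are the sum of the accumulator (counters of sub-devices that
 * went away) and a fresh snapshot of every active sub-device; if reading
 * a sub-device fails because it was removed, its last snapshot is reused.
 */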
static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	return 0;
}

static void
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		rte_eth_stats_reset(PORT_ID(sdev));
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		DEBUG("No probed device, using default infos");
		rte_memcpy(&PRIV(dev)->infos, &default_infos,
			   sizeof(default_infos));
	} else {
		uint64_t rx_offload_capa;
		uint64_t rxq_offload_capa;
		uint64_t rss_hf_offload_capa;

		rx_offload_capa = default_infos.rx_offload_capa;
		rxq_offload_capa = default_infos.rx_queue_offload_capa;
		rss_hf_offload_capa = default_infos.flow_type_rss_offloads;
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			rte_eth_dev_info_get(PORT_ID(sdev),
					     &PRIV(dev)->infos);
			rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
			rxq_offload_capa &=
					PRIV(dev)->infos.rx_queue_offload_capa;
			rss_hf_offload_capa &=
					PRIV(dev)->infos.flow_type_rss_offloads;
		}
		sdev = TX_SUBDEV(dev);
		rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
		PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
		PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa;
		PRIV(dev)->infos.flow_type_rss_offloads = rss_hf_offload_capa;
		PRIV(dev)->infos.tx_offload_capa &=
					default_infos.tx_offload_capa;
		PRIV(dev)->infos.tx_queue_offload_capa &=
					default_infos.tx_queue_offload_capa;
	}
	rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}

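/* Report the packet types supported by the highest-priority sub-device. */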
static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;
	const uint32_t *ret;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return NULL;
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) {
		return NULL;
	}
	/*
	 * The API does not permit to do a clean AND of all ptypes,
	 * It is also incomplete by design and we do not really care
	 * to have a best possible value in this context.
	 * We just return the ptypes of the device of highest
	 * priority, usually the PREFERRED device.
	 */
	ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev);
	return ret;
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	int ret;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return 0;
	if (SUBOPS(sdev, flow_ctrl_get) == NULL) {
		return -ENOTSUP;
	}
	ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
	return ret;
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
					    &dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
}

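/*
 * Add a MAC address on every active sub-device and record its VMDq pool in
 * the local pool array, so that it can be re-applied to sub-devices that
 * are plugged in later.
 */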
static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	return 0;
}

static void
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
		ret = fs_err(sdev, ret);
		if (ret)
			ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d",
			      i, ret);
	}
}

static int
fs_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_rss_hash_update"
			      " failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	return 0;
}

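/*
 * Generic filter control: expose the fail-safe flow ops to the generic
 * flow API and forward every other filter request to the sub-devices.
 */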
static int
fs_filter_ctrl(struct rte_eth_dev *dev,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
		ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

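/* Operations exported by the fail-safe PMD to the ethdev layer. */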
const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = fs_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.rx_queue_intr_enable = fs_rx_intr_enable,
	.rx_queue_intr_disable = fs_rx_intr_disable,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.rss_hash_update = fs_rss_hash_update,
	.filter_ctrl = fs_filter_ctrl,
};