/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdbool.h>
#include <stdint.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>

#include "failsafe_private.h"

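/*
 * Info advertised while no sub-device has been probed: maximal numerables
 * and the set of offload capabilities the fail-safe PMD itself can verify.
 */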
static struct rte_eth_dev_info default_infos = {
	/* Max possible number of elements */
	.max_rx_pktlen = UINT32_MAX,
	.max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_mac_addrs = FAILSAFE_MAX_ETHADDR,
	.max_hash_mac_addrs = UINT32_MAX,
	.max_vfs = UINT16_MAX,
	.max_vmdq_pools = UINT16_MAX,
	.rx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	.tx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	.rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_CRC_STRIP |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.rx_queue_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_CRC_STRIP |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.tx_offload_capa = 0x0,
	.flow_type_rss_offloads = 0x0,
};

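/*
 * Validate the requested Tx offloads against the advertised capabilities,
 * then apply the fail-safe port configuration to every probed sub-device,
 * registering RMV/LSC callbacks when a sub-device supports those interrupts.
 */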
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint64_t supp_tx_offloads;
	uint64_t tx_offloads;
	uint8_t i;
	int ret;

	supp_tx_offloads = PRIV(dev)->infos.tx_offload_capa;
	tx_offloads = dev->data->dev_conf.txmode.offloads;
	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
		rte_errno = ENOTSUP;
		ERROR("Some Tx offloads are not supported, "
		      "requested 0x%" PRIx64 " supported 0x%" PRIx64,
		      tx_offloads, supp_tx_offloads);
		return -rte_errno;
	}
	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED)
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		sdev->remove = 0;
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			return ret;
		}
		if (rmv_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
						RTE_ETH_EVENT_INTR_LSC,
						failsafe_eth_lsc_event_callback,
						dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	return 0;
}

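/* Start all configured sub-devices, then switch the data path to the best
 * available one. */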
static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED)
		PRIV(dev)->state = DEV_STARTED;
	fs_switch_dev(dev, NULL);
	return 0;
}

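/* Stop every started sub-device and roll its state back to configured. */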
static void
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		rte_eth_dev_stop(PORT_ID(sdev));
		sdev->state = DEV_STARTED - 1;
	}
}

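/* Link up/down requests are forwarded to every active sub-device. */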
static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static void fs_dev_free_queues(struct rte_eth_dev *dev);
static void
fs_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	fs_dev_free_queues(dev);
}

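/*
 * An Rx queue offload set is valid if every requested flag is advertised at
 * port or queue level, and if none of the port-level flags it touches
 * contradicts the port configuration (the XOR exposes any such mismatch).
 */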
static bool
fs_rxq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
{
	uint64_t port_offloads;
	uint64_t queue_supp_offloads;
	uint64_t port_supp_offloads;

	port_offloads = dev->data->dev_conf.rxmode.offloads;
	queue_supp_offloads = PRIV(dev)->infos.rx_queue_offload_capa;
	port_supp_offloads = PRIV(dev)->infos.rx_offload_capa;
	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
	     offloads)
		return false;
	/* Verify we have no conflict with port offloads */
	if ((port_offloads ^ offloads) & port_supp_offloads)
		return false;
	return true;
}

static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq;

	if (queue == NULL)
		return;
	rxq = queue;
	dev = rxq->priv->dev;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, rx_queue_release)
			(ETH(sdev)->data->rx_queues[rxq->qid]);
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
}

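/*
 * Release any queue previously installed at this index, check the requested
 * offloads, then allocate a fail-safe rxq (one refcount slot per sub-device)
 * and replay the setup on each active sub-device.
 */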
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	/* Verify application offloads are valid for our port and queue. */
	if (fs_rxq_offloads_valid(dev, rx_conf->offloads) == false) {
		rte_errno = ENOTSUP;
		ERROR("Rx queue offloads 0x%" PRIx64
		      " don't match port offloads 0x%" PRIx64
		      " or supported offloads 0x%" PRIx64,
		      rx_conf->offloads,
		      dev->data->dev_conf.rxmode.offloads,
		      PRIV(dev)->infos.rx_offload_capa |
		      PRIV(dev)->infos.rx_queue_offload_capa);
		return -rte_errno;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	return ret;
}

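/* Tx counterpart of fs_rxq_offloads_valid, checked against txmode offloads. */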
static bool
fs_txq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
{
	uint64_t port_offloads;
	uint64_t queue_supp_offloads;
	uint64_t port_supp_offloads;

	port_offloads = dev->data->dev_conf.txmode.offloads;
	queue_supp_offloads = PRIV(dev)->infos.tx_queue_offload_capa;
	port_supp_offloads = PRIV(dev)->infos.tx_offload_capa;
	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
	     offloads)
		return false;
	/* Verify we have no conflict with port offloads */
	if ((port_offloads ^ offloads) & port_supp_offloads)
		return false;
	return true;
}

static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq;

	if (queue == NULL)
		return;
	txq = queue;
	dev = txq->priv->dev;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, tx_queue_release)
			(ETH(sdev)->data->tx_queues[txq->qid]);
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
}

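/*
 * Tx counterpart of fs_rx_queue_setup; offloads are only validated for
 * applications using the new API (ETH_TXQ_FLAGS_IGNORE set).
 */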
static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	/*
	 * Don't verify queue offloads for applications which
	 * use the old API.
	 */
	if (tx_conf != NULL &&
	    (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
	    fs_txq_offloads_valid(dev, tx_conf->offloads) == false) {
		rte_errno = ENOTSUP;
		ERROR("Tx queue offloads 0x%" PRIx64
		      " don't match port offloads 0x%" PRIx64
		      " or supported offloads 0x%" PRIx64,
		      tx_conf->offloads,
		      dev->data->dev_conf.txmode.offloads,
		      PRIV(dev)->infos.tx_offload_capa |
		      PRIV(dev)->infos.tx_queue_offload_capa);
		return -rte_errno;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	return 0;
free_txq:
	fs_tx_queue_release(txq);
	return ret;
}

static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

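/* The mode toggles below are simply broadcast to all active sub-devices. */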
static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_enable(PORT_ID(sdev));
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_disable(PORT_ID(sdev));
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_enable(PORT_ID(sdev));
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_disable(PORT_ID(sdev));
}

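/*
 * Refresh every sub-device link, then report the Tx sub-device link as the
 * fail-safe port link: 0 is returned when it changed, -1 otherwise.
 */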
static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			return 0;
		}
	}
	return -1;
}

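/*
 * Stats are the accumulator (counters of sub-devices removed so far) plus a
 * snapshot of each active sub-device. On a non-fatal read failure the
 * previous snapshot is restored so the last known counters still count.
 */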
static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	return 0;
}

static void
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		rte_eth_stats_reset(PORT_ID(sdev));
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
		  struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		DEBUG("No probed device, using default infos");
		rte_memcpy(&PRIV(dev)->infos, &default_infos,
			   sizeof(default_infos));
	} else {
		uint64_t rx_offload_capa;
		uint64_t rxq_offload_capa;

		rx_offload_capa = default_infos.rx_offload_capa;
		rxq_offload_capa = default_infos.rx_queue_offload_capa;
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			rte_eth_dev_info_get(PORT_ID(sdev),
					&PRIV(dev)->infos);
			rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
			rxq_offload_capa &=
					PRIV(dev)->infos.rx_queue_offload_capa;
		}
		sdev = TX_SUBDEV(dev);
		rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
		PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
		PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa;
		PRIV(dev)->infos.tx_offload_capa &=
					default_infos.tx_offload_capa;
		PRIV(dev)->infos.tx_queue_offload_capa &=
					default_infos.tx_queue_offload_capa;
		PRIV(dev)->infos.flow_type_rss_offloads &=
					default_infos.flow_type_rss_offloads;
	}
	rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return NULL;
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL)
		return NULL;
	/*
	 * The API does not permit a clean AND of all ptypes. It is also
	 * incomplete by design, and we do not really care to have the best
	 * possible value in this context. We just return the ptypes of the
	 * device of highest priority, usually the PREFERRED device.
	 */
	return SUBOPS(sdev, dev_supported_ptypes_get)(edev);
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return 0;
	if (SUBOPS(sdev, flow_ctrl_get) == NULL)
		return -ENOTSUP;
	return SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
}

static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	return 0;
}

static void
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
}

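/*
 * RTE_ETH_FILTER_GENERIC + RTE_ETH_FILTER_GET hands back the fail-safe
 * rte_flow ops; any other filter request is forwarded to each active
 * sub-device.
 */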
static int
fs_filter_ctrl(struct rte_eth_dev *dev,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
		ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = fs_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.filter_ctrl = fs_filter_ctrl,
};