/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox.
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>

#include "failsafe_private.h"
static struct rte_eth_dev_info default_infos = {
	/* Max possible number of elements */
	.max_rx_pktlen = UINT32_MAX,
	.max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_mac_addrs = FAILSAFE_MAX_ETHADDR,
	.max_hash_mac_addrs = UINT32_MAX,
	.max_vfs = UINT16_MAX,
	.max_vmdq_pools = UINT16_MAX,
	.rx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	.tx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	.rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_CRC_STRIP |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.rx_queue_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_CRC_STRIP |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.tx_offload_capa = 0x0,
	.flow_type_rss_offloads = 0x0,
};
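/*
 * Configure the fail-safe port: validate the requested Tx offloads against
 * the advertised capabilities, propagate the configuration to every probed
 * sub-device, and register RMV/LSC event callbacks on the sub-devices that
 * support them.
 */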
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint64_t supp_tx_offloads;
	uint64_t tx_offloads;
	uint8_t i;
	int ret;

	supp_tx_offloads = PRIV(dev)->infos.tx_offload_capa;
	tx_offloads = dev->data->dev_conf.txmode.offloads;
	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
		rte_errno = ENOTSUP;
		ERROR("Some Tx offloads are not supported, "
		      "requested 0x%" PRIx64 " supported 0x%" PRIx64,
		      tx_offloads, supp_tx_offloads);
		return -rte_errno;
	}
	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED)
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			return ret;
		}
		if (rmv_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	return 0;
}
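/*
 * Install the Rx interrupt proxy, then start each configured sub-device.
 * Transient sub-device errors are filtered out by fs_err(); any other
 * error aborts the start sequence.
 */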
static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = failsafe_rx_intr_install(dev);
	if (ret)
		return ret;
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			return ret;
		}
		ret = failsafe_rx_intr_install_subdevice(sdev);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			rte_eth_dev_stop(PORT_ID(sdev));
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED)
		PRIV(dev)->state = DEV_STARTED;
	fs_switch_dev(dev, NULL);
	return 0;
}
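/* Stop all started sub-devices and remove their Rx interrupt proxying. */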
static void
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		rte_eth_dev_stop(PORT_ID(sdev));
		failsafe_rx_intr_uninstall_subdevice(sdev);
		sdev->state = DEV_STARTED - 1;
	}
	failsafe_rx_intr_uninstall(dev);
}
static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
static void fs_dev_free_queues(struct rte_eth_dev *dev);

static void
fs_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	fs_dev_free_queues(dev);
}
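/*
 * A requested Rx offload must be supported at queue or port level; in
 * addition, any offload the port is capable of must be requested
 * identically on the queue and on the port, hence the XOR check below.
 */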
static bool
fs_rxq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
{
	uint64_t port_offloads;
	uint64_t queue_supp_offloads;
	uint64_t port_supp_offloads;

	port_offloads = dev->data->dev_conf.rxmode.offloads;
	queue_supp_offloads = PRIV(dev)->infos.rx_queue_offload_capa;
	port_supp_offloads = PRIV(dev)->infos.rx_offload_capa;
	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
	     offloads)
		return false;
	/* Verify we have no conflict with port offloads */
	if ((port_offloads ^ offloads) & port_supp_offloads)
		return false;
	return true;
}
static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq;

	if (queue == NULL)
		return;
	rxq = queue;
	if (rxq->event_fd > 0)
		close(rxq->event_fd);
	dev = rxq->priv->dev;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, rx_queue_release)
			(ETH(sdev)->data->rx_queues[rxq->qid]);
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
}
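/*
 * Create the fail-safe Rx queue, allocate an eventfd for the Rx interrupt
 * proxy, then replicate the queue setup on every active sub-device.
 */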
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	/*
	 * FIXME: Add a proper interface in rte_eal_interrupts for
	 * allocating eventfd as an interrupt vector.
	 * For the time being, fake as if we are using MSIX interrupts,
	 * this will cause rte_intr_efd_enable to allocate an eventfd for us.
	 */
	struct rte_intr_handle intr_handle = {
		.type = RTE_INTR_HANDLE_VFIO_MSIX,
		.efds = { -1, },
	};
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	/* Verify application offloads are valid for our port and queue. */
	if (fs_rxq_offloads_valid(dev, rx_conf->offloads) == false) {
		rte_errno = ENOTSUP;
		ERROR("Rx queue offloads 0x%" PRIx64
		      " don't match port offloads 0x%" PRIx64
		      " or supported offloads 0x%" PRIx64,
		      rx_conf->offloads,
		      dev->data->dev_conf.rxmode.offloads,
		      PRIV(dev)->infos.rx_offload_capa |
		      PRIV(dev)->infos.rx_queue_offload_capa);
		return -rte_errno;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
	ret = rte_intr_efd_enable(&intr_handle, 1);
	if (ret < 0)
		return ret;
	rxq->event_fd = intr_handle.efds[0];
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	return ret;
}
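/*
 * Rx interrupt control fans out to every active sub-device; all
 * sub-devices are visited and the last sub-device error, if any, is
 * reported through rte_errno.
 */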
static int
fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int rc = 0;

	if (idx >= dev->data->nb_rx_queues) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	/* Fail if proxy service is not running. */
	if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
		ERROR("failsafe interrupt services are not running");
		rte_errno = EAGAIN;
		return -rte_errno;
	}
	rxq->enable_events = 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	if (rc)
		rte_errno = -rc;
	return rc;
}
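/*
 * Same fan-out as fs_rx_intr_enable(), with an additional drain of the
 * queue eventfd so that events posted before the disable do not cause
 * spurious wakeups later.
 */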
static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint64_t u64;
	uint8_t i;
	int rc = 0;
	int ret;

	if (idx >= dev->data->nb_rx_queues) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq->enable_events = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	/* Clear pending events */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
	if (rc)
		rte_errno = -rc;
	return rc;
}
static bool
fs_txq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
{
	uint64_t port_offloads;
	uint64_t queue_supp_offloads;
	uint64_t port_supp_offloads;

	port_offloads = dev->data->dev_conf.txmode.offloads;
	queue_supp_offloads = PRIV(dev)->infos.tx_queue_offload_capa;
	port_supp_offloads = PRIV(dev)->infos.tx_offload_capa;
	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
	     offloads)
		return false;
	/* Verify we have no conflict with port offloads */
	if ((port_offloads ^ offloads) & port_supp_offloads)
		return false;
	return true;
}
static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq;

	if (queue == NULL)
		return;
	txq = queue;
	dev = txq->priv->dev;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, tx_queue_release)
			(ETH(sdev)->data->tx_queues[txq->qid]);
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
}
static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	/*
	 * Don't verify queue offloads for applications which
	 * use the old API.
	 */
	if (tx_conf != NULL &&
	    (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
	    fs_txq_offloads_valid(dev, tx_conf->offloads) == false) {
		rte_errno = ENOTSUP;
		ERROR("Tx queue offloads 0x%" PRIx64
		      " don't match port offloads 0x%" PRIx64
		      " or supported offloads 0x%" PRIx64,
		      tx_conf->offloads,
		      dev->data->dev_conf.txmode.offloads,
		      PRIV(dev)->infos.tx_offload_capa |
		      PRIV(dev)->infos.tx_queue_offload_capa);
		return -rte_errno;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	return 0;
free_txq:
	fs_tx_queue_release(txq);
	return ret;
}
static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}
static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_enable(PORT_ID(sdev));
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_disable(PORT_ID(sdev));
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_enable(PORT_ID(sdev));
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_disable(PORT_ID(sdev));
}
static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			return 0;
		}
	}
	return -1;
}
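/*
 * Start from the accumulator of stats inherited from removed sub-devices,
 * then add a fresh snapshot from each active sub-device. If reading a
 * snapshot fails transiently, the previous one is reused so counters do
 * not go backwards.
 */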
static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	return 0;
}
static void
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		rte_eth_stats_reset(PORT_ID(sdev));
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
}
/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		DEBUG("No probed device, using default infos");
		rte_memcpy(&PRIV(dev)->infos, &default_infos,
			   sizeof(default_infos));
	} else {
		uint64_t rx_offload_capa;
		uint64_t rxq_offload_capa;

		rx_offload_capa = default_infos.rx_offload_capa;
		rxq_offload_capa = default_infos.rx_queue_offload_capa;
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			rte_eth_dev_info_get(PORT_ID(sdev),
					&PRIV(dev)->infos);
			rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
			rxq_offload_capa &=
					PRIV(dev)->infos.rx_queue_offload_capa;
		}
		sdev = TX_SUBDEV(dev);
		rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
		PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
		PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa;
		PRIV(dev)->infos.tx_offload_capa &=
					default_infos.tx_offload_capa;
		PRIV(dev)->infos.tx_queue_offload_capa &=
					default_infos.tx_queue_offload_capa;
		PRIV(dev)->infos.flow_type_rss_offloads &=
					default_infos.flow_type_rss_offloads;
	}
	rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}
static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return NULL;
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL)
		return NULL;
	/*
	 * The API does not allow a clean AND of all ptypes. It is also
	 * incomplete by design, and we do not really care to have the best
	 * possible value in this context.
	 * We just return the ptypes of the device of highest
	 * priority, usually the PREFERRED device.
	 */
	return SUBOPS(sdev, dev_supported_ptypes_get)(edev);
}
static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	return 0;
}
static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return 0;
	if (SUBOPS(sdev, flow_ctrl_get) == NULL)
		return -ENOTSUP;
	return SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
}
static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
}
static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	return 0;
}
static void
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
}
static int
fs_filter_ctrl(struct rte_eth_dev *dev,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
		ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = fs_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.rx_queue_intr_enable = fs_rx_intr_enable,
	.rx_queue_intr_disable = fs_rx_intr_disable,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.filter_ctrl = fs_filter_ctrl,
};