/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>

#include "failsafe_private.h"
static struct rte_eth_dev_info default_infos = {
	/* Max possible number of elements */
	.max_rx_pktlen = UINT32_MAX,
	.max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_mac_addrs = FAILSAFE_MAX_ETHADDR,
	.max_hash_mac_addrs = UINT32_MAX,
	.max_vfs = UINT16_MAX,
	.max_vmdq_pools = UINT16_MAX,
	.rx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	.tx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	.rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_CRC_STRIP |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.rx_queue_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_CRC_STRIP |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.tx_offload_capa = 0x0,
	.flow_type_rss_offloads = 0x0,
};
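
/*
 * Configure every probed sub-device with the fail-safe port configuration,
 * enabling RMV/LSC interrupts only on sub-devices that support them and
 * registering the corresponding event callbacks.
 */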
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint64_t supp_tx_offloads;
	uint64_t tx_offloads;
	uint8_t i;
	int ret;

	supp_tx_offloads = PRIV(dev)->infos.tx_offload_capa;
	tx_offloads = dev->data->dev_conf.txmode.offloads;
	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
		rte_errno = ENOTSUP;
		ERROR("Some Tx offloads are not supported, "
		      "requested 0x%" PRIx64 " supported 0x%" PRIx64,
		      tx_offloads, supp_tx_offloads);
		return -rte_errno;
	}
	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED)
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		sdev->remove = 0;
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			return ret;
		}
		if (rmv_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	return 0;
}
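
/*
 * Start all configured sub-devices, install their Rx interrupt proxies,
 * and elect the device serving the data path through fs_switch_dev().
 */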
static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = failsafe_rx_intr_install(dev);
	if (ret)
		return ret;
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			return ret;
		}
		ret = failsafe_rx_intr_install_subdevice(sdev);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			rte_eth_dev_stop(PORT_ID(sdev));
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED)
		PRIV(dev)->state = DEV_STARTED;
	fs_switch_dev(dev, NULL);
	return 0;
}
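
/* Stop every started sub-device and uninstall the Rx interrupt proxies. */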
static void
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		rte_eth_dev_stop(PORT_ID(sdev));
		failsafe_rx_intr_uninstall_subdevice(sdev);
		sdev->state = DEV_STARTED - 1;
	}
	failsafe_rx_intr_uninstall(dev);
}
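
/* Request link up on every active sub-device. */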
static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
static void fs_dev_free_queues(struct rte_eth_dev *dev);
static void
fs_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	fs_dev_free_queues(dev);
}
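
/*
 * Check that the requested Rx queue offloads are a subset of the
 * advertised capabilities and do not conflict with the port-level
 * offloads already configured.
 */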
static bool
fs_rxq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
{
	uint64_t port_offloads;
	uint64_t queue_supp_offloads;
	uint64_t port_supp_offloads;

	port_offloads = dev->data->dev_conf.rxmode.offloads;
	queue_supp_offloads = PRIV(dev)->infos.rx_queue_offload_capa;
	port_supp_offloads = PRIV(dev)->infos.rx_offload_capa;
	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
	     offloads)
		return false;
	/* Verify we have no conflict with port offloads */
	if ((port_offloads ^ offloads) & port_supp_offloads)
		return false;
	return true;
}
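
/* Release an Rx queue on the fail-safe port and on every active sub-device. */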
static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq = queue;

	if (rxq == NULL)
		return;
	if (rxq->event_fd > 0)
		close(rxq->event_fd);
	dev = rxq->priv->dev;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, rx_queue_release)
			(ETH(sdev)->data->rx_queues[rxq->qid]);
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
}
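
/*
 * Create the fail-safe Rx queue, allocate the eventfd used to proxy Rx
 * interrupts, then replicate the queue setup on every active sub-device.
 */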
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	/*
	 * FIXME: Add a proper interface in rte_eal_interrupts for
	 * allocating eventfd as an interrupt vector.
	 * For the time being, fake as if we are using MSIX interrupts,
	 * this will cause rte_intr_efd_enable to allocate an eventfd for us.
	 */
	struct rte_intr_handle intr_handle = {
		.type = RTE_INTR_HANDLE_VFIO_MSIX,
		.efds = { -1, },
	};
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	/* Verify application offloads are valid for our port and queue. */
	if (fs_rxq_offloads_valid(dev, rx_conf->offloads) == false) {
		rte_errno = ENOTSUP;
		ERROR("Rx queue offloads 0x%" PRIx64
		      " don't match port offloads 0x%" PRIx64
		      " or supported offloads 0x%" PRIx64,
		      rx_conf->offloads,
		      dev->data->dev_conf.rxmode.offloads,
		      PRIV(dev)->infos.rx_offload_capa |
		      PRIV(dev)->infos.rx_queue_offload_capa);
		return -rte_errno;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
	ret = rte_intr_efd_enable(&intr_handle, 1);
	if (ret < 0)
		return ret;
	rxq->event_fd = intr_handle.efds[0];
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	return ret;
}
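
/* Enable Rx interrupts for a queue on every active sub-device. */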
static int
fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int rc = 0;

	if (idx >= dev->data->nb_rx_queues) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq->enable_events = 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	if (rc)
		rte_errno = -rc;
	return rc;
}
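
/*
 * Disable Rx interrupts for a queue on every active sub-device and drain
 * any event still pending on the proxy eventfd.
 */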
static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint64_t u64;
	uint8_t i;
	int rc = 0;
	int ret;

	if (idx >= dev->data->nb_rx_queues) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq->enable_events = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	/* Clear pending events */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
	if (rc)
		rte_errno = -rc;
	return rc;
}
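
/* Tx counterpart of fs_rxq_offloads_valid(). */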
static bool
fs_txq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
{
	uint64_t port_offloads;
	uint64_t queue_supp_offloads;
	uint64_t port_supp_offloads;

	port_offloads = dev->data->dev_conf.txmode.offloads;
	queue_supp_offloads = PRIV(dev)->infos.tx_queue_offload_capa;
	port_supp_offloads = PRIV(dev)->infos.tx_offload_capa;
	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
	     offloads)
		return false;
	/* Verify we have no conflict with port offloads */
	if ((port_offloads ^ offloads) & port_supp_offloads)
		return false;
	return true;
}
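
/* Release a Tx queue on the fail-safe port and on every active sub-device. */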
static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq = queue;

	if (txq == NULL)
		return;
	dev = txq->priv->dev;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, tx_queue_release)
			(ETH(sdev)->data->tx_queues[txq->qid]);
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
}
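
/*
 * Create the fail-safe Tx queue and replicate the setup on every active
 * sub-device. Offloads are verified only for applications that opted in
 * to the per-queue offload API through ETH_TXQ_FLAGS_IGNORE.
 */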
static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	/*
	 * Don't verify queue offloads for applications which
	 * use the old API.
	 */
	if (tx_conf != NULL &&
	    (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
	    fs_txq_offloads_valid(dev, tx_conf->offloads) == false) {
		rte_errno = ENOTSUP;
		ERROR("Tx queue offloads 0x%" PRIx64
		      " don't match port offloads 0x%" PRIx64
		      " or supported offloads 0x%" PRIx64,
		      tx_conf->offloads,
		      dev->data->dev_conf.txmode.offloads,
		      PRIV(dev)->infos.tx_offload_capa |
		      PRIV(dev)->infos.tx_queue_offload_capa);
		return -rte_errno;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	return 0;
free_txq:
	fs_tx_queue_release(txq);
	return ret;
}
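
/* Release every Rx and Tx queue of the fail-safe port. */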
static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}
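
/*
 * The four mode setters below simply mirror the request to every active
 * sub-device.
 */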
static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_enable(PORT_ID(sdev));
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_disable(PORT_ID(sdev));
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_enable(PORT_ID(sdev));
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_disable(PORT_ID(sdev));
}
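
/*
 * Refresh link status on every active sub-device and report the link of
 * the current Tx sub-device as the fail-safe link.
 */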
static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			return 0;
		}
	}
	return -1;
}
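
/*
 * Sum the statistics of all active sub-devices on top of the accumulator,
 * which holds the counters of sub-devices removed earlier. On a non-fatal
 * error the previous snapshot of the sub-device is used instead.
 */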
static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	return 0;
}
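
/* Reset statistics on every active sub-device, plus the local snapshots
 * and accumulator.
 */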
static void
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		rte_eth_stats_reset(PORT_ID(sdev));
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
}
/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
		  struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		DEBUG("No probed device, using default infos");
		rte_memcpy(&PRIV(dev)->infos, &default_infos,
			   sizeof(default_infos));
	} else {
		uint64_t rx_offload_capa;
		uint64_t rxq_offload_capa;

		rx_offload_capa = default_infos.rx_offload_capa;
		rxq_offload_capa = default_infos.rx_queue_offload_capa;
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			rte_eth_dev_info_get(PORT_ID(sdev),
					&PRIV(dev)->infos);
			rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
			rxq_offload_capa &=
					PRIV(dev)->infos.rx_queue_offload_capa;
		}
		sdev = TX_SUBDEV(dev);
		rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
		PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
		PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa;
		PRIV(dev)->infos.tx_offload_capa &=
					default_infos.tx_offload_capa;
		PRIV(dev)->infos.tx_queue_offload_capa &=
					default_infos.tx_queue_offload_capa;
		PRIV(dev)->infos.flow_type_rss_offloads &=
					default_infos.flow_type_rss_offloads;
	}
	rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}
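
/* Report the packet types supported by the preferred sub-device only. */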
static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return NULL;
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL)
		return NULL;
	/*
	 * The API does not permit to do a clean AND of all ptypes,
	 * It is also incomplete by design and we do not really care
	 * to have a best possible value in this context.
	 * We just return the ptypes of the device of highest
	 * priority, usually the PREFERRED device.
	 */
	return SUBOPS(sdev, dev_supported_ptypes_get)(edev);
}
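
/* Apply the MTU to every active sub-device. */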
static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	return 0;
}
static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return 0;
	if (SUBOPS(sdev, flow_ctrl_get) == NULL)
		return -ENOTSUP;
	return SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
}
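
/* Apply flow control settings to every active sub-device. */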
static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
}
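
/* Add a MAC address on every active sub-device and record it locally. */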
static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	return 0;
}
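
/* Set the default MAC address on every active sub-device. */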
static void
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
}
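
/*
 * Expose the fail-safe flow ops for generic filter queries; every other
 * filter operation is mirrored to the active sub-devices.
 */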
static int
fs_filter_ctrl(struct rte_eth_dev *dev,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
		ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = fs_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.rx_queue_intr_enable = fs_rx_intr_enable,
	.rx_queue_intr_disable = fs_rx_intr_disable,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.filter_ctrl = fs_filter_ctrl,
};
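
/*
 * Usage reminder, illustrative only (the device names below are an
 * assumption, not taken from this file): a fail-safe port wrapping two
 * sub-devices is typically created with EAL devargs such as:
 *
 *   --vdev 'net_failsafe0,dev(0000:00:02.0),dev(net_tap0,iface=fs0)'
 *
 * Each op in the table above then serves the request by delegating to or
 * mirroring across those sub-devices, as described in the per-function
 * comments.
 */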