/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>

#include "failsafe_private.h"

static struct rte_eth_dev_info default_infos = {
	/* Max possible number of elements */
	.max_rx_pktlen = UINT32_MAX,
	.max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_mac_addrs = FAILSAFE_MAX_ETHADDR,
	.max_hash_mac_addrs = UINT32_MAX,
	.max_vfs = UINT16_MAX,
	.max_vmdq_pools = UINT16_MAX,
	.rx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	.tx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	.rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_CRC_STRIP |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.rx_queue_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_CRC_STRIP |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.tx_offload_capa = 0x0,
	.flow_type_rss_offloads = 0x0,
};

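/*
 * Validate the requested Tx offloads against the capabilities gathered
 * in fs_dev_infos_get(), then propagate the port configuration to every
 * probed sub-device. RMV and LSC callbacks are registered only on
 * sub-devices advertising the corresponding interrupt, so hot-plug and
 * link-state reporting keep working on capable sub-devices even when
 * others lack the feature.
 */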
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint64_t supp_tx_offloads;
	uint64_t tx_offloads;
	uint8_t i;
	int ret;

	supp_tx_offloads = PRIV(dev)->infos.tx_offload_capa;
	tx_offloads = dev->data->dev_conf.txmode.offloads;
	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
		rte_errno = ENOTSUP;
		ERROR("Some Tx offloads are not supported, "
		      "requested 0x%" PRIx64 " supported 0x%" PRIx64,
		      tx_offloads, supp_tx_offloads);
		return -rte_errno;
	}
	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED)
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		sdev->remove = 0;
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			return ret;
		}
		if (rmv_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
						RTE_ETH_EVENT_INTR_LSC,
						failsafe_eth_lsc_event_callback,
						dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	return 0;
}

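/*
 * Install Rx interrupt support first so that queue interrupts can be
 * serviced as soon as sub-devices come up, then start every active
 * sub-device and let fs_switch_dev() re-elect the sub-device that
 * carries the data path.
 */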
static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = failsafe_rx_intr_install(dev);
	if (ret)
		return ret;
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED)
		PRIV(dev)->state = DEV_STARTED;
	fs_switch_dev(dev, NULL);
	return 0;
}

static void
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		rte_eth_dev_stop(PORT_ID(sdev));
		sdev->state = DEV_STARTED - 1;
	}
	failsafe_rx_intr_uninstall(dev);
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static void fs_dev_free_queues(struct rte_eth_dev *dev);

static void
fs_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	fs_dev_free_queues(dev);
}

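/*
 * A queue offload request is accepted when:
 * 1. every requested bit is supported at queue or port level, and
 * 2. it does not toggle any port-level offload away from the current
 *    port configuration: (port_offloads ^ offloads) must not intersect
 *    port_supp_offloads.
 * Illustrative example (hypothetical values): with the port configured
 * for VLAN_STRIP and port-level support for VLAN_STRIP | SCATTER, a
 * queue requesting SCATTER alone passes check 1 but fails check 2,
 * since it would implicitly disable the port-wide VLAN_STRIP on that
 * queue.
 */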
static bool
fs_rxq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
{
	uint64_t port_offloads;
	uint64_t queue_supp_offloads;
	uint64_t port_supp_offloads;

	port_offloads = dev->data->dev_conf.rxmode.offloads;
	queue_supp_offloads = PRIV(dev)->infos.rx_queue_offload_capa;
	port_supp_offloads = PRIV(dev)->infos.rx_offload_capa;
	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
	     offloads)
		return false;
	/* Verify we have no conflict with port offloads */
	if ((port_offloads ^ offloads) & port_supp_offloads)
		return false;
	return true;
}

static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq;

	if (queue == NULL)
		return;
	rxq = queue;
	if (rxq->event_fd > 0)
		close(rxq->event_fd);
	dev = rxq->priv->dev;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, rx_queue_release)
			(ETH(sdev)->data->rx_queues[rxq->qid]);
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
}

static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	/*
	 * FIXME: Add a proper interface in rte_eal_interrupts for
	 * allocating eventfd as an interrupt vector.
	 * For the time being, fake as if we are using MSIX interrupts,
	 * this will cause rte_intr_efd_enable to allocate an eventfd for us.
	 */
	struct rte_intr_handle intr_handle = {
		.type = RTE_INTR_HANDLE_VFIO_MSIX,
		.efds = { -1, },
	};
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	/* Verify application offloads are valid for our port and queue. */
	if (fs_rxq_offloads_valid(dev, rx_conf->offloads) == false) {
		rte_errno = ENOTSUP;
		ERROR("Rx queue offloads 0x%" PRIx64
		      " don't match port offloads 0x%" PRIx64
		      " or supported offloads 0x%" PRIx64,
		      rx_conf->offloads,
		      dev->data->dev_conf.rxmode.offloads,
		      PRIV(dev)->infos.rx_offload_capa |
		      PRIV(dev)->infos.rx_queue_offload_capa);
		return -rte_errno;
	}
	/* The trailing refcnt[] array holds one counter per sub-device. */
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
	ret = rte_intr_efd_enable(&intr_handle, 1);
	if (ret < 0)
		return ret;
	rxq->event_fd = intr_handle.efds[0];
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	return ret;
}

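/*
 * Rx queue interrupts are emulated through the per-queue eventfd
 * allocated in fs_rx_queue_setup() above. The handlers below only flip
 * rxq->enable_events and, on disable, drain any events still pending in
 * the eventfd; the signalling itself is handled outside these
 * callbacks.
 */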
static int
fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;

	if (idx >= dev->data->nb_rx_queues) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq->enable_events = 1;
	return 0;
}

static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	uint64_t u64;

	if (idx >= dev->data->nb_rx_queues) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq->enable_events = 0;
	/* Clear pending events */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
	return 0;
}

static bool
fs_txq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
{
	uint64_t port_offloads;
	uint64_t queue_supp_offloads;
	uint64_t port_supp_offloads;

	port_offloads = dev->data->dev_conf.txmode.offloads;
	queue_supp_offloads = PRIV(dev)->infos.tx_queue_offload_capa;
	port_supp_offloads = PRIV(dev)->infos.tx_offload_capa;
	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
	     offloads)
		return false;
	/* Verify we have no conflict with port offloads */
	if ((port_offloads ^ offloads) & port_supp_offloads)
		return false;
	return true;
}

static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq;

	if (queue == NULL)
		return;
	txq = queue;
	dev = txq->priv->dev;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, tx_queue_release)
			(ETH(sdev)->data->tx_queues[txq->qid]);
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
}

static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	/*
	 * Don't verify queue offloads for applications which
	 * use the old API.
	 */
	if (tx_conf != NULL &&
	    (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
	    fs_txq_offloads_valid(dev, tx_conf->offloads) == false) {
		rte_errno = ENOTSUP;
		ERROR("Tx queue offloads 0x%" PRIx64
		      " don't match port offloads 0x%" PRIx64
		      " or supported offloads 0x%" PRIx64,
		      tx_conf->offloads,
		      dev->data->dev_conf.txmode.offloads,
		      PRIV(dev)->infos.tx_offload_capa |
		      PRIV(dev)->infos.tx_queue_offload_capa);
		return -rte_errno;
	}
	/* The trailing refcnt[] array holds one counter per sub-device. */
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	return 0;
free_txq:
	fs_tx_queue_release(txq);
	return ret;
}

static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_enable(PORT_ID(sdev));
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_disable(PORT_ID(sdev));
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_enable(PORT_ID(sdev));
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_disable(PORT_ID(sdev));
}

static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			return 0;
		}
	}
	return -1;
}

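/*
 * Statistics are maintained as a running accumulator plus a per
 * sub-device snapshot of the last successful read. fs_stats_get()
 * starts from the accumulator, refreshes each active snapshot, and on a
 * recoverable failure restores the snapshot from a local backup, so the
 * reported totals do not go backwards when a sub-device read fails.
 */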
static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	return 0;
}

static void
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		rte_eth_stats_reset(PORT_ID(sdev));
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 */
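/*
 * For instance (illustrative values): if sub_device 0 reports
 * rx_offload_capa = VLAN_STRIP | TCP_LRO while sub_device 1 reports
 * rx_offload_capa = VLAN_STRIP | SCATTER, the fail-safe port reports
 * VLAN_STRIP only, the sole Rx capability that every sub_device and the
 * default capability set above can honor.
 */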
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		DEBUG("No probed device, using default infos");
		rte_memcpy(&PRIV(dev)->infos, &default_infos,
			   sizeof(default_infos));
	} else {
		uint64_t rx_offload_capa;
		uint64_t rxq_offload_capa;

		rx_offload_capa = default_infos.rx_offload_capa;
		rxq_offload_capa = default_infos.rx_queue_offload_capa;
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			rte_eth_dev_info_get(PORT_ID(sdev),
					&PRIV(dev)->infos);
			rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
			rxq_offload_capa &=
					PRIV(dev)->infos.rx_queue_offload_capa;
		}
		sdev = TX_SUBDEV(dev);
		rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
		PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
		PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa;
		PRIV(dev)->infos.tx_offload_capa &=
					default_infos.tx_offload_capa;
		PRIV(dev)->infos.tx_queue_offload_capa &=
					default_infos.tx_queue_offload_capa;
		PRIV(dev)->infos.flow_type_rss_offloads &=
					default_infos.flow_type_rss_offloads;
	}
	rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return NULL;
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL)
		return NULL;
	/*
	 * The API does not permit a clean AND of all ptypes; it is also
	 * incomplete by design, and we do not really care to have the
	 * best possible value in this context.
	 * We just return the ptypes of the device of highest priority,
	 * usually the PREFERRED device.
	 */
	return SUBOPS(sdev, dev_supported_ptypes_get)(edev);
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return 0;
	if (SUBOPS(sdev, flow_ctrl_get) == NULL)
		return -ENOTSUP;
	return SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
}

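/*
 * mac_addr_pool[] records, per MAC index, the VMDq pool argument passed
 * by the application; fs_mac_addr_remove() above clears the slot by
 * writing 0. Keeping this mirror lets the fail-safe replay MAC
 * additions on sub-devices that are plugged in later (handled outside
 * this file).
 */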
static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	return 0;
}

static void
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
}

static int
fs_filter_ctrl(struct rte_eth_dev *dev,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
		ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = fs_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.rx_queue_intr_enable = fs_rx_intr_enable,
	.rx_queue_intr_disable = fs_rx_intr_disable,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.filter_ctrl = fs_filter_ctrl,
};