/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>

#include "failsafe_private.h"
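/*
 * Fallback device information, advertised while no sub-device has been
 * probed; see the fail-safe dev_infos_get rules documented further below.
 */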
static struct rte_eth_dev_info default_infos = {
	/* Max possible number of elements */
	.max_rx_pktlen = UINT32_MAX,
	.max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_mac_addrs = FAILSAFE_MAX_ETHADDR,
	.max_hash_mac_addrs = UINT32_MAX,
	.max_vfs = UINT16_MAX,
	.max_vmdq_pools = UINT16_MAX,
	.rx_desc_lim = {
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	.tx_desc_lim = {
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	.rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.rx_queue_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.tx_offload_capa =
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO,
	.flow_type_rss_offloads =
		ETH_RSS_IP |
		ETH_RSS_UDP |
		ETH_RSS_TCP,
};
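/*
 * Propagate the fail-safe configuration to each sub-device, requesting RMV
 * (removal) and LSC (link state change) interrupts only on sub-devices
 * that advertise support for them.
 */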
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED &&
		    !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE))
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			return ret;
		}
		if (rmv_interrupt && sdev->rmv_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     i);
			else
				sdev->rmv_callback = 1;
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt && sdev->lsc_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     i);
			else
				sdev->lsc_callback = 1;
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	return 0;
}
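/* Mark every Rx queue not flagged as deferred-start as started. */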
static void
fs_set_queues_state_start(struct rte_eth_dev *dev)
{
	struct rxq *rxq;
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (!rxq->info.conf.rx_deferred_start)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
}
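/*
 * Start every active sub-device, install Rx interrupts on each of them,
 * then let fs_switch_dev() pick the sub-device serving the datapath.
 */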
static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = failsafe_rx_intr_install(dev);
	if (ret)
		return ret;
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			return ret;
		}
		ret = failsafe_rx_intr_install_subdevice(sdev);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			rte_eth_dev_stop(PORT_ID(sdev));
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED) {
		PRIV(dev)->state = DEV_STARTED;
		fs_set_queues_state_start(dev);
	}
	fs_switch_dev(dev, NULL);
	return 0;
}
static void
fs_set_queues_state_stop(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
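/* Stop all started sub-devices and uninstall the Rx interrupt proxy. */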
static void
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		rte_eth_dev_stop(PORT_ID(sdev));
		failsafe_rx_intr_uninstall_subdevice(sdev);
		sdev->state = DEV_STARTED - 1;
	}
	failsafe_rx_intr_uninstall(dev);
	fs_set_queues_state_stop(dev);
}
static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
static void fs_dev_free_queues(struct rte_eth_dev *dev);
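/*
 * Close every active sub-device after unregistering its event callbacks,
 * then release the fail-safe port's own queues.
 */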
static void
fs_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		failsafe_eth_dev_unregister_callbacks(sdev);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	fs_dev_free_queues(dev);
}
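/*
 * Stop the given Rx queue on every active sub-device; the queue is
 * reported as stopped as long as at least one sub-device succeeded.
 */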
static int
fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_stop(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}
static int
fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_start(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue start failed for subdevice %d", i);
			fs_rx_queue_stop(dev, rx_queue_id);
			return ret;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
}
static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq;

	if (queue == NULL)
		return;
	rxq = queue;
	dev = rxq->priv->dev;
	if (rxq->event_fd > 0)
		close(rxq->event_fd);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->rx_queues != NULL &&
		    ETH(sdev)->data->rx_queues[rxq->qid] != NULL) {
			SUBOPS(sdev, rx_queue_release)
				(ETH(sdev)->data->rx_queues[rxq->qid]);
		}
	}
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
}
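/*
 * Allocate the fail-safe Rx queue (with one reference counter per
 * sub-device), bind it to an eventfd for interrupt proxying, and set up
 * the matching queue on every active sub-device.
 */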
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	/*
	 * FIXME: Add a proper interface in rte_eal_interrupts for
	 * allocating eventfd as an interrupt vector.
	 * For the time being, fake as if we are using MSIX interrupts,
	 * this will cause rte_intr_efd_enable to allocate an eventfd for us.
	 */
	struct rte_intr_handle intr_handle = {
		.type = RTE_INTR_HANDLE_VFIO_MSIX,
		.efds = { -1, },
	};
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	if (rx_conf->rx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, rx_queue_start) == NULL) {
				ERROR("Rx queue deferred start is not "
					"supported for subdevice %d", i);
				return -EINVAL;
			}
		}
	}
	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
	ret = rte_intr_efd_enable(&intr_handle, 1);
	if (ret < 0)
		return ret;
	rxq->event_fd = intr_handle.efds[0];
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	return ret;
}
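/*
 * Enable Rx interrupt events for a queue: flag the queue so the interrupt
 * proxy forwards events, then enable the interrupt on every active
 * sub-device, reporting the last sub-device error if any.
 */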
static int
fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int rc = 0;

	if (idx >= dev->data->nb_rx_queues) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	/* Fail if proxy service is not running. */
	if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
		ERROR("failsafe interrupt services are not running");
		rte_errno = EAGAIN;
		return -rte_errno;
	}
	rxq->enable_events = 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	return rc;
}
static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint64_t u64;
	uint8_t i;
	int rc = 0;
	int ret;

	if (idx >= dev->data->nb_rx_queues) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq->enable_events = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	/* Clear pending events */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
	return rc;
}
static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq;

	if (queue == NULL)
		return;
	txq = queue;
	dev = txq->priv->dev;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->tx_queues != NULL &&
		    ETH(sdev)->data->tx_queues[txq->qid] != NULL) {
			SUBOPS(sdev, tx_queue_release)
				(ETH(sdev)->data->tx_queues[txq->qid]);
		}
	}
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
}
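/*
 * Allocate the fail-safe Tx queue and set up the matching queue on every
 * active sub-device; deferred start is rejected upfront.
 */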
static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	if (tx_conf->tx_deferred_start) {
		ERROR("Tx queue deferred start is not supported");
		return -EINVAL;
	}
	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	return 0;
free_txq:
	fs_tx_queue_release(txq);
	return ret;
}
static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}
static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_enable(PORT_ID(sdev));
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_disable(PORT_ID(sdev));
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_enable(PORT_ID(sdev));
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_disable(PORT_ID(sdev));
}
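/*
 * Query the link status of every active sub-device, then report the link
 * of the current Tx sub-device as the fail-safe port's own link.
 */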
static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			return 0;
		}
	}
	return -1;
}
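/*
 * Accumulate the statistics of all active sub-devices on top of the
 * accumulator, keeping a per-sub-device snapshot (and its timestamp) so
 * that counters can survive a sub-device removal.
 */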
static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	return 0;
}
static void
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		rte_eth_stats_reset(PORT_ID(sdev));
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
}
/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limit capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Use values from the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Use a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Use a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		DEBUG("No probed device, using default infos");
		rte_memcpy(&PRIV(dev)->infos, &default_infos,
			   sizeof(default_infos));
	} else {
		uint64_t rx_offload_capa;
		uint64_t rxq_offload_capa;
		uint64_t rss_hf_offload_capa;

		rx_offload_capa = default_infos.rx_offload_capa;
		rxq_offload_capa = default_infos.rx_queue_offload_capa;
		rss_hf_offload_capa = default_infos.flow_type_rss_offloads;
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			rte_eth_dev_info_get(PORT_ID(sdev),
					&PRIV(dev)->infos);
			rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
			rxq_offload_capa &=
					PRIV(dev)->infos.rx_queue_offload_capa;
			rss_hf_offload_capa &=
					PRIV(dev)->infos.flow_type_rss_offloads;
		}
		sdev = TX_SUBDEV(dev);
		rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
		PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
		PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa;
		PRIV(dev)->infos.flow_type_rss_offloads = rss_hf_offload_capa;
		PRIV(dev)->infos.tx_offload_capa &=
					default_infos.tx_offload_capa;
		PRIV(dev)->infos.tx_queue_offload_capa &=
					default_infos.tx_queue_offload_capa;
	}
	rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}
static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;
	const uint32_t *ret;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return NULL;
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) {
		return NULL;
	}
	/*
	 * The API does not permit a clean AND of all ptypes.
	 * It is also incomplete by design, and we do not really care
	 * to have the best possible value in this context.
	 * We just return the ptypes of the device of highest
	 * priority, usually the PREFERRED device.
	 */
	ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev);
	return ret;
}
static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	return 0;
}
static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	int ret;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return 0;
	if (SUBOPS(sdev, flow_ctrl_get) == NULL) {
		return -ENOTSUP;
	}
	ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
	return ret;
}
static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
}
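/*
 * Add a secondary MAC address on every active sub-device and record the
 * VMDq pool in the local MAC address pool.
 */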
static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	return 0;
}
static int
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_default_mac_addr_set failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	return 0;
}
static int
fs_rss_hash_update(struct rte_eth_dev *dev,
		   struct rte_eth_rss_conf *rss_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_rss_hash_update"
			      " failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	return 0;
}
static int
fs_filter_ctrl(struct rte_eth_dev *dev,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
		ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = fs_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_start = fs_rx_queue_start,
	.rx_queue_stop = fs_rx_queue_stop,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.rx_queue_intr_enable = fs_rx_intr_enable,
	.rx_queue_intr_disable = fs_rx_intr_disable,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.rss_hash_update = fs_rss_hash_update,
	.filter_ctrl = fs_filter_ctrl,
};