/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>

#include "failsafe_private.h"
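
/*
 * Device information advertised while no sub-device is probed.
 * Limits are set to the most permissive values the fail-safe PMD
 * can accept, so an early configuration is never needlessly rejected.
 */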
static struct rte_eth_dev_info default_infos = {
	/* Max possible number of elements */
	.max_rx_pktlen = UINT32_MAX,
	.max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_mac_addrs = FAILSAFE_MAX_ETHADDR,
	.max_hash_mac_addrs = UINT32_MAX,
	.max_vfs = UINT16_MAX,
	.max_vmdq_pools = UINT16_MAX,
	.rx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	.tx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	.rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.rx_queue_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_MACSEC_STRIP |
		DEV_RX_OFFLOAD_HEADER_SPLIT |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_TIMESTAMP |
		DEV_RX_OFFLOAD_SECURITY,
	.tx_offload_capa =
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO,
	.flow_type_rss_offloads =
			ETH_RSS_IP |
			ETH_RSS_UDP |
			ETH_RSS_TCP,
	.dev_capa =
		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP,
};
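
/*
 * Apply the port configuration to every probed (or re-activated)
 * sub-device, enabling RMV/LSC interrupts and registering the
 * corresponding event callbacks where the sub-device supports them.
 */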
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED &&
		    !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE))
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			return ret;
		}
		if (rmv_interrupt && sdev->rmv_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     i);
			else
				sdev->rmv_callback = 1;
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt && sdev->lsc_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     i);
			else
				sdev->lsc_callback = 1;
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	return 0;
}
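
/* Mark every non-deferred queue as started in the ethdev queue-state arrays. */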
static void
fs_set_queues_state_start(struct rte_eth_dev *dev)
{
	struct rxq *rxq;
	struct txq *txq;
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq != NULL && !rxq->info.conf.rx_deferred_start)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq != NULL && !txq->info.conf.tx_deferred_start)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
}
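
/*
 * Start every active sub-device and the Rx interrupt proxy, then elect
 * the preferred sub-device for the datapath via fs_switch_dev().
 */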
static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = failsafe_rx_intr_install(dev);
	if (ret)
		return ret;
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			return ret;
		}
		ret = failsafe_rx_intr_install_subdevice(sdev);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			rte_eth_dev_stop(PORT_ID(sdev));
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED) {
		PRIV(dev)->state = DEV_STARTED;
		fs_set_queues_state_start(dev);
	}
	fs_switch_dev(dev, NULL);
	return 0;
}
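
/* Mark every configured queue as stopped in the ethdev queue-state arrays. */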
static void
fs_set_queues_state_stop(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		if (dev->data->rx_queues[i] != NULL)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		if (dev->data->tx_queues[i] != NULL)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
}
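
/* Stop all started sub-devices and tear down their Rx interrupt proxies. */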
static void
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		rte_eth_dev_stop(PORT_ID(sdev));
		failsafe_rx_intr_uninstall_subdevice(sdev);
		sdev->state = DEV_STARTED - 1;
	}
	failsafe_rx_intr_uninstall(dev);
	fs_set_queues_state_stop(dev);
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static void fs_dev_free_queues(struct rte_eth_dev *dev);
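
/*
 * Close every active sub-device after stopping the port, unregistering
 * the failsafe event callbacks and releasing the port queues.
 */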
static void
fs_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		failsafe_eth_dev_unregister_callbacks(sdev);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	fs_dev_free_queues(dev);
}
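
/*
 * Queue start/stop helpers: the operation is attempted on every active
 * sub-device. A failure is fatal for "start" (with rollback), while
 * "stop" returns 0 as long as at least one sub-device succeeded.
 */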
static int
fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_stop(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_start(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue start failed for subdevice %d", i);
			fs_rx_queue_stop(dev, rx_queue_id);
			return ret;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
}

static int
fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}

static int
fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue start failed for subdevice %d", i);
			fs_tx_queue_stop(dev, tx_queue_id);
			return ret;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
}
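
/* Release an Rx queue on the fail-safe port and on every active sub-device. */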
static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq;

	if (queue == NULL)
		return;
	rxq = queue;
	dev = rxq->priv->dev;
	if (rxq->event_fd > 0)
		close(rxq->event_fd);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->rx_queues != NULL &&
		    ETH(sdev)->data->rx_queues[rxq->qid] != NULL) {
			SUBOPS(sdev, rx_queue_release)
				(ETH(sdev)->data->rx_queues[rxq->qid]);
		}
	}
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
}
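
/*
 * Allocate the fail-safe Rx queue (with one refcount slot per potential
 * sub-device), attach an eventfd for Rx interrupt proxying and replay
 * the setup on every active sub-device.
 */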
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	/*
	 * FIXME: Add a proper interface in rte_eal_interrupts for
	 * allocating eventfd as an interrupt vector.
	 * For the time being, fake as if we are using MSIX interrupts,
	 * this will cause rte_intr_efd_enable to allocate an eventfd for us.
	 */
	struct rte_intr_handle intr_handle = {
		.type = RTE_INTR_HANDLE_VFIO_MSIX,
	};
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	if (rx_conf->rx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, rx_queue_start) == NULL) {
				ERROR("Rx queue deferred start is not "
					"supported for subdevice %d", i);
				return -EINVAL;
			}
		}
	}
	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
	ret = rte_intr_efd_enable(&intr_handle, 1);
	if (ret < 0) {
		rte_free(rxq);
		return ret;
	}
	rxq->event_fd = intr_handle.efds[0];
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	return ret;
}
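
/*
 * Rx interrupt enable/disable: events are proxied through the rxq eventfd,
 * so the request is simply propagated to every active sub-device.
 */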
static int
fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int rc = 0;

	if (idx >= dev->data->nb_rx_queues) {
		rte_errno = EINVAL;
		return -EINVAL;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rte_errno = EINVAL;
		return -EINVAL;
	}
	/* Fail if the proxy service is not running. */
	if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
		ERROR("failsafe interrupt services are not running");
		rte_errno = EAGAIN;
		return -EAGAIN;
	}
	rxq->enable_events = 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	if (rc)
		rte_errno = -rc;
	return rc;
}

static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint64_t u64;
	uint8_t i;
	int rc = 0;
	int ret;

	if (idx >= dev->data->nb_rx_queues) {
		rte_errno = EINVAL;
		return -EINVAL;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rte_errno = EINVAL;
		return -EINVAL;
	}
	rxq->enable_events = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	/* Clear pending events */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
	if (rc)
		rte_errno = -rc;
	return rc;
}
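
/* Release a Tx queue on the fail-safe port and on every active sub-device. */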
static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq;

	if (queue == NULL)
		return;
	txq = queue;
	dev = txq->priv->dev;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->tx_queues != NULL &&
		    ETH(sdev)->data->tx_queues[txq->qid] != NULL) {
			SUBOPS(sdev, tx_queue_release)
				(ETH(sdev)->data->tx_queues[txq->qid]);
		}
	}
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
}
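
/*
 * Allocate the fail-safe Tx queue (with one refcount slot per potential
 * sub-device) and replay the setup on every active sub-device.
 */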
static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	if (tx_conf->tx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, tx_queue_start) == NULL) {
				ERROR("Tx queue deferred start is not "
					"supported for subdevice %d", i);
				return -EINVAL;
			}
		}
	}
	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	return 0;
free_txq:
	fs_tx_queue_release(txq);
	return ret;
}

static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}
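
/*
 * Promiscuous and all-multicast settings are simply propagated to every
 * active sub-device.
 */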
static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_enable(PORT_ID(sdev));
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_disable(PORT_ID(sdev));
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_enable(PORT_ID(sdev));
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_disable(PORT_ID(sdev));
}
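
/*
 * Poll link status on every active sub-device, then mirror the link of
 * the current Tx sub-device into the fail-safe port.
 */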
static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			return 0;
		}
	}
	return -1;
}
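
/*
 * Stats are accumulated: each sub-device keeps a snapshot of its last
 * successful reading, so counters are not lost when a sub-device is
 * removed and re-plugged.
 */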
static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	return 0;
}

static void
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		rte_eth_stats_reset(PORT_ID(sdev));
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 *      Uses a logical AND of device capabilities among
 *      all sub_devices and the default capabilities.
 */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		DEBUG("No probed device, using default infos");
		rte_memcpy(&PRIV(dev)->infos, &default_infos,
			   sizeof(default_infos));
	} else {
		uint64_t rx_offload_capa;
		uint64_t rxq_offload_capa;
		uint64_t rss_hf_offload_capa;
		uint64_t dev_capa;

		rx_offload_capa = default_infos.rx_offload_capa;
		rxq_offload_capa = default_infos.rx_queue_offload_capa;
		rss_hf_offload_capa = default_infos.flow_type_rss_offloads;
		dev_capa = default_infos.dev_capa;
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			rte_eth_dev_info_get(PORT_ID(sdev),
					&PRIV(dev)->infos);
			rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
			rxq_offload_capa &=
					PRIV(dev)->infos.rx_queue_offload_capa;
			rss_hf_offload_capa &=
					PRIV(dev)->infos.flow_type_rss_offloads;
			dev_capa &= PRIV(dev)->infos.dev_capa;
		}
		sdev = TX_SUBDEV(dev);
		rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
		PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
		PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa;
		PRIV(dev)->infos.flow_type_rss_offloads = rss_hf_offload_capa;
		PRIV(dev)->infos.dev_capa = dev_capa;
		PRIV(dev)->infos.tx_offload_capa &=
					default_infos.tx_offload_capa;
		PRIV(dev)->infos.tx_queue_offload_capa &=
					default_infos.tx_queue_offload_capa;
	}
	rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;
	const uint32_t *ret;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return NULL;
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) {
		return NULL;
	}
	/*
	 * The API does not permit a clean AND of all ptypes.
	 * It is also incomplete by design and we do not really care
	 * to have a best possible value in this context.
	 * We just return the ptypes of the device of highest
	 * priority, usually the PREFERRED device.
	 */
	ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev);
	return ret;
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	int ret;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return 0;
	if (SUBOPS(sdev, flow_ctrl_get) == NULL) {
		return -ENOTSUP;
	}
	ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
	return ret;
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
}

static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	return 0;
}

static int
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	return 0;
}
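
/*
 * Apply the multicast address list to every active sub-device; on failure
 * the previously applied list is rolled back on all of them.
 */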
static int
fs_set_mc_addr_list(struct rte_eth_dev *dev,
		    struct ether_addr *mc_addr_set, uint32_t nb_mc_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	void *mcast_addrs;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
						   mc_addr_set, nb_mc_addr);
		if (ret != 0) {
			ERROR("Operation rte_eth_dev_set_mc_addr_list failed for sub_device %d with error %d",
			      i, ret);
			goto rollback;
		}
	}

	mcast_addrs = rte_realloc(PRIV(dev)->mcast_addrs,
		nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]), 0);
	if (mcast_addrs == NULL && nb_mc_addr > 0) {
		ret = -ENOMEM;
		goto rollback;
	}
	rte_memcpy(mcast_addrs, mc_addr_set,
		   nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]));
	PRIV(dev)->nb_mcast_addr = nb_mc_addr;
	PRIV(dev)->mcast_addrs = mcast_addrs;
	return 0;

rollback:
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		int rc = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
			PRIV(dev)->mcast_addrs, PRIV(dev)->nb_mcast_addr);
		if (rc != 0) {
			ERROR("Multicast MAC address list rollback for sub_device %d failed with error %d",
			      i, rc);
		}
	}
	return ret;
}

static int
fs_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_rss_hash_update"
			      " failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	return 0;
}

static int
fs_filter_ctrl(struct rte_eth_dev *dev,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
		ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = fs_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_start = fs_rx_queue_start,
	.rx_queue_stop = fs_rx_queue_stop,
	.tx_queue_start = fs_tx_queue_start,
	.tx_queue_stop = fs_tx_queue_stop,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.rx_queue_intr_enable = fs_rx_intr_enable,
	.rx_queue_intr_disable = fs_rx_intr_disable,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.set_mc_addr_list = fs_set_mc_addr_list,
	.rss_hash_update = fs_rss_hash_update,
	.filter_ctrl = fs_filter_ctrl,
};