1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2017 6WIND S.A.
3 * Copyright 2017 Mellanox Technologies, Ltd
6 #include <rte_atomic.h>
9 #include <rte_ethdev_driver.h>
11 #include "failsafe_private.h"
/*
 * Return non-zero when it is unsafe to call the sub-device's RX burst
 * handler directly: the underlying ethdev is absent, it has no
 * rx_pkt_burst callback, or the sub-device is not in DEV_STARTED state.
 *
 * NOTE(review): the visible condition ends with a trailing "||" — at
 * least one more clause follows on lines not shown in this chunk
 * (presumably a hot-unplug/remove check; confirm against full source).
 */
14 fs_rx_unsafe(struct sub_device *sdev)
16 return (ETH(sdev) == NULL) ||
17 (ETH(sdev)->rx_pkt_burst == NULL) ||
18 (sdev->state != DEV_STARTED) ||
/*
 * Return non-zero when it is unsafe to call the sub-device's TX burst
 * handler directly: sdev itself is NULL (TX_SUBDEV() may yield no
 * active sub-device), its ethdev is absent, it has no tx_pkt_burst
 * callback, or the sub-device is not in DEV_STARTED state.
 *
 * Unlike fs_rx_unsafe(), the NULL check on sdev is explicit here
 * because the single TX sub-device lookup can legitimately fail.
 */
23 fs_tx_unsafe(struct sub_device *sdev)
25 return (sdev == NULL) ||
26 (ETH(sdev) == NULL) ||
27 (ETH(sdev)->tx_pkt_burst == NULL) ||
28 (sdev->state != DEV_STARTED);
/*
 * Install the RX and TX burst handlers on the fail-safe port.
 *
 * Two variants exist for each direction:
 *  - "safe"  (failsafe_rx_burst / failsafe_tx_burst): revalidates the
 *    sub-device on every call, usable while sub-devices may change;
 *  - "fast"  (failsafe_rx_burst_fast / failsafe_tx_burst_fast): skips
 *    those checks for performance, valid only when every sub-device is
 *    known-safe.
 *
 * @param dev        fail-safe ethdev whose burst pointers are updated
 * @param force_safe non-zero forces the safe variants regardless of
 *                   the current sub-device states
 *
 * NOTE(review): declarations of need_safe/safe_set/i and several
 * braces fall on lines missing from this chunk.
 */
32 set_burst_fn(struct rte_eth_dev *dev, int force_safe)
34 struct sub_device *sdev;
/* RX: safe variant is needed if forced or if ANY sub-device is RX-unsafe. */
39 need_safe = force_safe;
40 FOREACH_SUBDEV(sdev, i, dev)
41 need_safe |= fs_rx_unsafe(sdev);
/* Only swap the handler when the desired variant is not already set. */
42 safe_set = (dev->rx_pkt_burst == &failsafe_rx_burst);
43 if (need_safe && !safe_set) {
44 DEBUG("Using safe RX bursts%s",
45 (force_safe ? " (forced)" : ""));
46 dev->rx_pkt_burst = &failsafe_rx_burst;
47 } else if (!need_safe && safe_set) {
48 DEBUG("Using fast RX bursts");
49 dev->rx_pkt_burst = &failsafe_rx_burst_fast;
/* TX: only the single active TX sub-device matters, not all of them. */
51 need_safe = force_safe || fs_tx_unsafe(TX_SUBDEV(dev));
52 safe_set = (dev->tx_pkt_burst == &failsafe_tx_burst);
53 if (need_safe && !safe_set) {
54 DEBUG("Using safe TX bursts%s",
55 (force_safe ? " (forced)" : ""));
56 dev->tx_pkt_burst = &failsafe_tx_burst;
57 } else if (!need_safe && safe_set) {
58 DEBUG("Using fast TX bursts");
59 dev->tx_pkt_burst = &failsafe_tx_burst_fast;
/*
 * Safe RX burst: iterate over sub-devices (round-robin, stopping when
 * the loop wraps back to rxq->sdev) until one returns packets.  Each
 * sub-device is re-checked with fs_rx_unsafe() before use, so this
 * variant tolerates sub-devices disappearing or stopping at runtime.
 *
 * FS_ATOMIC_P/FS_ATOMIC_V bracket the sub-device burst call to keep a
 * per-sub-device refcount held while its queue is being polled.
 *
 * NOTE(review): the queue cast, the skip path taken when
 * fs_rx_unsafe() is true, the rx_pkt_burst pointer load, and the
 * nb_rx assignment are on lines missing from this chunk.
 */
65 failsafe_rx_burst(void *queue,
66 struct rte_mbuf **rx_pkts,
69 struct sub_device *sdev;
77 if (fs_rx_unsafe(sdev)) {
82 sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
83 FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
85 rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
86 FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
88 } while (nb_rx == 0 && sdev != rxq->sdev);
/*
 * Fast RX burst: same round-robin polling as failsafe_rx_burst(), but
 * the per-sub-device safety check is demoted to a debug-only
 * RTE_ASSERT.  set_burst_fn() installs this variant only when every
 * sub-device is already known to be RX-safe.
 *
 * NOTE(review): queue cast, rx_pkt_burst load and nb_rx assignment
 * are on lines missing from this chunk.
 */
94 failsafe_rx_burst_fast(void *queue,
95 struct rte_mbuf **rx_pkts,
98 struct sub_device *sdev;
/* Debug-build invariant: callers guarantee the sub-device is safe. */
106 RTE_ASSERT(!fs_rx_unsafe(sdev));
107 sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
108 FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
110 rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
111 FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
113 } while (nb_rx == 0 && sdev != rxq->sdev);
/*
 * Safe TX burst: resolve the single active TX sub-device via
 * TX_SUBDEV() and validate it with fs_tx_unsafe() before forwarding
 * the burst to its tx_pkt_burst handler.  The refcount P/V pair keeps
 * the sub-device queue pinned for the duration of the call.
 *
 * NOTE(review): the queue cast, the early-return body taken when
 * fs_tx_unsafe() is true, and the final "return nb_tx;" are on lines
 * missing from this chunk.
 */
119 failsafe_tx_burst(void *queue,
120 struct rte_mbuf **tx_pkts,
123 struct sub_device *sdev;
129 sdev = TX_SUBDEV(txq->priv->dev);
/* Bail out (likely returning 0) if the TX sub-device cannot be used. */
130 if (unlikely(fs_tx_unsafe(sdev)))
132 sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
133 FS_ATOMIC_P(txq->refcnt[sdev->sid]);
134 nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
135 FS_ATOMIC_V(txq->refcnt[sdev->sid]);
/*
 * Fast TX burst: same forwarding as failsafe_tx_burst(), with the
 * safety check demoted to a debug-only RTE_ASSERT.  set_burst_fn()
 * installs this variant only when the TX sub-device is already known
 * to be safe.
 *
 * NOTE(review): the queue cast and the final "return nb_tx;" are on
 * lines missing from this chunk.
 */
140 failsafe_tx_burst_fast(void *queue,
141 struct rte_mbuf **tx_pkts,
144 struct sub_device *sdev;
150 sdev = TX_SUBDEV(txq->priv->dev);
/* Debug-build invariant: callers guarantee the sub-device is safe. */
151 RTE_ASSERT(!fs_tx_unsafe(sdev));
152 sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
153 FS_ATOMIC_P(txq->refcnt[sdev->sid]);
154 nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
155 FS_ATOMIC_V(txq->refcnt[sdev->sid]);