/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <rte_atomic.h>
#include <rte_debug.h>
#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>

#include "failsafe_private.h"
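
/*
 * Check whether Rx bursts can safely be served by this sub-device.
 *
 * The sub-device is unusable if its ethdev is gone, it has no Rx burst
 * function, it is not started, or its removal is pending.
 */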
static inline int
fs_rx_unsafe(struct sub_device *sdev)
{
	return (ETH(sdev) == NULL) ||
		(ETH(sdev)->rx_pkt_burst == NULL) ||
		(sdev->state != DEV_STARTED) ||
		(sdev->remove != 0);
}
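
/*
 * Check whether Tx bursts can safely be served by this sub-device.
 *
 * Unlike the Rx check, the sub-device pointer itself may be NULL here,
 * since TX_SUBDEV() may yield no usable sub-device.
 */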
static inline int
fs_tx_unsafe(struct sub_device *sdev)
{
	return (sdev == NULL) ||
		(ETH(sdev) == NULL) ||
		(ETH(sdev)->tx_pkt_burst == NULL) ||
		(sdev->state != DEV_STARTED);
}
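
/*
 * Install the Rx/Tx burst functions matching the current sub-device state.
 *
 * The safe variants re-check sub-device state on each burst and are used
 * whenever any sub-device is unsafe to poll or when the caller forces it;
 * otherwise the fast variants, which skip those checks, are installed.
 */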
void
failsafe_set_burst_fn(struct rte_eth_dev *dev, int force_safe)
{
	struct sub_device *sdev;
	uint8_t i;
	int need_safe;
	int safe_set;

	need_safe = force_safe;
	FOREACH_SUBDEV(sdev, i, dev)
		need_safe |= fs_rx_unsafe(sdev);
	safe_set = (dev->rx_pkt_burst == &failsafe_rx_burst);
	if (need_safe && !safe_set) {
		DEBUG("Using safe RX bursts%s",
		      (force_safe ? " (forced)" : ""));
		dev->rx_pkt_burst = &failsafe_rx_burst;
	} else if (!need_safe && safe_set) {
		DEBUG("Using fast RX bursts");
		dev->rx_pkt_burst = &failsafe_rx_burst_fast;
	}
	need_safe = force_safe || fs_tx_unsafe(TX_SUBDEV(dev));
	safe_set = (dev->tx_pkt_burst == &failsafe_tx_burst);
	if (need_safe && !safe_set) {
		DEBUG("Using safe TX bursts%s",
		      (force_safe ? " (forced)" : ""));
		dev->tx_pkt_burst = &failsafe_tx_burst;
	} else if (!need_safe && safe_set) {
		DEBUG("Using fast TX bursts");
		dev->tx_pkt_burst = &failsafe_tx_burst_fast;
	}
	/* Make the new burst function pointers visible to other lcores. */
	rte_wmb();
}

/*
 * Override source port in Rx packets.
 *
 * Make Rx packets originate from this PMD instance instead of one of its
 * sub-devices. This is mandatory to avoid breaking applications.
 */
static void
failsafe_rx_set_port(struct rte_mbuf **rx_pkts, uint16_t nb_pkts, uint16_t port)
{
	unsigned int i;

	for (i = 0; i != nb_pkts; ++i)
		rx_pkts[i]->port = port;
}
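
/*
 * Safe Rx burst: iterate the circular sub-device list, skipping any
 * sub-device reported unsafe by fs_rx_unsafe(), until packets are
 * received or a full cycle completes without any.
 */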
uint16_t
failsafe_rx_burst(void *queue,
		  struct rte_mbuf **rx_pkts,
		  uint16_t nb_pkts)
{
	struct sub_device *sdev;
	struct rxq *rxq;
	void *sub_rxq;
	uint16_t nb_rx;

	rxq = queue;
	sdev = rxq->sdev;
	do {
		if (fs_rx_unsafe(sdev)) {
			/* Skip this sub-device and try the next one. */
			nb_rx = 0;
			sdev = sdev->next;
			continue;
		}
		sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
		/* Pin the sub-device queue while its burst function runs. */
		FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
		nb_rx = ETH(sdev)->rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
		FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
		sdev = sdev->next;
	} while (nb_rx == 0 && sdev != rxq->sdev);
	/* Resume the rotation from this sub-device on the next burst. */
	rxq->sdev = sdev;
	if (nb_rx)
		failsafe_rx_set_port(rx_pkts, nb_rx,
				     rxq->priv->data->port_id);
	return nb_rx;
}
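
/*
 * Fast Rx burst: same rotation as failsafe_rx_burst() but without the
 * per-call safety checks; RTE_ASSERT() only guards debug builds.
 */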
uint16_t
failsafe_rx_burst_fast(void *queue,
		       struct rte_mbuf **rx_pkts,
		       uint16_t nb_pkts)
{
	struct sub_device *sdev;
	struct rxq *rxq;
	void *sub_rxq;
	uint16_t nb_rx;

	rxq = queue;
	sdev = rxq->sdev;
	do {
		RTE_ASSERT(!fs_rx_unsafe(sdev));
		sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
		FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
		nb_rx = ETH(sdev)->rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
		FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
		sdev = sdev->next;
	} while (nb_rx == 0 && sdev != rxq->sdev);
	rxq->sdev = sdev;
	if (nb_rx)
		failsafe_rx_set_port(rx_pkts, nb_rx,
				     rxq->priv->data->port_id);
	return nb_rx;
}
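
/*
 * Safe Tx burst: forward the burst to the active Tx sub-device, or
 * transmit nothing if it is currently unsafe to use.
 */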
uint16_t
failsafe_tx_burst(void *queue,
		  struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	struct sub_device *sdev;
	struct txq *txq;
	void *sub_txq;
	uint16_t nb_tx;

	txq = queue;
	sdev = TX_SUBDEV(&rte_eth_devices[txq->priv->data->port_id]);
	if (unlikely(fs_tx_unsafe(sdev)))
		return 0;
	sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
	/* Pin the sub-device queue while its burst function runs. */
	FS_ATOMIC_P(txq->refcnt[sdev->sid]);
	nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
	FS_ATOMIC_V(txq->refcnt[sdev->sid]);
	return nb_tx;
}
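
/*
 * Fast Tx burst: same as failsafe_tx_burst() without the safety check;
 * RTE_ASSERT() only guards debug builds.
 */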
uint16_t
failsafe_tx_burst_fast(void *queue,
		       struct rte_mbuf **tx_pkts,
		       uint16_t nb_pkts)
{
	struct sub_device *sdev;
	struct txq *txq;
	void *sub_txq;
	uint16_t nb_tx;

	txq = queue;
	sdev = TX_SUBDEV(&rte_eth_devices[txq->priv->data->port_id]);
	RTE_ASSERT(!fs_tx_unsafe(sdev));
	sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
	FS_ATOMIC_P(txq->refcnt[sdev->sid]);
	nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
	FS_ATOMIC_V(txq->refcnt[sdev->sid]);
	return nb_tx;
}