* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_atomic.h>
#include <rte_debug.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
{
	/*
	 * A sub-device is unsafe to poll for Rx when it has no underlying
	 * ethdev, exposes no Rx burst function, is not in the STARTED
	 * state, or has its removal flag raised (presumably set on
	 * hot-unplug — confirm against the RMV event handler).
	 */
	return (ETH(sdev) == NULL) ||
		(ETH(sdev)->rx_pkt_burst == NULL) ||
		(sdev->state != DEV_STARTED) ||
		(sdev->remove != 0);
}
static inline int
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct fs_priv *priv;
struct sub_device *sdev;
struct rxq *rxq;
void *sub_rxq;
uint16_t nb_rx;
- uint8_t nb_polled, nb_subs;
- uint8_t i;
rxq = queue;
- priv = rxq->priv;
- nb_subs = priv->subs_tail - priv->subs_head;
- nb_polled = 0;
- for (i = rxq->last_polled; nb_polled < nb_subs; nb_polled++) {
- i++;
- if (i == priv->subs_tail)
- i = priv->subs_head;
- sdev = &priv->subs[i];
- if (unlikely(fs_rx_unsafe(sdev)))
+ sdev = rxq->sdev;
+ do {
+ if (fs_rx_unsafe(sdev)) {
+ nb_rx = 0;
continue;
+ }
sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
+ FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
nb_rx = ETH(sdev)->
rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
- if (nb_rx) {
- rxq->last_polled = i;
- return nb_rx;
- }
- }
- return 0;
+ FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
+ sdev = sdev->next;
+ } while (nb_rx == 0 && sdev != rxq->sdev);
+ rxq->sdev = sdev;
+ return nb_rx;
}
uint16_t
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct fs_priv *priv;
struct sub_device *sdev;
struct rxq *rxq;
void *sub_rxq;
uint16_t nb_rx;
- uint8_t nb_polled, nb_subs;
- uint8_t i;
rxq = queue;
- priv = rxq->priv;
- nb_subs = priv->subs_tail - priv->subs_head;
- nb_polled = 0;
- for (i = rxq->last_polled; nb_polled < nb_subs; nb_polled++) {
- i++;
- if (i == priv->subs_tail)
- i = priv->subs_head;
- sdev = &priv->subs[i];
+ sdev = rxq->sdev;
+ do {
RTE_ASSERT(!fs_rx_unsafe(sdev));
sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
+ FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
nb_rx = ETH(sdev)->
rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
- if (nb_rx) {
- rxq->last_polled = i;
- return nb_rx;
- }
- }
- return 0;
+ FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
+ sdev = sdev->next;
+ } while (nb_rx == 0 && sdev != rxq->sdev);
+ rxq->sdev = sdev;
+ return nb_rx;
}
uint16_t
struct sub_device *sdev;
struct txq *txq;
void *sub_txq;
+ uint16_t nb_tx;
txq = queue;
sdev = TX_SUBDEV(txq->priv->dev);
if (unlikely(fs_tx_unsafe(sdev)))
return 0;
sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
- return ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
+ FS_ATOMIC_P(txq->refcnt[sdev->sid]);
+ nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
+ FS_ATOMIC_V(txq->refcnt[sdev->sid]);
+ return nb_tx;
}
uint16_t
struct sub_device *sdev;
struct txq *txq;
void *sub_txq;
+ uint16_t nb_tx;
txq = queue;
sdev = TX_SUBDEV(txq->priv->dev);
RTE_ASSERT(!fs_tx_unsafe(sdev));
sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
- return ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
+ FS_ATOMIC_P(txq->refcnt[sdev->sid]);
+ nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
+ FS_ATOMIC_V(txq->refcnt[sdev->sid]);
+ return nb_tx;
}