}
uint32_t
-nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
+nfp_net_rx_queue_count(void *rx_queue)
{
struct nfp_net_rxq *rxq;
struct nfp_net_rx_desc *rxds;
uint32_t idx;
uint32_t count;
- rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
+ rxq = rx_queue;
idx = rxq->rd_p;
}
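/*
 * Illustrative sketch, not part of the patch: the DPDK 21.11 ethdev
 * rework changed the rx_queue_count callback to take the queue pointer
 * directly, as above. The application-facing call is unchanged; the
 * caller below is hypothetical.
 */
#include <rte_ethdev.h>

static int
example_rx_backlog(uint16_t port_id, uint16_t queue_id)
{
	/* Number of used RX descriptors, or a negative errno. */
	return rte_eth_rx_queue_count(port_id, queue_id);
}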
mbuf->hash.rss = hash;
- mbuf->ol_flags |= PKT_RX_RSS_HASH;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
switch (hash_type) {
case NFP_NET_RSS_IPV4:
/* If IPv4 and IP checksum error, fail */
if (unlikely((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
!(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK)))
- mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ mb->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
- mb->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ mb->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
/* If neither UDP nor TCP return */
if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
!(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM))
return;
if (likely(rxd->rxd.flags & PCIE_DESC_RX_L4_CSUM_OK))
- mb->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ mb->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
else
- mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ mb->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
}
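/*
 * Illustrative sketch, not part of the patch: an application can test
 * the verdict flags set above against the RTE_MBUF_F_RX_*_CKSUM_MASK
 * values from rte_mbuf_core.h. The helper name is hypothetical.
 */
#include <rte_mbuf.h>

static inline int
example_rx_csum_ok(const struct rte_mbuf *m)
{
	return (m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) ==
			RTE_MBUF_F_RX_IP_CKSUM_GOOD &&
	       (m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) ==
			RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}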
/*
if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
- mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
}
/* Adding the mbuf to the mbuf array passed by the app */
}
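/*
 * Illustrative sketch, not part of the patch: consuming the RSS hash
 * and stripped VLAN tag that the receive path above attaches to each
 * mbuf. The function name is hypothetical.
 */
#include <stdio.h>
#include <inttypes.h>
#include <rte_mbuf.h>

static void
example_inspect_rx_mbuf(const struct rte_mbuf *m)
{
	if (m->ol_flags & RTE_MBUF_F_RX_RSS_HASH)
		printf("rss hash: 0x%08" PRIx32 "\n", m->hash.rss);

	if (m->ol_flags & RTE_MBUF_F_RX_VLAN_STRIPPED)
		printf("stripped vlan tci: %" PRIu16 "\n", m->vlan_tci);
}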
void
-nfp_net_rx_queue_release(void *rx_queue)
+nfp_net_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
- struct nfp_net_rxq *rxq = rx_queue;
+ struct nfp_net_rxq *rxq = dev->data->rx_queues[queue_idx];
if (rxq) {
nfp_net_rx_queue_release_mbufs(rxq);
+ rte_eth_dma_zone_free(dev, "rx_ring", queue_idx);
rte_free(rxq->rxbufs);
rte_free(rxq);
}
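/*
 * Illustrative sketch, not part of the patch: rte_eth_dma_zone_free()
 * above pairs with the rte_eth_dma_zone_reserve() call made at queue
 * setup time; both identify the memzone by (dev, ring name, queue id).
 * The ring size and the cache-line alignment below are assumed example
 * values, not the driver's actual ones.
 */
#include <ethdev_driver.h>	/* driver-internal ethdev API */

static const struct rte_memzone *
example_reserve_rx_ring(struct rte_eth_dev *dev, uint16_t queue_idx,
		size_t ring_size, int socket_id)
{
	/* Freed later via rte_eth_dma_zone_free(dev, "rx_ring", queue_idx). */
	return rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
			ring_size, RTE_CACHE_LINE_SIZE, socket_id);
}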
/*
* Free memory prior to re-allocation if needed. This is the case after
* calling nfp_net_stop
*/
if (dev->data->rx_queues[queue_idx]) {
- nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ nfp_net_rx_queue_release(dev, queue_idx);
dev->data->rx_queues[queue_idx] = NULL;
}
if (rxq == NULL)
return -ENOMEM;
+ dev->data->rx_queues[queue_idx] = rxq;
+
/* Hw queues mapping based on firmware configuration */
rxq->qidx = queue_idx;
rxq->fl_qcidx = queue_idx * hw->stride_rx;
if (tz == NULL) {
PMD_DRV_LOG(ERR, "Error allocating rx dma");
- nfp_net_rx_queue_release(rxq);
+ nfp_net_rx_queue_release(dev, queue_idx);
+ dev->data->rx_queues[queue_idx] = NULL;
return -ENOMEM;
}
sizeof(*rxq->rxbufs) * nb_desc,
RTE_CACHE_LINE_SIZE, socket_id);
if (rxq->rxbufs == NULL) {
- nfp_net_rx_queue_release(rxq);
+ nfp_net_rx_queue_release(dev, queue_idx);
+ dev->data->rx_queues[queue_idx] = NULL;
return -ENOMEM;
}
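/*
 * Note on the reordering in this patch (annotation, not patch content):
 * the release callback now resolves the queue through dev->data, so
 * setup publishes dev->data->rx_queues[queue_idx] = rxq before any
 * failure path, and every failure path clears the slot back to NULL
 * after calling nfp_net_rx_queue_release(dev, queue_idx).
 */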
nfp_net_reset_rx_queue(rxq);
- dev->data->rx_queues[queue_idx] = rxq;
rxq->hw = hw;
/*
}
void
-nfp_net_tx_queue_release(void *tx_queue)
+nfp_net_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
- struct nfp_net_txq *txq = tx_queue;
+ struct nfp_net_txq *txq = dev->data->tx_queues[queue_idx];
if (txq) {
nfp_net_tx_queue_release_mbufs(txq);
+ rte_eth_dma_zone_free(dev, "tx_ring", queue_idx);
rte_free(txq->txbufs);
rte_free(txq);
}
if (dev->data->tx_queues[queue_idx]) {
PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
queue_idx);
- nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ nfp_net_tx_queue_release(dev, queue_idx);
dev->data->tx_queues[queue_idx] = NULL;
}
return -ENOMEM;
}
+ dev->data->tx_queues[queue_idx] = txq;
+
/*
* Allocate TX ring hardware descriptors. A memzone large enough to
* handle the maximum ring size is allocated in order to allow for
socket_id);
if (tz == NULL) {
PMD_DRV_LOG(ERR, "Error allocating tx dma");
- nfp_net_tx_queue_release(txq);
+ nfp_net_tx_queue_release(dev, queue_idx);
+ dev->data->tx_queues[queue_idx] = NULL;
return -ENOMEM;
}
sizeof(*txq->txbufs) * nb_desc,
RTE_CACHE_LINE_SIZE, socket_id);
if (txq->txbufs == NULL) {
- nfp_net_tx_queue_release(txq);
+ nfp_net_tx_queue_release(dev, queue_idx);
+ dev->data->tx_queues[queue_idx] = NULL;
return -ENOMEM;
}
PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
nfp_net_reset_tx_queue(txq);
- dev->data->tx_queues[queue_idx] = txq;
txq->hw = hw;
/*
ol_flags = mb->ol_flags;
- if (!(ol_flags & PKT_TX_TCP_SEG))
+ if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG))
goto clean_txd;
txd->l3_offset = mb->l2_len;
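/*
 * Illustrative sketch, not part of the patch: a sender requesting TSO
 * with the renamed RTE_MBUF_F_TX_TCP_SEG flag tested above (in practice
 * the IP flags and l2/l3 lengths are set as well). The segment size is
 * an assumed example value.
 */
#include <rte_mbuf.h>
#include <rte_tcp.h>

static void
example_request_tso(struct rte_mbuf *m)
{
	m->tso_segsz = 1400;
	m->l4_len = sizeof(struct rte_tcp_hdr);
	m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
}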
ol_flags = mb->ol_flags;
/* IPv6 does not need checksum */
- if (ol_flags & PKT_TX_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
txd->flags |= PCIE_DESC_TX_IP4_CSUM;
- switch (ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_UDP_CKSUM:
+ switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_UDP_CKSUM:
txd->flags |= PCIE_DESC_TX_UDP_CSUM;
break;
- case PKT_TX_TCP_CKSUM:
+ case RTE_MBUF_F_TX_TCP_CKSUM:
txd->flags |= PCIE_DESC_TX_TCP_CSUM;
break;
}
- if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
+ if (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK))
txd->flags |= PCIE_DESC_TX_CSUM;
}
nfp_net_tx_tso(txq, &txd, pkt);
nfp_net_tx_cksum(txq, &txd, pkt);
- if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
+ if ((pkt->ol_flags & RTE_MBUF_F_TX_VLAN) &&
(hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
txd.flags |= PCIE_DESC_TX_VLAN;
txd.vlan = pkt->vlan_tci;
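/*
 * Illustrative sketch, not part of the patch: a transmit path setting
 * the renamed checksum and VLAN-insertion flags that the code above
 * inspects. Header lengths are assumed example values; the function
 * name is hypothetical.
 */
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>

static void
example_request_tx_offloads(struct rte_mbuf *m, uint16_t vlan_tci)
{
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
			RTE_MBUF_F_TX_TCP_CKSUM;

	/* Ask the PMD to insert the tag; pairs with PCIE_DESC_TX_VLAN. */
	m->vlan_tci = vlan_tci;
	m->ol_flags |= RTE_MBUF_F_TX_VLAN;
}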