X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fnfp%2Fnfp_rxtx.c;h=0fe1415596e0cbb61cc93ee2459bd2ffdbb01ead;hb=891f8260dd49d4bc683cffaca79b7f7d5247b968;hp=1402c5f84a0773b6559bdcdfd074d518e735ad62;hpb=8d7a59f171a2bb765ad45cee3f67df45e7e71f7e;p=dpdk.git

diff --git a/drivers/net/nfp/nfp_rxtx.c b/drivers/net/nfp/nfp_rxtx.c
index 1402c5f84a..0fe1415596 100644
--- a/drivers/net/nfp/nfp_rxtx.c
+++ b/drivers/net/nfp/nfp_rxtx.c
@@ -97,14 +97,14 @@ nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
 }
 
 uint32_t
-nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
+nfp_net_rx_queue_count(void *rx_queue)
 {
 	struct nfp_net_rxq *rxq;
 	struct nfp_net_rx_desc *rxds;
 	uint32_t idx;
 	uint32_t count;
 
-	rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
+	rxq = rx_queue;
 
 	idx = rxq->rd_p;
 
@@ -203,7 +203,7 @@ nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
 	}
 
 	mbuf->hash.rss = hash;
-	mbuf->ol_flags |= PKT_RX_RSS_HASH;
+	mbuf->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
 
 	switch (hash_type) {
 	case NFP_NET_RSS_IPV4:
@@ -245,9 +245,9 @@ nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
 	/* If IPv4 and IP checksum error, fail */
 	if (unlikely((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
 	    !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK)))
-		mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+		mb->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
 	else
-		mb->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+		mb->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
 
 	/* If neither UDP nor TCP return */
 	if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
@@ -255,9 +255,9 @@ nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
 		return;
 
 	if (likely(rxd->rxd.flags & PCIE_DESC_RX_L4_CSUM_OK))
-		mb->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+		mb->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
 	else
-		mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+		mb->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
 }
 
 /*
@@ -403,7 +403,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
 		    (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
 			mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
-			mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+			mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
 		}
 
 		/* Adding the mbuf to the mbuf array passed by the app */
@@ -464,9 +464,9 @@ nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
 }
 
 void
-nfp_net_rx_queue_release(void *rx_queue)
+nfp_net_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-	struct nfp_net_rxq *rxq = rx_queue;
+	struct nfp_net_rxq *rxq = dev->data->rx_queues[queue_idx];
 
 	if (rxq) {
 		nfp_net_rx_queue_release_mbufs(rxq);
@@ -513,7 +513,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 	 * calling nfp_net_stop
 	 */
 	if (dev->data->rx_queues[queue_idx]) {
-		nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]);
+		nfp_net_rx_queue_release(dev, queue_idx);
 		dev->data->rx_queues[queue_idx] = NULL;
 	}
 
@@ -523,6 +523,8 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 	if (rxq == NULL)
 		return -ENOMEM;
 
+	dev->data->rx_queues[queue_idx] = rxq;
+
 	/* Hw queues mapping based on firmware configuration */
 	rxq->qidx = queue_idx;
 	rxq->fl_qcidx = queue_idx * hw->stride_rx;
@@ -556,7 +558,8 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 
 	if (tz == NULL) {
 		PMD_DRV_LOG(ERR, "Error allocating rx dma");
-		nfp_net_rx_queue_release(rxq);
+		nfp_net_rx_queue_release(dev, queue_idx);
+		dev->data->rx_queues[queue_idx] = NULL;
 		return -ENOMEM;
 	}
 
@@ -569,7 +572,8 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 					 sizeof(*rxq->rxbufs) * nb_desc,
 					 RTE_CACHE_LINE_SIZE, socket_id);
 	if (rxq->rxbufs == NULL) {
-		nfp_net_rx_queue_release(rxq);
+		nfp_net_rx_queue_release(dev, queue_idx);
+		dev->data->rx_queues[queue_idx] = NULL;
 		return -ENOMEM;
 	}
 
@@ -578,7 +582,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 
 	nfp_net_reset_rx_queue(rxq);
 
-	dev->data->rx_queues[queue_idx] = rxq;
 	rxq->hw = hw;
 
 	/*
@@ -651,9 +654,9 @@ nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
 }
 
 void
-nfp_net_tx_queue_release(void *tx_queue)
+nfp_net_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-	struct nfp_net_txq *txq = tx_queue;
+	struct nfp_net_txq *txq = dev->data->tx_queues[queue_idx];
 
 	if (txq) {
 		nfp_net_tx_queue_release_mbufs(txq);
@@ -714,7 +717,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	if (dev->data->tx_queues[queue_idx]) {
 		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
 			   queue_idx);
-		nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);
+		nfp_net_tx_queue_release(dev, queue_idx);
 		dev->data->tx_queues[queue_idx] = NULL;
 	}
 
@@ -726,6 +729,8 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		return -ENOMEM;
 	}
 
+	dev->data->tx_queues[queue_idx] = txq;
+
 	/*
 	 * Allocate TX ring hardware descriptors. A memzone large enough to
 	 * handle the maximum ring size is allocated in order to allow for
@@ -737,7 +742,8 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 				   socket_id);
 	if (tz == NULL) {
 		PMD_DRV_LOG(ERR, "Error allocating tx dma");
-		nfp_net_tx_queue_release(txq);
+		nfp_net_tx_queue_release(dev, queue_idx);
+		dev->data->tx_queues[queue_idx] = NULL;
 		return -ENOMEM;
 	}
 
@@ -763,7 +769,8 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 					 sizeof(*txq->txbufs) * nb_desc,
 					 RTE_CACHE_LINE_SIZE, socket_id);
 	if (txq->txbufs == NULL) {
-		nfp_net_tx_queue_release(txq);
+		nfp_net_tx_queue_release(dev, queue_idx);
+		dev->data->tx_queues[queue_idx] = NULL;
 		return -ENOMEM;
 	}
 	PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
@@ -771,7 +778,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
 	nfp_net_reset_tx_queue(txq);
 
-	dev->data->tx_queues[queue_idx] = txq;
 	txq->hw = hw;
 
 	/*
@@ -821,7 +827,7 @@ nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
 
 	ol_flags = mb->ol_flags;
 
-	if (!(ol_flags & PKT_TX_TCP_SEG))
+	if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG))
 		goto clean_txd;
 
 	txd->l3_offset = mb->l2_len;
@@ -853,19 +859,19 @@ nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
 	ol_flags = mb->ol_flags;
 
 	/* IPv6 does not need checksum */
-	if (ol_flags & PKT_TX_IP_CKSUM)
+	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
 		txd->flags |= PCIE_DESC_TX_IP4_CSUM;
 
-	switch (ol_flags & PKT_TX_L4_MASK) {
-	case PKT_TX_UDP_CKSUM:
+	switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+	case RTE_MBUF_F_TX_UDP_CKSUM:
 		txd->flags |= PCIE_DESC_TX_UDP_CSUM;
 		break;
-	case PKT_TX_TCP_CKSUM:
+	case RTE_MBUF_F_TX_TCP_CKSUM:
 		txd->flags |= PCIE_DESC_TX_TCP_CSUM;
 		break;
 	}
 
-	if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
+	if (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK))
 		txd->flags |= PCIE_DESC_TX_CSUM;
 }
 
@@ -929,7 +935,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		nfp_net_tx_tso(txq, &txd, pkt);
 		nfp_net_tx_cksum(txq, &txd, pkt);
 
-		if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
+		if ((pkt->ol_flags & RTE_MBUF_F_TX_VLAN) &&
 		    (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
 			txd.flags |= PCIE_DESC_TX_VLAN;
 			txd.vlan = pkt->vlan_tci;