net/bnxt: add Tx TruFlow table config for P4 device
diff --git a/drivers/net/nfp/nfp_rxtx.c b/drivers/net/nfp/nfp_rxtx.c
index 1402c5f..0fe1415 100644
--- a/drivers/net/nfp/nfp_rxtx.c
+++ b/drivers/net/nfp/nfp_rxtx.c
@@ -97,14 +97,14 @@ nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
 }
 
 uint32_t
-nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
+nfp_net_rx_queue_count(void *rx_queue)
 {
         struct nfp_net_rxq *rxq;
         struct nfp_net_rx_desc *rxds;
         uint32_t idx;
         uint32_t count;
 
-        rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
+        rxq = rx_queue;
 
         idx = rxq->rd_p;
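This hunk tracks the DPDK 21.11 ethdev rework that hands the Rx queue object straight to the queue-count callback instead of (dev, queue_idx). A minimal sketch of the driver-side callback type this signature matches, assuming the 21.11 shape of ethdev_driver.h:

    #include <stdint.h>

    /* Sketch: the ethdev layer now resolves dev->data->rx_queues[queue_id]
     * once and passes the queue object down, so each PMD drops its own
     * (dev, queue_idx) lookup. Assumed typedef, for illustration. */
    typedef uint32_t (*eth_rx_queue_count_t)(void *rx_queue);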
@@ -203,7 +203,7 @@ nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
         }
 
         mbuf->hash.rss = hash;
-        mbuf->ol_flags |= PKT_RX_RSS_HASH;
+        mbuf->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
 
         switch (hash_type) {
         case NFP_NET_RSS_IPV4:
@@ -245,9 +245,9 @@ nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
         /* If IPv4 and IP checksum error, fail */
         if (unlikely((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
             !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK)))
-                mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+                mb->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
         else
-                mb->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+                mb->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
 
         /* If neither UDP nor TCP return */
         if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
@@ -255,9 +255,9 @@ nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
                 return;
 
         if (likely(rxd->rxd.flags & PCIE_DESC_RX_L4_CSUM_OK))
-                mb->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+                mb->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
         else
-                mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+                mb->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
 }
 
 /*
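The PKT_* to RTE_MBUF_F_* edits in this and the surrounding hunks are the mechanical part of the 21.11 mbuf flag namespacing; the flag values are unchanged. As far as I can tell the old spellings survived as deprecated aliases in rte_mbuf_core.h, roughly like this illustrative subset (an assumption, not quoted from the patch):

    /* Old names still compile but emit a deprecation warning. */
    #define PKT_RX_RSS_HASH \
            RTE_DEPRECATED(PKT_RX_RSS_HASH) RTE_MBUF_F_RX_RSS_HASH
    #define PKT_RX_IP_CKSUM_GOOD \
            RTE_DEPRECATED(PKT_RX_IP_CKSUM_GOOD) RTE_MBUF_F_RX_IP_CKSUM_GOOD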
@@ -403,7 +403,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                 if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
                     (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
                         mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
-                        mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+                        mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
                 }
 
                 /* Adding the mbuf to the mbuf array passed by the app */
@@ -464,9 +464,9 @@ nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
 }
 
 void
-nfp_net_rx_queue_release(void *rx_queue)
+nfp_net_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-        struct nfp_net_rxq *rxq = rx_queue;
+        struct nfp_net_rxq *rxq = dev->data->rx_queues[queue_idx];
 
         if (rxq) {
                 nfp_net_rx_queue_release_mbufs(rxq);
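The release callback moves in the opposite direction from the count callback: from a bare queue pointer to (device, queue index), so one callback type can serve both Rx and Tx and the PMD can locate the queue slot itself. A hedged sketch of the corresponding 21.11 driver-ops type (the authoritative typedef lives in ethdev_driver.h):

    #include <stdint.h>

    struct rte_eth_dev;  /* opaque in this sketch */

    /* One release signature for both Rx and Tx queues. */
    typedef void (*eth_queue_release_t)(struct rte_eth_dev *dev,
                                        uint16_t queue_id);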
@@ -513,7 +513,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
          * calling nfp_net_stop
          */
         if (dev->data->rx_queues[queue_idx]) {
-                nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]);
+                nfp_net_rx_queue_release(dev, queue_idx);
                 dev->data->rx_queues[queue_idx] = NULL;
         }
@@ -523,6 +523,8 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
         if (rxq == NULL)
                 return -ENOMEM;
 
+        dev->data->rx_queues[queue_idx] = rxq;
+
         /* Hw queues mapping based on firmware configuration */
         rxq->qidx = queue_idx;
         rxq->fl_qcidx = queue_idx * hw->stride_rx;
@@ -556,7 +558,8 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
         if (tz == NULL) {
                 PMD_DRV_LOG(ERR, "Error allocating rx dma");
-                nfp_net_rx_queue_release(rxq);
+                nfp_net_rx_queue_release(dev, queue_idx);
+                dev->data->rx_queues[queue_idx] = NULL;
                 return -ENOMEM;
         }
@@ -569,7 +572,8 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
                                  sizeof(*rxq->rxbufs) * nb_desc,
                                  RTE_CACHE_LINE_SIZE, socket_id);
         if (rxq->rxbufs == NULL) {
-                nfp_net_rx_queue_release(rxq);
+                nfp_net_rx_queue_release(dev, queue_idx);
+                dev->data->rx_queues[queue_idx] = NULL;
                 return -ENOMEM;
         }
@@ -578,7 +582,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
         nfp_net_reset_rx_queue(rxq);
 
-        dev->data->rx_queues[queue_idx] = rxq;
         rxq->hw = hw;
 
         /*
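The setup-path hunks above all follow from the new signature: nfp_net_rx_queue_release(dev, queue_idx) can only free what it finds in dev->data->rx_queues[], so the queue is now published in that array before any allocation that can fail, and the slot is cleared again on each error path. A condensed sketch of the pattern, with a hypothetical allocate_ring() standing in for the memzone and rxbufs allocations:

    dev->data->rx_queues[queue_idx] = rxq;      /* publish early */

    if (allocate_ring(rxq) != 0) {              /* hypothetical helper */
            nfp_net_rx_queue_release(dev, queue_idx);
            dev->data->rx_queues[queue_idx] = NULL; /* unpublish on failure */
            return -ENOMEM;
    }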
@@ -651,9 +654,9 @@ nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
 }
 
 void
-nfp_net_tx_queue_release(void *tx_queue)
+nfp_net_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-        struct nfp_net_txq *txq = tx_queue;
+        struct nfp_net_txq *txq = dev->data->tx_queues[queue_idx];
 
         if (txq) {
                 nfp_net_tx_queue_release_mbufs(txq);
@@ -714,7 +717,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
         if (dev->data->tx_queues[queue_idx]) {
                 PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
                            queue_idx);
-                nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);
+                nfp_net_tx_queue_release(dev, queue_idx);
                 dev->data->tx_queues[queue_idx] = NULL;
         }
@@ -726,6 +729,8 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                 return -ENOMEM;
         }
 
+        dev->data->tx_queues[queue_idx] = txq;
+
         /*
          * Allocate TX ring hardware descriptors. A memzone large enough to
          * handle the maximum ring size is allocated in order to allow for
@@ -737,7 +742,8 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                    socket_id);
         if (tz == NULL) {
                 PMD_DRV_LOG(ERR, "Error allocating tx dma");
-                nfp_net_tx_queue_release(txq);
+                nfp_net_tx_queue_release(dev, queue_idx);
+                dev->data->tx_queues[queue_idx] = NULL;
                 return -ENOMEM;
         }
@@ -763,7 +769,8 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                          sizeof(*txq->txbufs) * nb_desc,
                                          RTE_CACHE_LINE_SIZE, socket_id);
         if (txq->txbufs == NULL) {
-                nfp_net_tx_queue_release(txq);
+                nfp_net_tx_queue_release(dev, queue_idx);
+                dev->data->tx_queues[queue_idx] = NULL;
                 return -ENOMEM;
         }
         PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
@@ -771,7 +778,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
         nfp_net_reset_tx_queue(txq);
 
-        dev->data->tx_queues[queue_idx] = txq;
         txq->hw = hw;
 
         /*
@@ -821,7 +827,7 @@ nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
         ol_flags = mb->ol_flags;
 
-        if (!(ol_flags & PKT_TX_TCP_SEG))
+        if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG))
                 goto clean_txd;
 
         txd->l3_offset = mb->l2_len;
@@ -853,19 +859,19 @@ nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
         ol_flags = mb->ol_flags;
 
         /* IPv6 does not need checksum */
-        if (ol_flags & PKT_TX_IP_CKSUM)
+        if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
                 txd->flags |= PCIE_DESC_TX_IP4_CSUM;
 
-        switch (ol_flags & PKT_TX_L4_MASK) {
-        case PKT_TX_UDP_CKSUM:
+        switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+        case RTE_MBUF_F_TX_UDP_CKSUM:
                 txd->flags |= PCIE_DESC_TX_UDP_CSUM;
                 break;
-        case PKT_TX_TCP_CKSUM:
+        case RTE_MBUF_F_TX_TCP_CKSUM:
                 txd->flags |= PCIE_DESC_TX_TCP_CSUM;
                 break;
         }
 
-        if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
+        if (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK))
                 txd->flags |= PCIE_DESC_TX_CSUM;
 }
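The switch works because the Tx L4 checksum request is a multi-bit field under RTE_MBUF_F_TX_L4_MASK, not a set of independent bits: a packet carries exactly one L4 checksum value. An illustrative application-side counterpart (not part of this patch):

    #include <rte_mbuf.h>

    /* Request hardware UDP checksum offload on one packet. */
    static inline void
    request_udp_cksum(struct rte_mbuf *m)
    {
            m->ol_flags &= ~RTE_MBUF_F_TX_L4_MASK;  /* clear the field */
            m->ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM; /* then set one value */
    }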
@@ -929,7 +935,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                 nfp_net_tx_tso(txq, &txd, pkt);
                 nfp_net_tx_cksum(txq, &txd, pkt);
 
-                if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
+                if ((pkt->ol_flags & RTE_MBUF_F_TX_VLAN) &&
                     (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
                         txd.flags |= PCIE_DESC_TX_VLAN;
                         txd.vlan = pkt->vlan_tci;
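This last rename also drops a legacy suffix: PKT_TX_VLAN_PKT had long been an alternate spelling of PKT_TX_VLAN, and 21.11 settles on the single name RTE_MBUF_F_TX_VLAN. A hedged example of the application side this driver branch serves:

    #include <rte_mbuf.h>

    /* Ask the NIC to insert a VLAN tag on transmit; with
     * NFP_NET_CFG_CTRL_TXVLAN enabled, the driver copies vlan_tci into
     * the Tx descriptor and sets PCIE_DESC_TX_VLAN. */
    static inline void
    request_tx_vlan(struct rte_mbuf *m, uint16_t tci)
    {
            m->vlan_tci = tci;
            m->ol_flags |= RTE_MBUF_F_TX_VLAN;
    }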