}
/* Initialise and register driver with DPDK Application */
-static const struct eth_dev_ops nfp_net_eth_dev_ops = {
+static const struct eth_dev_ops nfp_net_nfd3_eth_dev_ops = {
.dev_configure = nfp_net_configure,
.dev_start = nfp_net_start,
.dev_stop = nfp_net_stop,
.rss_hash_conf_get = nfp_net_rss_hash_conf_get,
.rx_queue_setup = nfp_net_rx_queue_setup,
.rx_queue_release = nfp_net_rx_queue_release,
- .tx_queue_setup = nfp_net_tx_queue_setup,
+ .tx_queue_setup = nfp_net_nfd3_tx_queue_setup,
.tx_queue_release = nfp_net_tx_queue_release,
.rx_queue_intr_enable = nfp_rx_queue_intr_enable,
.rx_queue_intr_disable = nfp_rx_queue_intr_disable,
PMD_INIT_LOG(DEBUG, "Working with physical port number: %d, "
"NFP internal port number: %d", port, hw->nfp_idx);
- eth_dev->dev_ops = &nfp_net_eth_dev_ops;
+ eth_dev->dev_ops = &nfp_net_nfd3_eth_dev_ops;
eth_dev->rx_queue_count = nfp_net_rx_queue_count;
eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
- eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
+ eth_dev->tx_pkt_burst = &nfp_net_nfd3_xmit_pkts;
/* For secondary processes, the primary has done all the work */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return -ENODEV;
}
eth_dev->process_private = cpp;
- eth_dev->dev_ops = &nfp_net_eth_dev_ops;
+ eth_dev->dev_ops = &nfp_net_nfd3_eth_dev_ops;
eth_dev->rx_queue_count = nfp_net_rx_queue_count;
eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
- eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
+ eth_dev->tx_pkt_burst = &nfp_net_nfd3_xmit_pkts;
rte_eth_dev_probing_finish(eth_dev);
}
}
/* Initialise and register VF driver with DPDK Application */
-static const struct eth_dev_ops nfp_netvf_eth_dev_ops = {
+static const struct eth_dev_ops nfp_netvf_nfd3_eth_dev_ops = {
.dev_configure = nfp_net_configure,
.dev_start = nfp_netvf_start,
.dev_stop = nfp_netvf_stop,
.rss_hash_conf_get = nfp_net_rss_hash_conf_get,
.rx_queue_setup = nfp_net_rx_queue_setup,
.rx_queue_release = nfp_net_rx_queue_release,
- .tx_queue_setup = nfp_net_tx_queue_setup,
+ .tx_queue_setup = nfp_net_nfd3_tx_queue_setup,
.tx_queue_release = nfp_net_tx_queue_release,
.rx_queue_intr_enable = nfp_rx_queue_intr_enable,
.rx_queue_intr_disable = nfp_rx_queue_intr_disable,
hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- eth_dev->dev_ops = &nfp_netvf_eth_dev_ops;
+ eth_dev->dev_ops = &nfp_netvf_nfd3_eth_dev_ops;
eth_dev->rx_queue_count = nfp_net_rx_queue_count;
eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
- eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
+ eth_dev->tx_pkt_burst = &nfp_net_nfd3_xmit_pkts;
/* For secondary processes, the primary has done all the work */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
}
int
-nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
PMD_INIT_FUNC_TRACE();
/* Validating number of descriptors */
- tx_desc_sz = nb_desc * sizeof(struct nfp_net_tx_desc);
+ tx_desc_sz = nb_desc * sizeof(struct nfp_net_nfd3_tx_desc);
if (tx_desc_sz % NFP_ALIGN_RING_DESC != 0 ||
nb_desc > NFP_NET_MAX_TX_DESC ||
nb_desc < NFP_NET_MIN_TX_DESC) {
* resizing in later calls to the queue setup function.
*/
tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
- sizeof(struct nfp_net_tx_desc) *
+ sizeof(struct nfp_net_nfd3_tx_desc) *
NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN,
socket_id);
if (tz == NULL) {
/* Saving physical and virtual addresses for the TX ring */
txq->dma = (uint64_t)tz->iova;
- txq->txds = (struct nfp_net_tx_desc *)tz->addr;
+ txq->txds = (struct nfp_net_nfd3_tx_desc *)tz->addr;
/* mbuf pointers array for referencing mbufs linked to TX descriptors */
txq->txbufs = rte_zmalloc_socket("txq->txbufs",
-/* Leaving always free descriptors for avoiding wrapping confusion */
+/* Always leave spare descriptors so a full ring is not mistaken for an empty one */
static inline
-uint32_t nfp_free_tx_desc(struct nfp_net_txq *txq)
+uint32_t nfp_net_nfd3_free_tx_desc(struct nfp_net_txq *txq)
{
if (txq->wr_p >= txq->rd_p)
return txq->tx_count - (txq->wr_p - txq->rd_p) - 8;
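Only the non-wrapped branch of the free-descriptor computation is visible in the hunk above. A minimal standalone sketch of the full ring arithmetic follows, assuming the elided branch handles the wrapped case symmetrically and lifting the literal 8 into a named constant; the ring_view, ring_free_descs, and RING_RESERVED names are placeholders for illustration, not part of the patch:

#include <stdint.h>

/* Assumption: the driver reserves a few descriptors, shown above as
 * the literal 8, so the ring can never fill completely. */
#define RING_RESERVED 8

struct ring_view {
	uint32_t tx_count;	/* total descriptors in the ring */
	uint32_t wr_p;		/* host write (producer) pointer */
	uint32_t rd_p;		/* host read (consumer) pointer */
};

/* Free descriptors, including the wrap-around case the hunk elides. */
static inline uint32_t
ring_free_descs(const struct ring_view *r)
{
	if (r->wr_p >= r->rd_p)
		return r->tx_count - (r->wr_p - r->rd_p) - RING_RESERVED;
	/* Writer has wrapped: free space is the gap up to the reader. */
	return r->rd_p - r->wr_p - RING_RESERVED;
}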
- * This function uses the host copy* of read/write pointers
+ * This function uses the host copy of the read/write pointers
*/
static inline
-uint32_t nfp_net_txq_full(struct nfp_net_txq *txq)
+uint32_t nfp_net_nfd3_txq_full(struct nfp_net_txq *txq)
{
- return (nfp_free_tx_desc(txq) < txq->tx_free_thresh);
+ return (nfp_net_nfd3_free_tx_desc(txq) < txq->tx_free_thresh);
}
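Continuing the ring_view sketch above, the renamed full-queue check reduces to comparing free descriptors against the configured threshold:

/* Sketch: the queue counts as "full" once free descriptors drop
 * below tx_free_thresh, not once they reach zero. */
static inline int
ring_full(const struct ring_view *r, uint32_t tx_free_thresh)
{
	return ring_free_descs(r) < tx_free_thresh;
}

Checking against a threshold rather than zero lets the transmit path reclaim completed descriptors in batches; that is the pattern visible further down, where nfp_net_tx_free_bufs() is called before the free descriptors are recounted.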
-/* nfp_net_tx_tso - Set TX descriptor for TSO */
+/* nfp_net_nfd3_tx_tso - Set TX descriptor for TSO */
static inline void
-nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
+nfp_net_nfd3_tx_tso(struct nfp_net_txq *txq, struct nfp_net_nfd3_tx_desc *txd,
struct rte_mbuf *mb)
{
uint64_t ol_flags;
-/* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
+/* nfp_net_nfd3_tx_cksum - Set TX CSUM offload flags in TX descriptor */
static inline void
-nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
+nfp_net_nfd3_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_nfd3_tx_desc *txd,
struct rte_mbuf *mb)
{
uint64_t ol_flags;
}
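Neither hunk shows the body of the renamed checksum helper. As a rough illustration of the pattern such a helper follows, here is a hedged sketch mapping mbuf offload requests onto descriptor flag bits; the DESC_TX_* names are placeholders, not the driver's real constants:

#include <stdint.h>
#include <rte_mbuf.h>

/* Placeholder descriptor flag bits -- illustrative only. */
#define DESC_TX_IP4_CSUM (1 << 0)
#define DESC_TX_TCP_CSUM (1 << 1)
#define DESC_TX_UDP_CSUM (1 << 2)

/* Sketch: translate mbuf TX offload flags into descriptor flags. */
static inline uint8_t
tx_cksum_flags(const struct rte_mbuf *mb)
{
	uint8_t flags = 0;

	if (mb->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
		flags |= DESC_TX_IP4_CSUM;

	switch (mb->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
	case RTE_MBUF_F_TX_TCP_CKSUM:
		flags |= DESC_TX_TCP_CSUM;
		break;
	case RTE_MBUF_F_TX_UDP_CKSUM:
		flags |= DESC_TX_UDP_CSUM;
		break;
	}

	return flags;
}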
uint16_t
-nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+nfp_net_nfd3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct nfp_net_txq *txq;
struct nfp_net_hw *hw;
- struct nfp_net_tx_desc *txds, txd;
+ struct nfp_net_nfd3_tx_desc *txds, txd;
struct rte_mbuf *pkt;
uint64_t dma_addr;
int pkt_size, dma_size;
PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets",
txq->qidx, txq->wr_p, nb_pkts);
- if ((nfp_free_tx_desc(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
+ if ((nfp_net_nfd3_free_tx_desc(txq) < nb_pkts) || (nfp_net_nfd3_txq_full(txq)))
nfp_net_tx_free_bufs(txq);
- free_descs = (uint16_t)nfp_free_tx_desc(txq);
+ free_descs = (uint16_t)nfp_net_nfd3_free_tx_desc(txq);
if (unlikely(free_descs == 0))
return 0;
* multisegment packet, but TSO info needs to be in all of them.
*/
txd.data_len = pkt->pkt_len;
- nfp_net_tx_tso(txq, &txd, pkt);
- nfp_net_tx_cksum(txq, &txd, pkt);
+ nfp_net_nfd3_tx_tso(txq, &txd, pkt);
+ nfp_net_nfd3_tx_cksum(txq, &txd, pkt);
if ((pkt->ol_flags & RTE_MBUF_F_TX_VLAN) &&
(hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
#define PCIE_DESC_TX_ENCAP_VXLAN (1 << 1)
#define PCIE_DESC_TX_ENCAP_GRE (1 << 0)
-struct nfp_net_tx_desc {
+struct nfp_net_nfd3_tx_desc {
union {
struct {
uint8_t dma_addr_hi; /* High bits of host buf address */
* of the queue and @size is the size in bytes for the queue
* (needed for free)
*/
- struct nfp_net_tx_desc *txds;
+ struct nfp_net_nfd3_tx_desc *txds;
/*
* At this point 48 bytes have been used for all the fields in the
struct rte_mempool *mp);
void nfp_net_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
void nfp_net_reset_tx_queue(struct nfp_net_txq *txq);
-int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+int nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
-uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+uint16_t nfp_net_nfd3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
#endif /* _NFP_RXTX_H_ */
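After the rename, nothing changes for applications: the NFD3 transmit path is still reached through the generic ethdev burst API, since the probe code above installs nfp_net_nfd3_xmit_pkts as tx_pkt_burst. A minimal usage sketch, assuming a port that has already been configured and started:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Sketch: a plain burst send. On an NFP NFD3 port this dispatches
 * through eth_dev->tx_pkt_burst, i.e. nfp_net_nfd3_xmit_pkts. */
static uint16_t
send_burst(uint16_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	/* Returns how many packets were queued; the caller must
	 * retry or free the remainder. */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);
}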