/* Physical (bus/IOVA) address of the descriptor ring */
uint64_t ring_phys_addr;
/* DMA channel register base address (this patch changes it from a
 * raw uint64_t to a typed pointer)
 */
- uint64_t dma_regs;
+ void *dma_regs;
/* DMA channel tail register address */
volatile uint32_t *dma_tail_reg;
/* NOTE(review): label below said "DPDK queue index" but the following
 * fields are traffic counters — the queue-index field may have been
 * elided by this diff; confirm against the full header.
 */
uint64_t pkts;
uint64_t bytes;
uint64_t errors;
+ /* Rx mbuf allocation failures (new counter added by this patch) */
+ uint64_t rx_mbuf_alloc_failed;
/* Number of mbufs allocated from pool */
uint64_t mbuf_alloc;
- /* replace the Linux-style alignment attribute with the DPDK one */
-} ____cacheline_aligned;
+} __rte_cache_aligned;
/* Tx descriptor format
 * NOTE(review): the fields visible below (ring address, DMA register
 * pointer, tail register) look like per-queue bookkeeping rather than a
 * hardware descriptor — intermediate lines appear elided by this diff;
 * verify against the full header.
 */
struct axgbe_tx_desc {
/* Physical (bus/IOVA) address of the descriptor ring */
uint64_t ring_phys_addr;
/* DMA channel register space (this patch changes it from a raw
 * uint64_t to a typed pointer)
 */
- uint64_t dma_regs;
+ void *dma_regs;
/* DMA tail register address of the ring */
volatile uint32_t *dma_tail_reg;
/* Tx queue index/id */
/* Set up a Tx queue with nb_tx_desc descriptors on the given NUMA socket */
int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
+/* Device-wide Tx datapath enable/disable */
+void axgbe_dev_enable_tx(struct rte_eth_dev *dev);
+void axgbe_dev_disable_tx(struct rte_eth_dev *dev);
+/* Start/stop an individual Tx queue */
+int axgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int axgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+/* Burst transmit entry points: scalar and vector variants; each returns
+ * the number of packets actually queued for transmission
+ */
+uint16_t axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
/* Release a previously set up Rx queue */
void axgbe_dev_rx_queue_release(void *rxq);
/* Set up an Rx queue backed by mbufs drawn from mb_pool */
int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool);
+/* Device-wide Rx datapath enable/disable */
+void axgbe_dev_enable_rx(struct rte_eth_dev *dev);
+void axgbe_dev_disable_rx(struct rte_eth_dev *dev);
+/* Start/stop an individual Rx queue */
+int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+/* Burst receive entry points; each returns the number of packets received */
+uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+/* NOTE(review): "eth_" prefix is inconsistent with the "axgbe_" naming
+ * convention used by every other symbol in this header — confirm intent.
+ */
+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
/* Tear down all Rx/Tx queues of the device */
void axgbe_dev_clear_queues(struct rte_eth_dev *dev);
#endif /* _AXGBE_RXTX_H_ */