.vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
.rx_queue_setup = ixgbe_dev_rx_queue_setup,
.rx_queue_release = ixgbe_dev_rx_queue_release,
+ .rx_queue_count = ixgbe_dev_rx_queue_count,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
.tx_queue_release = ixgbe_dev_tx_queue_release,
.dev_led_on = ixgbe_dev_led_on,
.vlan_offload_set = ixgbevf_vlan_offload_set,
.rx_queue_setup = ixgbe_dev_rx_queue_setup,
.rx_queue_release = ixgbe_dev_rx_queue_release,
+ .rx_queue_count = ixgbe_dev_rx_queue_count,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
.tx_queue_release = ixgbe_dev_tx_queue_release,
};
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
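+
+/*
+ * Return the number of received packets waiting on the given RX queue,
+ * i.e. descriptors written back by the hardware but not yet retrieved
+ * by the application.
+ */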
+uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+
int ixgbe_dev_rx_init(struct rte_eth_dev *dev);
void ixgbe_dev_tx_init(struct rte_eth_dev *dev);
volatile union ixgbe_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
+ volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
return 0;
}
+
+uint32_t
+ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct igb_rx_queue *rxq;
+ uint32_t nb_pkts_available;
+ uint32_t rx_rdh;
+	uint16_t rx_id;
+
+ if (rx_queue_id >= dev->data->nb_rx_queues) {
+		PMD_RX_LOG(DEBUG, "Invalid RX queue_id=%d\n", rx_queue_id);
+ return 0;
+ }
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+	/*
+	 * RDH is the index of the next descriptor the hardware will
+	 * write back; rx_tail is the index of the next descriptor the
+	 * software will process. Their difference, modulo the ring
+	 * size, is the number of received packets not yet retrieved
+	 * by the application.
+	 */
+	rx_id = rxq->rx_tail;
+	rx_rdh = IXGBE_PCI_REG(rxq->rdh_reg_addr);
+	if (rx_rdh >= rx_id)
+		nb_pkts_available = rx_rdh - rx_id;
+	else
+		nb_pkts_available = rx_rdh - rx_id + rxq->nb_rx_desc;
+
+	return nb_pkts_available;
+}
+
void
ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
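
For reference, applications are expected to reach this counter through the
generic ethdev layer rather than by calling the PMD function directly. Below
is a minimal polling sketch, assuming the matching rte_eth_rx_queue_count()
wrapper in librte_ether that dispatches to the rx_queue_count dev_ops entry
added above; poll_rx_queue() and BURST_SIZE are illustrative names, not part
of this patch:

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    #define BURST_SIZE 32

    /* Illustrative helper: drain the queue only once at least
     * BURST_SIZE packets have been written back by the NIC. */
    static void
    poll_rx_queue(uint8_t port_id, uint16_t queue_id)
    {
    	struct rte_mbuf *pkts[BURST_SIZE];
    	uint16_t nb_rx;

    	if (rte_eth_rx_queue_count(port_id, queue_id) < BURST_SIZE)
    		return;	/* not enough packets accumulated yet */

    	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SIZE);
    	while (nb_rx > 0)
    		rte_pktmbuf_free(pkts[--nb_rx]);	/* consume, then free */
    }

Note that the returned count is only a snapshot: the hardware may write back
further descriptors between the query and the rte_eth_rx_burst() call, and
reading the RDH register is a comparatively slow PCI access, so the count
should not be queried in tight inner loops.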