static int eth_ixgbe_dev_init(struct eth_driver *eth_drv,
struct rte_eth_dev *eth_dev);
-static int ixgbe_dev_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q,
- uint16_t nb_tx_q);
+static int ixgbe_dev_configure(struct rte_eth_dev *dev);
static int ixgbe_dev_start(struct rte_eth_dev *dev);
static void ixgbe_dev_stop(struct rte_eth_dev *dev);
static void ixgbe_dev_close(struct rte_eth_dev *dev);
/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv,
struct rte_eth_dev *eth_dev);
-static int ixgbevf_dev_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q,
- uint16_t nb_tx_q);
+static int ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int ixgbevf_dev_start(struct rte_eth_dev *dev);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
.dev_infos_get = ixgbe_dev_info_get,
.vlan_filter_set = ixgbe_vlan_filter_set,
.rx_queue_setup = ixgbe_dev_rx_queue_setup,
+ .rx_queue_release = ixgbe_dev_rx_queue_release,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
+ .tx_queue_release = ixgbe_dev_tx_queue_release,
.dev_led_on = ixgbe_dev_led_on,
.dev_led_off = ixgbe_dev_led_off,
.flow_ctrl_set = ixgbe_flow_ctrl_set,
.dev_infos_get = ixgbe_dev_info_get,
.rx_queue_setup = ixgbe_dev_rx_queue_setup,
+ .rx_queue_release = ixgbe_dev_rx_queue_release,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
+ .tx_queue_release = ixgbe_dev_tx_queue_release,
};
/**
PMD_INIT_FUNC_TRACE();
- /* Allocate the array of pointers to RX queue structures */
- diag = ixgbe_dev_rx_queue_alloc(dev, nb_rx_q);
- if (diag != 0) {
- PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d"
- "pointers to RX queues failed", dev->data->port_id,
- nb_rx_q);
- return diag;
}
- /* Allocate the array of pointers to TX queue structures */
- diag = ixgbe_dev_tx_queue_alloc(dev, nb_tx_q);
- if (diag != 0) {
- PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d"
- "pointers to TX queues failed", dev->data->port_id,
- nb_tx_q);
- return diag;
}
/* set flag to update link status after init */
error:
PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
+ ixgbe_dev_clear_queues(dev);
return -EIO;
}
}
static int
-ixgbevf_dev_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q, uint16_t nb_tx_q)
+ixgbevf_dev_configure(struct rte_eth_dev *dev)
{
- int diag;
struct rte_eth_conf* conf = &dev->data->dev_conf;
- PMD_INIT_FUNC_TRACE();
-
- /* Allocate the array of pointers to RX queue structures */
- diag = ixgbe_dev_rx_queue_alloc(dev, nb_rx_q);
- if (diag != 0) {
- PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d"
- "pointers to RX queues failed", dev->data->port_id,
- nb_rx_q);
- return diag;
- }
-
- /* Allocate the array of pointers to TX queue structures */
- diag = ixgbe_dev_tx_queue_alloc(dev, nb_tx_q);
- if (diag != 0) {
- PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d"
- "pointers to TX queues failed", dev->data->port_id,
- nb_tx_q);
- return diag;
- }
if (!conf->rxmode.hw_strip_crc) {
/*
*/
void ixgbe_dev_clear_queues(struct rte_eth_dev *dev);
-int ixgbe_dev_rx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_rx_queues);
+void ixgbe_dev_rx_queue_release(void *rxq);
-int ixgbe_dev_tx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_tx_queues);
+void ixgbe_dev_tx_queue_release(void *txq);
int ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
void ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev);
-uint16_t ixgbe_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+uint16_t ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
-uint16_t ixgbe_recv_scattered_pkts(struct igb_rx_queue *rxq,
+uint16_t ixgbe_recv_scattered_pkts(void *rx_queue,
struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
-uint16_t ixgbe_xmit_pkts(struct igb_tx_queue *txq, struct rte_mbuf **tx_pkts,
+uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
/*
}
uint16_t
-ixgbe_xmit_pkts(struct igb_tx_queue *txq, struct rte_mbuf **tx_pkts,
+ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
+ struct igb_tx_queue *txq;
struct igb_tx_entry *sw_ring;
struct igb_tx_entry *txe, *txn;
volatile union ixgbe_adv_tx_desc *txr;
uint32_t ctx;
uint32_t new_ctx;
+ txq = tx_queue;
sw_ring = txq->sw_ring;
txr = txq->tx_ring;
tx_id = txq->tx_tail;
}
uint16_t
-ixgbe_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
+ struct igb_rx_queue *rxq;
volatile union ixgbe_adv_rx_desc *rx_ring;
volatile union ixgbe_adv_rx_desc *rxdp;
struct igb_rx_entry *sw_ring;
nb_rx = 0;
nb_hold = 0;
+ rxq = rx_queue;
rx_id = rxq->rx_tail;
rx_ring = rxq->rx_ring;
sw_ring = rxq->sw_ring;
}
uint16_t
-ixgbe_recv_scattered_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
+ struct igb_rx_queue *rxq;
volatile union ixgbe_adv_rx_desc *rx_ring;
volatile union ixgbe_adv_rx_desc *rxdp;
struct igb_rx_entry *sw_ring;
nb_rx = 0;
nb_hold = 0;
+ rxq = rx_queue;
rx_id = rxq->rx_tail;
rx_ring = rxq->rx_ring;
sw_ring = rxq->sw_ring;
static void
ixgbe_tx_queue_release(struct igb_tx_queue *txq)
{
+	/* NULL-safe: skip teardown for queues that were never set up. */
+ if (txq != NULL) {
ixgbe_tx_queue_release_mbufs(txq);
rte_free(txq->sw_ring);
rte_free(txq);
+ }
}
-int
-ixgbe_dev_tx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues)
+/*
+ * eth_dev_ops .tx_queue_release hook: frees one TX queue and its
+ * resources. Takes void * to match the ethdev callback signature;
+ * replaces the old bulk ixgbe_dev_tx_queue_alloc() array management.
+ */
+void
+ixgbe_dev_tx_queue_release(void *txq)
{
- uint16_t old_nb_queues = dev->data->nb_tx_queues;
- struct igb_tx_queue **txq;
- unsigned i;
-
- PMD_INIT_FUNC_TRACE();
-
- if (dev->data->tx_queues == NULL) {
- dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
- sizeof(struct igb_tx_queue *) * nb_queues,
- CACHE_LINE_SIZE);
- if (dev->data->tx_queues == NULL) {
- dev->data->nb_tx_queues = 0;
- return -1;
- }
- }
- else {
- for (i = nb_queues; i < old_nb_queues; i++)
- ixgbe_tx_queue_release(dev->data->tx_queues[i]);
- txq = rte_realloc(dev->data->tx_queues,
- sizeof(struct igb_tx_queue *) * nb_queues,
- CACHE_LINE_SIZE);
- if (txq == NULL)
- return -1;
- else
- dev->data->tx_queues = txq;
- if (nb_queues > old_nb_queues)
- memset(&dev->data->tx_queues[old_nb_queues], 0,
- sizeof(struct igb_tx_queue *) *
- (nb_queues - old_nb_queues));
- }
- dev->data->nb_tx_queues = nb_queues;
- return 0;
+	/* Delegate to the internal release helper (NULL-tolerant). */
+ ixgbe_tx_queue_release(txq);
}
/* (Re)set dynamic igb_tx_queue fields to defaults */
rte_free(rxq);
}
-int
-ixgbe_dev_rx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues)
+/*
+ * eth_dev_ops .rx_queue_release hook: frees one RX queue and its
+ * resources. Takes void * to match the ethdev callback signature;
+ * replaces the old bulk ixgbe_dev_rx_queue_alloc() array management.
+ */
+void
+ixgbe_dev_rx_queue_release(void *rxq)
{
- uint16_t old_nb_queues = dev->data->nb_rx_queues;
- struct igb_rx_queue **rxq;
- unsigned i;
+	/* Delegate to the internal release helper. */
+ ixgbe_rx_queue_release(rxq);
+}
- PMD_INIT_FUNC_TRACE();
if (dev->data->rx_queues == NULL) {
dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
/* Setup RX queues */
- dev->rx_pkt_burst = ixgbe_recv_pkts;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
/* Allocate buffers for descriptor rings */
ret = ixgbe_alloc_rx_queue_mbufs(rxq);
- if (ret) {
- ixgbe_dev_clear_queues(dev);
+ if (ret)
return ret;
- }
/*
* Reset crc_len in case it was changed after queue setup by a