return -1;
}
-static void
-virtual_ethdev_rx_queue_release(void *q __rte_unused)
-{
-}
-
-static void
-virtual_ethdev_tx_queue_release(void *q __rte_unused)
-{
-}
-
static int
virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
int wait_to_complete __rte_unused)
.dev_infos_get = virtual_ethdev_info_get,
.rx_queue_setup = virtual_ethdev_rx_queue_setup_success,
.tx_queue_setup = virtual_ethdev_tx_queue_setup_success,
- .rx_queue_release = virtual_ethdev_rx_queue_release,
- .tx_queue_release = virtual_ethdev_tx_queue_release,
.link_update = virtual_ethdev_link_update_success,
.mac_addr_set = virtual_ethdev_mac_address_set,
.stats_get = virtual_ethdev_stats_get,
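
The hunk above, and the driver hunks that follow, all apply the same change: empty rx/tx queue release stubs are deleted and the matching eth_dev_ops entries are dropped, relying on ethdev to treat a missing release callback as a no-op. A minimal sketch of the resulting ops table for a hypothetical PMD (the my_pmd_* names are invented for illustration, not from this patch, and the fragment assumes DPDK's ethdev_driver.h):

/* Illustrative fragment only. The queues of this hypothetical device own
 * no per-queue resources, so both release entries are simply left unset;
 * C designated initializers leave omitted members NULL, and ethdev now
 * skips a NULL release callback instead of requiring an empty stub. */
static const struct eth_dev_ops my_pmd_ops = {
	.dev_configure  = my_pmd_configure,
	.dev_infos_get  = my_pmd_info_get,
	.rx_queue_setup = my_pmd_rx_queue_setup,
	.tx_queue_setup = my_pmd_tx_queue_setup,
	/* .rx_queue_release / .tx_queue_release intentionally omitted */
	.link_update    = my_pmd_link_update,
};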
return 0;
}
-static void
-eth_queue_release(void *q __rte_unused)
-{
-}
-
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
int wait_to_complete __rte_unused)
.promiscuous_disable = eth_dev_promiscuous_disable,
.rx_queue_setup = eth_rx_queue_setup,
.tx_queue_setup = eth_tx_queue_setup,
- .rx_queue_release = eth_queue_release,
- .tx_queue_release = eth_queue_release,
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
return 0;
}
-static void
-eth_queue_release(void *q __rte_unused)
-{
-}
-
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
int wait_to_complete __rte_unused)
.promiscuous_disable = eth_dev_promiscuous_disable,
.rx_queue_setup = eth_rx_queue_setup,
.tx_queue_setup = eth_tx_queue_setup,
- .rx_queue_release = eth_queue_release,
- .tx_queue_release = eth_queue_release,
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
return 0;
}
-static
-void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
-}
-
static
int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc __rte_unused,
return 0;
}
-static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
-}
-
static uint32_t
dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
.rx_queue_setup = dpaa_eth_rx_queue_setup,
.tx_queue_setup = dpaa_eth_tx_queue_setup,
- .rx_queue_release = dpaa_eth_rx_queue_release,
- .tx_queue_release = dpaa_eth_tx_queue_release,
.rx_burst_mode_get = dpaa_dev_rx_burst_mode_get,
.tx_burst_mode_get = dpaa_dev_tx_burst_mode_get,
.rxq_info_get = dpaa_rxq_info_get,
}
}
-static void
-dpaa2_dev_tx_queue_release(void *q __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
-}
-
static uint32_t
dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
.rx_queue_setup = dpaa2_dev_rx_queue_setup,
.rx_queue_release = dpaa2_dev_rx_queue_release,
.tx_queue_setup = dpaa2_dev_tx_queue_setup,
- .tx_queue_release = dpaa2_dev_tx_queue_release,
.rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get,
.tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get,
.flow_ctrl_get = dpaa2_flow_ctrl_get,
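
Note the asymmetry in the dpaa2 hunk above: .rx_queue_release stays (the patch keeps it, so dpaa2_dev_rx_queue_release evidently does real cleanup) while only the empty TX stub is removed. Mixed tables like this work because the helpers introduced below test each callback pointer independently, as in this hedged fragment (names invented):

/* Only the direction that owns per-queue state provides a release
 * callback; the other entry is omitted and ethdev's per-callback
 * NULL check skips it. */
static const struct eth_dev_ops mixed_ops = {
	.rx_queue_setup   = my_rx_queue_setup,
	.rx_queue_release = my_rx_queue_release, /* frees per-queue state */
	.tx_queue_setup   = my_tx_queue_setup,
	/* .tx_queue_release omitted: TX queues allocate nothing */
};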
return 0;
}
-static void
-ipn3ke_rpst_rx_queue_release(__rte_unused void *rxq)
-{
-}
-
static int
ipn3ke_rpst_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
__rte_unused uint16_t queue_idx, __rte_unused uint16_t nb_desc,
return 0;
}
-static void
-ipn3ke_rpst_tx_queue_release(__rte_unused void *txq)
-{
-}
-
/* Statistics collected by each port, VSI, VEB, and S-channel */
struct ipn3ke_rpst_eth_stats {
uint64_t tx_bytes; /* gotc */
.tx_queue_start = ipn3ke_rpst_tx_queue_start,
.tx_queue_stop = ipn3ke_rpst_tx_queue_stop,
.rx_queue_setup = ipn3ke_rpst_rx_queue_setup,
- .rx_queue_release = ipn3ke_rpst_rx_queue_release,
.tx_queue_setup = ipn3ke_rpst_tx_queue_setup,
- .tx_queue_release = ipn3ke_rpst_tx_queue_release,
.dev_set_link_up = ipn3ke_rpst_dev_set_link_up,
.dev_set_link_down = ipn3ke_rpst_dev_set_link_down,
return 0;
}
-static void
-eth_kni_queue_release(void *q __rte_unused)
-{
-}
-
static int
eth_kni_link_update(struct rte_eth_dev *dev __rte_unused,
int wait_to_complete __rte_unused)
.dev_infos_get = eth_kni_dev_info,
.rx_queue_setup = eth_kni_rx_queue_setup,
.tx_queue_setup = eth_kni_tx_queue_setup,
- .rx_queue_release = eth_kni_queue_release,
- .tx_queue_release = eth_kni_queue_release,
.link_update = eth_kni_link_update,
.stats_get = eth_kni_stats_get,
.stats_reset = eth_kni_stats_reset,
return 0;
}
-static void
-eth_queue_release(void *q __rte_unused)
-{
-}
-
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
int wait_to_complete __rte_unused)
.tx_queue_start = eth_tx_queue_start,
.rx_queue_stop = eth_rx_queue_stop,
.tx_queue_stop = eth_tx_queue_stop,
- .rx_queue_release = eth_queue_release,
- .tx_queue_release = eth_queue_release,
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
return 0;
}
-static void
-pfe_rx_queue_release(void *q __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
-}
-
-static void
-pfe_tx_queue_release(void *q __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
-}
-
static int
pfe_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
.dev_configure = pfe_eth_configure,
.dev_infos_get = pfe_eth_info,
.rx_queue_setup = pfe_rx_queue_setup,
- .rx_queue_release = pfe_rx_queue_release,
.tx_queue_setup = pfe_tx_queue_setup,
- .tx_queue_release = pfe_tx_queue_release,
.dev_supported_ptypes_get = pfe_supported_ptypes_get,
.link_update = pfe_eth_link_update,
.promiscuous_enable = pfe_promiscuous_enable,
return 0;
}
-static void
-eth_queue_release(void *q __rte_unused) { ; }
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
int wait_to_complete __rte_unused) { return 0; }
.dev_infos_get = eth_dev_info,
.rx_queue_setup = eth_rx_queue_setup,
.tx_queue_setup = eth_tx_queue_setup,
- .rx_queue_release = eth_queue_release,
- .tx_queue_release = eth_queue_release,
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
return 0;
}
-static void
-virtio_dev_queue_release(void *queue __rte_unused)
-{
- /* do nothing */
-}
-
static uint16_t
virtio_get_nr_vq(struct virtio_hw *hw)
{
.rx_queue_setup = virtio_dev_rx_queue_setup,
.rx_queue_intr_enable = virtio_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = virtio_dev_rx_queue_intr_disable,
- .rx_queue_release = virtio_dev_queue_release,
.tx_queue_setup = virtio_dev_tx_queue_setup,
- .tx_queue_release = virtio_dev_queue_release,
/* collect stats per queue */
.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
.vlan_filter_set = virtio_vlan_filter_set,
return ret;
}
+static void
+eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ void **rxq = dev->data->rx_queues;
+
+ if (rxq[qid] == NULL)
+ return;
+
+ if (dev->dev_ops->rx_queue_release != NULL)
+ (*dev->dev_ops->rx_queue_release)(rxq[qid]);
+ rxq[qid] = NULL;
+}
+
+static void
+eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ void **txq = dev->data->tx_queues;
+
+ if (txq[qid] == NULL)
+ return;
+
+ if (dev->dev_ops->tx_queue_release != NULL)
+ (*dev->dev_ops->tx_queue_release)(txq[qid]);
+ txq[qid] = NULL;
+}
+
static int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
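
The two helpers added above centralize the release-and-clear sequence that the following hunks previously open-coded at every call site. Their contract: return early for a slot that was never set up, skip drivers that provide no release callback instead of failing with -ENOTSUP, and always NULL the slot so a repeated call is harmless. A self-contained model of that contract (the mock_* names are invented for illustration, not DPDK API):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct mock_dev {
	void *rx_queues[4];
	void (*rx_queue_release)(void *q);
};

static void
mock_rxq_release(struct mock_dev *dev, uint16_t qid)
{
	if (dev->rx_queues[qid] == NULL)
		return;		/* never set up, or already released */

	if (dev->rx_queue_release != NULL)
		dev->rx_queue_release(dev->rx_queues[qid]);
	dev->rx_queues[qid] = NULL;	/* make a repeat call a no-op */
}

int
main(void)
{
	int dummy;
	struct mock_dev dev = {
		.rx_queues = { &dummy },
		.rx_queue_release = NULL,	/* driver with nothing to free */
	};

	mock_rxq_release(&dev, 0);	/* NULL callback: slot still cleared */
	mock_rxq_release(&dev, 0);	/* second call: early return */
	assert(dev.rx_queues[0] == NULL);
	return 0;
}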
return -(ENOMEM);
}
} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
+ for (i = nb_queues; i < old_nb_queues; i++)
+ eth_dev_rxq_release(dev, i);
rxq = dev->data->rx_queues;
-
- for (i = nb_queues; i < old_nb_queues; i++)
- (*dev->dev_ops->rx_queue_release)(rxq[i]);
rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
RTE_CACHE_LINE_SIZE);
if (rxq == NULL)
dev->data->rx_queues = rxq;
} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
-
- rxq = dev->data->rx_queues;
-
for (i = nb_queues; i < old_nb_queues; i++)
- (*dev->dev_ops->rx_queue_release)(rxq[i]);
+ eth_dev_rxq_release(dev, i);
rte_free(dev->data->rx_queues);
dev->data->rx_queues = NULL;
return -(ENOMEM);
}
} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
+ for (i = nb_queues; i < old_nb_queues; i++)
+ eth_dev_txq_release(dev, i);
txq = dev->data->tx_queues;
-
- for (i = nb_queues; i < old_nb_queues; i++)
- (*dev->dev_ops->tx_queue_release)(txq[i]);
txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
RTE_CACHE_LINE_SIZE);
if (txq == NULL)
dev->data->tx_queues = txq;
} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
-
- txq = dev->data->tx_queues;
-
for (i = nb_queues; i < old_nb_queues; i++)
- (*dev->dev_ops->tx_queue_release)(txq[i]);
+ eth_dev_txq_release(dev, i);
rte_free(dev->data->tx_queues);
dev->data->tx_queues = NULL;
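
Both reconfigure paths above now shrink through the helper before touching the array. The ordering matters: trailing queues must be released while dev->data->rx_queues / tx_queues still holds their pointers, since the subsequent rte_realloc() may shrink or move the array. A self-contained sketch of the flow, with plain realloc/free standing in for rte_realloc/rte_free and the release parameter standing in for the now-optional driver callback:

#include <stdint.h>
#include <stdlib.h>

/* Returns the resized array, or NULL when new_n == 0 (array dropped) or
 * realloc failed; the ethdev code handles the failure case by returning
 * -ENOMEM while the old array is still intact. */
static void **
shrink_queue_array(void **q, uint16_t old_n, uint16_t new_n,
		void (*release)(void *))
{
	uint16_t i;

	for (i = new_n; i < old_n; i++) {	/* free trailing queues first */
		if (q[i] != NULL && release != NULL)
			release(q[i]);
		q[i] = NULL;
	}
	if (new_n == 0) {			/* nothing left: drop the array */
		free(q);
		return NULL;
	}
	return realloc(q, sizeof(q[0]) * new_n);
}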
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
struct rte_eth_rxconf local_conf;
- void **rxq;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_ETH_QUEUE_STATE_STOPPED))
return -EBUSY;
- rxq = dev->data->rx_queues;
- if (rxq[rx_queue_id]) {
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
- -ENOTSUP);
- (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
- rxq[rx_queue_id] = NULL;
- }
+ eth_dev_rxq_release(dev, rx_queue_id);
if (rx_conf == NULL)
rx_conf = &dev_info.default_rxconf;
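
With the setup path above funneled through eth_dev_rxq_release(), re-running setup on an already-configured queue id releases the old queue implicitly and no longer fails with -ENOTSUP on drivers that lack a release callback. A hedged usage sketch (port id, descriptor counts, and mempool are placeholders; the stopped-state checks visible in the surrounding code still apply):

#include <rte_ethdev.h>
#include <rte_lcore.h>

static int
resize_rxq(uint16_t port_id, struct rte_mempool *mp)
{
	int ret;

	/* First setup; NULL rx_conf selects the driver defaults, as the
	 * context above shows. */
	ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
			NULL, mp);
	if (ret != 0)
		return ret;

	/* Second setup of the same queue id: the old queue is released
	 * implicitly before the new one is created. */
	return rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
			NULL, mp);
}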
int ret;
struct rte_eth_dev *dev;
struct rte_eth_hairpin_cap cap;
- void **rxq;
int i;
int count;
}
if (dev->data->dev_started)
return -EBUSY;
- rxq = dev->data->rx_queues;
- if (rxq[rx_queue_id] != NULL) {
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
- -ENOTSUP);
- (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
- rxq[rx_queue_id] = NULL;
- }
+ eth_dev_rxq_release(dev, rx_queue_id);
ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
nb_rx_desc, conf);
if (ret == 0)
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf local_conf;
- void **txq;
int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
RTE_ETH_QUEUE_STATE_STOPPED))
return -EBUSY;
- txq = dev->data->tx_queues;
- if (txq[tx_queue_id]) {
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
- -ENOTSUP);
- (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
- txq[tx_queue_id] = NULL;
- }
+ eth_dev_txq_release(dev, tx_queue_id);
if (tx_conf == NULL)
tx_conf = &dev_info.default_txconf;
{
struct rte_eth_dev *dev;
struct rte_eth_hairpin_cap cap;
- void **txq;
int i;
int count;
int ret;
}
if (dev->data->dev_started)
return -EBUSY;
- txq = dev->data->tx_queues;
- if (txq[tx_queue_id] != NULL) {
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
- -ENOTSUP);
- (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
- txq[tx_queue_id] = NULL;
- }
+ eth_dev_txq_release(dev, tx_queue_id);
ret = (*dev->dev_ops->tx_hairpin_queue_setup)
(dev, tx_queue_id, nb_tx_desc, conf);
if (ret == 0)