#include "rte_ethdev.h"
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
-#define PMD_DEBUG_TRACE(fmt, args...) do { \
+#define RTE_PMD_DEBUG_TRACE(fmt, args...) do { \
RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
} while (0)
#else
-#define PMD_DEBUG_TRACE(fmt, args...)
+#define RTE_PMD_DEBUG_TRACE(fmt, args...)
#endif
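/*
 * Illustrative note (not part of this patch): with RTE_LIBRTE_ETHDEV_DEBUG
 * enabled, a call such as
 *
 *     RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
 *
 * expands to
 *
 *     RTE_LOG(ERR, PMD, "%s: Invalid port_id=%d\n", __func__, port_id);
 *
 * so every trace is logged at ERR level under the PMD logtype and is
 * prefixed with the name of the calling function. When the debug option
 * is disabled the macro expands to nothing, so the traces cost nothing
 * in non-debug builds.
 */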
/* Macros for checking for restricting functions to primary instance only */
-#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
+#define RTE_PROC_PRIMARY_OR_ERR_RET(retval) do { \
if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
- PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
+ RTE_PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
return (retval); \
} \
} while (0)
-#define PROC_PRIMARY_OR_RET() do { \
+#define RTE_PROC_PRIMARY_OR_RET() do { \
if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
- PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
+ RTE_PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
return; \
} \
} while (0)
/* Macros to check for invalid function pointers in dev_ops structure */
-#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
+#define RTE_FUNC_PTR_OR_ERR_RET(func, retval) do { \
if ((func) == NULL) { \
- PMD_DEBUG_TRACE("Function not supported\n"); \
+ RTE_PMD_DEBUG_TRACE("Function not supported\n"); \
return (retval); \
} \
} while (0)
-#define FUNC_PTR_OR_RET(func) do { \
+#define RTE_FUNC_PTR_OR_RET(func) do { \
if ((func) == NULL) { \
- PMD_DEBUG_TRACE("Function not supported\n"); \
+ RTE_PMD_DEBUG_TRACE("Function not supported\n"); \
return; \
} \
} while (0)
/* Macros to check for valid port */
-#define VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
- if (!rte_eth_dev_is_valid_port(port_id)) { \
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
- return retval; \
- } \
+#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
+ if (!rte_eth_dev_is_valid_port(port_id)) { \
+ RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
+ return retval; \
+ } \
} while (0)
-#define VALID_PORTID_OR_RET(port_id) do { \
- if (!rte_eth_dev_is_valid_port(port_id)) { \
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
- return; \
- } \
+#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
+ if (!rte_eth_dev_is_valid_port(port_id)) { \
+ RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
+ return; \
+ } \
} while (0)
+
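/*
 * Illustrative sketch (not part of this patch), assuming a hypothetical
 * dev_ops callback named "example_op": the renamed check macros are meant
 * to be stacked at the top of an ethdev API entry point, in the order used
 * throughout the rest of this file.
 */
static int
rte_eth_dev_example(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	/* restrict the call to the primary process where required */
	RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
	/* reject invalid port identifiers */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	/* reject drivers that do not implement the (hypothetical) callback */
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->example_op, -ENOTSUP);
	return (*dev->dev_ops->example_op)(dev);
}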
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
port_id = rte_eth_dev_find_free_port();
if (port_id == RTE_MAX_ETHPORTS) {
- PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
+ RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
return NULL;
}
rte_eth_dev_data_alloc();
if (rte_eth_dev_allocated(name) != NULL) {
- PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
+ RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
name);
return NULL;
}
if (diag == 0)
return 0;
- PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x) failed\n",
+ RTE_PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x) failed\n",
pci_drv->name,
(unsigned) pci_dev->id.vendor_id,
(unsigned) pci_dev->id.device_id);
static int
rte_eth_dev_get_addr_by_port(uint8_t port_id, struct rte_pci_addr *addr)
{
- VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
if (addr == NULL) {
- PMD_DEBUG_TRACE("Null pointer is specified\n");
+ RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
return -EINVAL;
}
{
char *tmp;
- VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
if (name == NULL) {
- PMD_DEBUG_TRACE("Null pointer is specified\n");
+ RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
return -EINVAL;
}
int i;
if (name == NULL) {
- PMD_DEBUG_TRACE("Null pointer is specified\n");
+ RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
return -EINVAL;
}
struct rte_pci_device *pci_dev = NULL;
if (addr == NULL) {
- PMD_DEBUG_TRACE("Null pointer is specified\n");
+ RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
return -EINVAL;
}
uint32_t dev_flags;
if (!rte_eth_dev_is_valid_port(port_id)) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
return -EINVAL;
}
return -(ENOMEM);
}
} else { /* re-configure */
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
rxq = dev->data->rx_queues;
/* This function is only safe when called from the primary process
* in a multi-process setup*/
- PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+ RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
- VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
if (rx_queue_id >= dev->data->nb_rx_queues) {
- PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
+ RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
return -EINVAL;
}
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
- PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
+ RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
" already started\n",
rx_queue_id, port_id);
return 0;
/* This function is only safe when called from the primary process
* in a multi-process setup*/
- PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+ RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
- VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
if (rx_queue_id >= dev->data->nb_rx_queues) {
- PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
+ RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
return -EINVAL;
}
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
- PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
+ RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
" already stopped\n",
rx_queue_id, port_id);
return 0;
/* This function is only safe when called from the primary process
* in a multi-process setup*/
- PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+ RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
- VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
if (tx_queue_id >= dev->data->nb_tx_queues) {
- PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
+ RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
return -EINVAL;
}
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
- PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
+ RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
" already started\n",
tx_queue_id, port_id);
return 0;
/* This function is only safe when called from the primary process
* in a multi-process setup*/
- PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+ RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
- VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
if (tx_queue_id >= dev->data->nb_tx_queues) {
- PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
+ RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
return -EINVAL;
}
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
- PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
+ RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
" already stopped\n",
tx_queue_id, port_id);
return 0;
return -(ENOMEM);
}
} else { /* re-configure */
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
txq = dev->data->tx_queues;
/* This function is only safe when called from the primary process
* in a multi-process setup*/
- PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+ RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
- VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
- PMD_DEBUG_TRACE(
+ RTE_PMD_DEBUG_TRACE(
"Number of RX queues requested (%u) is greater than max supported(%d)\n",
nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
return -EINVAL;
}
if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
- PMD_DEBUG_TRACE(
+ RTE_PMD_DEBUG_TRACE(
"Number of TX queues requested (%u) is greater than max supported(%d)\n",
nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
return -EINVAL;
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
if (dev->data->dev_started) {
- PMD_DEBUG_TRACE(
+ RTE_PMD_DEBUG_TRACE(
"port %d must be stopped to allow configuration\n", port_id);
return -EBUSY;
}
*/
(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
if (nb_rx_q > dev_info.max_rx_queues) {
- PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
+ RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
port_id, nb_rx_q, dev_info.max_rx_queues);
return -EINVAL;
}
if (nb_rx_q == 0) {
- PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
+ RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
return -EINVAL;
}
if (nb_tx_q > dev_info.max_tx_queues) {
- PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
+ RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
port_id, nb_tx_q, dev_info.max_tx_queues);
return -EINVAL;
}
if (nb_tx_q == 0) {
- PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
+ RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
return -EINVAL;
}
*/
if ((dev_conf->intr_conf.lsc == 1) &&
(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
- PMD_DEBUG_TRACE("driver %s does not support lsc\n",
+ RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
dev->data->drv_name);
return -EINVAL;
}
if (dev_conf->rxmode.jumbo_frame == 1) {
if (dev_conf->rxmode.max_rx_pkt_len >
dev_info.max_rx_pktlen) {
- PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
+ RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
" > max valid value %u\n",
port_id,
(unsigned)dev_conf->rxmode.max_rx_pkt_len,
(unsigned)dev_info.max_rx_pktlen);
return -EINVAL;
} else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
- PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
+ RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
" < min valid value %u\n",
port_id,
(unsigned)dev_conf->rxmode.max_rx_pkt_len,
*/
diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
if (diag != 0) {
- PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
+ RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
port_id, diag);
return diag;
}
diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
if (diag != 0) {
- PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
+ RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
port_id, diag);
rte_eth_dev_rx_queue_config(dev, 0);
return diag;
diag = (*dev->dev_ops->dev_configure)(dev);
if (diag != 0) {
- PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
+ RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
port_id, diag);
rte_eth_dev_rx_queue_config(dev, 0);
rte_eth_dev_tx_queue_config(dev, 0);
(dev->data->mac_pool_sel[i] & (1ULL << pool)))
(*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
else {
- PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
+ RTE_PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
port_id);
/* exit the loop but not return an error */
break;
/* This function is only safe when called from the primary process
* in a multi-process setup*/
- PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+ RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
- VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
if (dev->data->dev_started != 0) {
- PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
+ RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
" already started\n",
port_id);
return 0;
rte_eth_dev_config_restore(port_id);
if (dev->data->dev_conf.intr_conf.lsc == 0) {
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
(*dev->dev_ops->link_update)(dev, 0);
}
return 0;
/* This function is only safe when called from the primary process
* in a multi-process setup*/
- PROC_PRIMARY_OR_RET();
+ RTE_PROC_PRIMARY_OR_RET();
- VALID_PORTID_OR_RET(port_id);
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
if (dev->data->dev_started == 0) {
- PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
+ RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
" already stopped\n",
port_id);
return;
/* This function is only safe when called from the primary process
* in a multi-process setup*/
- PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+ RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
- VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
return (*dev->dev_ops->dev_set_link_up)(dev);
}
/* This function is only safe when called from the primary process
* in a multi-process setup*/
- PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+ RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
- VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
return (*dev->dev_ops->dev_set_link_down)(dev);
}
/* This function is only safe when called from the primary process
* in a multi-process setup*/
- PROC_PRIMARY_OR_RET();
+ RTE_PROC_PRIMARY_OR_RET();
- VALID_PORTID_OR_RET(port_id);
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
dev->data->dev_started = 0;
(*dev->dev_ops->dev_close)(dev);
/* This function is only safe when called from the primary process
* in a multi-process setup*/
- PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+ RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
- VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
if (rx_queue_id >= dev->data->nb_rx_queues) {
- PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
+ RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
return -EINVAL;
}
if (dev->data->dev_started) {
- PMD_DEBUG_TRACE(
+ RTE_PMD_DEBUG_TRACE(
"port %d must be stopped to allow configuration\n", port_id);
return -EBUSY;
}
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
/*
* Check the size of the mbuf data buffer.
*/
rte_eth_dev_info_get(port_id, &dev_info);
if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
- PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
+ RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
mp->name, (int) mp->private_data_size,
(int) sizeof(struct rte_pktmbuf_pool_private));
return -ENOSPC;
mbp_buf_size = rte_pktmbuf_data_room_size(mp);
if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
- PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
+ RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
"=%d)\n",
mp->name,
nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
- PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
+ RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
"should be: <= %hu, = %hu, and a product of %hu\n",
nb_rx_desc,
dev_info.rx_desc_lim.nb_max,
/* This function is only safe when called from the primary process
* in a multi-process setup*/
- PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+ RTE_PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
- VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
if (tx_queue_id >= dev->data->nb_tx_queues) {
- PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
+ RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
return -EINVAL;
}
if (dev->data->dev_started) {
- PMD_DEBUG_TRACE(
+ RTE_PMD_DEBUG_TRACE(
"port %d must be stopped to allow configuration\n", port_id);
return -EBUSY;
}
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
rte_eth_dev_info_get(port_id, &dev_info);
if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
- PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
+ RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
"should be: <= %hu, = %hu, and a product of %hu\n",
nb_tx_desc,
dev_info.tx_desc_lim.nb_max,
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_RET(port_id);
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
(*dev->dev_ops->promiscuous_enable)(dev);
dev->data->promiscuous = 1;
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_RET(port_id);
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
dev->data->promiscuous = 0;
(*dev->dev_ops->promiscuous_disable)(dev);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
return dev->data->promiscuous;
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_RET(port_id);
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
(*dev->dev_ops->allmulticast_enable)(dev);
dev->data->all_multicast = 1;
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_RET(port_id);
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
dev->data->all_multicast = 0;
(*dev->dev_ops->allmulticast_disable)(dev);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
return dev->data->all_multicast;
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_RET(port_id);
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];
if (dev->data->dev_conf.intr_conf.lsc != 0)
rte_eth_dev_atomic_read_link_status(dev, eth_link);
else {
- FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
(*dev->dev_ops->link_update)(dev, 1);
*eth_link = dev->data->dev_link;
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_RET(port_id);
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];
if (dev->data->dev_conf.intr_conf.lsc != 0)
rte_eth_dev_atomic_read_link_status(dev, eth_link);
else {
- FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
(*dev->dev_ops->link_update)(dev, 0);
*eth_link = dev->data->dev_link;
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
memset(stats, 0, sizeof(*stats));
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
(*dev->dev_ops->stats_get)(dev, stats);
stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
return 0;
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_RET(port_id);
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
(*dev->dev_ops->stats_reset)(dev);
}
signed xcount = 0;
uint64_t val, *stats_ptr;
- VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_RET(port_id);
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];
/* implemented by the driver */
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
return (*dev->dev_ops->queue_stats_mapping_set)
(dev, queue_id, stat_idx, is_rx);
}
.nb_align = 1,
};
- VALID_PORTID_OR_RET(port_id);
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];
memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
dev_info->rx_desc_lim = lim;
dev_info->tx_desc_lim = lim;
- FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
(*dev->dev_ops->dev_infos_get)(dev, dev_info);
dev_info->pci_dev = dev->pci_dev;
dev_info->driver_name = dev->data->drv_name;
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_RET(port_id);
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];
ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
*mtu = dev->data->mtu;
int ret;
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
ret = (*dev->dev_ops->mtu_set)(dev, mtu);
if (!ret)
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
- PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
+ RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
return -ENOSYS;
}
if (vlan_id > 4095) {
- PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
+ RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
port_id, (unsigned) vlan_id);
return -EINVAL;
}
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
if (rx_queue_id >= dev->data->nb_rx_queues) {
- PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", port_id);
+ RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", port_id);
return -EINVAL;
}
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
return 0;
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
(*dev->dev_ops->vlan_tpid_set)(dev, tpid);
return 0;
int mask = 0;
int cur, org = 0;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
/*check which option changed by application*/
if (mask == 0)
return ret;
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
(*dev->dev_ops->vlan_offload_set)(dev, mask);
return ret;
struct rte_eth_dev *dev;
int ret = 0;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
if (dev->data->dev_conf.rxmode.hw_vlan_strip)
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
(*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
return 0;
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
memset(fc_conf, 0, sizeof(*fc_conf));
return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
- PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
+ RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
return -EINVAL;
}
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
- PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
+ RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
return -EINVAL;
}
return -EINVAL;
if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
- PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
+ RTE_PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
RTE_RETA_GROUP_SIZE);
return -EINVAL;
}
return -EINVAL;
if (max_rxq == 0) {
- PMD_DEBUG_TRACE("No receive queue is available\n");
+ RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
return -EINVAL;
}
shift = i % RTE_RETA_GROUP_SIZE;
if ((reta_conf[idx].mask & (1ULL << shift)) &&
(reta_conf[idx].reta[shift] >= max_rxq)) {
- PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
+ RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
"the maximum rxq index: %u\n", idx, shift,
reta_conf[idx].reta[shift], max_rxq);
return -EINVAL;
struct rte_eth_dev *dev;
int ret;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
/* Check mask bits */
ret = rte_eth_check_reta_mask(reta_conf, reta_size);
if (ret < 0)
if (ret < 0)
return ret;
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
}
int ret;
if (port_id >= nb_ports) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
return -ENODEV;
}
return ret;
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
}
struct rte_eth_dev *dev;
uint16_t rss_hash_protos;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
rss_hash_protos = rss_conf->rss_hf;
if ((rss_hash_protos != 0) &&
((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
- PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
+ RTE_PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
rss_hash_protos);
return -EINVAL;
}
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if (udp_tunnel == NULL) {
- PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
+ RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
return -EINVAL;
}
if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
- PMD_DEBUG_TRACE("Invalid tunnel type\n");
+ RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
return -EINVAL;
}
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
return (*dev->dev_ops->udp_tunnel_add)(dev, udp_tunnel);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
if (udp_tunnel == NULL) {
- PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
+ RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
return -EINVAL;
}
if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
- PMD_DEBUG_TRACE("Invalid tunnel type\n");
+ RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
return -EINVAL;
}
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
return (*dev->dev_ops->udp_tunnel_del)(dev, udp_tunnel);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
return (*dev->dev_ops->dev_led_on)(dev);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
return (*dev->dev_ops->dev_led_off)(dev);
}
int index;
uint64_t pool_mask;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
if (is_zero_ether_addr(addr)) {
- PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
+ RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
port_id);
return -EINVAL;
}
if (pool >= ETH_64_POOLS) {
- PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
+ RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
return -EINVAL;
}
if (index < 0) {
index = get_mac_addr_index(port_id, &null_mac_addr);
if (index < 0) {
- PMD_DEBUG_TRACE("port %d: MAC address array full\n",
+ RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
port_id);
return -ENOSPC;
}
struct rte_eth_dev *dev;
int index;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
index = get_mac_addr_index(port_id, addr);
if (index == 0) {
- PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
+ RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
return -EADDRINUSE;
} else if (index < 0)
return 0; /* Do nothing if address wasn't found */
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if (!is_valid_assigned_ether_addr(addr))
return -EINVAL;
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
/* Update default address in NIC data structure */
ether_addr_copy(addr, &dev->data->mac_addrs[0]);
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
rte_eth_dev_info_get(port_id, &dev_info);
num_vfs = dev_info.max_vfs;
if (vf > num_vfs) {
- PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
+ RTE_PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
return -EINVAL;
}
if (rx_mode == 0) {
- PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
+ RTE_PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
return -EINVAL;
}
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
}
int ret;
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
if (is_zero_ether_addr(addr)) {
- PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
+ RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
port_id);
return -EINVAL;
}
if (index < 0) {
if (!on) {
- PMD_DEBUG_TRACE("port %d: the MAC address was not "
+ RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
"set in UTA\n", port_id);
return -EINVAL;
}
index = get_hash_mac_addr_index(port_id, &null_mac_addr);
if (index < 0) {
- PMD_DEBUG_TRACE("port %d: MAC address array full\n",
+ RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
port_id);
return -ENOSPC;
}
}
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
if (ret == 0) {
/* Update address in NIC data structure */
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
}
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
rte_eth_dev_info_get(port_id, &dev_info);
num_vfs = dev_info.max_vfs;
if (vf > num_vfs) {
- PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
+ RTE_PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
return -EINVAL;
}
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
}
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
rte_eth_dev_info_get(port_id, &dev_info);
num_vfs = dev_info.max_vfs;
if (vf > num_vfs) {
- PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
+ RTE_PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
return -EINVAL;
}
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
if (vlan_id > ETHER_MAX_VLAN_ID) {
- PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
+ RTE_PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
vlan_id);
return -EINVAL;
}
if (vf_mask == 0) {
- PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
+ RTE_PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
return -EINVAL;
}
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
vf_mask, vlan_on);
}
struct rte_eth_dev_info dev_info;
struct rte_eth_link link;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
rte_eth_dev_info_get(port_id, &dev_info);
link = dev->data->dev_link;
if (queue_idx > dev_info.max_tx_queues) {
- PMD_DEBUG_TRACE("set queue rate limit:port %d: "
+ RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
"invalid queue id=%d\n", port_id, queue_idx);
return -EINVAL;
}
if (tx_rate > link.link_speed) {
- PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
+ RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
"bigger than link speed= %d\n",
tx_rate, link.link_speed);
return -EINVAL;
}
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
}
if (q_msk == 0)
return 0;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
rte_eth_dev_info_get(port_id, &dev_info);
link = dev->data->dev_link;
if (vf > dev_info.max_vfs) {
- PMD_DEBUG_TRACE("set VF rate limit:port %d: "
+ RTE_PMD_DEBUG_TRACE("set VF rate limit:port %d: "
"invalid vf id=%d\n", port_id, vf);
return -EINVAL;
}
if (tx_rate > link.link_speed) {
- PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
+ RTE_PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
"bigger than link speed= %d\n",
tx_rate, link.link_speed);
return -EINVAL;
}
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
}
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if (mirror_conf->rule_type == 0) {
- PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
+ RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
return -EINVAL;
}
if (mirror_conf->dst_pool >= ETH_64_POOLS) {
- PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
+ RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
ETH_64_POOLS - 1);
return -EINVAL;
}
if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
(mirror_conf->pool_mask == 0)) {
- PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
+ RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
return -EINVAL;
}
if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
mirror_conf->vlan.vlan_mask == 0) {
- PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
+ RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
return -EINVAL;
}
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
}
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, 0);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
if (queue_id >= dev->data->nb_rx_queues) {
- PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
+ RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
return 0;
}
return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, 0);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
if (queue_id >= dev->data->nb_tx_queues) {
- PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
+ RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
return 0;
}
return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, 0);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
return (*dev->dev_ops->rx_descriptor_done)(dev->data->rx_queues[queue_id],
offset);
}
if (!cb_fn)
return -EINVAL;
- VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
rte_spinlock_lock(&rte_eth_dev_cb_lock);
if (!cb_fn)
return -EINVAL;
- VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
rte_spinlock_lock(&rte_eth_dev_cb_lock);
int rc;
if (!rte_eth_dev_is_valid_port(port_id)) {
- PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id);
+ RTE_PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id);
return -ENODEV;
}
dev = &rte_eth_devices[port_id];
intr_handle = &dev->pci_dev->intr_handle;
if (!intr_handle->intr_vec) {
- PMD_DEBUG_TRACE("RX Intr vector unset\n");
+ RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
return -EPERM;
}
vec = intr_handle->intr_vec[qid];
rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
if (rc && rc != -EEXIST) {
- PMD_DEBUG_TRACE("p %u q %u rx ctl error"
+ RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
" op %d epfd %d vec %u\n",
port_id, qid, op, epfd, vec);
}
int rc;
if (!rte_eth_dev_is_valid_port(port_id)) {
- PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id);
+ RTE_PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id);
return -ENODEV;
}
dev = &rte_eth_devices[port_id];
if (queue_id >= dev->data->nb_rx_queues) {
- PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
+ RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
return -EINVAL;
}
intr_handle = &dev->pci_dev->intr_handle;
if (!intr_handle->intr_vec) {
- PMD_DEBUG_TRACE("RX Intr vector unset\n");
+ RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
return -EPERM;
}
vec = intr_handle->intr_vec[queue_id];
rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
if (rc && rc != -EEXIST) {
- PMD_DEBUG_TRACE("p %u q %u rx ctl error"
+ RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
" op %d epfd %d vec %u\n",
port_id, queue_id, op, epfd, vec);
return rc;
struct rte_eth_dev *dev;
if (!rte_eth_dev_is_valid_port(port_id)) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
return -ENODEV;
}
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
}
struct rte_eth_dev *dev;
if (!rte_eth_dev_is_valid_port(port_id)) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
return -ENODEV;
}
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
(*dev->dev_ops->bypass_init)(dev);
return 0;
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
(*dev->dev_ops->bypass_state_show)(dev, state);
return 0;
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
(*dev->dev_ops->bypass_state_set)(dev, new_state);
return 0;
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
(*dev->dev_ops->bypass_event_show)(dev, event, state);
return 0;
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
(*dev->dev_ops->bypass_event_set)(dev, event, state);
return 0;
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
(*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
return 0;
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
(*dev->dev_ops->bypass_ver_show)(dev, ver);
return 0;
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
(*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
return 0;
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
(*dev->dev_ops->bypass_wd_reset)(dev);
return 0;
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
RTE_ETH_FILTER_NOP, NULL);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if (qinfo == NULL)
return -EINVAL;
dev = &rte_eth_devices[port_id];
if (queue_id >= dev->data->nb_rx_queues) {
- PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
+ RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
return -EINVAL;
}
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
memset(qinfo, 0, sizeof(*qinfo));
dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if (qinfo == NULL)
return -EINVAL;
dev = &rte_eth_devices[port_id];
if (queue_id >= dev->data->nb_tx_queues) {
- PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
+ RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
return -EINVAL;
}
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
memset(qinfo, 0, sizeof(*qinfo));
dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
return (*dev->dev_ops->timesync_enable)(dev);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
return (*dev->dev_ops->timesync_disable)(dev);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg_length, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg_length, -ENOTSUP);
return (*dev->dev_ops->get_reg_length)(dev);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
return (*dev->dev_ops->get_reg)(dev, info);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
return (*dev->dev_ops->get_eeprom_length)(dev);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
return (*dev->dev_ops->get_eeprom)(dev, info);
}
{
struct rte_eth_dev *dev;
- VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
return (*dev->dev_ops->set_eeprom)(dev, info);
}
struct rte_eth_dev *dev;
if (!rte_eth_dev_is_valid_port(port_id)) {
- PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
return -ENODEV;
}
dev = &rte_eth_devices[port_id];
memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
}
rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev)
{
if ((eth_dev == NULL) || (pci_dev == NULL)) {
- PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
+ RTE_PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
eth_dev, pci_dev);
return;
}