#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"
+/* Dynamic log type for the ethdev library; registered in ethdev_init_log(). */
+static int ethdev_logtype;
+
+/*
+ * Log helper for this file: routes the message to the ethdev log type at the
+ * given RTE_LOG_ level and appends the trailing newline so call sites omit it.
+ */
+#define ethdev_log(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, ethdev_logtype, fmt "\n", ## __VA_ARGS__)
+
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static uint8_t eth_dev_last_created_port;
port_id = rte_eth_dev_find_free_port();
if (port_id == RTE_MAX_ETHPORTS) {
- RTE_LOG(ERR, EAL, "Reached maximum number of Ethernet ports\n");
+ ethdev_log(ERR, "Reached maximum number of Ethernet ports");
goto unlock;
}
if (rte_eth_dev_allocated(name) != NULL) {
- RTE_LOG(ERR, EAL, "Ethernet Device with name %s already allocated!\n",
- name);
+ ethdev_log(ERR,
+ "Ethernet Device with name %s already allocated!",
+ name);
goto unlock;
}
}
void *
-rte_eth_dev_get_sec_ctx(uint8_t port_id)
+rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
return rte_eth_devices[port_id].security_ctx;
/* no point looking at the port count if no port exists */
if (!rte_eth_dev_count()) {
- RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
+ ethdev_log(ERR, "No port found for device (%s)", name);
ret = -1;
goto err;
}
dev_flags = rte_eth_devices[port_id].data->dev_flags;
if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
- RTE_LOG(ERR, EAL, "Port %" PRIu16 " is bonded, cannot detach\n",
- port_id);
+ ethdev_log(ERR,
+ "Port %" PRIu16 " is bonded, cannot detach", port_id);
ret = -ENOTSUP;
goto err;
}
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
+ if (!dev->data->dev_started) {
+ RTE_PMD_DEBUG_TRACE(
+ "port %d must be started before start any queue\n", port_id);
+ return -EINVAL;
+ }
+
if (rx_queue_id >= dev->data->nb_rx_queues) {
RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
return -EINVAL;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
+ if (!dev->data->dev_started) {
+ RTE_PMD_DEBUG_TRACE(
+ "port %d must be started before start any queue\n", port_id);
+ return -EINVAL;
+ }
+
if (tx_queue_id >= dev->data->nb_tx_queues) {
RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
return -EINVAL;
return dev->data->all_multicast;
}
-static inline int
-rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
void
rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
{
RTE_ETH_VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];
- if (dev->data->dev_conf.intr_conf.lsc != 0)
- rte_eth_dev_atomic_read_link_status(dev, eth_link);
+ if (dev->data->dev_conf.intr_conf.lsc &&
+ dev->data->dev_started)
+ rte_eth_linkstatus_get(dev, eth_link);
else {
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
(*dev->dev_ops->link_update)(dev, 1);
RTE_ETH_VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];
- if (dev->data->dev_conf.intr_conf.lsc != 0)
- rte_eth_dev_atomic_read_link_status(dev, eth_link);
+ if (dev->data->dev_conf.intr_conf.lsc &&
+ dev->data->dev_started)
+ rte_eth_linkstatus_get(dev, eth_link);
else {
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
(*dev->dev_ops->link_update)(dev, 0);
memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
dev_info->rx_desc_lim = lim;
dev_info->tx_desc_lim = lim;
+ dev_info->device = dev->device;
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
(*dev->dev_ops->dev_infos_get)(dev, dev_info);
return -EINVAL;
if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
- RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
+ ethdev_log(ERR, "Invalid port_id=%d", port_id);
return -EINVAL;
}
return -EINVAL;
if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
- RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
+ ethdev_log(ERR, "Invalid port_id=%d", port_id);
return -EINVAL;
}
if (mz)
return mz;
- return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
+ return rte_memzone_reserve_aligned(z_name, size, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, align);
}
int
filter_op, arg));
}
-void *
+const struct rte_eth_rxtx_callback *
rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
rte_rx_callback_fn fn, void *user_param)
{
return cb;
}
-void *
+const struct rte_eth_rxtx_callback *
rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
rte_rx_callback_fn fn, void *user_param)
{
return cb;
}
-void *
+const struct rte_eth_rxtx_callback *
rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
rte_tx_callback_fn fn, void *user_param)
{
int
rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
- struct rte_eth_rxtx_callback *user_cb)
+ const struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
return -ENOTSUP;
int
rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
- struct rte_eth_rxtx_callback *user_cb)
+ const struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
return -ENOTSUP;
return (*dev->dev_ops->pool_ops_supported)(dev, pool);
}
+
+/*
+ * Constructor: register the "lib.ethdev" dynamic log type before main() runs
+ * so ethdev_log() calls have a valid log type from the start.
+ */
+RTE_INIT(ethdev_init_log);
+static void
+ethdev_init_log(void)
+{
+ ethdev_logtype = rte_log_register("lib.ethdev");
+ /* rte_log_register() returns a negative value on failure; only set the
+  * default level (INFO) when registration succeeded.
+  */
+ if (ethdev_logtype >= 0)
+  rte_log_set_level(ethdev_logtype, RTE_LOG_INFO);
+}