static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
+/* public fast-path API */
+struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];
+
/* spinlock for eth device callbacks */
static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
-/* spinlock for add/remove rx callbacks */
+/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
-/* spinlock for add/remove tx callbacks */
+/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
/* spinlock for shared data allocation */
RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
- RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
RTE_RX_OFFLOAD_BIT2STR(SCATTER),
RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
RTE_RX_OFFLOAD_BIT2STR(SECURITY),
#undef RTE_TX_OFFLOAD_BIT2STR
+/*
+ * Map of device capability flags (single RTE_ETH_DEV_CAPA_* bits) to
+ * printable names; consumed by rte_eth_dev_capability_name().
+ * The field is named "offload" to mirror the sibling offload-name tables.
+ */
+static const struct {
+ uint64_t offload;
+ const char *name;
+} rte_eth_dev_capa_names[] = {
+ {RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
+ {RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
+ {RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
+};
+
/**
* The user application callback description.
*
char *cls_str = NULL;
int str_size;
+ if (iter == NULL) {
+ RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
+ return -EINVAL;
+ }
+
+ if (devargs_str == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot initialize iterator from NULL device description string\n");
+ return -EINVAL;
+ }
+
memset(iter, 0, sizeof(*iter));
memset(&devargs, 0, sizeof(devargs));
* - 0000:08:00.0,representor=[1-3]
* - pci:0000:06:00.0,representor=[0,5]
* - class=eth,mac=00:11:22:33:44:55
- * A new syntax is in development (not yet supported):
* - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
*/
}
/* Convert bus args to new syntax for use with new API dev_iterate. */
- if (strcmp(iter->bus->name, "vdev") == 0) {
+ if ((strcmp(iter->bus->name, "vdev") == 0) ||
+ (strcmp(iter->bus->name, "fslmc") == 0) ||
+ (strcmp(iter->bus->name, "dpaa_bus") == 0)) {
bus_param_key = "name";
} else if (strcmp(iter->bus->name, "pci") == 0) {
bus_param_key = "addr";
uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
+ if (iter == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get next device from NULL iterator\n");
+ return RTE_MAX_ETHPORTS;
+ }
+
if (iter->cls == NULL) /* invalid ethdev iterator */
return RTE_MAX_ETHPORTS;
void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
+ if (iter == NULL) {
+ RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
+ return;
+ }
+
if (iter->bus_str == NULL)
return; /* nothing to free in pure class filter */
free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
eth_dev = eth_dev_get(port_id);
strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
eth_dev->data->port_id = port_id;
+ eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
eth_dev->data->mtu = RTE_ETHER_MTU;
pthread_mutex_init(ð_dev->data->flow_ops_mutex, NULL);
/*
* Attach to a port already registered by the primary process, which
- * makes sure that the same device would have the same port id both
+ * makes sure that the same device would have the same port ID both
* in the primary and secondary process.
*/
struct rte_eth_dev *
rte_eth_dev_callback_process(eth_dev,
RTE_ETH_EVENT_DESTROY, NULL);
+ eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id);
+
rte_spinlock_lock(ð_dev_shared_data->ownership_lock);
eth_dev->state = RTE_ETH_DEV_UNUSED;
eth_dev->tx_pkt_burst = NULL;
eth_dev->tx_pkt_prepare = NULL;
eth_dev->rx_queue_count = NULL;
- eth_dev->rx_descriptor_done = NULL;
eth_dev->rx_descriptor_status = NULL;
eth_dev->tx_descriptor_status = NULL;
eth_dev->dev_ops = NULL;
int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
+ if (owner_id == NULL) {
+ RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
+ return -EINVAL;
+ }
+
eth_dev_shared_data_prepare();
rte_spinlock_lock(ð_dev_shared_data->ownership_lock);
struct rte_eth_dev_owner *port_owner;
if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
- RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
+ RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
port_id);
return -ENODEV;
}
+ if (new_owner == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot set ethdev port %u owner from NULL owner\n",
+ port_id);
+ return -EINVAL;
+ }
+
if (!eth_is_valid_owner_id(new_owner->id) &&
!eth_is_valid_owner_id(old_owner_id)) {
RTE_ETHDEV_LOG(ERR,
owner_id);
} else {
RTE_ETHDEV_LOG(ERR,
- "Invalid owner id=%016"PRIx64"\n",
+ "Invalid owner ID=%016"PRIx64"\n",
owner_id);
ret = -EINVAL;
}
+/*
+ * Copy the owner descriptor of an ethdev port into *owner.
+ * Order of checks: valid port ID, port actually allocated, non-NULL
+ * destination; only then is the owner record copied under the shared-data
+ * ownership spinlock. Returns 0, -ENODEV or -EINVAL accordingly.
+ *
+ * NOTE(review): the "ð" sequences in the spinlock/rte_memcpy lines below
+ * appear to be mojibake for "&eth" (HTML-entity corruption of this patch)
+ * -- confirm against the pristine source before applying.
+ */
int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
- int ret = 0;
- struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
+ struct rte_eth_dev *ethdev;
- eth_dev_shared_data_prepare();
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ ethdev = &rte_eth_devices[port_id];
- rte_spinlock_lock(ð_dev_shared_data->ownership_lock);
+ if (!eth_dev_is_allocated(ethdev)) {
+ RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
+ port_id);
+ return -ENODEV;
+ }
- if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
- RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
+ if (owner == NULL) {
+ RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
port_id);
- ret = -ENODEV;
- } else {
- rte_memcpy(owner, ðdev->data->owner, sizeof(*owner));
+ return -EINVAL;
}
+ eth_dev_shared_data_prepare();
+
+ rte_spinlock_lock(ð_dev_shared_data->ownership_lock);
+ rte_memcpy(owner, ðdev->data->owner, sizeof(*owner));
rte_spinlock_unlock(ð_dev_shared_data->ownership_lock);
- return ret;
+
+ return 0;
}
int
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if (name == NULL) {
- RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
+ RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
+ port_id);
return -EINVAL;
}
uint16_t pid;
if (name == NULL) {
- RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
+ RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name");
+ return -EINVAL;
+ }
+
+ if (port_id == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get port ID to NULL for %s\n", name);
return -EINVAL;
}
return ret;
}
+/*
+ * Release the Rx queue at index @qid of @dev, if one is set up.
+ * Invokes the driver's rx_queue_release callback when the driver provides
+ * one, then clears the slot so it can be reused. No-op for an empty slot.
+ */
+static void
+eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ void **rxq = dev->data->rx_queues;
+
+ if (rxq[qid] == NULL)
+ return;
+
+ if (dev->dev_ops->rx_queue_release != NULL)
+ (*dev->dev_ops->rx_queue_release)(dev, qid);
+ rxq[qid] = NULL;
+}
+
+/*
+ * Release the Tx queue at index @qid of @dev, if one is set up.
+ * Mirror of eth_dev_rxq_release(): calls the driver's tx_queue_release
+ * callback when available, then clears the slot. No-op for an empty slot.
+ */
+static void
+eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ void **txq = dev->data->tx_queues;
+
+ if (txq[qid] == NULL)
+ return;
+
+ if (dev->dev_ops->tx_queue_release != NULL)
+ (*dev->dev_ops->tx_queue_release)(dev, qid);
+ txq[qid] = NULL;
+}
+
static int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
uint16_t old_nb_queues = dev->data->nb_rx_queues;
- void **rxq;
unsigned i;
if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
- sizeof(dev->data->rx_queues[0]) * nb_queues,
+ sizeof(dev->data->rx_queues[0]) *
+ RTE_MAX_QUEUES_PER_PORT,
RTE_CACHE_LINE_SIZE);
if (dev->data->rx_queues == NULL) {
dev->data->nb_rx_queues = 0;
return -(ENOMEM);
}
} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
-
- rxq = dev->data->rx_queues;
-
for (i = nb_queues; i < old_nb_queues; i++)
- (*dev->dev_ops->rx_queue_release)(rxq[i]);
- rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
- RTE_CACHE_LINE_SIZE);
- if (rxq == NULL)
- return -(ENOMEM);
- if (nb_queues > old_nb_queues) {
- uint16_t new_qs = nb_queues - old_nb_queues;
-
- memset(rxq + old_nb_queues, 0,
- sizeof(rxq[0]) * new_qs);
- }
-
- dev->data->rx_queues = rxq;
+ eth_dev_rxq_release(dev, i);
} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
-
- rxq = dev->data->rx_queues;
-
for (i = nb_queues; i < old_nb_queues; i++)
- (*dev->dev_ops->rx_queue_release)(rxq[i]);
+ eth_dev_rxq_release(dev, i);
rte_free(dev->data->rx_queues);
dev->data->rx_queues = NULL;
int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
+
if (!dev->data->dev_started) {
RTE_ETHDEV_LOG(ERR,
"Port %u must be started before start any queue\n",
return 0;
}
- return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
- rx_queue_id));
-
+ return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}
int
int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
}
return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
-
}
int
int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
+
if (!dev->data->dev_started) {
RTE_ETHDEV_LOG(ERR,
"Port %u must be started before start any queue\n",
int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
}
return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
-
}
static int
eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
uint16_t old_nb_queues = dev->data->nb_tx_queues;
- void **txq;
unsigned i;
if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
- sizeof(dev->data->tx_queues[0]) * nb_queues,
- RTE_CACHE_LINE_SIZE);
+ sizeof(dev->data->tx_queues[0]) *
+ RTE_MAX_QUEUES_PER_PORT,
+ RTE_CACHE_LINE_SIZE);
if (dev->data->tx_queues == NULL) {
dev->data->nb_tx_queues = 0;
return -(ENOMEM);
}
} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
-
- txq = dev->data->tx_queues;
-
for (i = nb_queues; i < old_nb_queues; i++)
- (*dev->dev_ops->tx_queue_release)(txq[i]);
- txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
- RTE_CACHE_LINE_SIZE);
- if (txq == NULL)
- return -ENOMEM;
- if (nb_queues > old_nb_queues) {
- uint16_t new_qs = nb_queues - old_nb_queues;
-
- memset(txq + old_nb_queues, 0,
- sizeof(txq[0]) * new_qs);
- }
-
- dev->data->tx_queues = txq;
+ eth_dev_txq_release(dev, i);
} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
-
- txq = dev->data->tx_queues;
-
for (i = nb_queues; i < old_nb_queues; i++)
- (*dev->dev_ops->tx_queue_release)(txq[i]);
+ eth_dev_txq_release(dev, i);
rte_free(dev->data->tx_queues);
dev->data->tx_queues = NULL;
return name;
}
+/*
+ * Return the printable name of a device capability flag.
+ *
+ * @param capability
+ *   A single RTE_ETH_DEV_CAPA_* bit (compared for exact equality, so a
+ *   mask of several bits yields "UNKNOWN").
+ * @return
+ *   Constant name string from rte_eth_dev_capa_names[], or "UNKNOWN" if
+ *   the flag is not in the table. Never NULL.
+ */
+const char *
+rte_eth_dev_capability_name(uint64_t capability)
+{
+ const char *name = "UNKNOWN";
+ unsigned int i;
+
+ for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
+ if (capability == rte_eth_dev_capa_names[i].offload) {
+ name = rte_eth_dev_capa_names[i].name;
+ break;
+ }
+ }
+
+ return name;
+}
+
static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
uint32_t max_rx_pkt_len, uint32_t dev_info_size)
/*
* Validate offloads that are requested through rte_eth_dev_configure against
- * the offloads successfully set by the ethernet device.
+ * the offloads successfully set by the Ethernet device.
*
* @param port_id
* The port identifier of the Ethernet device.
* @param req_offloads
* The offloads that have been requested through `rte_eth_dev_configure`.
* @param set_offloads
- * The offloads successfully set by the ethernet device.
+ * The offloads successfully set by the Ethernet device.
* @param offload_type
* The offload type i.e. Rx/Tx string.
* @param offload_name
while (offloads_diff != 0) {
/* Check if any offload is requested but not enabled. */
- offload = 1ULL << __builtin_ctzll(offloads_diff);
+ offload = RTE_BIT64(__builtin_ctzll(offloads_diff));
if (offload & req_offloads) {
RTE_ETHDEV_LOG(ERR,
"Port %u failed to enable %s offload %s\n",
return ret;
}
+/*
+ * Compute the L2 overhead (frame size minus MTU) for a device, from the
+ * driver-reported maxima. When the driver reports a real max MTU (not the
+ * UINT16_MAX "unset" sentinel) smaller than its max frame length, the
+ * difference is the device's true overhead; otherwise fall back to plain
+ * Ethernet header + CRC.
+ */
+static uint32_t
+eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
+{
+ uint32_t overhead_len;
+
+ if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
+ overhead_len = max_rx_pktlen - max_mtu;
+ else
+ overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+ return overhead_len;
+}
+
+/* rte_eth_dev_info_get() should be called prior to this function */
+/*
+ * Validate a requested MTU against the device limits in @dev_info:
+ * it must lie within [min_mtu, max_mtu], and the resulting frame size
+ * (MTU + device L2 overhead) must lie within
+ * [RTE_ETHER_MIN_LEN, max_rx_pktlen].
+ * Returns 0 on success, -EINVAL (with an error log) on any violation.
+ */
+static int
+eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
+ uint16_t mtu)
+{
+ uint32_t overhead_len;
+ uint32_t frame_size;
+
+ if (mtu < dev_info->min_mtu) {
+ RTE_ETHDEV_LOG(ERR,
+ "MTU (%u) < device min MTU (%u) for port_id %u\n",
+ mtu, dev_info->min_mtu, port_id);
+ return -EINVAL;
+ }
+ if (mtu > dev_info->max_mtu) {
+ RTE_ETHDEV_LOG(ERR,
+ "MTU (%u) > device max MTU (%u) for port_id %u\n",
+ mtu, dev_info->max_mtu, port_id);
+ return -EINVAL;
+ }
+
+ /* Translate MTU to on-wire frame size using the device's overhead. */
+ overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
+ dev_info->max_mtu);
+ frame_size = mtu + overhead_len;
+ if (frame_size < RTE_ETHER_MIN_LEN) {
+ RTE_ETHDEV_LOG(ERR,
+ "Frame size (%u) < min frame size (%u) for port_id %u\n",
+ frame_size, RTE_ETHER_MIN_LEN, port_id);
+ return -EINVAL;
+ }
+
+ if (frame_size > dev_info->max_rx_pktlen) {
+ RTE_ETHDEV_LOG(ERR,
+ "Frame size (%u) > device max frame size (%u) for port_id %u\n",
+ frame_size, dev_info->max_rx_pktlen, port_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
const struct rte_eth_conf *dev_conf)
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
struct rte_eth_conf orig_conf;
- uint16_t overhead_len;
int diag;
int ret;
uint16_t old_mtu;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
+ if (dev_conf == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot configure ethdev port %u from NULL config\n",
+ port_id);
+ return -EINVAL;
+ }
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
if (dev->data->dev_started) {
return -EBUSY;
}
+ /*
+ * Ensure that "dev_configured" is always 0 each time prepare to do
+ * dev_configure() to avoid any non-anticipated behaviour.
+ * And set to 1 when dev_configure() is executed successfully.
+ */
+ dev->data->dev_configured = 0;
+
/* Store original config, as rollback required on failure */
memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));
if (ret != 0)
goto rollback;
- /* Get the real Ethernet overhead length */
- if (dev_info.max_mtu != UINT16_MAX &&
- dev_info.max_rx_pktlen > dev_info.max_mtu)
- overhead_len = dev_info.max_rx_pktlen - dev_info.max_mtu;
- else
- overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
-
/* If number of queues specified by application for both Rx and Tx is
* zero, use driver preferred values. This cannot be done individually
* as it is valid for either Tx or Rx (but not both) to be zero.
if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
RTE_ETHDEV_LOG(ERR,
- "Number of RX queues requested (%u) is greater than max supported(%d)\n",
+ "Number of Rx queues requested (%u) is greater than max supported(%d)\n",
nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
ret = -EINVAL;
goto rollback;
if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
RTE_ETHDEV_LOG(ERR,
- "Number of TX queues requested (%u) is greater than max supported(%d)\n",
+ "Number of Tx queues requested (%u) is greater than max supported(%d)\n",
nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
ret = -EINVAL;
goto rollback;
}
/*
- * Check that the numbers of RX and TX queues are not greater
- * than the maximum number of RX and TX queues supported by the
+ * Check that the numbers of Rx and Tx queues are not greater
+ * than the maximum number of Rx and Tx queues supported by the
* configured device.
*/
if (nb_rx_q > dev_info.max_rx_queues) {
goto rollback;
}
- /*
- * If jumbo frames are enabled, check that the maximum RX packet
- * length is supported by the configured device.
- */
- if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
- RTE_ETHDEV_LOG(ERR,
- "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
- port_id, dev_conf->rxmode.max_rx_pkt_len,
- dev_info.max_rx_pktlen);
- ret = -EINVAL;
- goto rollback;
- } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
- RTE_ETHDEV_LOG(ERR,
- "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
- port_id, dev_conf->rxmode.max_rx_pkt_len,
- (unsigned int)RTE_ETHER_MIN_LEN);
- ret = -EINVAL;
- goto rollback;
- }
+ if (dev_conf->rxmode.mtu == 0)
+ dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;
- /* Scale the MTU size to adapt max_rx_pkt_len */
- dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
- overhead_len;
- } else {
- uint16_t pktlen = dev_conf->rxmode.max_rx_pkt_len;
- if (pktlen < RTE_ETHER_MIN_MTU + overhead_len ||
- pktlen > RTE_ETHER_MTU + overhead_len)
- /* Use default value */
- dev->data->dev_conf.rxmode.max_rx_pkt_len =
- RTE_ETHER_MTU + overhead_len;
- }
+ ret = eth_dev_validate_mtu(port_id, &dev_info,
+ dev->data->dev_conf.rxmode.mtu);
+ if (ret != 0)
+ goto rollback;
+
+ dev->data->mtu = dev->data->dev_conf.rxmode.mtu;
/*
* If LRO is enabled, check that the maximum aggregated packet
* size is supported by the configured device.
*/
if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ uint32_t max_rx_pktlen;
+ uint32_t overhead_len;
+
+ overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
+ dev_info.max_mtu);
+ max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
if (dev_conf->rxmode.max_lro_pkt_size == 0)
- dev->data->dev_conf.rxmode.max_lro_pkt_size =
- dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
ret = eth_dev_check_lro_pkt_size(port_id,
dev->data->dev_conf.rxmode.max_lro_pkt_size,
- dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ max_rx_pktlen,
dev_info.max_lro_pkt_size);
if (ret != 0)
goto rollback;
}
/*
- * Setup new number of RX/TX queues and reconfigure device.
+ * Setup new number of Rx/Tx queues and reconfigure device.
*/
diag = eth_dev_rx_queue_config(dev, nb_rx_q);
if (diag != 0) {
goto reset_queues;
}
+ dev->data->dev_configured = 1;
rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
return 0;
reset_queues:
pool_mask = dev->data->mac_pool_sel[i];
do {
- if (pool_mask & 1ULL)
+ if (pool_mask & UINT64_C(1))
(*dev->dev_ops->mac_addr_add)(dev,
addr, i, pool);
pool_mask >>= 1;
int ret, ret_stop;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
+ if (dev->data->dev_configured == 0) {
+ RTE_ETHDEV_LOG(INFO,
+ "Device with port_id=%"PRIu16" is not configured.\n",
+ port_id);
+ return -EINVAL;
+ }
+
if (dev->data->dev_started != 0) {
RTE_ETHDEV_LOG(INFO,
"Device with port_id=%"PRIu16" already started\n",
(*dev->dev_ops->link_update)(dev, 0);
}
+ /* expose selection of PMD fast-path functions */
+ eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);
+
rte_ethdev_trace_start(port_id);
return 0;
}
return 0;
}
+ /* point fast-path functions to dummy ones */
+ eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);
+
dev->data->dev_started = 0;
ret = (*dev->dev_ops->dev_stop)(dev);
rte_ethdev_trace_stop(port_id, ret);
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+ if (dev->data->dev_started) {
+ RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
+ port_id);
+ return -EINVAL;
+ }
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
*lasterr = (*dev->dev_ops->dev_close)(dev);
if (*lasterr != 0)
int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
-
dev = &rte_eth_devices[port_id];
if (dev->state == RTE_ETH_DEV_REMOVED)
* for each segment specified in extended configuration.
*/
mp_first = rx_seg[0].mp;
- offset_mask = (1u << seg_capa->offset_align_log2) - 1;
+ offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1;
for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
struct rte_mempool *mpl = rx_seg[seg_idx].mp;
uint32_t length = rx_seg[seg_idx].length;
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
struct rte_eth_rxconf local_conf;
- void **rxq;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
+
if (rx_queue_id >= dev->data->nb_rx_queues) {
- RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
return -EINVAL;
}
RTE_ETH_QUEUE_STATE_STOPPED))
return -EBUSY;
- rxq = dev->data->rx_queues;
- if (rxq[rx_queue_id]) {
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
- -ENOTSUP);
- (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
- rxq[rx_queue_id] = NULL;
- }
+ eth_dev_rxq_release(dev, rx_queue_id);
if (rx_conf == NULL)
rx_conf = &dev_info.default_rxconf;
return -EINVAL;
}
+ if (local_conf.share_group > 0 &&
+ (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) {
+ RTE_ETHDEV_LOG(ERR,
+ "Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n",
+ port_id, rx_queue_id, local_conf.share_group);
+ return -EINVAL;
+ }
+
/*
* If LRO is enabled, check that the maximum aggregated packet
* size is supported by the configured device.
*/
+ /* Get the real Ethernet overhead length */
if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ uint32_t overhead_len;
+ uint32_t max_rx_pktlen;
+ int ret;
+
+ overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
+ dev_info.max_mtu);
+ max_rx_pktlen = dev->data->mtu + overhead_len;
if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
- dev->data->dev_conf.rxmode.max_lro_pkt_size =
- dev->data->dev_conf.rxmode.max_rx_pkt_len;
- int ret = eth_dev_check_lro_pkt_size(port_id,
+ dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
+ ret = eth_dev_check_lro_pkt_size(port_id,
dev->data->dev_conf.rxmode.max_lro_pkt_size,
- dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ max_rx_pktlen,
dev_info.max_lro_pkt_size);
if (ret != 0)
return ret;
int ret;
struct rte_eth_dev *dev;
struct rte_eth_hairpin_cap cap;
- void **rxq;
int i;
int count;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
+
if (rx_queue_id >= dev->data->nb_rx_queues) {
- RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ if (conf == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n",
+ port_id);
return -EINVAL;
}
+
ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
if (ret != 0)
return ret;
}
if (dev->data->dev_started)
return -EBUSY;
- rxq = dev->data->rx_queues;
- if (rxq[rx_queue_id] != NULL) {
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
- -ENOTSUP);
- (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
- rxq[rx_queue_id] = NULL;
- }
+ eth_dev_rxq_release(dev, rx_queue_id);
ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
nb_rx_desc, conf);
if (ret == 0)
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf local_conf;
- void **txq;
int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
+
if (tx_queue_id >= dev->data->nb_tx_queues) {
- RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
return -EINVAL;
}
RTE_ETH_QUEUE_STATE_STOPPED))
return -EBUSY;
- txq = dev->data->tx_queues;
- if (txq[tx_queue_id]) {
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
- -ENOTSUP);
- (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
- txq[tx_queue_id] = NULL;
- }
+ eth_dev_txq_release(dev, tx_queue_id);
if (tx_conf == NULL)
tx_conf = &dev_info.default_txconf;
{
struct rte_eth_dev *dev;
struct rte_eth_hairpin_cap cap;
- void **txq;
int i;
int count;
int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+
if (tx_queue_id >= dev->data->nb_tx_queues) {
- RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
+ return -EINVAL;
+ }
+
+ if (conf == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
+ port_id);
return -EINVAL;
}
+
ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
if (ret != 0)
return ret;
}
if (dev->data->dev_started)
return -EBUSY;
- txq = dev->data->tx_queues;
- if (txq[tx_queue_id] != NULL) {
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
- -ENOTSUP);
- (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
- txq[tx_queue_id] = NULL;
- }
+ eth_dev_txq_release(dev, tx_queue_id);
ret = (*dev->dev_ops->tx_hairpin_queue_setup)
(dev, tx_queue_id, nb_tx_desc, conf);
if (ret == 0)
RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
dev = &rte_eth_devices[tx_port];
+
if (dev->data->dev_started == 0) {
RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
return -EBUSY;
RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
dev = &rte_eth_devices[tx_port];
+
if (dev->data->dev_started == 0) {
RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
return -EBUSY;
struct rte_eth_dev *dev;
int ret;
- if (peer_ports == NULL || len == 0)
- return -EINVAL;
-
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+
+ if (peer_ports == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u hairpin peer ports to NULL\n",
+ port_id);
+ return -EINVAL;
+ }
+
+ if (len == 0) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
+ port_id);
+ return -EINVAL;
+ }
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
-ENOTSUP);
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
buffer_tx_error_fn cbfn, void *userdata)
{
+ if (buffer == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot set Tx buffer error callback to NULL buffer\n");
+ return -EINVAL;
+ }
+
buffer->error_callback = cbfn;
buffer->error_userdata = userdata;
return 0;
{
int ret = 0;
- if (buffer == NULL)
+ if (buffer == NULL) {
+ RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n");
return -EINVAL;
+ }
buffer->size = size;
if (buffer->error_callback == NULL) {
int
rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ struct rte_eth_dev *dev;
int ret;
- /* Validate Input Data. Bail if not valid or not supported. */
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
/* Call driver to free pending mbufs. */
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
+
return dev->data->promiscuous;
}
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
+
return dev->data->all_multicast;
}
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- if (dev->data->dev_conf.intr_conf.lsc &&
- dev->data->dev_started)
+ if (eth_link == NULL) {
+ RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
+ port_id);
+ return -EINVAL;
+ }
+
+ if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
rte_eth_linkstatus_get(dev, eth_link);
else {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- if (dev->data->dev_conf.intr_conf.lsc &&
- dev->data->dev_started)
+ if (eth_link == NULL) {
+ RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
+ port_id);
+ return -EINVAL;
+ }
+
+ if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
rte_eth_linkstatus_get(dev, eth_link);
else {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
int
rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
{
+ if (str == NULL) {
+ RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n");
+ return -EINVAL;
+ }
+
+ if (len == 0) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot convert link to string with zero size\n");
+ return -EINVAL;
+ }
+
+ if (eth_link == NULL) {
+ RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n");
+ return -EINVAL;
+ }
+
if (eth_link->link_status == ETH_LINK_DOWN)
return snprintf(str, len, "Link down");
else
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
+
+ if (stats == NULL) {
+ RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n",
+ port_id);
+ return -EINVAL;
+ }
+
memset(stats, 0, sizeof(*stats));
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- if (dev->dev_ops->xstats_get_names_by_id != NULL) {
- count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
- NULL, 0);
- if (count < 0)
- return eth_err(port_id, count);
- }
if (dev->dev_ops->xstats_get_names != NULL) {
count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
if (count < 0)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
- if (!id) {
- RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
+ if (xstat_name == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u xstats ID from NULL xstat name\n",
+ port_id);
return -ENOMEM;
}
- if (!xstat_name) {
- RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
+ if (id == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u xstats ID to NULL\n",
+ port_id);
return -ENOMEM;
}
if (no_basic_stat_requested)
return (*dev->dev_ops->xstats_get_names_by_id)(dev,
- xstats_names, ids_copy, size);
+ ids_copy, xstats_names, size);
}
/* Retrieve all stats */
int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
ret = eth_dev_get_xstats_count(port_id);
if (ret < 0)
return ret;
expected_entries = (uint16_t)ret;
struct rte_eth_xstat xstats[expected_entries];
- dev = &rte_eth_devices[port_id];
basic_count = eth_dev_get_xstats_basic_count(dev);
/* Return max number of stats if no ids given */
int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
-
if (is_rx && (queue_id >= dev->data->nb_rx_queues))
return -EINVAL;
if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
return -EINVAL;
- return (*dev->dev_ops->queue_stats_mapping_set)
- (dev, queue_id, stat_idx, is_rx);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
+ return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx);
}
-
int
rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
uint8_t stat_idx)
stat_idx, STAT_QMAP_TX));
}
-
int
rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
uint8_t stat_idx)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+ if (fw_version == NULL && fw_size > 0) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u FW version to NULL when string size is non zero\n",
+ port_id);
+ return -EINVAL;
+ }
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
fw_version, fw_size));
};
int diag;
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ if (dev_info == NULL) {
+ RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n",
+ port_id);
+ return -EINVAL;
+ }
+
/*
* Init dev_info before port_id check since caller does not have
* return status and does not know if get is successful or not.
memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
- dev = &rte_eth_devices[port_id];
-
dev_info->rx_desc_lim = lim;
dev_info->tx_desc_lim = lim;
dev_info->device = dev->device;
- dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+ dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
+ RTE_ETHER_CRC_LEN;
dev_info->max_mtu = UINT16_MAX;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
return 0;
}
+/*
+ * Copy the current device configuration of the given port into *dev_conf.
+ *
+ * Returns 0 on success, -ENODEV if port_id is invalid, or -EINVAL when
+ * dev_conf is NULL. The configuration is copied by value, so the caller's
+ * buffer stays valid independently of later reconfiguration of the port.
+ */
+int
+rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ if (dev_conf == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u configuration to NULL\n",
+ port_id);
+ return -EINVAL;
+ }
+
+ /* Snapshot the shared per-port config into the caller-provided struct. */
+ memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf));
+
+ return 0;
+}
+
int
rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
uint32_t *ptypes, int num)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+
+ if (ptypes == NULL && num > 0) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n",
+ port_id);
+ return -EINVAL;
+ }
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- if (num > 0 && set_ptypes == NULL)
+ if (num > 0 && set_ptypes == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n",
+ port_id);
return -EINVAL;
+ }
if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
*dev->dev_ops->dev_ptypes_set == NULL) {
return ret;
}
+/*
+ * Copy up to 'num' MAC addresses assigned to the port into 'ma'.
+ *
+ * Returns the number of addresses actually copied (bounded by the device's
+ * max_mac_addrs), -EINVAL when 'ma' is NULL, or a negative errno propagated
+ * from rte_eth_dev_info_get() (which also validates port_id).
+ */
+int
+rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
+ unsigned int num)
+{
+ int32_t ret;
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+
+ if (ma == NULL) {
+ RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ /* will check for us that port_id is a valid one */
+ ret = rte_eth_dev_info_get(port_id, &dev_info);
+ if (ret != 0)
+ return ret;
+
+ dev = &rte_eth_devices[port_id];
+ /* Never copy more entries than the device actually maintains. */
+ num = RTE_MIN(dev_info.max_mac_addrs, num);
+ memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0]));
+
+ return num;
+}
+
int
rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
{
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+
+ if (mac_addr == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u MAC address to NULL\n",
+ port_id);
+ return -EINVAL;
+ }
+
rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
return 0;
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
+
+ if (mtu == NULL) {
+ RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n",
+ port_id);
+ return -EINVAL;
+ }
+
*mtu = dev->data->mtu;
return 0;
}
if (ret != 0)
return ret;
- if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
- return -EINVAL;
+ ret = eth_dev_validate_mtu(port_id, &dev_info, mtu);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (dev->data->dev_configured == 0) {
+ RTE_ETHDEV_LOG(ERR,
+ "Port %u must be configured before MTU set\n",
+ port_id);
+ return -EINVAL;
}
ret = (*dev->dev_ops->mtu_set)(dev, mtu);
- if (!ret)
+ if (ret == 0)
dev->data->mtu = mtu;
return eth_err(port_id, ret);
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+
if (!(dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_VLAN_FILTER)) {
- RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
+ RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n",
port_id);
return -ENOSYS;
}
vbit = vlan_id % 64;
if (on)
- vfc->ids[vidx] |= UINT64_C(1) << vbit;
+ vfc->ids[vidx] |= RTE_BIT64(vbit);
else
- vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
+ vfc->ids[vidx] &= ~RTE_BIT64(vbit);
}
return eth_err(port_id, ret);
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+
if (rx_queue_id >= dev->data->nb_rx_queues) {
RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
return -EINVAL;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
tpid));
}
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
}
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+
+ if (fc_conf == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u flow control config to NULL\n",
+ port_id);
+ return -EINVAL;
+ }
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
memset(fc_conf, 0, sizeof(*fc_conf));
return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ if (fc_conf == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot set ethdev port %u flow control from NULL config\n",
+ port_id);
+ return -EINVAL;
+ }
+
if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
return -EINVAL;
}
- dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
}
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ if (pfc_conf == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot set ethdev port %u priority flow control from NULL config\n",
+ port_id);
+ return -EINVAL;
+ }
+
if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
return -EINVAL;
}
- dev = &rte_eth_devices[port_id];
/* High water, low water validation are device specific */
if (*dev->dev_ops->priority_flow_ctrl_set)
return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
{
uint16_t i, num;
- if (!reta_conf)
- return -EINVAL;
-
num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
for (i = 0; i < num; i++) {
if (reta_conf[i].mask)
{
uint16_t i, idx, shift;
- if (!reta_conf)
- return -EINVAL;
-
if (max_rxq == 0) {
RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
return -EINVAL;
for (i = 0; i < reta_size; i++) {
idx = i / RTE_RETA_GROUP_SIZE;
shift = i % RTE_RETA_GROUP_SIZE;
- if ((reta_conf[idx].mask & (1ULL << shift)) &&
+ if ((reta_conf[idx].mask & RTE_BIT64(shift)) &&
(reta_conf[idx].reta[shift] >= max_rxq)) {
RTE_ETHDEV_LOG(ERR,
"reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ if (reta_conf == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot update ethdev port %u RSS RETA to NULL\n",
+ port_id);
+ return -EINVAL;
+ }
+
+ if (reta_size == 0) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot update ethdev port %u RSS RETA with zero size\n",
+ port_id);
+ return -EINVAL;
+ }
+
/* Check mask bits */
ret = eth_check_reta_mask(reta_conf, reta_size);
if (ret < 0)
return ret;
- dev = &rte_eth_devices[port_id];
-
/* Check entry value */
ret = eth_check_reta_entry(reta_conf, reta_size,
dev->data->nb_rx_queues);
int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ if (reta_conf == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot query ethdev port %u RSS RETA from NULL config\n",
+ port_id);
+ return -EINVAL;
+ }
/* Check mask bits */
ret = eth_check_reta_mask(reta_conf, reta_size);
if (ret < 0)
return ret;
- dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
reta_size));
int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ if (rss_conf == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot update ethdev port %u RSS hash from NULL config\n",
+ port_id);
+ return -EINVAL;
+ }
ret = rte_eth_dev_info_get(port_id, &dev_info);
if (ret != 0)
return ret;
rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
-
- dev = &rte_eth_devices[port_id];
if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
dev_info.flow_type_rss_offloads) {
RTE_ETHDEV_LOG(ERR,
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+
+ if (rss_conf == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u RSS hash config to NULL\n",
+ port_id);
+ return -EINVAL;
+ }
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
rss_conf));
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
if (udp_tunnel == NULL) {
- RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
+ port_id);
return -EINVAL;
}
return -EINVAL;
}
- dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
udp_tunnel));
dev = &rte_eth_devices[port_id];
if (udp_tunnel == NULL) {
- RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
+ port_id);
return -EINVAL;
}
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
}
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
}
struct rte_eth_dev *dev;
int ret;
- if (speed_fec_capa == NULL && num > 0)
- return -EINVAL;
-
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+
+ if (speed_fec_capa == NULL && num > 0) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n",
+ port_id);
+ return -EINVAL;
+ }
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP);
ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
{
struct rte_eth_dev *dev;
- if (fec_capa == NULL)
- return -EINVAL;
-
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+
+ if (fec_capa == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u current FEC mode to NULL\n",
+ port_id);
+ return -EINVAL;
+ }
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP);
return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
}
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
}
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+
+ if (addr == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot add ethdev port %u MAC address from NULL address\n",
+ port_id);
+ return -EINVAL;
+ }
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
if (rte_is_zero_ether_addr(addr)) {
return -EINVAL;
}
if (pool >= ETH_64_POOLS) {
- RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
+ RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", ETH_64_POOLS - 1);
return -EINVAL;
}
pool_mask = dev->data->mac_pool_sel[index];
/* Check if both MAC address and pool is already there, and do nothing */
- if (pool_mask & (1ULL << pool))
+ if (pool_mask & RTE_BIT64(pool))
return 0;
}
rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
/* Update pool bitmap in NIC data structure */
- dev->data->mac_pool_sel[index] |= (1ULL << pool);
+ dev->data->mac_pool_sel[index] |= RTE_BIT64(pool);
}
return eth_err(port_id, ret);
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+
+ if (addr == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot remove ethdev port %u MAC address from NULL address\n",
+ port_id);
+ return -EINVAL;
+ }
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
index = eth_dev_get_mac_addr_index(port_id, addr);
int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ if (addr == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot set ethdev port %u default MAC address from NULL address\n",
+ port_id);
+ return -EINVAL;
+ }
if (!rte_is_valid_assigned_ether_addr(addr))
return -EINVAL;
- dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
+
+ if (addr == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot set ethdev port %u unicast hash table from NULL address\n",
+ port_id);
+ return -EINVAL;
+ }
+
if (rte_is_zero_ether_addr(addr)) {
RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
port_id);
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
ret = rte_eth_dev_info_get(port_id, &dev_info);
if (ret != 0)
return ret;
- dev = &rte_eth_devices[port_id];
link = dev->data->dev_link;
if (queue_idx > dev_info.max_tx_queues) {
RTE_ETHDEV_LOG(ERR,
- "Set queue rate limit:port %u: invalid queue id=%u\n",
+ "Set queue rate limit:port %u: invalid queue ID=%u\n",
port_id, queue_idx);
return -EINVAL;
}
queue_idx, tx_rate));
}
-int
-rte_eth_mirror_rule_set(uint16_t port_id,
-	struct rte_eth_mirror_conf *mirror_conf,
-	uint8_t rule_id, uint8_t on)
+/*
+ * Constructor: initialize every entry of the public rte_eth_fp_ops[] array
+ * (see file head) via eth_dev_fp_ops_reset() before any port is probed.
+ */
+RTE_INIT(eth_dev_init_fp_ops)
{
-	struct rte_eth_dev *dev;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-	if (mirror_conf->rule_type == 0) {
-		RTE_ETHDEV_LOG(ERR, "Mirror rule type can not be 0\n");
-		return -EINVAL;
-	}
-
-	if (mirror_conf->dst_pool >= ETH_64_POOLS) {
-		RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
-			ETH_64_POOLS - 1);
-		return -EINVAL;
-	}
-
-	if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
-	     ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
-	    (mirror_conf->pool_mask == 0)) {
-		RTE_ETHDEV_LOG(ERR,
-			"Invalid mirror pool, pool mask can not be 0\n");
-		return -EINVAL;
-	}
-
-	if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
-	    mirror_conf->vlan.vlan_mask == 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Invalid vlan mask, vlan mask can not be 0\n");
-		return -EINVAL;
-	}
-
-	dev = &rte_eth_devices[port_id];
-	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
-
-	return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
-		mirror_conf, rule_id, on));
-}
-
-int
-rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
-{
-	struct rte_eth_dev *dev;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+	uint32_t i;
-	dev = &rte_eth_devices[port_id];
-	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
-
-	return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
-		rule_id));
+	/* Reset each per-port fast-path ops slot to its initial state. */
+	for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++)
+		eth_dev_fp_ops_reset(rte_eth_fp_ops + i);
}
RTE_INIT(eth_dev_init_cb_lists)
uint16_t next_port;
uint16_t last_port;
- if (!cb_fn)
+ if (cb_fn == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot register ethdev port %u callback from NULL\n",
+ port_id);
return -EINVAL;
+ }
if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
uint16_t next_port;
uint16_t last_port;
- if (!cb_fn)
+ if (cb_fn == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot unregister ethdev port %u callback from NULL\n",
+ port_id);
return -EINVAL;
+ }
if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
if (dev == NULL)
return;
+ /*
+ * for secondary process, at that point we expect device
+ * to be already 'usable', so shared data and all function pointers
+ * for fast-path devops have to be setup properly inside rte_eth_dev.
+ */
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+ eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev);
+
rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
dev->state = RTE_ETH_DEV_ATTACHED;
int rc;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
if (!dev->intr_handle) {
- RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
+ RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
return -ENOTSUP;
}
intr_handle = dev->intr_handle;
if (!intr_handle->intr_vec) {
- RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
+ RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
return -EPERM;
}
rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
if (rc && rc != -EEXIST) {
RTE_ETHDEV_LOG(ERR,
- "p %u q %u rx ctl error op %d epfd %d vec %u\n",
+ "p %u q %u Rx ctl error op %d epfd %d vec %u\n",
port_id, qid, op, epfd, vec);
}
}
int fd;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
-
dev = &rte_eth_devices[port_id];
if (queue_id >= dev->data->nb_rx_queues) {
- RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
return -1;
}
if (!dev->intr_handle) {
- RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
+ RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
return -1;
}
intr_handle = dev->intr_handle;
if (!intr_handle->intr_vec) {
- RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
+ RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
return -1;
}
int rc;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
+
if (queue_id >= dev->data->nb_rx_queues) {
- RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
return -EINVAL;
}
if (!dev->intr_handle) {
- RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
+ RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
return -ENOTSUP;
}
intr_handle = dev->intr_handle;
if (!intr_handle->intr_vec) {
- RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
+ RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
return -EPERM;
}
rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
if (rc && rc != -EEXIST) {
RTE_ETHDEV_LOG(ERR,
- "p %u q %u rx ctl error op %d epfd %d vec %u\n",
+ "p %u q %u Rx ctl error op %d epfd %d vec %u\n",
port_id, queue_id, op, epfd, vec);
return rc;
}
int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
ret = eth_dev_validate_rx_queue(dev, queue_id);
return ret;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
- return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
- queue_id));
+ return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
}
int
int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
ret = eth_dev_validate_rx_queue(dev, queue_id);
return ret;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
- return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
- queue_id));
+ return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
}
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
- if (qinfo == NULL)
+ if (queue_id >= dev->data->nb_rx_queues) {
+ RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
return -EINVAL;
+ }
- dev = &rte_eth_devices[port_id];
- if (queue_id >= dev->data->nb_rx_queues) {
- RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
+ if (qinfo == NULL) {
+ RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n",
+ port_id, queue_id);
return -EINVAL;
}
memset(qinfo, 0, sizeof(*qinfo));
dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
+ qinfo->queue_state = dev->data->rx_queue_state[queue_id];
+
return 0;
}
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
- if (qinfo == NULL)
+ if (queue_id >= dev->data->nb_tx_queues) {
+ RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
return -EINVAL;
+ }
- dev = &rte_eth_devices[port_id];
- if (queue_id >= dev->data->nb_tx_queues) {
- RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
+ if (qinfo == NULL) {
+ RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n",
+ port_id, queue_id);
return -EINVAL;
}
memset(qinfo, 0, sizeof(*qinfo));
dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
+ qinfo->queue_state = dev->data->tx_queue_state[queue_id];
return 0;
}
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- if (mode == NULL)
- return -EINVAL;
-
dev = &rte_eth_devices[port_id];
if (queue_id >= dev->data->nb_rx_queues) {
- RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
+ return -EINVAL;
+ }
+
+ if (mode == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n",
+ port_id, queue_id);
return -EINVAL;
}
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- if (mode == NULL)
- return -EINVAL;
-
dev = &rte_eth_devices[port_id];
if (queue_id >= dev->data->nb_tx_queues) {
- RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
+ return -EINVAL;
+ }
+
+ if (mode == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n",
+ port_id, queue_id);
return -EINVAL;
}
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP);
-
if (queue_id >= dev->data->nb_rx_queues) {
RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
return -EINVAL;
}
if (pmc == NULL) {
- RTE_ETHDEV_LOG(ERR, "Invalid power monitor condition=%p\n",
- pmc);
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n",
+ port_id, queue_id);
return -EINVAL;
}
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP);
return eth_err(port_id,
- dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id],
- pmc));
+ dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc));
}
int
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
mc_addr_set, nb_mc_addr));
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+ if (timestamp == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot read ethdev port %u Rx timestamp to NULL\n",
+ port_id);
+ return -EINVAL;
+ }
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
(dev, timestamp, flags));
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+ if (timestamp == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot read ethdev port %u Tx timestamp to NULL\n",
+ port_id);
+ return -EINVAL;
+ }
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
(dev, timestamp));
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
- return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
- delta));
+ return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta));
}
int
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+ if (timestamp == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot read ethdev port %u timesync time to NULL\n",
+ port_id);
+ return -EINVAL;
+ }
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
timestamp));
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+ if (timestamp == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot write ethdev port %u timesync from NULL time\n",
+ port_id);
+ return -EINVAL;
+ }
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
timestamp));
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+ if (clock == NULL) {
+ RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n",
+ port_id);
+ return -EINVAL;
+ }
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
}
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
- if (info == NULL)
+ dev = &rte_eth_devices[port_id];
+
+ if (info == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u register info to NULL\n",
+ port_id);
return -EINVAL;
+ }
- dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
}
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
}
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
- if (info == NULL)
+ dev = &rte_eth_devices[port_id];
+
+ if (info == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u EEPROM info to NULL\n",
+ port_id);
return -EINVAL;
+ }
- dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
}
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
- if (info == NULL)
+ dev = &rte_eth_devices[port_id];
+
+ if (info == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot set ethdev port %u EEPROM from NULL info\n",
+ port_id);
return -EINVAL;
+ }
- dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
}
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
- if (modinfo == NULL)
+ dev = &rte_eth_devices[port_id];
+
+ if (modinfo == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u EEPROM module info to NULL\n",
+ port_id);
return -EINVAL;
+ }
- dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
return (*dev->dev_ops->get_module_info)(dev, modinfo);
}
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
- if (info == NULL || info->data == NULL || info->length == 0)
+ dev = &rte_eth_devices[port_id];
+
+ if (info == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u module EEPROM info to NULL\n",
+ port_id);
return -EINVAL;
+ }
+
+ if (info->data == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u module EEPROM data to NULL\n",
+ port_id);
+ return -EINVAL;
+ }
+
+ if (info->length == 0) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u module EEPROM to data with zero size\n",
+ port_id);
+ return -EINVAL;
+ }
- dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
return (*dev->dev_ops->get_module_eeprom)(dev, info);
}
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
+
+ if (dcb_info == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u DCB info to NULL\n",
+ port_id);
+ return -EINVAL;
+ }
+
memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
dev = &rte_eth_devices[port_id];
+
+ if (cap == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot get ethdev port %u hairpin capability to NULL\n",
+ port_id);
+ return -EINVAL;
+ }
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
memset(cap, 0, sizeof(*cap));
return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
+/* Return 1 when the given Rx queue is in hairpin state, 0 otherwise. */
int
rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
-	if (dev->data->rx_queue_state[queue_id] ==
-	    RTE_ETH_QUEUE_STATE_HAIRPIN)
+	if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}
+/* Return 1 when the given Tx queue is in hairpin state, 0 otherwise. */
int
rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
-	if (dev->data->tx_queue_state[queue_id] ==
-	    RTE_ETH_QUEUE_STATE_HAIRPIN)
+	if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
- if (pool == NULL)
+ if (pool == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot test ethdev port %u mempool operation from NULL pool\n",
+ port_id);
return -EINVAL;
-
- dev = &rte_eth_devices[port_id];
+ }
if (*dev->dev_ops->pool_ops_supported == NULL)
return 1; /* all pools are supported */
}
int
-rte_eth_representor_id_get(const struct rte_eth_dev *ethdev,
+rte_eth_representor_id_get(uint16_t port_id,
enum rte_eth_representor_type type,
int controller, int pf, int representor_port,
uint16_t *repr_id)
{
- int ret, n, i, count;
+ int ret, n, count;
+ uint32_t i;
struct rte_eth_representor_info *info = NULL;
size_t size;
return -EINVAL;
/* Get PMD representor range info. */
- ret = rte_eth_representor_info_get(ethdev->data->port_id, NULL);
+ ret = rte_eth_representor_info_get(port_id, NULL);
if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
controller == -1 && pf == -1) {
/* Direct mapping for legacy VF representor. */
info = calloc(1, size);
if (info == NULL)
return -ENOMEM;
- ret = rte_eth_representor_info_get(ethdev->data->port_id, info);
+ info->nb_ranges_alloc = n;
+ ret = rte_eth_representor_info_get(port_id, info);
if (ret < 0)
goto out;
/* Locate representor ID. */
ret = -ENOENT;
- for (i = 0; i < n; ++i) {
+ for (i = 0; i < info->nb_ranges; ++i) {
if (info->ranges[i].type != type)
continue;
if (info->ranges[i].controller != controller)
continue;
if (info->ranges[i].id_end < info->ranges[i].id_base) {
RTE_LOG(WARNING, EAL, "Port %hu invalid representor ID Range %u - %u, entry %d\n",
- ethdev->data->port_id, info->ranges[i].id_base,
+ port_id, info->ranges[i].id_base,
info->ranges[i].id_end, i);
continue;
return 0;
}
+/*
+ * Telemetry callback for "/ethdev/info": fill @d with a dictionary
+ * describing the port whose ID is given in @params.
+ * Returns 0 on success, -1 on malformed parameters, or a negative
+ * errno on invalid port / allocation failure.
+ */
+static int
+eth_dev_handle_port_info(const char *cmd __rte_unused,
+		const char *params,
+		struct rte_tel_data *d)
+{
+	struct rte_tel_data *rxq_state, *txq_state;
+	/* Formatted "xx:xx:xx:xx:xx:xx" string needs 18 bytes, not the
+	 * 6 raw bytes of RTE_ETHER_ADDR_LEN — a 6-byte buffer would make
+	 * snprintf truncate the address.
+	 */
+	char mac_addr[RTE_ETHER_ADDR_FMT_SIZE];
+	struct rte_eth_dev *eth_dev;
+	char *end_param;
+	int port_id, i;
+
+	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
+		return -1;
+
+	port_id = strtoul(params, &end_param, 0);
+	if (*end_param != '\0')
+		RTE_ETHDEV_LOG(NOTICE,
+			"Extra parameters passed to ethdev telemetry command, ignoring\n");
+
+	if (!rte_eth_dev_is_valid_port(port_id))
+		return -EINVAL;
+
+	/* &rte_eth_devices[port_id] can never be NULL, no check needed. */
+	eth_dev = &rte_eth_devices[port_id];
+
+	rxq_state = rte_tel_data_alloc();
+	if (rxq_state == NULL)
+		return -ENOMEM;
+
+	txq_state = rte_tel_data_alloc();
+	if (txq_state == NULL) {
+		/* Do not leak the already-allocated Rx container. */
+		rte_tel_data_free(rxq_state);
+		return -ENOMEM;
+	}
+
+	rte_tel_data_start_dict(d);
+	rte_tel_data_add_dict_string(d, "name", eth_dev->data->name);
+	rte_tel_data_add_dict_int(d, "state", eth_dev->state);
+	rte_tel_data_add_dict_int(d, "nb_rx_queues",
+			eth_dev->data->nb_rx_queues);
+	rte_tel_data_add_dict_int(d, "nb_tx_queues",
+			eth_dev->data->nb_tx_queues);
+	rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id);
+	rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu);
+	rte_tel_data_add_dict_int(d, "rx_mbuf_size_min",
+			eth_dev->data->min_rx_buf_size);
+	rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail",
+			eth_dev->data->rx_mbuf_alloc_failed);
+	rte_ether_format_addr(mac_addr, sizeof(mac_addr),
+			eth_dev->data->mac_addrs);
+	rte_tel_data_add_dict_string(d, "mac_addr", mac_addr);
+	rte_tel_data_add_dict_int(d, "promiscuous",
+			eth_dev->data->promiscuous);
+	rte_tel_data_add_dict_int(d, "scattered_rx",
+			eth_dev->data->scattered_rx);
+	rte_tel_data_add_dict_int(d, "all_multicast",
+			eth_dev->data->all_multicast);
+	rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started);
+	rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro);
+	rte_tel_data_add_dict_int(d, "dev_configured",
+			eth_dev->data->dev_configured);
+
+	/* Per-queue state arrays, attached to the dict as containers. */
+	rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL);
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+		rte_tel_data_add_array_int(rxq_state,
+				eth_dev->data->rx_queue_state[i]);
+
+	rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL);
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+		rte_tel_data_add_array_int(txq_state,
+				eth_dev->data->tx_queue_state[i]);
+
+	rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0);
+	rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0);
+	rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node);
+	rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags);
+	rte_tel_data_add_dict_int(d, "rx_offloads",
+			eth_dev->data->dev_conf.rxmode.offloads);
+	rte_tel_data_add_dict_int(d, "tx_offloads",
+			eth_dev->data->dev_conf.txmode.offloads);
+	rte_tel_data_add_dict_int(d, "ethdev_rss_hf",
+			eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
+
+	return 0;
+}
+
int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
struct rte_hairpin_peer_info *cur_info,
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
- return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev,
- info));
+ return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
+}
+
+/*
+ * Negotiate delivery of Rx metadata with the PMD.
+ * Must be called before the port is configured (returns -EBUSY once
+ * dev_configured is set). NOTE(review): *features appears to be both
+ * input (requested) and output (negotiated) given it is passed by
+ * pointer to the driver op — confirm against the public header docs.
+ */
+int
+rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
+{
+	struct rte_eth_dev *dev;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+	dev = &rte_eth_devices[port_id];
+
+	if (dev->data->dev_configured != 0) {
+		RTE_ETHDEV_LOG(ERR,
+			"The port (ID=%"PRIu16") is already configured\n",
+			port_id);
+		return -EBUSY;
+	}
+
+	if (features == NULL) {
+		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP);
+	return eth_err(port_id,
+		       (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
-RTE_LOG_REGISTER(rte_eth_dev_logtype, lib.ethdev, INFO);
+RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);
+/* Constructor: register the ethdev telemetry endpoints at startup. */
RTE_INIT(ethdev_init_telemetry)
{
	rte_telemetry_register_cmd("/ethdev/link_status",
			eth_dev_handle_port_link_status,
			"Returns the link status for a port. Parameters: int port_id");
+	rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info,
+			"Returns the device info for a port. Parameters: int port_id");
}