The ring ethdev creation function creates an ethdev, but does not
actually set it up for use. Even if it's just a single ring, the user
still needs to create a mempool, call rte_eth_dev_configure, then call
rx and tx setup functions before the ethdev can be used.
This patch changes things so that the ethdev is fully set up after the
call to create the ethdev. The above-mentioned functions can still be
called - as will be the case, for instance, if the NIC is created via
command-line parameters - but they are no longer essential.
The function now also sets rte_errno appropriately on error, so the
caller can get a better indication of why a call may have failed.
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#define ETH_RING_NUMA_NODE_ACTION_ARG "nodeaction"
#define ETH_RING_ACTION_CREATE "CREATE"
#define ETH_RING_NUMA_NODE_ACTION_ARG "nodeaction"
#define ETH_RING_ACTION_CREATE "CREATE"
unsigned i;
/* do some parameter checking */
unsigned i;
/* do some parameter checking */
- if (rx_queues == NULL && nb_rx_queues > 0)
+ if (rx_queues == NULL && nb_rx_queues > 0) {
+ rte_errno = EINVAL;
- if (tx_queues == NULL && nb_tx_queues > 0)
+ }
+ if (tx_queues == NULL && nb_tx_queues > 0) {
+ rte_errno = EINVAL;
+ goto error;
+ }
+ if (nb_rx_queues > RTE_PMD_RING_MAX_RX_RINGS) {
+ rte_errno = EINVAL;
RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
numa_node);
RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
numa_node);
* and internal (private) data
*/
data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
* and internal (private) data
*/
data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+ if (data == NULL) {
+ rte_errno = ENOMEM;
+ }
+
+ data->rx_queues = rte_zmalloc_socket(name, sizeof(void *) * nb_rx_queues,
+ 0, numa_node);
+ if (data->rx_queues == NULL) {
+ rte_errno = ENOMEM;
+ goto error;
+ }
+
+ data->tx_queues = rte_zmalloc_socket(name, sizeof(void *) * nb_tx_queues,
+ 0, numa_node);
+ if (data->tx_queues == NULL) {
+ rte_errno = ENOMEM;
+ goto error;
+ }
pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
+ if (pci_dev == NULL) {
+ rte_errno = ENOMEM;
internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
+ if (internals == NULL) {
+ rte_errno = ENOMEM;
/* reserve an ethdev entry */
eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
/* reserve an ethdev entry */
eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
+ if (eth_dev == NULL) {
+ rte_errno = ENOSPC;
/* now put it all together
/* now put it all together
internals->nb_tx_queues = nb_tx_queues;
for (i = 0; i < nb_rx_queues; i++) {
internals->rx_ring_queues[i].rng = rx_queues[i];
internals->nb_tx_queues = nb_tx_queues;
for (i = 0; i < nb_rx_queues; i++) {
internals->rx_ring_queues[i].rng = rx_queues[i];
+ data->rx_queues[i] = &internals->rx_ring_queues[i];
}
for (i = 0; i < nb_tx_queues; i++) {
internals->tx_ring_queues[i].rng = tx_queues[i];
}
for (i = 0; i < nb_tx_queues; i++) {
internals->tx_ring_queues[i].rng = tx_queues[i];
+ data->tx_queues[i] = &internals->tx_ring_queues[i];
}
rte_ring_pmd.pci_drv.name = ring_ethdev_driver_name;
}
rte_ring_pmd.pci_drv.name = ring_ethdev_driver_name;
return data->port_id;
error:
return data->port_id;
error:
+ rte_free(data->rx_queues);
+ rte_free(data->tx_queues);
rte_free(data);
rte_free(pci_dev);
rte_free(internals);
rte_free(data);
rte_free(pci_dev);
rte_free(internals);