static unsigned default_packet_size = 64;
static unsigned default_packet_copy;
-static const char const *valid_arguments[] = {
+static const char *valid_arguments[] = {
ETH_NULL_PACKET_SIZE_ARG,
ETH_NULL_PACKET_COPY_ARG,
NULL
{
int i;
struct null_queue *h = q;
- unsigned packet_size = h->internals->packet_size;
+ unsigned packet_size;
if ((q == NULL) || (bufs == NULL))
return 0;
+ packet_size = h->internals->packet_size;
for (i = 0; i < nb_bufs; i++) {
rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
packet_size);
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
- unsigned i;
+ unsigned i, num_stats;
unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
const struct pmd_internals *internal;
internal = dev->data->dev_private;
memset(igb_stats, 0, sizeof(*igb_stats));
- for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
- i < internal->nb_rx_queues; i++) {
+ num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
+ internal->nb_rx_queues);
+ for (i = 0; i < num_stats; i++) {
igb_stats->q_ipackets[i] =
internal->rx_null_queues[i].rx_pkts.cnt;
rx_total += igb_stats->q_ipackets[i];
}
- for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
- i < internal->nb_tx_queues; i++) {
+ num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
+ internal->nb_tx_queues);
+ for (i = 0; i < num_stats; i++) {
igb_stats->q_opackets[i] =
internal->tx_null_queues[i].tx_pkts.cnt;
igb_stats->q_errors[i] =
}
}
+/* Minimal eth_driver shell: its only purpose here is to carry the
+ * RTE_PCI_DRV_DETACHABLE flag so the port can be hot-unplugged. */
+static struct eth_driver rte_null_pmd = {
+ .pci_drv = {
+ .name = "rte_null_pmd",
+ .drv_flags = RTE_PCI_DRV_DETACHABLE,
+ },
+};
+
static void
eth_queue_release(void *q)
{
return;
nq = q;
- if (nq->dummy_packet)
- rte_free(nq->dummy_packet);
+ rte_free(nq->dummy_packet);
}
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
int wait_to_complete __rte_unused) { return 0; }
-static struct eth_dev_ops ops = {
-	.dev_start = eth_dev_start,
-	.dev_stop = eth_dev_stop,
-	.dev_configure = eth_dev_configure,
-	.dev_infos_get = eth_dev_info,
-	.rx_queue_setup = eth_rx_queue_setup,
-	.tx_queue_setup = eth_tx_queue_setup,
-	.rx_queue_release = eth_queue_release,
-	.tx_queue_release = eth_queue_release,
-	.link_update = eth_link_update,
-	.stats_get = eth_stats_get,
-	.stats_reset = eth_stats_reset,
+/* Device-op table shared by every null port; const — never written at run time */
+static const struct eth_dev_ops ops = {
+ .dev_start = eth_dev_start,
+ .dev_stop = eth_dev_stop,
+ .dev_configure = eth_dev_configure,
+ .dev_infos_get = eth_dev_info,
+ .rx_queue_setup = eth_rx_queue_setup,
+ .tx_queue_setup = eth_tx_queue_setup,
+ .rx_queue_release = eth_queue_release,
+ .tx_queue_release = eth_queue_release,
+ .link_update = eth_link_update,
+ .stats_get = eth_stats_get,
+ .stats_reset = eth_stats_reset,
};
static int
data->nb_tx_queues = (uint16_t)nb_tx_queues;
data->dev_link = pmd_link;
data->mac_addrs = &eth_addr;
+ /* snprintf always NUL-terminates and bounds by the destination size;
+  * strncpy(dst, src, strlen(src)) would leave data->name unterminated. */
+ snprintf(data->name, sizeof(data->name), "%s", eth_dev->data->name);
eth_dev->data = data;
eth_dev->dev_ops = &ops;
eth_dev->pci_dev = pci_dev;
+ eth_dev->driver = &rte_null_pmd;
/* finally assign rx and tx ops */
if (packet_copy) {
return 0;
error:
- if (data)
- rte_free(data);
- if (pci_dev)
- rte_free(pci_dev);
- if (internals)
- rte_free(internals);
+ rte_free(data);
+ rte_free(pci_dev);
+ rte_free(internals);
+
return -1;
}
unsigned numa_node;
unsigned packet_size = default_packet_size;
unsigned packet_copy = default_packet_copy;
- struct rte_kvargs *kvlist;
+ struct rte_kvargs *kvlist = NULL;
int ret;
if (name == NULL)
ETH_NULL_PACKET_SIZE_ARG,
&get_packet_size_arg, &packet_size);
if (ret < 0)
- return -1;
+ goto free_kvlist;
}
if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
ETH_NULL_PACKET_COPY_ARG,
&get_packet_copy_arg, &packet_copy);
if (ret < 0)
- return -1;
+ goto free_kvlist;
}
}
"packet copy is %s\n", packet_size,
packet_copy ? "enabled" : "disabled");
- return eth_dev_null_create(name, numa_node, packet_size, packet_copy);
+ ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);
+
+free_kvlist:
+ /* rte_kvargs_free() is NULL-safe, matching the unguarded rte_free() cleanups */
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+/*
+ * Tear down a null ethdev created by rte_pmd_null_devinit(): free the
+ * per-device allocations and release the ethdev port entry.
+ * Returns 0 on success, -EINVAL for a NULL name, -1 if no port with
+ * that name exists.
+ */
+static int
+rte_pmd_null_devuninit(const char *name)
+{
+ struct rte_eth_dev *eth_dev = NULL;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
+ rte_socket_id());
+
+ /* look up the ethdev entry allocated for this device name */
+ eth_dev = rte_eth_dev_allocated(name);
+ if (eth_dev == NULL)
+ return -1;
+
+ /* mac_addrs points at the static eth_addr, so it is not freed here */
+ rte_free(eth_dev->data->dev_private);
+ rte_free(eth_dev->data);
+ rte_free(eth_dev->pci_dev);
+
+ rte_eth_dev_release_port(eth_dev);
+
+ return 0;
+}
static struct rte_driver pmd_null_drv = {
.name = "eth_null",
.type = PMD_VDEV,
.init = rte_pmd_null_devinit,
+ .uninit = rte_pmd_null_devuninit,
};
PMD_REGISTER_DRIVER(pmd_null_drv);