/*-
* BSD LICENSE
- *
+ *
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
- *
+ *
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
*   notice, this list of conditions and the following disclaimer in
*   the documentation and/or other materials provided with the
*   distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
-#include <rte_vdev.h>
+#include <rte_dev.h>
#include <rte_kvargs.h>
#define ETH_RING_NUMA_NODE_ACTION_ARG "nodeaction"
{
void **ptrs = (void *)&bufs[0];
struct ring_queue *r = q;
- const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
+ const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
ptrs, nb_bufs);
if (r->rng->flags & RING_F_SC_DEQ)
r->rx_pkts.cnt += nb_rx;
{
void **ptrs = (void *)&bufs[0];
struct ring_queue *r = q;
- const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
+ const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
ptrs, nb_bufs);
if (r->rng->flags & RING_F_SP_ENQ) {
r->tx_pkts.cnt += nb_tx;
unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
const struct pmd_internals *internal = dev->data->dev_private;
- memset(igb_stats, 0, sizeof(*igb_stats));
for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
i < internal->nb_rx_queues; i++) {
igb_stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
};
int
-rte_eth_from_rings(struct rte_ring *const rx_queues[],
+rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
const unsigned nb_rx_queues,
struct rte_ring *const tx_queues[],
const unsigned nb_tx_queues,
/* now do all data allocation - for eth_dev structure, dummy pci driver
* and internal (private) data
*/
- data = rte_zmalloc_socket(NULL, sizeof(*data), 0, numa_node);
+ data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
if (data == NULL)
goto error;
- pci_dev = rte_zmalloc_socket(NULL, sizeof(*pci_dev), 0, numa_node);
+ pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
if (pci_dev == NULL)
goto error;
- internals = rte_zmalloc_socket(NULL, sizeof(*internals), 0, numa_node);
+ internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
if (internals == NULL)
goto error;
/* reserve an ethdev entry */
- eth_dev = rte_eth_dev_allocate();
+ eth_dev = rte_eth_dev_allocate(name);
if (eth_dev == NULL)
goto error;
RTE_PMD_RING_MAX_TX_RINGS);
for (i = 0; i < num_rings; i++) {
- rte_snprintf(rng_name, sizeof(rng_name), "ETH_RXTX%u_%s", i, name);
+ snprintf(rng_name, sizeof(rng_name), "ETH_RXTX%u_%s", i, name);
rxtx[i] = (action == DEV_CREATE) ?
rte_ring_create(rng_name, 1024, numa_node,
RING_F_SP_ENQ|RING_F_SC_DEQ) :
return -1;
}
- if (rte_eth_from_rings(rxtx, num_rings, rxtx, num_rings, numa_node))
+ if (rte_eth_from_rings(name, rxtx, num_rings, rxtx, num_rings, numa_node))
return -1;
return 0;
struct rte_ring *rx[RTE_PMD_RING_MAX_RX_RINGS];
struct rte_ring *tx[RTE_PMD_RING_MAX_TX_RINGS];
unsigned i;
- char rng_name[RTE_RING_NAMESIZE];
+ char rx_rng_name[RTE_RING_NAMESIZE];
+ char tx_rng_name[RTE_RING_NAMESIZE];
unsigned num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
RTE_PMD_RING_MAX_TX_RINGS);
for (i = 0; i < num_rings; i++) {
- rte_snprintf(rng_name, sizeof(rng_name), "ETH_RX%u_%s", i, name);
+ snprintf(rx_rng_name, sizeof(rx_rng_name), "ETH_RX%u_%s", i, name);
rx[i] = (action == DEV_CREATE) ?
- rte_ring_create(rng_name, 1024, numa_node,
+ rte_ring_create(rx_rng_name, 1024, numa_node,
RING_F_SP_ENQ|RING_F_SC_DEQ) :
- rte_ring_lookup(rng_name);
+ rte_ring_lookup(rx_rng_name);
if (rx[i] == NULL)
return -1;
- rte_snprintf(rng_name, sizeof(rng_name), "ETH_TX%u_%s", i, name);
+ snprintf(tx_rng_name, sizeof(tx_rng_name), "ETH_TX%u_%s", i, name);
tx[i] = (action == DEV_CREATE) ?
- rte_ring_create(rng_name, 1024, numa_node,
+ rte_ring_create(tx_rng_name, 1024, numa_node,
RING_F_SP_ENQ|RING_F_SC_DEQ):
- rte_ring_lookup(rng_name);
+ rte_ring_lookup(tx_rng_name);
if (tx[i] == NULL)
return -1;
}
- if (rte_eth_from_rings(rx, num_rings, tx, num_rings, numa_node) ||
- rte_eth_from_rings(tx, num_rings, rx, num_rings, numa_node) )
+ if (rte_eth_from_rings(rx_rng_name, rx, num_rings, tx, num_rings,
+ numa_node) || rte_eth_from_rings(tx_rng_name, tx, num_rings, rx,
+ num_rings, numa_node))
return -1;
return 0;
goto out;
}
- rte_snprintf(info->list[info->count].name, sizeof(info->list[info->count].name), "%s", name);
+ snprintf(info->list[info->count].name, sizeof(info->list[info->count].name), "%s", name);
info->count++;
return ret;
}
-int
+static int
rte_pmd_ring_devinit(const char *name, const char *params)
{
struct rte_kvargs *kvlist;
eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE);
return 0;
} else {
- eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE);
ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
info = rte_zmalloc("struct node_action_list", sizeof(struct node_action_list) +
(sizeof(struct node_action_pair) * ret), 0);
goto out_free;
for (info->count = 0; info->count < info->total; info->count++) {
- eth_dev_ring_pair_create(name, info->list[info->count].node,
+ eth_dev_ring_create(name, info->list[info->count].node,
info->list[info->count].action);
}
}
return ret;
}
-static struct rte_vdev_driver pmd_ring_drv = {
+static struct rte_driver pmd_ring_drv = {
.name = "eth_ring",
+ .type = PMD_VDEV,
.init = rte_pmd_ring_devinit,
};
-PMD_REGISTER_DRIVER(pmd_ring_drv, PMD_VDEV);
+PMD_REGISTER_DRIVER(pmd_ring_drv);