* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <inttypes.h>
#include <string.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_ring.h>
#include <rte_errno.h>
+#include <rte_event_ring.h>
+#include <rte_service_component.h>
#include "sw_evdev.h"
#include "iq_ring.h"
-#include "event_ring.h"
#define EVENTDEV_NAME_SW_PMD event_sw
#define NUMA_NODE_ARG "numa_node"
} else if (q->type == RTE_SCHED_TYPE_ORDERED) {
p->num_ordered_qids++;
p->num_qids_mapped++;
- } else if (q->type == RTE_SCHED_TYPE_ATOMIC) {
+ } else if (q->type == RTE_SCHED_TYPE_ATOMIC ||
+ q->type == RTE_SCHED_TYPE_PARALLEL) {
p->num_qids_mapped++;
}
{
struct sw_evdev *sw = sw_pmd_priv(dev);
struct sw_port *p = &sw->ports[port_id];
- char buf[QE_RING_NAMESIZE];
+ char buf[RTE_RING_NAMESIZE];
unsigned int i;
struct rte_event_dev_info info;
p->id = port_id;
p->sw = sw;
- snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
- "rx_worker_ring");
- p->rx_worker_ring = qe_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
- dev->data->socket_id);
+	/* check to see if the ring exists - port_setup() can legally be
+	 * called multiple times (assuming the device is stopped). If the
+	 * ring exists, free it so it gets re-created with the correct size
+	 */
+ snprintf(buf, sizeof(buf), "sw%d_p%u_%s", dev->data->dev_id,
+ port_id, "rx_worker_ring");
+ struct rte_event_ring *existing_ring = rte_event_ring_lookup(buf);
+ if (existing_ring)
+ rte_event_ring_free(existing_ring);
+
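+	/* SP enqueue/SC dequeue: each worker ring has exactly one producer
+	 * and one consumer (the port's thread on one side, the scheduler on
+	 * the other). EXACT_SZ makes the usable capacity exactly the
+	 * requested size instead of the next power of two minus one.
+	 */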
+ p->rx_worker_ring = rte_event_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
+ dev->data->socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
if (p->rx_worker_ring == NULL) {
SW_LOG_ERR("Error creating RX worker ring for port %d\n",
port_id);
p->inflight_max = conf->new_event_threshold;
- snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
- "cq_worker_ring");
- p->cq_worker_ring = qe_ring_create(buf, conf->dequeue_depth,
- dev->data->socket_id);
+	/* check if the ring exists, same as rx_worker_ring above */
+	snprintf(buf, sizeof(buf), "sw%d_p%u_%s", dev->data->dev_id,
+ port_id, "cq_worker_ring");
+ existing_ring = rte_event_ring_lookup(buf);
+ if (existing_ring)
+ rte_event_ring_free(existing_ring);
+
+ p->cq_worker_ring = rte_event_ring_create(buf, conf->dequeue_depth,
+ dev->data->socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
if (p->cq_worker_ring == NULL) {
- qe_ring_destroy(p->rx_worker_ring);
+ rte_event_ring_free(p->rx_worker_ring);
SW_LOG_ERR("Error creating CQ worker ring for port %d\n",
port_id);
return -1;
if (p == NULL)
return;
- qe_ring_destroy(p->rx_worker_ring);
- qe_ring_destroy(p->cq_worker_ring);
+ rte_event_ring_free(p->rx_worker_ring);
+ rte_event_ring_free(p->cq_worker_ring);
memset(p, 0, sizeof(*p));
}
return 0;
}
+struct rte_eth_dev;
+
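+/* this PMD has no hardware Rx-to-event path, so report the generic
+ * software-assisted adapter capabilities for every ethdev
+ */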
+static int
+sw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ uint32_t *caps)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+ return 0;
+}
+
static void
sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
{
.max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
.max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
+ RTE_EVENT_DEV_CAP_BURST_MODE |
RTE_EVENT_DEV_CAP_EVENT_QOS),
};
*info = evdev_sw_info;
}
+static void
+sw_dump(struct rte_eventdev *dev, FILE *f)
+{
+ const struct sw_evdev *sw = sw_pmd_priv(dev);
+
+ static const char * const q_type_strings[] = {
+ "Ordered", "Atomic", "Parallel", "Directed"
+ };
+ uint32_t i;
+ fprintf(f, "EventDev %s: ports %d, qids %d\n", "todo-fix-name",
+ sw->port_count, sw->qid_count);
+
+ fprintf(f, "\trx %"PRIu64"\n\tdrop %"PRIu64"\n\ttx %"PRIu64"\n",
+ sw->stats.rx_pkts, sw->stats.rx_dropped, sw->stats.tx_pkts);
+ fprintf(f, "\tsched calls: %"PRIu64"\n", sw->sched_called);
+ fprintf(f, "\tsched cq/qid call: %"PRIu64"\n", sw->sched_cq_qid_called);
+ fprintf(f, "\tsched no IQ enq: %"PRIu64"\n", sw->sched_no_iq_enqueues);
+ fprintf(f, "\tsched no CQ enq: %"PRIu64"\n", sw->sched_no_cq_enqueues);
+ uint32_t inflights = rte_atomic32_read(&sw->inflights);
+ uint32_t credits = sw->nb_events_limit - inflights;
+ fprintf(f, "\tinflight %d, credits: %d\n", inflights, credits);
+
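+/* ANSI colour escapes, used below to flag saturated ports, rings and IQs */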
+#define COL_RED "\x1b[31m"
+#define COL_RESET "\x1b[0m"
+
+ for (i = 0; i < sw->port_count; i++) {
+ int max, j;
+ const struct sw_port *p = &sw->ports[i];
+ if (!p->initialized) {
+ fprintf(f, " %sPort %d not initialized.%s\n",
+ COL_RED, i, COL_RESET);
+ continue;
+ }
+ fprintf(f, " Port %d %s\n", i,
+ p->is_directed ? " (SingleCons)" : "");
+ fprintf(f, "\trx %"PRIu64"\tdrop %"PRIu64"\ttx %"PRIu64
+ "\t%sinflight %d%s\n", sw->ports[i].stats.rx_pkts,
+ sw->ports[i].stats.rx_dropped,
+ sw->ports[i].stats.tx_pkts,
+ (p->inflights == p->inflight_max) ?
+ COL_RED : COL_RESET,
+ sw->ports[i].inflights, COL_RESET);
+
+ fprintf(f, "\tMax New: %u"
+ "\tAvg cycles PP: %"PRIu64"\tCredits: %u\n",
+ sw->ports[i].inflight_max,
+ sw->ports[i].avg_pkt_ticks,
+ sw->ports[i].inflight_credits);
+ fprintf(f, "\tReceive burst distribution:\n");
+		float zp_percent = p->total_polls == 0 ? 0 :
+			p->zero_polls * 100.0 / p->total_polls;
+		fprintf(f, zp_percent < 10 ? "\t\t0:%.02f%% " : "\t\t0:%.0f%% ",
+			zp_percent);
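+		/* find the highest non-empty bucket, so trailing empty
+		 * ranges are not printed
+		 */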
+ for (max = (int)RTE_DIM(p->poll_buckets); max-- > 0;)
+ if (p->poll_buckets[max] != 0)
+ break;
+ for (j = 0; j <= max; j++) {
+ if (p->poll_buckets[j] != 0) {
+ float poll_pc = p->poll_buckets[j] * 100.0 /
+ p->total_polls;
+ fprintf(f, "%u-%u:%.02f%% ",
+ ((j << SW_DEQ_STAT_BUCKET_SHIFT) + 1),
+ ((j+1) << SW_DEQ_STAT_BUCKET_SHIFT),
+ poll_pc);
+ }
+ }
+ fprintf(f, "\n");
+
+ if (p->rx_worker_ring) {
+ uint64_t used = rte_event_ring_count(p->rx_worker_ring);
+ uint64_t space = rte_event_ring_free_count(
+ p->rx_worker_ring);
+ const char *col = (space == 0) ? COL_RED : COL_RESET;
+ fprintf(f, "\t%srx ring used: %4"PRIu64"\tfree: %4"
+ PRIu64 COL_RESET"\n", col, used, space);
+ } else
+ fprintf(f, "\trx ring not initialized.\n");
+
+ if (p->cq_worker_ring) {
+ uint64_t used = rte_event_ring_count(p->cq_worker_ring);
+ uint64_t space = rte_event_ring_free_count(
+ p->cq_worker_ring);
+ const char *col = (space == 0) ? COL_RED : COL_RESET;
+ fprintf(f, "\t%scq ring used: %4"PRIu64"\tfree: %4"
+ PRIu64 COL_RESET"\n", col, used, space);
+ } else
+ fprintf(f, "\tcq ring not initialized.\n");
+ }
+
+ for (i = 0; i < sw->qid_count; i++) {
+ const struct sw_qid *qid = &sw->qids[i];
+ if (!qid->initialized) {
+ fprintf(f, " %sQueue %d not initialized.%s\n",
+ COL_RED, i, COL_RESET);
+ continue;
+ }
+ int affinities_per_port[SW_PORTS_MAX] = {0};
+ uint32_t inflights = 0;
+
+ fprintf(f, " Queue %d (%s)\n", i, q_type_strings[qid->type]);
+ fprintf(f, "\trx %"PRIu64"\tdrop %"PRIu64"\ttx %"PRIu64"\n",
+ qid->stats.rx_pkts, qid->stats.rx_dropped,
+ qid->stats.tx_pkts);
+ if (qid->type == RTE_SCHED_TYPE_ORDERED) {
+ struct rte_ring *rob_buf_free =
+ qid->reorder_buffer_freelist;
+ if (rob_buf_free)
+ fprintf(f, "\tReorder entries in use: %u\n",
+ rte_ring_free_count(rob_buf_free));
+ else
+ fprintf(f,
+ "\tReorder buffer not initialized\n");
+ }
+
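+		/* count the flows currently pinned to each port's CQ, and
+		 * total their inflight packets
+		 */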
+ uint32_t flow;
+ for (flow = 0; flow < RTE_DIM(qid->fids); flow++)
+ if (qid->fids[flow].cq != -1) {
+ affinities_per_port[qid->fids[flow].cq]++;
+ inflights += qid->fids[flow].pcount;
+ }
+
+ uint32_t port;
+ fprintf(f, "\tPer Port Stats:\n");
+ for (port = 0; port < sw->port_count; port++) {
+ fprintf(f, "\t Port %d: Pkts: %"PRIu64, port,
+ qid->to_port[port]);
+ fprintf(f, "\tFlows: %d\n", affinities_per_port[port]);
+ }
+
+ uint32_t iq;
+ uint32_t iq_printed = 0;
+ for (iq = 0; iq < SW_IQS_MAX; iq++) {
+ if (!qid->iq[iq]) {
+ fprintf(f, "\tiq %d is not initialized.\n", iq);
+ iq_printed = 1;
+ continue;
+ }
+ uint32_t used = iq_ring_count(qid->iq[iq]);
+ uint32_t free = iq_ring_free_count(qid->iq[iq]);
+ const char *col = (free == 0) ? COL_RED : COL_RESET;
+ if (used > 0) {
+ fprintf(f, "\t%siq %d: Used %d\tFree %d"
+ COL_RESET"\n", col, iq, used, free);
+ iq_printed = 1;
+ }
+ }
+ if (iq_printed == 0)
+ fprintf(f, "\t-- iqs empty --\n");
+ }
+}
+
static int
sw_start(struct rte_eventdev *dev)
{
unsigned int i, j;
struct sw_evdev *sw = sw_pmd_priv(dev);
+
+	/* check that a service core is mapped to this service */
+	if (!rte_service_runstate_get(sw->service_id))
+		SW_LOG_ERR("Warning: no service core enabled on service %s\n",
+			sw->service_name);
+
/* check all ports are set up */
for (i = 0; i < sw->port_count; i++)
if (sw->ports[i].rx_worker_ring == NULL) {
}
}
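+	/* (re)build the xstats to match the current port/queue config */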
+ if (sw_xstats_init(sw) < 0)
+ return -EINVAL;
+
rte_smp_wmb();
sw->started = 1;
sw_stop(struct rte_eventdev *dev)
{
struct sw_evdev *sw = sw_pmd_priv(dev);
+ sw_xstats_uninit(sw);
sw->started = 0;
rte_smp_wmb();
}
return 0;
}
+
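+/* service callback: each invocation runs one iteration of the scheduler.
+ * Events only move while some lcore is mapped to this service, e.g. via
+ * rte_service_map_lcore_set().
+ */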
+static int32_t
+sw_sched_service_func(void *args)
+{
+ struct rte_eventdev *dev = args;
+ sw_event_schedule(dev);
+ return 0;
+}
+
static int
-sw_probe(const char *name, const char *params)
+sw_probe(struct rte_vdev_device *vdev)
{
static const struct rte_eventdev_ops evdev_sw_ops = {
.dev_configure = sw_dev_configure,
.dev_close = sw_close,
.dev_start = sw_start,
.dev_stop = sw_stop,
+ .dump = sw_dump,
.queue_def_conf = sw_queue_def_conf,
.queue_setup = sw_queue_setup,
.port_release = sw_port_release,
.port_link = sw_port_link,
.port_unlink = sw_port_unlink,
+
+ .eth_rx_adapter_caps_get = sw_eth_rx_adapter_caps_get,
+
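+	/* extended stats; built by sw_xstats_init() at start and released
+	 * by sw_xstats_uninit() at stop (see sw_start()/sw_stop() above)
+	 */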
+ .xstats_get = sw_xstats_get,
+ .xstats_get_names = sw_xstats_get_names,
+ .xstats_get_by_name = sw_xstats_get_by_name,
+ .xstats_reset = sw_xstats_reset,
};
static const char *const args[] = {
CREDIT_QUANTA_ARG,
NULL
};
+ const char *name;
+ const char *params;
struct rte_eventdev *dev;
struct sw_evdev *sw;
int socket_id = rte_socket_id();
int sched_quanta = SW_DEFAULT_SCHED_QUANTA;
int credit_quanta = SW_DEFAULT_CREDIT_QUANTA;
+ name = rte_vdev_device_name(vdev);
+ params = rte_vdev_device_args(vdev);
if (params != NULL && params[0] != '\0') {
struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
dev->dev_ops = &evdev_sw_ops;
dev->enqueue = sw_event_enqueue;
dev->enqueue_burst = sw_event_enqueue_burst;
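+	/* new/forward enqueues take the same path in this PMD, so all three
+	 * entry points share the generic burst implementation
+	 */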
+ dev->enqueue_new_burst = sw_event_enqueue_burst;
+ dev->enqueue_forward_burst = sw_event_enqueue_burst;
dev->dequeue = sw_event_dequeue;
dev->dequeue_burst = sw_event_dequeue_burst;
dev->schedule = sw_event_schedule;
sw->credit_update_quanta = credit_quanta;
sw->sched_quanta = sched_quanta;
+ /* register service with EAL */
+ struct rte_service_spec service;
+ memset(&service, 0, sizeof(struct rte_service_spec));
+ snprintf(service.name, sizeof(service.name), "%s_service", name);
+ snprintf(sw->service_name, sizeof(sw->service_name), "%s_service",
+ name);
+ service.socket_id = socket_id;
+ service.callback = sw_sched_service_func;
+ service.callback_userdata = (void *)dev;
+
+ int32_t ret = rte_service_component_register(&service, &sw->service_id);
+ if (ret) {
+		SW_LOG_ERR("service register() failed\n");
+ return -ENOEXEC;
+ }
+
return 0;
}
static int
-sw_remove(const char *name)
+sw_remove(struct rte_vdev_device *vdev)
{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;