From: Sunil Kumar Kori
Date: Wed, 30 Oct 2019 16:26:45 +0000 (+0530)
Subject: examples/l2fwd-event: setup event device
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=6ab8760019c16a44245c463d75d33c7ca1395a80;p=dpdk.git

examples/l2fwd-event: setup event device

Add event device setup based on event eth Tx adapter capabilities.

Signed-off-by: Sunil Kumar Kori
Signed-off-by: Pavan Nikhilesh
Acked-by: Nipun Gupta
Acked-by: Jerin Jacob
---
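Note: the event_device_setup callback invoked below assumes that
l2fwd_event_capability_setup() has already selected the right ops set from
the event eth Tx adapter capabilities. As a minimal sketch of that probe
(not part of this patch; the function body and the use of event device id 0
are assumptions based on the surrounding code), the selection could look
like:

/*
 * Sketch only: probe Tx adapter capabilities for every ethdev port and
 * fall back to the generic ops if any port lacks an internal event port.
 */
#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_event_eth_tx_adapter.h>

#include "l2fwd_common.h"
#include "l2fwd_event.h"

static void
l2fwd_event_capability_setup(struct l2fwd_event_resources *evt_rsrc)
{
	uint32_t caps = 0;
	uint16_t port_id;
	int ret;

	RTE_ETH_FOREACH_DEV(port_id) {
		ret = rte_event_eth_tx_adapter_caps_get(0, port_id, &caps);
		if (ret)
			rte_panic("Invalid Tx adapter capability for port %d\n",
				  port_id);
		/* One port without an internal port forces generic mode */
		if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
			break;
	}

	if (caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)
		l2fwd_event_set_internal_port_ops(&evt_rsrc->ops);
	else
		l2fwd_event_set_generic_ops(&evt_rsrc->ops);
}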
diff --git a/examples/l2fwd-event/l2fwd_event.c b/examples/l2fwd-event/l2fwd_event.c
index 7f90e63112..a5c1c2c405 100644
--- a/examples/l2fwd-event/l2fwd_event.c
+++ b/examples/l2fwd-event/l2fwd_event.c
@@ -57,4 +57,7 @@ l2fwd_event_resource_setup(struct l2fwd_resources *rsrc)
 
 	/* Setup eventdev capability callbacks */
 	l2fwd_event_capability_setup(evt_rsrc);
+
+	/* Event device configuration */
+	evt_rsrc->ops.event_device_setup(rsrc);
 }
diff --git a/examples/l2fwd-event/l2fwd_event.h b/examples/l2fwd-event/l2fwd_event.h
index b7aaa39f9a..6b5beb041d 100644
--- a/examples/l2fwd-event/l2fwd_event.h
+++ b/examples/l2fwd-event/l2fwd_event.h
@@ -13,11 +13,27 @@
 
 #include "l2fwd_common.h"
 
+typedef uint32_t (*event_device_setup_cb)(struct l2fwd_resources *rsrc);
+
+struct event_queues {
+	uint8_t nb_queues;
+};
+
+struct event_ports {
+	uint8_t nb_ports;
+};
+
 struct event_setup_ops {
+	event_device_setup_cb event_device_setup;
 };
 
 struct l2fwd_event_resources {
 	uint8_t tx_mode_q;
+	uint8_t has_burst;
+	uint8_t event_d_id;
+	uint8_t disable_implicit_release;
+	struct event_ports evp;
+	struct event_queues evq;
 	struct event_setup_ops ops;
 };
diff --git a/examples/l2fwd-event/l2fwd_event_generic.c b/examples/l2fwd-event/l2fwd_event_generic.c
index 9afade7d2f..33e5705853 100644
--- a/examples/l2fwd-event/l2fwd_event_generic.c
+++ b/examples/l2fwd-event/l2fwd_event_generic.c
@@ -16,8 +16,81 @@
 #include "l2fwd_common.h"
 #include "l2fwd_event.h"
 
+static uint32_t
+l2fwd_event_device_setup_generic(struct l2fwd_resources *rsrc)
+{
+	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+	struct rte_event_dev_config event_d_conf = {
+		.nb_events_limit = 4096,
+		.nb_event_queue_flows = 1024,
+		.nb_event_port_dequeue_depth = 128,
+		.nb_event_port_enqueue_depth = 128
+	};
+	struct rte_event_dev_info dev_info;
+	const uint8_t event_d_id = 0; /* Always use first event device only */
+	uint32_t event_queue_cfg = 0;
+	uint16_t ethdev_count = 0;
+	uint16_t num_workers = 0;
+	uint16_t port_id;
+	int ret;
+
+	RTE_ETH_FOREACH_DEV(port_id) {
+		if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+			continue;
+		ethdev_count++;
+	}
+
+	/* Event device configuration */
+	rte_event_dev_info_get(event_d_id, &dev_info);
+	evt_rsrc->disable_implicit_release = !!(dev_info.event_dev_cap &
+				    RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
+
+	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
+		event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+
+	/* One queue for each ethdev port + one Tx adapter single link queue */
+	event_d_conf.nb_event_queues = ethdev_count + 1;
+	if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
+		event_d_conf.nb_event_queues = dev_info.max_event_queues;
+
+	if (dev_info.max_num_events < event_d_conf.nb_events_limit)
+		event_d_conf.nb_events_limit = dev_info.max_num_events;
+
+	if (dev_info.max_event_queue_flows < event_d_conf.nb_event_queue_flows)
+		event_d_conf.nb_event_queue_flows =
+						dev_info.max_event_queue_flows;
+
+	if (dev_info.max_event_port_dequeue_depth <
+				event_d_conf.nb_event_port_dequeue_depth)
+		event_d_conf.nb_event_port_dequeue_depth =
+				dev_info.max_event_port_dequeue_depth;
+
+	if (dev_info.max_event_port_enqueue_depth <
+				event_d_conf.nb_event_port_enqueue_depth)
+		event_d_conf.nb_event_port_enqueue_depth =
+				dev_info.max_event_port_enqueue_depth;
+
+	num_workers = rte_lcore_count() - rte_service_lcore_count();
+	if (dev_info.max_event_ports < num_workers)
+		num_workers = dev_info.max_event_ports;
+
+	event_d_conf.nb_event_ports = num_workers;
+	evt_rsrc->evp.nb_ports = num_workers;
+	evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues;
+
+	evt_rsrc->has_burst = !!(dev_info.event_dev_cap &
+				    RTE_EVENT_DEV_CAP_BURST_MODE);
+
+	ret = rte_event_dev_configure(event_d_id, &event_d_conf);
+	if (ret < 0)
+		rte_panic("Error in configuring event device\n");
+
+	evt_rsrc->event_d_id = event_d_id;
+	return event_queue_cfg;
+}
+
 void
 l2fwd_event_set_generic_ops(struct event_setup_ops *ops)
 {
-	RTE_SET_USED(ops);
+	ops->event_device_setup = l2fwd_event_device_setup_generic;
 }
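Note: the event_queue_cfg flags returned by the setup callback above are not
consumed in this patch; presumably a follow-up queue setup step feeds them
into struct rte_event_queue_conf. A sketch under that assumption (the helper
name queue_setup_sketch is hypothetical):

/*
 * Sketch only: apply the returned event_queue_cfg flags when configuring
 * each event queue of the already-configured device.
 */
#include <rte_debug.h>
#include <rte_eventdev.h>

#include "l2fwd_event.h"

static void
queue_setup_sketch(struct l2fwd_event_resources *evt_rsrc,
		   uint32_t event_queue_cfg)
{
	struct rte_event_queue_conf q_conf;
	uint8_t qid;

	/* Start from the device default and apply ALL_TYPES if supported */
	if (rte_event_queue_default_conf_get(evt_rsrc->event_d_id, 0,
					     &q_conf) < 0)
		rte_panic("Error in getting default queue conf\n");
	q_conf.event_queue_cfg = event_queue_cfg;

	for (qid = 0; qid < evt_rsrc->evq.nb_queues; qid++)
		if (rte_event_queue_setup(evt_rsrc->event_d_id, qid,
					  &q_conf) < 0)
			rte_panic("Error in configuring event queue %d\n", qid);
}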
diff --git a/examples/l2fwd-event/l2fwd_event_internal_port.c b/examples/l2fwd-event/l2fwd_event_internal_port.c
index ce95b8e6d7..acd98798e8 100644
--- a/examples/l2fwd-event/l2fwd_event_internal_port.c
+++ b/examples/l2fwd-event/l2fwd_event_internal_port.c
@@ -16,8 +16,83 @@
 #include "l2fwd_common.h"
 #include "l2fwd_event.h"
 
+static uint32_t
+l2fwd_event_device_setup_internal_port(struct l2fwd_resources *rsrc)
+{
+	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+	struct rte_event_dev_config event_d_conf = {
+		.nb_events_limit = 4096,
+		.nb_event_queue_flows = 1024,
+		.nb_event_port_dequeue_depth = 128,
+		.nb_event_port_enqueue_depth = 128
+	};
+	struct rte_event_dev_info dev_info;
+	uint8_t disable_implicit_release;
+	const uint8_t event_d_id = 0; /* Always use first event device only */
+	uint32_t event_queue_cfg = 0;
+	uint16_t ethdev_count = 0;
+	uint16_t num_workers = 0;
+	uint16_t port_id;
+	int ret;
+
+	RTE_ETH_FOREACH_DEV(port_id) {
+		if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+			continue;
+		ethdev_count++;
+	}
+
+	/* Event device configuration */
+	rte_event_dev_info_get(event_d_id, &dev_info);
+
+	disable_implicit_release = !!(dev_info.event_dev_cap &
+				    RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
+	evt_rsrc->disable_implicit_release =
+						disable_implicit_release;
+
+	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
+		event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+
+	event_d_conf.nb_event_queues = ethdev_count;
+	if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
+		event_d_conf.nb_event_queues = dev_info.max_event_queues;
+
+	if (dev_info.max_num_events < event_d_conf.nb_events_limit)
+		event_d_conf.nb_events_limit = dev_info.max_num_events;
+
+	if (dev_info.max_event_queue_flows < event_d_conf.nb_event_queue_flows)
+		event_d_conf.nb_event_queue_flows =
+						dev_info.max_event_queue_flows;
+
+	if (dev_info.max_event_port_dequeue_depth <
+				event_d_conf.nb_event_port_dequeue_depth)
+		event_d_conf.nb_event_port_dequeue_depth =
+				dev_info.max_event_port_dequeue_depth;
+
+	if (dev_info.max_event_port_enqueue_depth <
+				event_d_conf.nb_event_port_enqueue_depth)
+		event_d_conf.nb_event_port_enqueue_depth =
+				dev_info.max_event_port_enqueue_depth;
+
+	num_workers = rte_lcore_count();
+	if (dev_info.max_event_ports < num_workers)
+		num_workers = dev_info.max_event_ports;
+
+	event_d_conf.nb_event_ports = num_workers;
+	evt_rsrc->evp.nb_ports = num_workers;
+	evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues;
+	evt_rsrc->has_burst = !!(dev_info.event_dev_cap &
+				    RTE_EVENT_DEV_CAP_BURST_MODE);
+
+	ret = rte_event_dev_configure(event_d_id, &event_d_conf);
+	if (ret < 0)
+		rte_panic("Error in configuring event device\n");
+
+	evt_rsrc->event_d_id = event_d_id;
+	return event_queue_cfg;
+}
+
 void
 l2fwd_event_set_internal_port_ops(struct event_setup_ops *ops)
 {
-	RTE_SET_USED(ops);
+	ops->event_device_setup = l2fwd_event_device_setup_internal_port;
 }
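Note: the internal-port variant above differs from the generic one in two
ways: it omits the extra Tx adapter single link queue, because transmission
goes through the device's internal event port, and it does not subtract
rte_service_lcore_count() from num_workers, because a hardware eventdev does
not need service cores for scheduling. Neither callback starts the device;
later patches in the series presumably do that once queues and ports are
linked. A sketch (the helper name is hypothetical):

/*
 * Sketch only: start the configured event device before launching workers.
 */
#include <rte_debug.h>
#include <rte_eventdev.h>

#include "l2fwd_event.h"

static void
l2fwd_event_device_start_sketch(struct l2fwd_event_resources *evt_rsrc)
{
	if (rte_event_dev_start(evt_rsrc->event_d_id) < 0)
		rte_panic("Error in starting eventdev\n");
}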