l2fwd_event_device_setup_generic(struct l2fwd_resources *rsrc)
{
struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ /* Configures event device as per below configuration. 8< */
struct rte_event_dev_config event_d_conf = {
.nb_events_limit = 4096,
.nb_event_queue_flows = 1024,
.nb_event_port_dequeue_depth = 128,
.nb_event_port_enqueue_depth = 128
};
+ /* >8 End of event device configuration. */
struct rte_event_dev_info dev_info;
const uint8_t event_d_id = 0; /* Always use first event device only */
uint32_t event_queue_cfg = 0;
ethdev_count++;
}
- /* Event device configurtion */
+ /* Event device configuration */
rte_event_dev_info_get(event_d_id, &dev_info);
- evt_rsrc->disable_implicit_release = !!(dev_info.event_dev_cap &
- RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
+
+ /* Enable implicit release */
+ if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
+ evt_rsrc->disable_implicit_release = 0;
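With implicit release kept enabled (as above), the PMD releases the scheduling context of the previously dequeued events on the next dequeue. For contrast, a port that disables it must release contexts explicitly; a minimal worker-side sketch, not taken from this patch (drop_packet() is illustrative):

	struct rte_event ev;

	if (rte_event_dequeue_burst(event_d_id, port_id, &ev, 1, 0)) {
		if (drop_packet(ev.mbuf)) {
			rte_pktmbuf_free(ev.mbuf);
			/* Hand the held scheduling context back explicitly. */
			ev.op = RTE_EVENT_OP_RELEASE;
			rte_event_enqueue_burst(event_d_id, port_id, &ev, 1);
		}
	}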
if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
event_d_conf.nb_event_port_enqueue_depth =
dev_info.max_event_port_enqueue_depth;
- num_workers = rte_lcore_count() - rte_service_lcore_count();
+ /* Ignore Main core and service cores. */
+ num_workers = rte_lcore_count() - 1 - rte_service_lcore_count();
if (dev_info.max_event_ports < num_workers)
num_workers = dev_info.max_event_ports;
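The excerpt stops before the device is actually configured. Roughly, the remaining fields are filled in from ethdev_count and num_workers and the config is applied with rte_event_dev_configure(); a sketch, assuming one extra single-link queue is reserved for the Tx adapter:

	event_d_conf.nb_event_queues = ethdev_count + 1;
	if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
		event_d_conf.nb_event_queues = dev_info.max_event_queues;
	if (dev_info.max_num_events < event_d_conf.nb_events_limit)
		event_d_conf.nb_events_limit = dev_info.max_num_events;
	event_d_conf.nb_event_ports = num_workers;

	if (rte_event_dev_configure(event_d_id, &event_d_conf) < 0)
		rte_panic("Error in configuring event device\n");
	evt_rsrc->event_d_id = event_d_id;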
{
struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
uint8_t event_d_id = evt_rsrc->event_d_id;
+ /* Event port initialization. 8< */
struct rte_event_port_conf event_p_conf = {
.dequeue_depth = 32,
.enqueue_depth = 32,
rte_panic("No space is available\n");
memset(&def_p_conf, 0, sizeof(struct rte_event_port_conf));
- rte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);
+ ret = rte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);
+ if (ret < 0)
+ rte_panic("Error getting default event port configuration\n");
if (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)
event_p_conf.new_event_threshold =
if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
- event_p_conf.disable_implicit_release =
- evt_rsrc->disable_implicit_release;
+ event_p_conf.event_port_cfg = 0;
+ if (evt_rsrc->disable_implicit_release)
+ event_p_conf.event_port_cfg |=
+ RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
+
evt_rsrc->deq_depth = def_p_conf.dequeue_depth;
for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
rte_panic("Error in linking event port %d to queues\n",
event_p_id);
evt_rsrc->evp.event_p_id[event_p_id] = event_p_id;
+ /* >8 End of event port initialization. */
}
/* init spinlock */
rte_spinlock_init(&evt_rsrc->evp.lock);
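The port loop above appears only in fragments. Each iteration typically configures a worker port and links it to the event queues; a sketch, where reserving the last queue for the Tx adapter is an assumption:

	for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
								event_p_id++) {
		if (rte_event_port_setup(event_d_id, event_p_id,
					 &event_p_conf) < 0)
			rte_panic("Error in configuring event port %d\n",
				  event_p_id);
		/* Link the worker port to all queues except the Tx queue. */
		ret = rte_event_port_link(event_d_id, event_p_id,
					  evt_rsrc->evq.event_q_id, NULL,
					  evt_rsrc->evq.nb_queues - 1);
		if (ret < 0)
			rte_panic("Error in linking event port %d to queues\n",
				  event_p_id);
		evt_rsrc->evp.event_p_id[event_p_id] = event_p_id;
	}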
{
struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
uint8_t event_d_id = evt_rsrc->event_d_id;
+ /* Event queue initialization. 8< */
struct rte_event_queue_conf event_q_conf = {
.nb_atomic_flows = 1024,
.nb_atomic_order_sequences = 1024,
if (!evt_rsrc->evq.event_q_id)
rte_panic("Memory allocation failure\n");
- rte_event_queue_default_conf_get(event_d_id, 0, &def_q_conf);
+ ret = rte_event_queue_default_conf_get(event_d_id, 0, &def_q_conf);
+ if (ret < 0)
+ rte_panic("Error getting default event queue configuration\n");
+ /* >8 End of event queue initialization. */
+
if (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
event_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;
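After capping against the defaults, each queue would be created with rte_event_queue_setup(); a sketch, where rsrc->sched_type and the final single-link Tx queue are assumptions:

	uint8_t event_q_id;

	event_q_conf.schedule_type = rsrc->sched_type;
	for (event_q_id = 0; event_q_id < evt_rsrc->evq.nb_queues - 1;
								event_q_id++) {
		if (rte_event_queue_setup(event_d_id, event_q_id,
					  &event_q_conf) < 0)
			rte_panic("Error in configuring event queue\n");
		evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
	}

	/* Last queue: single-link queue consumed by the Tx adapter port. */
	event_q_conf.event_queue_cfg |= RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
	if (rte_event_queue_setup(event_d_id, event_q_id, &event_q_conf) < 0)
		rte_panic("Error in configuring Tx adapter event queue\n");
	evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;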
l2fwd_rx_tx_adapter_setup_generic(struct l2fwd_resources *rsrc)
{
struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
- struct rte_event_eth_rx_adapter_queue_conf eth_q_conf = {
- .rx_queue_flags = 0,
- .ev = {
- .queue_id = 0,
- .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
- }
- };
+ struct rte_event_eth_rx_adapter_queue_conf eth_q_conf;
uint8_t event_d_id = evt_rsrc->event_d_id;
uint8_t rx_adptr_id = 0;
uint8_t tx_adptr_id = 0;
uint32_t service_id;
int32_t ret, i = 0;
+ memset(&eth_q_conf, 0, sizeof(eth_q_conf));
+ eth_q_conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+
/* Rx adapter setup */
evt_rsrc->rx_adptr.nb_rx_adptr = 1;
evt_rsrc->rx_adptr.rx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
continue;
eth_q_conf.ev.queue_id = evt_rsrc->evq.event_q_id[i];
+ if (rsrc->evt_vec.enabled) {
+ uint32_t cap;
+
+ if (rte_event_eth_rx_adapter_caps_get(event_d_id,
+ port_id, &cap))
+ rte_panic(
+ "Failed to get event rx adapter capability");
+
+ if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) {
+ eth_q_conf.vector_sz = rsrc->evt_vec.size;
+ eth_q_conf.vector_timeout_ns =
+ rsrc->evt_vec.timeout_ns;
+ eth_q_conf.vector_mp = rsrc->evt_vec_pool;
+ eth_q_conf.rx_queue_flags |=
+ RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
+ } else {
+ rte_panic(
+ "Rx adapter doesn't support event vector");
+ }
+ }
+
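For reference, the vector mempool used above (rsrc->evt_vec_pool) is created elsewhere in the sample; such a pool is typically allocated with rte_event_vector_pool_create(), e.g. (sizes illustrative):

	rsrc->evt_vec_pool = rte_event_vector_pool_create("vector_pool",
			16384 /* vectors */, 0 /* cache size */,
			rsrc->evt_vec.size /* mbufs per vector */,
			rte_socket_id());
	if (rsrc->evt_vec_pool == NULL)
		rte_panic("Failed to create event vector pool\n");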
ret = rte_event_eth_rx_adapter_queue_add(rx_adptr_id, port_id,
-1, &eth_q_conf);
if (ret)
rte_service_set_runstate_mapped_check(service_id, 0);
evt_rsrc->tx_adptr.service_id = service_id;
+ /* Extra port created. 8< */
ret = rte_event_eth_tx_adapter_event_port_get(tx_adptr_id, &tx_port_id);
if (ret)
rte_panic("Failed to get Tx adapter port id: %d\n", ret);
if (ret != 1)
rte_panic("Unable to link Tx adapter port to Tx queue:err=%d\n",
ret);
+ /* >8 End of extra port created. */
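The call whose result is checked with ret != 1 above is elided; it would typically link the adapter's event port to the dedicated single-link Tx queue, along these lines (the queue index is an assumption):

	ret = rte_event_port_link(event_d_id, tx_port_id,
				  &evt_rsrc->evq.event_q_id[
						evt_rsrc->evq.nb_queues - 1],
				  NULL, 1);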
ret = rte_event_eth_tx_adapter_start(tx_adptr_id);
if (ret)