git.droids-corp.org - dpdk.git/commitdiff
examples/l2fwd-event: fix event device config
author: Pavan Nikhilesh <pbhagavatula@marvell.com>
Fri, 20 Dec 2019 14:27:41 +0000 (19:57 +0530)
committer: Jerin Jacob <jerinj@marvell.com>
Tue, 14 Jan 2020 19:20:15 +0000 (20:20 +0100)
Always enable implicit release since we don't support explicit release
in datapath.
Master lcore is used only for printing stats, so don't allocate an event
port for it.
Fix service launch for event device without distributed scheduling.

Fixes: bcb6f841d42a ("examples/l2fwd-event: setup service core")
Cc: stable@dpdk.org
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
examples/l2fwd-event/l2fwd_event.c
examples/l2fwd-event/l2fwd_event_generic.c
examples/l2fwd-event/l2fwd_event_internal_port.c

index 0379c580d6407345b0331a1bf68cf1208c5f7210..38d590c14c93f867746dd159cad50e2f940699f1 100644 (file)
@@ -67,7 +67,7 @@ l2fwd_event_service_setup(struct l2fwd_resources *rsrc)
        int ret, i;
 
        rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info);
-       if (evdev_info.event_dev_cap  & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED) {
+       if (!(evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
                ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id,
                                &service_id);
                if (ret != -ESRCH && ret != 0)
index b7e467c1e14aeddbcc3304a574eda78ecf920063..b07306a17b05fe65c90f6b6afefbfa4152e392ab 100644 (file)
@@ -42,8 +42,10 @@ l2fwd_event_device_setup_generic(struct l2fwd_resources *rsrc)
 
        /* Event device configurtion */
        rte_event_dev_info_get(event_d_id, &dev_info);
-       evt_rsrc->disable_implicit_release = !!(dev_info.event_dev_cap &
-                                   RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
+
+       /* Enable implicit release */
+       if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
+               evt_rsrc->disable_implicit_release = 0;
 
        if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
                event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
@@ -70,7 +72,8 @@ l2fwd_event_device_setup_generic(struct l2fwd_resources *rsrc)
                event_d_conf.nb_event_port_enqueue_depth =
                                dev_info.max_event_port_enqueue_depth;
 
-       num_workers = rte_lcore_count() - rte_service_lcore_count();
+       /* Ignore Master core and service cores. */
+       num_workers = rte_lcore_count() - 1 - rte_service_lcore_count();
        if (dev_info.max_event_ports < num_workers)
                num_workers = dev_info.max_event_ports;
 
index b382763dd9cdf40470ded3ba540200f065853170..5e6e8598af58af73738697c333a32ca38c7b4af4 100644 (file)
@@ -27,7 +27,6 @@ l2fwd_event_device_setup_internal_port(struct l2fwd_resources *rsrc)
                .nb_event_port_enqueue_depth = 128
        };
        struct rte_event_dev_info dev_info;
-       uint8_t disable_implicit_release;
        const uint8_t event_d_id = 0; /* Always use first event device only */
        uint32_t event_queue_cfg = 0;
        uint16_t ethdev_count = 0;
@@ -44,10 +43,9 @@ l2fwd_event_device_setup_internal_port(struct l2fwd_resources *rsrc)
        /* Event device configurtion */
        rte_event_dev_info_get(event_d_id, &dev_info);
 
-       disable_implicit_release = !!(dev_info.event_dev_cap &
-                                   RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
-       evt_rsrc->disable_implicit_release =
-                                               disable_implicit_release;
+       /* Enable implicit release */
+       if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
+               evt_rsrc->disable_implicit_release = 0;
 
        if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
                event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
@@ -73,7 +71,8 @@ l2fwd_event_device_setup_internal_port(struct l2fwd_resources *rsrc)
                event_d_conf.nb_event_port_enqueue_depth =
                                dev_info.max_event_port_enqueue_depth;
 
-       num_workers = rte_lcore_count();
+       /* Ignore Master core. */
+       num_workers = rte_lcore_count() - 1;
        if (dev_info.max_event_ports < num_workers)
                num_workers = dev_info.max_event_ports;