eventdev: express DLB/DLB2 PMD constraints
author    Timothy McDaniel <timothy.mcdaniel@intel.com>
          Thu, 15 Oct 2020 18:07:15 +0000 (13:07 -0500)
committer Jerin Jacob <jerinj@marvell.com>
          Thu, 15 Oct 2020 21:16:07 +0000 (23:16 +0200)
This commit implements the eventdev ABI changes required by
the DLB/DLB2 PMDs.  Several data structures and constants are modified
or added in this patch, thereby requiring modifications to the
dependent apps and examples.

The DLB/DLB2 hardware does not conform exactly to the eventdev interface.
1) It has a limit on the number of queues that may be linked to a port.
2) Some ports are further restricted to a maximum of 1 linked queue.
3) DLB does not have the ability to carry the flow_id as part
   of the event (QE) payload. Note that the DLB2 hardware is capable of
   carrying the flow_id (a workaround is sketched after this list).

Following is a detailed description of the changes that have been made.

1) Add new fields to the rte_event_dev_info struct. These fields allow
the device to advertise its capabilities so that applications can take
the appropriate actions based on those capabilities.

    struct rte_event_dev_info {
            uint8_t max_event_port_links;
            /**< Maximum number of queues that can be linked to a single event
             * port by this device.
             */

            uint8_t max_single_link_event_port_queue_pairs;
            /**< Maximum number of event ports and queues that are optimized for
             * (and only capable of) single-link configurations supported by this
             * device. These ports and queues are not accounted for in
             * max_event_ports or max_event_queues.
             */
    }
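
Applications can read these fields with rte_event_dev_info_get() before
sizing their configuration. A minimal sketch (device ID 0 and the printout
are illustrative assumptions):

    struct rte_event_dev_info info;

    rte_event_dev_info_get(0, &info);
    /* Single-link pairs are available on top of the regular limits. */
    printf("max links/port: %u, single-link pairs: %u\n",
           info.max_event_port_links,
           info.max_single_link_event_port_queue_pairs);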

2) Add a new field to the rte_event_dev_config struct. This field allows
the application to specify how many of its ports are limited to a single
link, or will be used in single link mode.

    /** Event device configuration structure */
    struct rte_event_dev_config {
            uint8_t nb_single_link_event_port_queues;
            /**< Number of event ports and queues that will be singly-linked to
             * each other. These are a subset of the overall event ports and
             * queues; this value cannot exceed *nb_event_ports* or
             * *nb_event_queues*. If the device has ports and queues that are
             * optimized for single-link usage, this field is a hint for how many
             * to allocate; otherwise, regular event ports and queues can be used.
             */
    }
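
For example, an application dedicating one port/queue pair to a single-link
TX path could configure the device as follows. This is a sketch with
illustrative counts; rte_event_dev_configure() now rejects configurations
where this field exceeds nb_event_ports or nb_event_queues (see the
rte_eventdev.c hunk below):

    struct rte_event_dev_config config = {
            .nb_event_queues = 4, /* includes the single-link queue */
            .nb_event_ports = 4,  /* includes the single-link port */
            .nb_single_link_event_port_queues = 1,
            .nb_events_limit = 4096,
            .nb_event_queue_flows = 1024,
            .nb_event_port_dequeue_depth = 32,
            .nb_event_port_enqueue_depth = 32,
    };

    if (rte_event_dev_configure(0, &config) < 0)
            rte_panic("failed to configure eventdev 0\n");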

3) Replace the dedicated implicit_release_disabled field with a bit field
of explicit port capabilities. The implicit_release_disable functionality
is assigned to one bit, and a port-is-single-link-only attribute is
assigned to another, with the remaining bits available for future assignment.

/* Event port configuration bitmap flags */
#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL    (1ULL << 0)
/**< Configure the port not to release outstanding events in
 * rte_event_dev_dequeue_burst(). If set, all events received through
 * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
 * RTE_EVENT_OP_FORWARD. Must be unset if the device is not
 * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
 */
#define RTE_EVENT_PORT_CFG_SINGLE_LINK         (1ULL << 1)
/**< This event port links only to a single event queue.
 *
 *  @see rte_event_port_setup(), rte_event_port_link()
 */

#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3
/**
 * The implicit release disable attribute of the port
 */

struct rte_event_port_conf {
        uint32_t event_port_cfg;
        /**< Port cfg flags(EVENT_PORT_CFG_) */
}
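
Ports are then configured by ORing flags into event_port_cfg. A minimal
sketch setting up port 0 as a single-link port, disabling implicit release
only when the device supports it (IDs are illustrative; info is the
rte_event_dev_info from the earlier sketch):

    struct rte_event_port_conf pconf;
    uint32_t impl_rel_disabled;

    rte_event_port_default_conf_get(0, 0, &pconf);
    pconf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
    if (info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
            pconf.event_port_cfg |= RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
    if (rte_event_port_setup(0, 0, &pconf) < 0)
            rte_panic("failed to set up port 0\n");

    /* The setting can be read back through the new port attribute. */
    rte_event_port_attr_get(0, 0,
                            RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE,
                            &impl_rel_disabled);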

This patch also removes the deprecation notice and announces the new
eventdev ABI changes in the release notes.

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
Acked-by: Harry van Haaren <harry.van.haaren@intel.com>
Acked-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
28 files changed:
app/test-eventdev/evt_common.h
app/test-eventdev/test_order_atq.c
app/test-eventdev/test_order_common.c
app/test-eventdev/test_order_queue.c
app/test/test_eventdev.c
doc/guides/rel_notes/deprecation.rst
doc/guides/rel_notes/release_20_11.rst
drivers/event/dpaa/dpaa_eventdev.c
drivers/event/dpaa2/dpaa2_eventdev.c
drivers/event/dsw/dsw_evdev.c
drivers/event/octeontx/ssovf_evdev.c
drivers/event/octeontx2/otx2_evdev.c
drivers/event/opdl/opdl_evdev.c
drivers/event/skeleton/skeleton_eventdev.c
drivers/event/sw/sw_evdev.c
drivers/event/sw/sw_evdev_selftest.c
examples/eventdev_pipeline/pipeline_worker_generic.c
examples/eventdev_pipeline/pipeline_worker_tx.c
examples/l2fwd-event/l2fwd_event_generic.c
examples/l2fwd-event/l2fwd_event_internal_port.c
examples/l3fwd/l3fwd_event_generic.c
examples/l3fwd/l3fwd_event_internal_port.c
lib/librte_eventdev/rte_event_eth_tx_adapter.c
lib/librte_eventdev/rte_eventdev.c
lib/librte_eventdev/rte_eventdev.h
lib/librte_eventdev/rte_eventdev_pmd_pci.h
lib/librte_eventdev/rte_eventdev_trace.h
lib/librte_eventdev/rte_eventdev_version.map

diff --git a/app/test-eventdev/evt_common.h b/app/test-eventdev/evt_common.h
index f9d7378d30ff74a382461957307695e73b96013c..a1da1cf11e28492741f82855c96773e466958be4 100644 (file)
@@ -104,6 +104,16 @@ evt_has_all_types_queue(uint8_t dev_id)
                        true : false;
 }
 
+static inline bool
+evt_has_flow_id(uint8_t dev_id)
+{
+       struct rte_event_dev_info dev_info;
+
+       rte_event_dev_info_get(dev_id, &dev_info);
+       return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_CARRY_FLOW_ID) ?
+                       true : false;
+}
+
 static inline int
 evt_service_setup(uint32_t service_id)
 {
@@ -169,6 +179,7 @@ evt_configure_eventdev(struct evt_options *opt, uint8_t nb_queues,
                        .dequeue_timeout_ns = opt->deq_tmo_nsec,
                        .nb_event_queues = nb_queues,
                        .nb_event_ports = nb_ports,
+                       .nb_single_link_event_port_queues = 0,
                        .nb_events_limit  = info.max_num_events,
                        .nb_event_queue_flows = opt->nb_flows,
                        .nb_event_port_dequeue_depth =
diff --git a/app/test-eventdev/test_order_atq.c b/app/test-eventdev/test_order_atq.c
index 3366cfce9a60d0d248a510c522d6f84361006a76..cfcb1dc4e9e6f2fd6fd52e5abeffe844529ccc89 100644 (file)
@@ -19,7 +19,7 @@ order_atq_process_stage_0(struct rte_event *const ev)
 }
 
 static int
-order_atq_worker(void *arg)
+order_atq_worker(void *arg, const bool flow_id_cap)
 {
        ORDER_WORKER_INIT;
        struct rte_event ev;
@@ -34,6 +34,9 @@ order_atq_worker(void *arg)
                        continue;
                }
 
+               if (!flow_id_cap)
+                       ev.flow_id = ev.mbuf->udata64;
+
                if (ev.sub_event_type == 0) { /* stage 0 from producer */
                        order_atq_process_stage_0(&ev);
                        while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
@@ -50,7 +53,7 @@ order_atq_worker(void *arg)
 }
 
 static int
-order_atq_worker_burst(void *arg)
+order_atq_worker_burst(void *arg, const bool flow_id_cap)
 {
        ORDER_WORKER_INIT;
        struct rte_event ev[BURST_SIZE];
@@ -68,6 +71,9 @@ order_atq_worker_burst(void *arg)
                }
 
                for (i = 0; i < nb_rx; i++) {
+                       if (!flow_id_cap)
+                               ev[i].flow_id = ev[i].mbuf->udata64;
+
                        if (ev[i].sub_event_type == 0) { /*stage 0 */
                                order_atq_process_stage_0(&ev[i]);
                        } else if (ev[i].sub_event_type == 1) { /* stage 1 */
@@ -95,11 +101,19 @@ worker_wrapper(void *arg)
 {
        struct worker_data *w  = arg;
        const bool burst = evt_has_burst_mode(w->dev_id);
-
-       if (burst)
-               return order_atq_worker_burst(arg);
-       else
-               return order_atq_worker(arg);
+       const bool flow_id_cap = evt_has_flow_id(w->dev_id);
+
+       if (burst) {
+               if (flow_id_cap)
+                       return order_atq_worker_burst(arg, true);
+               else
+                       return order_atq_worker_burst(arg, false);
+       } else {
+               if (flow_id_cap)
+                       return order_atq_worker(arg, true);
+               else
+                       return order_atq_worker(arg, false);
+       }
 }
 
 static int
diff --git a/app/test-eventdev/test_order_common.c b/app/test-eventdev/test_order_common.c
index 4190f9ade82baa2a906e46b53a5ec050cf8647bd..79423907bd70d07d27605e5e771d2693a35ddfbe 100644 (file)
@@ -49,6 +49,7 @@ order_producer(void *arg)
                const uint32_t flow = (uintptr_t)m % nb_flows;
                /* Maintain seq number per flow */
                m->seqn = producer_flow_seq[flow]++;
+               m->udata64 = flow;
 
                ev.flow_id = flow;
                ev.mbuf = m;
diff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c
index 495efd92f94e7632eaebf17b6c25aa2c5eaa37a2..1511c0092da7e72aae416564bf7e8b8b340c5c37 100644 (file)
@@ -19,7 +19,7 @@ order_queue_process_stage_0(struct rte_event *const ev)
 }
 
 static int
-order_queue_worker(void *arg)
+order_queue_worker(void *arg, const bool flow_id_cap)
 {
        ORDER_WORKER_INIT;
        struct rte_event ev;
@@ -34,6 +34,9 @@ order_queue_worker(void *arg)
                        continue;
                }
 
+               if (!flow_id_cap)
+                       ev.flow_id = ev.mbuf->udata64;
+
                if (ev.queue_id == 0) { /* from ordered queue */
                        order_queue_process_stage_0(&ev);
                        while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
@@ -50,7 +53,7 @@ order_queue_worker(void *arg)
 }
 
 static int
-order_queue_worker_burst(void *arg)
+order_queue_worker_burst(void *arg, const bool flow_id_cap)
 {
        ORDER_WORKER_INIT;
        struct rte_event ev[BURST_SIZE];
@@ -68,6 +71,10 @@ order_queue_worker_burst(void *arg)
                }
 
                for (i = 0; i < nb_rx; i++) {
+
+                       if (!flow_id_cap)
+                               ev[i].flow_id = ev[i].mbuf->udata64;
+
                        if (ev[i].queue_id == 0) { /* from ordered queue */
                                order_queue_process_stage_0(&ev[i]);
                        } else if (ev[i].queue_id == 1) {/* from atomic queue */
@@ -95,11 +102,19 @@ worker_wrapper(void *arg)
 {
        struct worker_data *w  = arg;
        const bool burst = evt_has_burst_mode(w->dev_id);
-
-       if (burst)
-               return order_queue_worker_burst(arg);
-       else
-               return order_queue_worker(arg);
+       const bool flow_id_cap = evt_has_flow_id(w->dev_id);
+
+       if (burst) {
+               if (flow_id_cap)
+                       return order_queue_worker_burst(arg, true);
+               else
+                       return order_queue_worker_burst(arg, false);
+       } else {
+               if (flow_id_cap)
+                       return order_queue_worker(arg, true);
+               else
+                       return order_queue_worker(arg, false);
+       }
 }
 
 static int
diff --git a/app/test/test_eventdev.c b/app/test/test_eventdev.c
index 43ccb1ce9788ec802ae8b85b7fae38c57408afb3..62019c1856963b1be5bc726189d1aa3456953b05 100644 (file)
@@ -559,10 +559,10 @@ test_eventdev_port_setup(void)
        if (!(info.event_dev_cap &
              RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
                pconf.enqueue_depth = info.max_event_port_enqueue_depth;
-               pconf.disable_implicit_release = 1;
+               pconf.event_port_cfg = RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
                ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
                TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
-               pconf.disable_implicit_release = 0;
+               pconf.event_port_cfg = 0;
        }
 
        ret = rte_event_port_setup(TEST_DEV_ID, info.max_event_ports,
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 604f198059c56102473c281144cf0b1fce577c4f..6cbfd118475646c5378747b916f5103e0081055b 100644 (file)
@@ -177,19 +177,6 @@ Deprecation Notices
   to one it means it represents IV, when is set to zero it means J0 is used
   directly, in this case 16 bytes of J0 need to be passed.
 
-* eventdev: Following structures will be modified to support DLB PMD
-  and future extensions:
-
-  - ``rte_event_dev_info``
-  - ``rte_event_dev_config``
-  - ``rte_event_port_conf``
-
-  Patches containing justification, documentation, and proposed modifications
-  can be found at:
-
-  - https://patches.dpdk.org/patch/71457/
-  - https://patches.dpdk.org/patch/71456/
-
 * sched: To allow more traffic classes, flexible mapping of pipe queues to
   traffic classes, and subport level configuration of pipes and queues
   changes will be made to macros, data structures and API functions defined
diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst
index 7e96c45ae4116d96ef34e9a980a9c29c68e8c252..cda5b2f5b25d2399dc5f1dd2ecfd3a8ad2e34686 100644 (file)
@@ -474,6 +474,13 @@ ABI Changes
 
   * ``ethdev`` internal functions are marked with ``__rte_internal`` tag.
 
+* eventdev: Following structures are modified to support DLB/DLB2 PMDs
+  and future extensions:
+
+  * ``rte_event_dev_info``
+  * ``rte_event_dev_config``
+  * ``rte_event_port_conf``
+
 * sched: Added new fields to ``struct rte_sched_subport_port_params``.
 
 
diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index b5ae87a4ea1d9efb08fbc51674763dac5cd45ce2..07cd07976844043651ce880e50dbf85f23af7c37 100644 (file)
@@ -355,7 +355,8 @@ dpaa_event_dev_info_get(struct rte_eventdev *dev,
                RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
                RTE_EVENT_DEV_CAP_BURST_MODE |
                RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
-               RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+               RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+               RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
 }
 
 static int
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index f7383ca738429bd8e1b7468f4856c10dc0f63888..95f03c8b9e39683571f27b9fb8e590ae2ca26ff6 100644 (file)
@@ -406,7 +406,8 @@ dpaa2_eventdev_info_get(struct rte_eventdev *dev,
                RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
                RTE_EVENT_DEV_CAP_NONSEQ_MODE |
-               RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES;
+               RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
+               RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
 
 }
 
@@ -536,7 +537,7 @@ dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
                DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
        port_conf->enqueue_depth =
                DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
-       port_conf->disable_implicit_release = 0;
+       port_conf->event_port_cfg = 0;
 }
 
 static int
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index e796975dfcf1540c3b56c82cbdddfbf38127a183..933a5a559bac349825f8aeb7b33f15d512ccf03a 100644 (file)
@@ -224,7 +224,8 @@ dsw_info_get(struct rte_eventdev *dev __rte_unused,
                .event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE|
                RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED|
                RTE_EVENT_DEV_CAP_NONSEQ_MODE|
-               RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT
+               RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT|
+               RTE_EVENT_DEV_CAP_CARRY_FLOW_ID
        };
 }
 
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index 33cb502043c8b222ccf7695c953471239f43ca5b..6f242aac1b0174c2c48fa337c9efde8b15cd8a9e 100644 (file)
@@ -152,7 +152,8 @@ ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
                                        RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES|
                                        RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                                        RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
-                                       RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+                                       RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+                                       RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
 
 }
 
@@ -218,7 +219,7 @@ ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
        port_conf->new_event_threshold = edev->max_num_events;
        port_conf->dequeue_depth = 1;
        port_conf->enqueue_depth = 1;
-       port_conf->disable_implicit_release = 0;
+       port_conf->event_port_cfg = 0;
 }
 
 static void
diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
index 256b6a55d82fdfe9e8d82ad820e8bcf058f83462..b31c26e9533c34787f5e76582904dfa94a4f5658 100644 (file)
@@ -501,7 +501,8 @@ otx2_sso_info_get(struct rte_eventdev *event_dev,
                                        RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
                                        RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                                        RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
-                                       RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+                                       RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+                                       RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
 }
 
 static void
diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c
index 9b2f75fe37f049d9ab9da5e779da0b073df33717..3050578ffda1ec82497976cec6e83fca2e8e0740 100644 (file)
@@ -374,7 +374,8 @@ opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
                .max_event_port_dequeue_depth = MAX_OPDL_CONS_Q_DEPTH,
                .max_event_port_enqueue_depth = MAX_OPDL_CONS_Q_DEPTH,
                .max_num_events = OPDL_INFLIGHT_EVENTS_TOTAL,
-               .event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE,
+               .event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE |
+                                RTE_EVENT_DEV_CAP_CARRY_FLOW_ID,
        };
 
        *info = evdev_opdl_info;
diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
index c889220e06c77ee7296a2609b18711ac69980f88..6fd11025968355a66a4a6a196ad7543bfd0ee38f 100644 (file)
@@ -101,7 +101,8 @@ skeleton_eventdev_info_get(struct rte_eventdev *dev,
        dev_info->max_num_events = (1ULL << 20);
        dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
                                        RTE_EVENT_DEV_CAP_BURST_MODE |
-                                       RTE_EVENT_DEV_CAP_EVENT_QOS;
+                                       RTE_EVENT_DEV_CAP_EVENT_QOS |
+                                       RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
 }
 
 static int
@@ -209,7 +210,7 @@ skeleton_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
        port_conf->new_event_threshold = 32 * 1024;
        port_conf->dequeue_depth = 16;
        port_conf->enqueue_depth = 16;
-       port_conf->disable_implicit_release = 0;
+       port_conf->event_port_cfg = 0;
 }
 
 static void
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index e310c8c34696196306ca85f2708e854e087f137f..0d8013adf7cb384b92f7327567e9241109ba1232 100644 (file)
@@ -179,7 +179,8 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
        }
 
        p->inflight_max = conf->new_event_threshold;
-       p->implicit_release = !conf->disable_implicit_release;
+       p->implicit_release = !(conf->event_port_cfg &
+                               RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
 
        /* check if ring exists, same as rx_worker above */
        snprintf(buf, sizeof(buf), "sw%d_p%u, %s", dev->data->dev_id,
@@ -501,7 +502,7 @@ sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
        port_conf->new_event_threshold = 1024;
        port_conf->dequeue_depth = 16;
        port_conf->enqueue_depth = 16;
-       port_conf->disable_implicit_release = 0;
+       port_conf->event_port_cfg = 0;
 }
 
 static int
@@ -608,7 +609,8 @@ sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
                                RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE|
                                RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                                RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
-                               RTE_EVENT_DEV_CAP_NONSEQ_MODE),
+                               RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+                               RTE_EVENT_DEV_CAP_CARRY_FLOW_ID),
        };
 
        *info = evdev_sw_info;
diff --git a/drivers/event/sw/sw_evdev_selftest.c b/drivers/event/sw/sw_evdev_selftest.c
index 38c21fa0fa3d1e3ca68cf80d998ad1cc39e4a3f9..4a7d8230a63b24e01261e92d6372107e8b1ab62d 100644 (file)
@@ -172,7 +172,6 @@ create_ports(struct test *t, int num_ports)
                        .new_event_threshold = 1024,
                        .dequeue_depth = 32,
                        .enqueue_depth = 64,
-                       .disable_implicit_release = 0,
        };
        if (num_ports > MAX_PORTS)
                return -1;
@@ -1227,7 +1226,6 @@ port_reconfig_credits(struct test *t)
                                .new_event_threshold = 128,
                                .dequeue_depth = 32,
                                .enqueue_depth = 64,
-                               .disable_implicit_release = 0,
                };
                if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
                        printf("%d Error setting up port\n", __LINE__);
@@ -1317,7 +1315,6 @@ port_single_lb_reconfig(struct test *t)
                .new_event_threshold = 128,
                .dequeue_depth = 32,
                .enqueue_depth = 64,
-               .disable_implicit_release = 0,
        };
        if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
                printf("%d Error setting up port\n", __LINE__);
@@ -3079,7 +3076,8 @@ worker_loopback(struct test *t, uint8_t disable_implicit_release)
         * only be initialized once - and this needs to be set for multiple runs
         */
        conf.new_event_threshold = 512;
-       conf.disable_implicit_release = disable_implicit_release;
+       conf.event_port_cfg = disable_implicit_release ?
+               RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL : 0;
 
        if (rte_event_port_setup(evdev, 0, &conf) < 0) {
                printf("Error setting up RX port\n");
diff --git a/examples/eventdev_pipeline/pipeline_worker_generic.c b/examples/eventdev_pipeline/pipeline_worker_generic.c
index 42ff4eeb96e96c0819c5de5d52f2ad984a646f45..f70ab0cc9e380811c83aaa005eb042cf3d578de4 100644 (file)
@@ -129,6 +129,7 @@ setup_eventdev_generic(struct worker_data *worker_data)
        struct rte_event_dev_config config = {
                        .nb_event_queues = nb_queues,
                        .nb_event_ports = nb_ports,
+                       .nb_single_link_event_port_queues = 1,
                        .nb_events_limit  = 4096,
                        .nb_event_queue_flows = 1024,
                        .nb_event_port_dequeue_depth = 128,
@@ -143,7 +144,7 @@ setup_eventdev_generic(struct worker_data *worker_data)
                        .schedule_type = cdata.queue_type,
                        .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
                        .nb_atomic_flows = 1024,
-               .nb_atomic_order_sequences = 1024,
+                       .nb_atomic_order_sequences = 1024,
        };
        struct rte_event_queue_conf tx_q_conf = {
                        .priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
@@ -167,7 +168,8 @@ setup_eventdev_generic(struct worker_data *worker_data)
        disable_implicit_release = (dev_info.event_dev_cap &
                        RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
 
-       wkr_p_conf.disable_implicit_release = disable_implicit_release;
+       wkr_p_conf.event_port_cfg = disable_implicit_release ?
+               RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL : 0;
 
        if (dev_info.max_num_events < config.nb_events_limit)
                config.nb_events_limit = dev_info.max_num_events;
diff --git a/examples/eventdev_pipeline/pipeline_worker_tx.c b/examples/eventdev_pipeline/pipeline_worker_tx.c
index 55bb2f7624e96e185e839a4a474f607a793e3fa6..ca6cd200caad316a9b24c85c76fa61aa1d49ddc9 100644 (file)
@@ -436,6 +436,7 @@ setup_eventdev_worker_tx_enq(struct worker_data *worker_data)
        struct rte_event_dev_config config = {
                        .nb_event_queues = nb_queues,
                        .nb_event_ports = nb_ports,
+                       .nb_single_link_event_port_queues = 0,
                        .nb_events_limit  = 4096,
                        .nb_event_queue_flows = 1024,
                        .nb_event_port_dequeue_depth = 128,
diff --git a/examples/l2fwd-event/l2fwd_event_generic.c b/examples/l2fwd-event/l2fwd_event_generic.c
index 2dc95e5f7d1a7272059dd209fed6dd0d6df924fe..9a3167c05fd728293fa388ffc895a9679feb53e0 100644 (file)
@@ -126,8 +126,11 @@ l2fwd_event_port_setup_generic(struct l2fwd_resources *rsrc)
        if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
                event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
 
-       event_p_conf.disable_implicit_release =
-               evt_rsrc->disable_implicit_release;
+       event_p_conf.event_port_cfg = 0;
+       if (evt_rsrc->disable_implicit_release)
+               event_p_conf.event_port_cfg |=
+                       RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
+
        evt_rsrc->deq_depth = def_p_conf.dequeue_depth;
 
        for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
diff --git a/examples/l2fwd-event/l2fwd_event_internal_port.c b/examples/l2fwd-event/l2fwd_event_internal_port.c
index 63d57b46c2dab3409018f802e41931a39c8deaf1..203a14cf2461f19a074e81f8cce7064005152d8f 100644 (file)
@@ -123,8 +123,10 @@ l2fwd_event_port_setup_internal_port(struct l2fwd_resources *rsrc)
        if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
                event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
 
-       event_p_conf.disable_implicit_release =
-               evt_rsrc->disable_implicit_release;
+       event_p_conf.event_port_cfg = 0;
+       if (evt_rsrc->disable_implicit_release)
+               event_p_conf.event_port_cfg |=
+                       RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
 
        for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
                                                                event_p_id++) {
diff --git a/examples/l3fwd/l3fwd_event_generic.c b/examples/l3fwd/l3fwd_event_generic.c
index f8c98435d3f3baa947f5cb606b8000893a062874..c80573fc582f997f55c434ea1a125d4caa0a5a59 100644 (file)
@@ -115,8 +115,11 @@ l3fwd_event_port_setup_generic(void)
        if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
                event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
 
-       event_p_conf.disable_implicit_release =
-               evt_rsrc->disable_implicit_release;
+       event_p_conf.event_port_cfg = 0;
+       if (evt_rsrc->disable_implicit_release)
+               event_p_conf.event_port_cfg |=
+                       RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
+
        evt_rsrc->deq_depth = def_p_conf.dequeue_depth;
 
        for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
diff --git a/examples/l3fwd/l3fwd_event_internal_port.c b/examples/l3fwd/l3fwd_event_internal_port.c
index 03ac581d634aa36c1494476eea3ec913d01413b8..9916a7f556e6a580cab8b36b842eb034427df7cd 100644 (file)
@@ -113,8 +113,10 @@ l3fwd_event_port_setup_internal_port(void)
        if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
                event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
 
-       event_p_conf.disable_implicit_release =
-               evt_rsrc->disable_implicit_release;
+       event_p_conf.event_port_cfg = 0;
+       if (evt_rsrc->disable_implicit_release)
+               event_p_conf.event_port_cfg |=
+                       RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
 
        for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
                                                                event_p_id++) {
diff --git a/lib/librte_eventdev/rte_event_eth_tx_adapter.c b/lib/librte_eventdev/rte_event_eth_tx_adapter.c
index 86287b4e6636b6b8b8d11440f62e49af38c3e5a0..cc27bbca32ea95e1fdf2646d211d794d84913cab 100644 (file)
@@ -286,7 +286,7 @@ txa_service_conf_cb(uint8_t __rte_unused id, uint8_t dev_id,
                return ret;
        }
 
-       pc->disable_implicit_release = 0;
+       pc->event_port_cfg = 0;
        ret = rte_event_port_setup(dev_id, port_id, pc);
        if (ret) {
                RTE_EDEV_LOG_ERR("failed to setup event port %u\n",
diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c
index 557198f4a18159877f452d9ae2e73f10e793ed33..322453c532d7810cbb8d94107114171a377dbb33 100644 (file)
@@ -438,9 +438,29 @@ rte_event_dev_configure(uint8_t dev_id,
                                        dev_id);
                return -EINVAL;
        }
-       if (dev_conf->nb_event_queues > info.max_event_queues) {
-               RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d",
-               dev_id, dev_conf->nb_event_queues, info.max_event_queues);
+       if (dev_conf->nb_event_queues > info.max_event_queues +
+                       info.max_single_link_event_port_queue_pairs) {
+               RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
+                                dev_id, dev_conf->nb_event_queues,
+                                info.max_event_queues,
+                                info.max_single_link_event_port_queue_pairs);
+               return -EINVAL;
+       }
+       if (dev_conf->nb_event_queues -
+                       dev_conf->nb_single_link_event_port_queues >
+                       info.max_event_queues) {
+               RTE_EDEV_LOG_ERR("id%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
+                                dev_id, dev_conf->nb_event_queues,
+                                dev_conf->nb_single_link_event_port_queues,
+                                info.max_event_queues);
+               return -EINVAL;
+       }
+       if (dev_conf->nb_single_link_event_port_queues >
+                       dev_conf->nb_event_queues) {
+               RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
+                                dev_id,
+                                dev_conf->nb_single_link_event_port_queues,
+                                dev_conf->nb_event_queues);
                return -EINVAL;
        }
 
@@ -449,9 +469,31 @@ rte_event_dev_configure(uint8_t dev_id,
                RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
                return -EINVAL;
        }
-       if (dev_conf->nb_event_ports > info.max_event_ports) {
-               RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports= %d",
-               dev_id, dev_conf->nb_event_ports, info.max_event_ports);
+       if (dev_conf->nb_event_ports > info.max_event_ports +
+                       info.max_single_link_event_port_queue_pairs) {
+               RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
+                                dev_id, dev_conf->nb_event_ports,
+                                info.max_event_ports,
+                                info.max_single_link_event_port_queue_pairs);
+               return -EINVAL;
+       }
+       if (dev_conf->nb_event_ports -
+                       dev_conf->nb_single_link_event_port_queues
+                       > info.max_event_ports) {
+               RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
+                                dev_id, dev_conf->nb_event_ports,
+                                dev_conf->nb_single_link_event_port_queues,
+                                info.max_event_ports);
+               return -EINVAL;
+       }
+
+       if (dev_conf->nb_single_link_event_port_queues >
+           dev_conf->nb_event_ports) {
+               RTE_EDEV_LOG_ERR(
+                                "dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
+                                dev_id,
+                                dev_conf->nb_single_link_event_port_queues,
+                                dev_conf->nb_event_ports);
                return -EINVAL;
        }
 
@@ -738,7 +780,8 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
                return -EINVAL;
        }
 
-       if (port_conf && port_conf->disable_implicit_release &&
+       if (port_conf &&
+           (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
            !(dev->data->event_dev_cap &
              RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
                RTE_EDEV_LOG_ERR(
@@ -831,6 +874,14 @@ rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
        case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
                *attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
                break;
+       case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
+       {
+               uint32_t config;
+
+               config = dev->data->ports_cfg[port_id].event_port_cfg;
+               *attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
+               break;
+       }
        default:
                return -EINVAL;
        };
diff --git a/lib/librte_eventdev/rte_eventdev.h b/lib/librte_eventdev/rte_eventdev.h
index 7dc832353556f30aad89330f45a8164c024c2b86..ce1fc2ce0f1073e661ffca96a123036ed1e8ef25 100644 (file)
@@ -291,6 +291,12 @@ struct rte_event;
  * single queue to each port or map a single queue to many port.
  */
 
+#define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)
+/**< Event device preserves the flow ID from the enqueued
+ * event to the dequeued event if the flag is set. Otherwise,
+ * the content of this field is implementation dependent.
+ */
+
 /* Event device priority levels */
 #define RTE_EVENT_DEV_PRIORITY_HIGHEST   0
 /**< Highest priority expressed across eventdev subsystem
@@ -380,6 +386,10 @@ struct rte_event_dev_info {
         * event port by this device.
         * A device that does not support bulk enqueue will set this as 1.
         */
+       uint8_t max_event_port_links;
+       /**< Maximum number of queues that can be linked to a single event
+        * port by this device.
+        */
        int32_t max_num_events;
        /**< A *closed system* event dev has a limit on the number of events it
         * can manage at a time. An *open system* event dev does not have a
@@ -387,6 +397,12 @@ struct rte_event_dev_info {
         */
        uint32_t event_dev_cap;
        /**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
+       uint8_t max_single_link_event_port_queue_pairs;
+       /**< Maximum number of event ports and queues that are optimized for
+        * (and only capable of) single-link configurations supported by this
+        * device. These ports and queues are not accounted for in
+        * max_event_ports or max_event_queues.
+        */
 };
 
 /**
@@ -494,6 +510,14 @@ struct rte_event_dev_config {
         */
        uint32_t event_dev_cfg;
        /**< Event device config flags(RTE_EVENT_DEV_CFG_)*/
+       uint8_t nb_single_link_event_port_queues;
+       /**< Number of event ports and queues that will be singly-linked to
+        * each other. These are a subset of the overall event ports and
+        * queues; this value cannot exceed *nb_event_ports* or
+        * *nb_event_queues*. If the device has ports and queues that are
+        * optimized for single-link usage, this field is a hint for how many
+        * to allocate; otherwise, regular event ports and queues can be used.
+        */
 };
 
 /**
@@ -519,7 +543,6 @@ int
 rte_event_dev_configure(uint8_t dev_id,
                        const struct rte_event_dev_config *dev_conf);
 
-
 /* Event queue specific APIs */
 
 /* Event queue configuration bitmap flags */
@@ -671,6 +694,20 @@ rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
 
 /* Event port specific APIs */
 
+/* Event port configuration bitmap flags */
+#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL    (1ULL << 0)
+/**< Configure the port not to release outstanding events in
+ * rte_event_dev_dequeue_burst(). If set, all events received through
+ * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
+ * RTE_EVENT_OP_FORWARD. Must be unset if the device is not
+ * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
+ */
+#define RTE_EVENT_PORT_CFG_SINGLE_LINK         (1ULL << 1)
+/**< This event port links only to a single event queue.
+ *
+ *  @see rte_event_port_setup(), rte_event_port_link()
+ */
+
 /** Event port configuration structure */
 struct rte_event_port_conf {
        int32_t new_event_threshold;
@@ -698,13 +735,7 @@ struct rte_event_port_conf {
         * which previously supplied to rte_event_dev_configure().
         * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
         */
-       uint8_t disable_implicit_release;
-       /**< Configure the port not to release outstanding events in
-        * rte_event_dev_dequeue_burst(). If true, all events received through
-        * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
-        * RTE_EVENT_OP_FORWARD. Must be false when the device is not
-        * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
-        */
+       uint32_t event_port_cfg; /**< Port cfg flags(EVENT_PORT_CFG_) */
 };
 
 /**
@@ -769,6 +800,10 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
  * The new event threshold of the port
  */
 #define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
+/**
+ * The implicit release disable attribute of the port
+ */
+#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3
 
 /**
  * Get an attribute from a port.
diff --git a/lib/librte_eventdev/rte_eventdev_pmd_pci.h b/lib/librte_eventdev/rte_eventdev_pmd_pci.h
index 443cd38c2331db5efc470f15b1d894d75919f090..a3f9244d3a66b6b00e0da22bac30609cde322ef7 100644 (file)
@@ -88,7 +88,6 @@ rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
        return -ENXIO;
 }
 
-
 /**
  * @internal
  * Wrapper for use by pci drivers as a .remove function to detach a event
diff --git a/lib/librte_eventdev/rte_eventdev_trace.h b/lib/librte_eventdev/rte_eventdev_trace.h
index 4de6341ca73b99fcc7ee4be428cd026ee5d7cbf2..5ec43d80ee7141f285e1e97f23dd59ff5a0bc15c 100644 (file)
@@ -34,6 +34,7 @@ RTE_TRACE_POINT(
        rte_trace_point_emit_u32(dev_conf->nb_event_port_dequeue_depth);
        rte_trace_point_emit_u32(dev_conf->nb_event_port_enqueue_depth);
        rte_trace_point_emit_u32(dev_conf->event_dev_cfg);
+       rte_trace_point_emit_u8(dev_conf->nb_single_link_event_port_queues);
        rte_trace_point_emit_int(rc);
 )
 
@@ -59,7 +60,7 @@ RTE_TRACE_POINT(
        rte_trace_point_emit_i32(port_conf->new_event_threshold);
        rte_trace_point_emit_u16(port_conf->dequeue_depth);
        rte_trace_point_emit_u16(port_conf->enqueue_depth);
-       rte_trace_point_emit_u8(port_conf->disable_implicit_release);
+       rte_trace_point_emit_u32(port_conf->event_port_cfg);
        rte_trace_point_emit_int(rc);
 )
 
@@ -165,7 +166,7 @@ RTE_TRACE_POINT(
        rte_trace_point_emit_i32(port_conf->new_event_threshold);
        rte_trace_point_emit_u16(port_conf->dequeue_depth);
        rte_trace_point_emit_u16(port_conf->enqueue_depth);
-       rte_trace_point_emit_u8(port_conf->disable_implicit_release);
+       rte_trace_point_emit_u32(port_conf->event_port_cfg);
        rte_trace_point_emit_ptr(conf_cb);
        rte_trace_point_emit_int(rc);
 )
@@ -257,7 +258,7 @@ RTE_TRACE_POINT(
        rte_trace_point_emit_i32(port_conf->new_event_threshold);
        rte_trace_point_emit_u16(port_conf->dequeue_depth);
        rte_trace_point_emit_u16(port_conf->enqueue_depth);
-       rte_trace_point_emit_u8(port_conf->disable_implicit_release);
+       rte_trace_point_emit_u32(port_conf->event_port_cfg);
 )
 
 RTE_TRACE_POINT(
diff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map
index 3d9d0ca05486818d27c3e4a1cfdd808d92654ff0..2846d04483d9878e40f81de266f8397e9adb7a2e 100644 (file)
@@ -100,7 +100,6 @@ EXPERIMENTAL {
        # added in 20.05
        __rte_eventdev_trace_configure;
        __rte_eventdev_trace_queue_setup;
-       __rte_eventdev_trace_port_setup;
        __rte_eventdev_trace_port_link;
        __rte_eventdev_trace_port_unlink;
        __rte_eventdev_trace_start;
@@ -134,4 +133,7 @@ EXPERIMENTAL {
        __rte_eventdev_trace_crypto_adapter_queue_pair_del;
        __rte_eventdev_trace_crypto_adapter_start;
        __rte_eventdev_trace_crypto_adapter_stop;
+
+       # changed in 20.11
+       __rte_eventdev_trace_port_setup;
 };