eventdev: negate maintenance capability flag
authorMattias Rönnblom <mattias.ronnblom@ericsson.com>
Wed, 10 Nov 2021 11:32:10 +0000 (12:32 +0100)
committerJerin Jacob <jerinj@marvell.com>
Mon, 15 Nov 2021 07:22:38 +0000 (08:22 +0100)
Replace RTE_EVENT_DEV_CAP_REQUIRES_MAINT, which signaled the need
for the application to call rte_event_maintain(), with
RTE_EVENT_DEV_CAP_MAINTENANCE_FREE, which does the opposite (i.e.,
signifies that the event device does not require maintenance).

This approach is more in line with how other eventdev hardware and/or
software limitations are handled in the Eventdev API.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
12 files changed:
doc/guides/eventdevs/dsw.rst
drivers/event/cnxk/cnxk_eventdev.c
drivers/event/dlb2/dlb2.c
drivers/event/dpaa/dpaa_eventdev.c
drivers/event/dpaa2/dpaa2_eventdev.c
drivers/event/dsw/dsw_evdev.c
drivers/event/octeontx/ssovf_evdev.c
drivers/event/octeontx2/otx2_evdev.c
drivers/event/opdl/opdl_evdev.c
drivers/event/skeleton/skeleton_eventdev.c
drivers/event/sw/sw_evdev.c
lib/eventdev/rte_eventdev.h

index 18f7e95..5c6b51f 100644 (file)
@@ -44,8 +44,8 @@ Port Maintenance
 ~~~~~~~~~~~~~~~~
 
 The distributed software eventdev uses an internal signaling scheme
-between the ports to achieve load balancing. Therefore, it sets the
-``RTE_EVENT_DEV_CAP_REQUIRES_MAINT`` flag.
+between the ports to achieve load balancing. Therefore, it does not
+set the ``RTE_EVENT_DEV_CAP_MAINTENANCE_FREE`` flag.
 
 During periods when the application thread using a particular port is
 neither attempting to enqueue nor to dequeue events, it must
index 50d5c35..f7a5026 100644 (file)
@@ -119,7 +119,8 @@ cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
                                  RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                                  RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
                                  RTE_EVENT_DEV_CAP_NONSEQ_MODE |
-                                 RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
+                                 RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
+                                 RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
 }
 
 int
index 0dbe857..16e9764 100644 (file)
@@ -66,7 +66,8 @@ static struct rte_event_dev_info evdev_dlb2_default_info = {
                          RTE_EVENT_DEV_CAP_BURST_MODE |
                          RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
                          RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
-                         RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES),
+                         RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
+                         RTE_EVENT_DEV_CAP_MAINTENANCE_FREE),
 };
 
 struct process_local_port_data
index eec4bc6..ff6cc0b 100644 (file)
@@ -356,7 +356,8 @@ dpaa_event_dev_info_get(struct rte_eventdev *dev,
                RTE_EVENT_DEV_CAP_BURST_MODE |
                RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
                RTE_EVENT_DEV_CAP_NONSEQ_MODE |
-               RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
+               RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
+               RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
 }
 
 static int
index 710156a..4d94c31 100644 (file)
@@ -408,7 +408,8 @@ dpaa2_eventdev_info_get(struct rte_eventdev *dev,
                RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
                RTE_EVENT_DEV_CAP_NONSEQ_MODE |
                RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
-               RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
+               RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
+               RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
 
 }
 
index 5ff8fcc..ffabf0d 100644 (file)
@@ -222,8 +222,7 @@ dsw_info_get(struct rte_eventdev *dev __rte_unused,
                RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED|
                RTE_EVENT_DEV_CAP_NONSEQ_MODE|
                RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT|
-               RTE_EVENT_DEV_CAP_CARRY_FLOW_ID|
-               RTE_EVENT_DEV_CAP_REQUIRES_MAINT
+               RTE_EVENT_DEV_CAP_CARRY_FLOW_ID
        };
 }
 
index 366b6d3..9e14e35 100644 (file)
@@ -155,7 +155,8 @@ ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
                                        RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                                        RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
                                        RTE_EVENT_DEV_CAP_NONSEQ_MODE |
-                                       RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
+                                       RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
+                                       RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
 
 }
 
index f26bed3..ccf28b6 100644 (file)
@@ -505,7 +505,8 @@ otx2_sso_info_get(struct rte_eventdev *event_dev,
                                        RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                                        RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
                                        RTE_EVENT_DEV_CAP_NONSEQ_MODE |
-                                       RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
+                                       RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
+                                       RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
 }
 
 static void
index 5007e9a..15c1024 100644 (file)
@@ -375,7 +375,8 @@ opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
                .max_event_port_enqueue_depth = MAX_OPDL_CONS_Q_DEPTH,
                .max_num_events = OPDL_INFLIGHT_EVENTS_TOTAL,
                .event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE |
-                                RTE_EVENT_DEV_CAP_CARRY_FLOW_ID,
+                                RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
+                                RTE_EVENT_DEV_CAP_MAINTENANCE_FREE,
        };
 
        *info = evdev_opdl_info;
index af0efb3..bf3b01e 100644 (file)
@@ -102,7 +102,8 @@ skeleton_eventdev_info_get(struct rte_eventdev *dev,
        dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
                                        RTE_EVENT_DEV_CAP_BURST_MODE |
                                        RTE_EVENT_DEV_CAP_EVENT_QOS |
-                                       RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
+                                       RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
+                                       RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
 }
 
 static int
index fa72ceb..6ae613e 100644 (file)
@@ -609,7 +609,8 @@ sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
                                RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                                RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
                                RTE_EVENT_DEV_CAP_NONSEQ_MODE |
-                               RTE_EVENT_DEV_CAP_CARRY_FLOW_ID),
+                               RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
+                               RTE_EVENT_DEV_CAP_MAINTENANCE_FREE),
        };
 
        *info = evdev_sw_info;
index e026486..eef47d8 100644 (file)
@@ -299,13 +299,14 @@ struct rte_event;
  * the content of this field is implementation dependent.
  */
 
-#define RTE_EVENT_DEV_CAP_REQUIRES_MAINT (1ULL << 10)
-/**< Event device requires calls to rte_event_maintain() during
- * periods when neither rte_event_dequeue_burst() nor
- * rte_event_enqueue_burst() are called on a port. This will allow the
- * event device to perform internal processing, such as flushing
- * buffered events, return credits to a global pool, or process
- * signaling related to load balancing.
+#define RTE_EVENT_DEV_CAP_MAINTENANCE_FREE (1ULL << 10)
+/**< Event device *does not* require calls to rte_event_maintain().
+ * An event device that does not set this flag requires calls to
+ * rte_event_maintain() during periods when neither
+ * rte_event_dequeue_burst() nor rte_event_enqueue_burst() are called
+ * on a port. This will allow the event device to perform internal
+ * processing, such as flushing buffered events, returning credits to
+ * a global pool, or processing signaling related to load balancing.
  */
 
 /* Event device priority levels */
@@ -2082,8 +2083,8 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
 /**
  * Maintain an event device.
  *
- * This function is only relevant for event devices which have the
- * @ref RTE_EVENT_DEV_CAP_REQUIRES_MAINT flag set. Such devices
+ * This function is only relevant for event devices which do not have
+ * the @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE flag set. Such devices
  * require an application thread using a particular port to
  * periodically call rte_event_maintain() on that port during periods
  * which it is neither attempting to enqueue events to nor dequeue
@@ -2098,9 +2099,9 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
  * or dequeue functions are being called, at the cost of a slight
  * increase in overhead.
  *
- * rte_event_maintain() may be called on event devices which haven't
- * set @ref RTE_EVENT_DEV_CAP_REQUIRES_MAINT flag, in which case it is
- * no-operation.
+ * rte_event_maintain() may be called on event devices which have set
+ * @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE, in which case it is a
+ * no-operation.
  *
  * @param dev_id
  *   The identifier of the device.
@@ -2112,7 +2113,7 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
  *  - 0 on success.
  *  - -EINVAL if *dev_id*,  *port_id*, or *op* is invalid.
  *
- * @see RTE_EVENT_DEV_CAP_REQUIRES_MAINT
+ * @see RTE_EVENT_DEV_CAP_MAINTENANCE_FREE
  */
 __rte_experimental
 static inline int