examples: use event port quiescing
author Pavan Nikhilesh <pbhagavatula@marvell.com>
Fri, 13 May 2022 17:58:40 +0000 (23:28 +0530)
committer Jerin Jacob <jerinj@marvell.com>
Tue, 17 May 2022 14:43:30 +0000 (16:43 +0200)
Quiesce event ports used by the worker cores on exit to free up
any outstanding resources.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
examples/eventdev_pipeline/pipeline_common.h
examples/ipsec-secgw/ipsec_worker.c
examples/l2fwd-event/l2fwd_common.c
examples/l3fwd/l3fwd_event.c
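
Each hunk below applies the same pattern: once a worker's dequeue loop has terminated and any pending events have been released, the event port is drained with rte_event_port_quiesce(), passing a flush callback that frees whatever events are still buffered in the port. The following is a minimal sketch of that pattern, not part of the patch; the names port_flush_cb() and worker_exit_cleanup() are illustrative only.

#include <rte_eventdev.h>
#include <rte_mbuf.h>

/* Flush callback: free the mbuf carried by each event still held by the port. */
static void
port_flush_cb(uint8_t dev_id __rte_unused, struct rte_event ev,
	      void *args __rte_unused)
{
	rte_pktmbuf_free(ev.mbuf);
}

/* Called by a worker core once its main loop has exited. */
static void
worker_exit_cleanup(uint8_t dev_id, uint8_t port_id)
{
	rte_event_port_quiesce(dev_id, port_id, port_flush_cb, NULL);
}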

index 9899b257b0f80a64b70f468dd2bf15850e0d87f0..28b6ab85ff9c81bd135df40077203875e994322e 100644 (file)
@@ -140,6 +140,13 @@ schedule_devices(unsigned int lcore_id)
        }
 }
 
+static void
+event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
+                void *args __rte_unused)
+{
+       rte_mempool_put(args, ev.event_ptr);
+}
+
 static inline void
 worker_cleanup(uint8_t dev_id, uint8_t port_id, struct rte_event events[],
               uint16_t nb_enq, uint16_t nb_deq)
@@ -160,6 +167,8 @@ worker_cleanup(uint8_t dev_id, uint8_t port_id, struct rte_event events[],
                        events[i].op = RTE_EVENT_OP_RELEASE;
                rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
        }
+
+       rte_event_port_quiesce(dev_id, port_id, event_port_flush, NULL);
 }
 
 void set_worker_generic_setup_data(struct setup_data *caps, bool burst);
index 3df5acf384b2330dbe9ae341d8ed3569c2474345..7f259e4cf3cf3262db6947f3550b34f716e32865 100644 (file)
@@ -737,6 +737,13 @@ ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
  * selected.
  */
 
+static void
+ipsec_event_port_flush(uint8_t eventdev_id __rte_unused, struct rte_event ev,
+                      void *args __rte_unused)
+{
+       rte_pktmbuf_free(ev.mbuf);
+}
+
 /* Workers registered */
 #define IPSEC_EVENTMODE_WORKERS                2
 
@@ -861,6 +868,9 @@ ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
                rte_event_enqueue_burst(links[0].eventdev_id,
                                        links[0].event_port_id, &ev, 1);
        }
+
+       rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
+                              ipsec_event_port_flush, NULL);
 }
 
 /*
@@ -974,6 +984,9 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
                rte_event_enqueue_burst(links[0].eventdev_id,
                                        links[0].event_port_id, &ev, 1);
        }
+
+       rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
+                              ipsec_event_port_flush, NULL);
 }
 
 static uint8_t
index 15bfe790a0a331e9d225c71b86c900a1abeb7d43..41a0d3f22f6829af88b126b9406ec22ed493cab2 100644 (file)
@@ -128,6 +128,16 @@ l2fwd_event_vector_array_free(struct rte_event events[], uint16_t num)
        }
 }
 
+static void
+l2fwd_event_port_flush(uint8_t event_d_id __rte_unused, struct rte_event ev,
+                      void *args __rte_unused)
+{
+       if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
+               l2fwd_event_vector_array_free(&ev, 1);
+       else
+               rte_pktmbuf_free(ev.mbuf);
+}
+
 void
 l2fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t port_id,
                           struct rte_event events[], uint16_t nb_enq,
@@ -147,4 +157,7 @@ l2fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t port_id,
                        events[i].op = RTE_EVENT_OP_RELEASE;
                rte_event_enqueue_burst(event_d_id, port_id, events, nb_deq);
        }
+
+       rte_event_port_quiesce(event_d_id, port_id, l2fwd_event_port_flush,
+                              NULL);
 }
index a14a21b41433f74cf9ec9b9b9ca1788111a44eee..0b58475c85b875a6fa5a3e6a4a0aac2a5b346238 100644 (file)
@@ -301,6 +301,16 @@ l3fwd_event_vector_array_free(struct rte_event events[], uint16_t num)
        }
 }
 
+static void
+l3fwd_event_port_flush(uint8_t event_d_id __rte_unused, struct rte_event ev,
+                      void *args __rte_unused)
+{
+       if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
+               l3fwd_event_vector_array_free(&ev, 1);
+       else
+               rte_pktmbuf_free(ev.mbuf);
+}
+
 void
 l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id,
                           struct rte_event events[], uint16_t nb_enq,
@@ -320,4 +330,7 @@ l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id,
                        events[i].op = RTE_EVENT_OP_RELEASE;
                rte_event_enqueue_burst(event_d_id, event_p_id, events, nb_deq);
        }
+
+       rte_event_port_quiesce(event_d_id, event_p_id, l3fwd_event_port_flush,
+                              NULL);
 }