This commit adds a counter to each port, which counts the
number of unlinks that have been performed. When the scheduler
thread starts its scheduling routine, it "acks" all unlinks that
have been requested, and the application is guaranteed that no
more events will be scheduled to the port from the unlinked queue.
Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com>
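
As a usage illustration (not part of this patch): an application can pair
rte_event_port_unlink() with the rte_event_port_unlinks_in_progress()
library call that this PMD op backs, to block until an unlink has been
acked. A minimal sketch, with dev_id/port_id/queue_id as placeholders:

#include <rte_eventdev.h>
#include <rte_pause.h>

/* Unlink one queue from a port, then wait until the scheduler core
 * has acked the unlink. On return, no more events from queue_id will
 * be scheduled to port_id.
 */
static int
unlink_queue_sync(uint8_t dev_id, uint8_t port_id, uint8_t queue_id)
{
    if (rte_event_port_unlink(dev_id, port_id, &queue_id, 1) != 1)
        return -1;

    while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
        rte_pause(); /* spin until the scheduler core acks */

    return 0;
}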
+
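+ /* count the newly requested unlinks; the barrier below makes the
+ * updated count visible to the scheduler core before unlink returns
+ */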
+ p->unlinks_in_progress += unlinked;
+ rte_smp_mb();
+
+static int
+sw_port_unlinks_in_progress(struct rte_eventdev *dev, void *port)
+{
+ RTE_SET_USED(dev);
+ struct sw_port *p = port;
+ return p->unlinks_in_progress;
+}
+
static int
sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
const struct rte_event_port_conf *conf)
.port_release = sw_port_release,
.port_link = sw_port_link,
.port_unlink = sw_port_unlink,
+ .port_unlinks_in_progress = sw_port_unlinks_in_progress,
.eth_rx_adapter_caps_get = sw_eth_rx_adapter_caps_get,
/* A numeric ID for the port */
uint8_t id;
+ /* A count of unlinks performed on this port which the scheduler
+ * core has not yet acked - hence there may still be events in the
+ * buffers going to the port. Once the scheduler core reads the
+ * unlinks-in-progress count, no more events will be pushed to the
+ * port from the unlinked queues, so the scheduler can simply assign
+ * zero to ack them.
+ */
+ uint8_t unlinks_in_progress;
+
int16_t is_directed; /** Takes from a single directed QID */
/**
* For loadbalanced we can optimise pulling packets from
/* Pull from rx_ring for ports */
do {
in_pkts = 0;
- for (i = 0; i < sw->port_count; i++)
+ for (i = 0; i < sw->port_count; i++) {
+ /* ack the unlinks in progress as done */
+ if (sw->ports[i].unlinks_in_progress)
+ sw->ports[i].unlinks_in_progress = 0;
+
if (sw->ports[i].is_directed)
in_pkts += sw_schedule_pull_port_dir(sw, i);
else if (sw->ports[i].num_ordered_qids > 0)
in_pkts += sw_schedule_pull_port_lb(sw, i);
else
in_pkts += sw_schedule_pull_port_no_reorder(sw, i);
+ }
/* QID scan for re-ordered */
in_pkts += sw_schedule_reorder(sw, 0,
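
For reference, the ack handshake can be modelled in isolation. Below is
a standalone sketch (hypothetical code, using C11 atomics and pthreads
in place of the uint8_t counter with rte_smp_mb()): one thread plays
the application core requesting an unlink, the other plays the
scheduler core acking outstanding unlinks at the top of each iteration,
as the scheduling loop above does.

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint unlinks_in_progress;
static atomic_bool stop;

/* Scheduler core: ack any outstanding unlinks at the top of each
 * iteration, mirroring the scheduling loop above.
 */
static void *
sched_core(void *arg)
{
    (void)arg;
    while (!atomic_load(&stop)) {
        if (atomic_load(&unlinks_in_progress) != 0)
            atomic_store(&unlinks_in_progress, 0);
        /* ...pull from the rx rings and schedule events... */
    }
    return NULL;
}

int
main(void)
{
    pthread_t sched;
    pthread_create(&sched, NULL, sched_core, NULL);

    /* Application core: record one unlink, then wait for the ack */
    atomic_fetch_add(&unlinks_in_progress, 1);
    while (atomic_load(&unlinks_in_progress) != 0)
        sched_yield();
    printf("unlink acked\n");

    atomic_store(&stop, true);
    pthread_join(sched, NULL);
    return 0;
}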