+/* Drain the port's buffer of paused events. Events belonging to the
+ * (no longer paused) queue flow 'qf' are re-buffered toward the
+ * flow's current destination port; all other events are put back
+ * into the paused-events buffer.
+ */
+static void
+dsw_port_flush_paused_events(struct dsw_evdev *dsw,
+ struct dsw_port *source_port,
+ const struct dsw_queue_flow *qf)
+{
+ uint16_t paused_events_len = source_port->paused_events_len;
+ struct rte_event paused_events[paused_events_len];
+ uint8_t dest_port_id;
+ uint16_t i;
+
+ if (paused_events_len == 0)
+ return;
+
+ if (dsw_port_is_flow_paused(source_port, qf->queue_id, qf->flow_hash))
+ return;
+
+ rte_memcpy(paused_events, source_port->paused_events,
+ paused_events_len * sizeof(struct rte_event));
+
+ source_port->paused_events_len = 0;
+
+ dest_port_id = dsw_schedule(dsw, qf->queue_id, qf->flow_hash);
+
+ for (i = 0; i < paused_events_len; i++) {
+ struct rte_event *event = &paused_events[i];
+ uint16_t flow_hash;
+
+ flow_hash = dsw_flow_id_hash(event->flow_id);
+
+ if (event->queue_id == qf->queue_id &&
+ flow_hash == qf->flow_hash)
+ dsw_port_buffer_non_paused(dsw, source_port,
+ dest_port_id, event);
+ else
+ dsw_port_buffer_paused(source_port, event);
+ }
+}
+
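+/* Account for 'finished' completed flow migrations: add their latency
+ * (measured from emigration_start) and bump the emigration counter.
+ */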
+static void
+dsw_port_emigration_stats(struct dsw_port *port, uint8_t finished)
+{
+ uint64_t flow_migration_latency;
+
+ flow_migration_latency =
+ (rte_get_timer_cycles() - port->emigration_start);
+ port->emigration_latency += (flow_migration_latency * finished);
+ port->emigrations += finished;
+}
+
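+/* Finish emigration for all target flows on queues with the specified
+ * schedule type. For atomic queues, the flow is removed from the
+ * paused-flow list and any events buffered while it was paused are
+ * flushed. Flows on queues of other schedule types remain as
+ * emigration targets. When no targets remain, the port returns to the
+ * idle migration state.
+ */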
+static void
+dsw_port_end_emigration(struct dsw_evdev *dsw, struct dsw_port *port,
+ uint8_t schedule_type)
+{
+ uint8_t i;
+ struct dsw_queue_flow left_qfs[DSW_MAX_FLOWS_PER_MIGRATION];
+ uint8_t left_port_ids[DSW_MAX_FLOWS_PER_MIGRATION];
+ uint8_t left_qfs_len = 0;
+ uint8_t finished;
+
+ for (i = 0; i < port->emigration_targets_len; i++) {
+ struct dsw_queue_flow *qf = &port->emigration_target_qfs[i];
+ uint8_t queue_id = qf->queue_id;
+ uint8_t queue_schedule_type =
+ dsw->queues[queue_id].schedule_type;
+ uint16_t flow_hash = qf->flow_hash;
+
+ if (queue_schedule_type != schedule_type) {
+ left_port_ids[left_qfs_len] =
+ port->emigration_target_port_ids[i];
+ left_qfs[left_qfs_len] = *qf;
+ left_qfs_len++;
+ continue;
+ }
+
+ DSW_LOG_DP_PORT(DEBUG, port->id, "Migration completed for "
+ "queue_id %d flow_hash %d.\n", queue_id,
+ flow_hash);
+
+ if (queue_schedule_type == RTE_SCHED_TYPE_ATOMIC) {
+ dsw_port_remove_paused_flow(port, qf);
+ dsw_port_flush_paused_events(dsw, port, qf);
+ }
+ }
+
+ finished = port->emigration_targets_len - left_qfs_len;
+
+ if (finished > 0)
+ dsw_port_emigration_stats(port, finished);
+
+ for (i = 0; i < left_qfs_len; i++) {
+ port->emigration_target_port_ids[i] = left_port_ids[i];
+ port->emigration_target_qfs[i] = left_qfs[i];
+ }
+ port->emigration_targets_len = left_qfs_len;
+
+ if (port->emigration_targets_len == 0) {
+ port->migration_state = DSW_MIGRATION_STATE_IDLE;
+ port->seen_events_len = 0;
+ }
+}
+
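+/* Flows on parallel queues may be moved immediately: repoint the
+ * queue's flow_to_port_map entry to the destination port and end the
+ * emigration for those flows. No pause/unpause protocol is needed,
+ * since parallel queues provide no atomicity or ordering guarantees.
+ */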
+static void
+dsw_port_move_parallel_flows(struct dsw_evdev *dsw,
+ struct dsw_port *source_port)
+{
+ uint8_t i;
+
+ for (i = 0; i < source_port->emigration_targets_len; i++) {
+ struct dsw_queue_flow *qf =
+ &source_port->emigration_target_qfs[i];
+ uint8_t queue_id = qf->queue_id;
+
+ if (dsw->queues[queue_id].schedule_type ==
+ RTE_SCHED_TYPE_PARALLEL) {
+ uint8_t dest_port_id =
+ source_port->emigration_target_port_ids[i];
+ uint16_t flow_hash = qf->flow_hash;
+
+ /* Single byte-sized stores are always atomic. */
+ dsw->queues[queue_id].flow_to_port_map[flow_hash] =
+ dest_port_id;
+ }
+ }
+
+ rte_smp_wmb();
+
+ dsw_port_end_emigration(dsw, source_port, RTE_SCHED_TYPE_PARALLEL);
+}
+
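+/* Decide whether this port should attempt to emigrate flows to a
+ * less-loaded port, and if so, initiate the migration procedure.
+ */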
+static void
+dsw_port_consider_emigration(struct dsw_evdev *dsw,
+ struct dsw_port *source_port,
+ uint64_t now)
+{
+ bool any_port_below_limit;
+ struct dsw_queue_flow *seen_events = source_port->seen_events;
+ uint16_t seen_events_len = source_port->seen_events_len;
+ struct dsw_queue_flow_burst bursts[DSW_MAX_EVENTS_RECORDED];
+ uint16_t num_bursts;
+ int16_t source_port_load;
+ int16_t port_loads[dsw->num_ports];
+
+ if (now < source_port->next_emigration)
+ return;
+
+ if (dsw->num_ports == 1)
+ return;
+
+ DSW_LOG_DP_PORT(DEBUG, source_port->id, "Considering emigration.\n");
+
+ /* Randomize the interval to avoid having all threads consider
+ * emigration at the same point in time, which might lead to
+ * them all choosing the same target port.
+ */
+ source_port->next_emigration = now +
+ source_port->migration_interval / 2 +
+ rte_rand() % source_port->migration_interval;
+
+ if (source_port->migration_state != DSW_MIGRATION_STATE_IDLE) {
+ DSW_LOG_DP_PORT(DEBUG, source_port->id,
+ "Emigration already in progress.\n");
+ return;
+ }
+
+ /* For simplicity, avoid migration in the unlikely case there
+ * are still events left to consume in the in_buffer (from the
+ * last emigration).
+ */
+ if (source_port->in_buffer_len > 0) {
+ DSW_LOG_DP_PORT(DEBUG, source_port->id, "There are still "
+ "events in the input buffer.\n");
+ return;
+ }
+
+ source_port_load = rte_atomic16_read(&source_port->load);
+ if (source_port_load < DSW_MIN_SOURCE_LOAD_FOR_MIGRATION) {
+ DSW_LOG_DP_PORT(DEBUG, source_port->id,
+ "Load %d is below threshold level %d.\n",
+ DSW_LOAD_TO_PERCENT(source_port_load),
+ DSW_LOAD_TO_PERCENT(DSW_MIN_SOURCE_LOAD_FOR_MIGRATION));
+ return;
+ }
+
+ /* Avoid starting any expensive operations (sorting etc.) in
+ * case all ports are above the load limit.
+ */
+ any_port_below_limit =
+ dsw_retrieve_port_loads(dsw, port_loads,
+ DSW_MAX_TARGET_LOAD_FOR_MIGRATION);
+ if (!any_port_below_limit) {
+ DSW_LOG_DP_PORT(DEBUG, source_port->id,
+ "Candidate target ports are all too highly "
+ "loaded.\n");
+ return;
+ }
+
+ num_bursts = dsw_sort_qfs_to_bursts(seen_events, seen_events_len,
+ bursts);
+
+ /* On systems with uniform cores (i.e., non-big.LITTLE), there's
+ * no point in moving the only (known) flow, since the target
+ * port would just end up with the same load.
+ */
+ if (num_bursts < 2) {
+ DSW_LOG_DP_PORT(DEBUG, source_port->id, "Only a single flow "
+ "queue_id %d flow_hash %d has been seen.\n",
+ bursts[0].queue_flow.queue_id,
+ bursts[0].queue_flow.flow_hash);
+ return;
+ }
+
+ dsw_select_emigration_targets(dsw, source_port, bursts, num_bursts,
+ port_loads);
+
+ if (source_port->emigration_targets_len == 0)
+ return;
+
+ source_port->migration_state = DSW_MIGRATION_STATE_PAUSING;
+ source_port->emigration_start = rte_get_timer_cycles();
+
+ /* No need to go through the whole pause procedure for
+ * parallel queues, since atomic/ordered semantics need not be
+ * maintained.
+ */
+ dsw_port_move_parallel_flows(dsw, source_port);
+
+ /* All flows were on PARALLEL queues. */
+ if (source_port->migration_state == DSW_MIGRATION_STATE_IDLE)
+ return;
+
+ /* There might be 'loopback' events already scheduled in the
+ * output buffers.
+ */
+ dsw_port_flush_out_buffers(dsw, source_port);
+
+ dsw_port_add_paused_flows(source_port,
+ source_port->emigration_target_qfs,
+ source_port->emigration_targets_len);
+
+ dsw_port_ctl_broadcast(dsw, source_port, DSW_CTL_PAUS_REQ,
+ source_port->emigration_target_qfs,
+ source_port->emigration_targets_len);
+ source_port->cfm_cnt = 0;
+}
+
+static void
+dsw_port_flush_paused_events(struct dsw_evdev *dsw,
+ struct dsw_port *source_port,
+ const struct dsw_queue_flow *qf);
+
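+/* Handle a DSW_CTL_UNPAUS_REQ message: remove the flows from the
+ * paused-flow list, confirm back to the originating port, count any
+ * flows that now map to this port as immigrations, and flush events
+ * buffered locally while the flows were paused.
+ */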
+static void
+dsw_port_handle_unpause_flows(struct dsw_evdev *dsw, struct dsw_port *port,
+ uint8_t originating_port_id,
+ struct dsw_queue_flow *paused_qfs,
+ uint8_t qfs_len)
+{
+ uint16_t i;
+ struct dsw_ctl_msg cfm = {
+ .type = DSW_CTL_CFM,
+ .originating_port_id = port->id
+ };
+
+ dsw_port_remove_paused_flows(port, paused_qfs, qfs_len);
+
+ rte_smp_rmb();
+
+ dsw_port_ctl_enqueue(&dsw->ports[originating_port_id], &cfm);
+
+ for (i = 0; i < qfs_len; i++) {
+ struct dsw_queue_flow *qf = &paused_qfs[i];
+
+ if (dsw_schedule(dsw, qf->queue_id, qf->flow_hash) == port->id)
+ port->immigrations++;
+
+ dsw_port_flush_paused_events(dsw, port, qf);
+ }
+}
+
+#define FORWARD_BURST_SIZE (32)
+
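+/* Drain the source port's in_ring. Events belonging to the emigrated
+ * flow are forwarded to the destination port's in_ring; all other
+ * events are stored in the source port's in_buffer for later
+ * processing.
+ */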
+static void
+dsw_port_forward_emigrated_flow(struct dsw_port *source_port,
+ struct rte_event_ring *dest_ring,
+ uint8_t queue_id,
+ uint16_t flow_hash)
+{
+ uint16_t events_left;
+
+ /* The control ring message must be seen before the ring count
+ * is read from the port's in_ring.
+ */
+ rte_smp_rmb();
+
+ events_left = rte_event_ring_count(source_port->in_ring);
+
+ while (events_left > 0) {
+ uint16_t in_burst_size =
+ RTE_MIN(FORWARD_BURST_SIZE, events_left);
+ struct rte_event in_burst[in_burst_size];
+ uint16_t in_len;
+ uint16_t i;
+
+ in_len = rte_event_ring_dequeue_burst(source_port->in_ring,
+ in_burst,
+ in_burst_size, NULL);
+ /* No need to bother bursting the forwarded events (into
+ * the destination port's in_ring), since migrations
+ * happen rarely, and the majority of the dequeued
+ * events will likely *not* be forwarded anyway.
+ */
+ for (i = 0; i < in_len; i++) {
+ struct rte_event *e = &in_burst[i];
+ if (e->queue_id == queue_id &&
+ dsw_flow_id_hash(e->flow_id) == flow_hash) {
+ while (rte_event_ring_enqueue_burst(dest_ring,
+ e, 1,
+ NULL) != 1)
+ rte_pause();
+ } else {
+ uint16_t last_idx = source_port->in_buffer_len;
+ source_port->in_buffer[last_idx] = *e;
+ source_port->in_buffer_len++;
+ }
+ }
+
+ events_left -= in_len;
+ }
+}
+
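+/* Perform the forwarding phase of the emigration: update the
+ * flow-to-port map, forward any in-flight events for the emigrated
+ * flows to their new destination ports, and broadcast an unpause
+ * request to the other ports.
+ */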
+static void
+dsw_port_move_emigrating_flows(struct dsw_evdev *dsw,
+ struct dsw_port *source_port)
+{
+ uint8_t i;
+
+ dsw_port_flush_out_buffers(dsw, source_port);
+
+ rte_smp_wmb();
+
+ for (i = 0; i < source_port->emigration_targets_len; i++) {
+ struct dsw_queue_flow *qf =
+ &source_port->emigration_target_qfs[i];
+ uint8_t dest_port_id =
+ source_port->emigration_target_port_ids[i];
+ struct dsw_port *dest_port = &dsw->ports[dest_port_id];
+
+ dsw->queues[qf->queue_id].flow_to_port_map[qf->flow_hash] =
+ dest_port_id;
+
+ dsw_port_forward_emigrated_flow(source_port, dest_port->in_ring,
+ qf->queue_id, qf->flow_hash);
+ }
+
+ /* Flow table update and migration destination port's enqueues
+ * must be seen before the control message.
+ */
+ rte_smp_wmb();
+
+ dsw_port_ctl_broadcast(dsw, source_port, DSW_CTL_UNPAUS_REQ,
+ source_port->emigration_target_qfs,
+ source_port->emigration_targets_len);
+ source_port->cfm_cnt = 0;
+ source_port->migration_state = DSW_MIGRATION_STATE_UNPAUSING;
+}
+
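+/* Handle a DSW_CTL_CFM message. Once all other ports have confirmed,
+ * either advance from the pausing to the forwarding migration state,
+ * or (after unpausing) end the emigration.
+ */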
+static void
+dsw_port_handle_confirm(struct dsw_evdev *dsw, struct dsw_port *port)
+{
+ port->cfm_cnt++;
+
+ if (port->cfm_cnt == (dsw->num_ports-1)) {
+ switch (port->migration_state) {
+ case DSW_MIGRATION_STATE_PAUSING:
+ DSW_LOG_DP_PORT(DEBUG, port->id, "Going into forwarding "
+ "migration state.\n");
+ port->migration_state = DSW_MIGRATION_STATE_FORWARDING;
+ break;
+ case DSW_MIGRATION_STATE_UNPAUSING:
+ dsw_port_end_emigration(dsw, port,
+ RTE_SCHED_TYPE_ATOMIC);
+ break;
+ default:
+ RTE_ASSERT(0);
+ break;
+ }
+ }
+}
+
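+/* Dequeue and dispatch at most one message from the port's control
+ * ring.
+ */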
+static void
+dsw_port_ctl_process(struct dsw_evdev *dsw, struct dsw_port *port)
+{
+ struct dsw_ctl_msg msg;
+
+ if (dsw_port_ctl_dequeue(port, &msg) == 0) {
+ switch (msg.type) {
+ case DSW_CTL_PAUS_REQ:
+ dsw_port_handle_pause_flows(dsw, port,
+ msg.originating_port_id,
+ msg.qfs, msg.qfs_len);
+ break;
+ case DSW_CTL_UNPAUS_REQ:
+ dsw_port_handle_unpause_flows(dsw, port,
+ msg.originating_port_id,
+ msg.qfs, msg.qfs_len);
+ break;
+ case DSW_CTL_CFM:
+ dsw_port_handle_confirm(dsw, port);
+ break;
+ }
+ }
+}
+
+static void
+dsw_port_note_op(struct dsw_port *port, uint16_t num_events)
+{
+ /* To poll the control ring reasonably often on busy ports,
+ * each dequeued/enqueued event is considered an 'op' too.
+ */
+ port->ops_since_bg_task += (num_events+1);
+}
+
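+/* Perform port 'background' work: complete any pending flow
+ * migration, poll the control ring, and periodically flush output
+ * buffers, update the load estimate and consider emigration.
+ */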
+static void
+dsw_port_bg_process(struct dsw_evdev *dsw, struct dsw_port *port)
+{
+ if (unlikely(port->migration_state == DSW_MIGRATION_STATE_FORWARDING &&
+ port->pending_releases == 0))
+ dsw_port_move_emigrating_flows(dsw, port);
+
+ /* Polling the control ring is relatively inexpensive, and
+ * polling it often helps bring down migration latency, so
+ * do this on every iteration.
+ */
+ dsw_port_ctl_process(dsw, port);
+
+ /* To avoid considering migration and flushing output buffers
+ * on every dequeue/enqueue call, the scheduler only performs
+ * such 'background' tasks every nth
+ * (i.e. DSW_MAX_PORT_OPS_PER_BG_TASK) operation.
+ */
+ if (unlikely(port->ops_since_bg_task >= DSW_MAX_PORT_OPS_PER_BG_TASK)) {
+ uint64_t now;
+
+ now = rte_get_timer_cycles();
+
+ port->last_bg = now;
+
+ /* Flush the output buffers to avoid having events linger
+ * there for too long.
+ */
+ dsw_port_flush_out_buffers(dsw, port);
+
+ dsw_port_consider_load_update(port, now);
+
+ dsw_port_consider_emigration(dsw, port, now);
+
+ port->ops_since_bg_task = 0;
+ }
+}
+