port->ctl_in_ring = ctl_in_ring;
rte_atomic16_init(&port->load);
+ rte_atomic32_init(&port->immigration_load);
port->load_update_interval =
(DSW_LOAD_UPDATE_INTERVAL * rte_get_timer_hz()) / US_PER_S;
/* Estimate of current port load. */
rte_atomic16_t load __rte_cache_aligned;
+ /* Estimate of flows currently migrating to this port. */
+ rte_atomic32_t immigration_load __rte_cache_aligned;
} __rte_cache_aligned;
struct dsw_queue {
(DSW_OLD_LOAD_WEIGHT+1);
rte_atomic16_set(&port->load, new_load);
+
+ /* The load of the recently immigrated flows should hopefully
+ * be reflected in the load estimate by now.
+ */
+ rte_atomic32_set(&port->immigration_load, 0);
}
static void
uint16_t i;
for (i = 0; i < dsw->num_ports; i++) {
- int16_t load = rte_atomic16_read(&dsw->ports[i].load);
+ int16_t measured_load = rte_atomic16_read(&dsw->ports[i].load);
+ int32_t immigration_load =
+ rte_atomic32_read(&dsw->ports[i].immigration_load);
+ int32_t load = measured_load + immigration_load;
+
+ load = RTE_MIN(load, DSW_MAX_LOAD);
+
if (load < load_limit)
below_limit = true;
port_loads[i] = load;
target_qfs[*targets_len] = *candidate_qf;
(*targets_len)++;
+ rte_atomic32_add(&dsw->ports[candidate_port_id].immigration_load,
+ candidate_flow_load);
+
return true;
}
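
For context only (not part of the patch): a minimal sketch of how the two counters are meant to combine into the effective load used when ranking candidate ports. The field and constant names (load, immigration_load, DSW_MAX_LOAD) are taken from the hunks above and assume the dsw_evdev.h definitions and <rte_atomic.h>; the helper function itself is hypothetical.

/* Hypothetical helper, sketched from the hunks above. */
static int32_t
dsw_port_effective_load(struct dsw_port *port)
{
	/* Load measured over the last load update interval. */
	int16_t measured_load = rte_atomic16_read(&port->load);
	/* Load of flows currently migrating to this port, which is
	 * not yet visible in the measured load.
	 */
	int32_t immigration_load =
		rte_atomic32_read(&port->immigration_load);
	int32_t load = measured_load + immigration_load;

	/* The immigration load is only an estimate, so cap the sum
	 * at the maximum load.
	 */
	return RTE_MIN(load, DSW_MAX_LOAD);
}
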
struct dsw_queue_flow *target_qfs = source_port->emigration_target_qfs;
uint8_t *target_port_ids = source_port->emigration_target_port_ids;
uint8_t *targets_len = &source_port->emigration_targets_len;
- uint8_t i;
+ uint16_t i;
for (i = 0; i < DSW_MAX_FLOWS_PER_MIGRATION; i++) {
bool found;