const struct rte_flow_action actions[],
efx_mae_actions_t *spec,
struct sfc_mae_encap_header *encap_header,
+ uint64_t *ft_group_hit_counter,
+ struct sfc_flow_tunnel *ft,
unsigned int n_counters,
struct sfc_mae_action_set **action_setp)
{
return ENOMEM;
}
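+
+	/*
+	 * Pre-initialise all counter slots: no MAE counter allocated yet
+	 * and no user-visible (rte_flow) counter ID. Implicit counters
+	 * added for tunnel hit accounting keep rte_id_valid unset; the
+	 * ft / ft_group_hit_counter fields link each counter to its flow
+	 * tunnel, if any.
+	 */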
+ for (i = 0; i < n_counters; ++i) {
+ action_set->counters[i].rte_id_valid = B_FALSE;
+ action_set->counters[i].mae_id.id =
+ EFX_MAE_RSRC_ID_INVALID;
+
+ action_set->counters[i].ft_group_hit_counter =
+ ft_group_hit_counter;
+ action_set->counters[i].ft = ft;
+ }
+
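+	/* Map the user's COUNT action IDs onto the counter slots. */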
for (action = actions, i = 0;
action->type != RTE_FLOW_ACTION_TYPE_END && i < n_counters;
++action) {
		if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
			continue;

		conf = action->conf;
- action_set->counters[i].mae_id.id =
- EFX_MAE_RSRC_ID_INVALID;
+ action_set->counters[i].rte_id_valid = B_TRUE;
action_set->counters[i].rte_id = conf->id;
i++;
}
{
struct sfc_mae_encap_header *encap_header = NULL;
struct sfc_mae_actions_bundle bundle = {0};
+ struct sfc_flow_tunnel *counter_ft = NULL;
+ uint64_t *ft_group_hit_counter = NULL;
const struct rte_flow_action *action;
struct sfc_mae *mae = &sa->mae;
+ unsigned int n_count = 0;
efx_mae_actions_t *spec;
- unsigned int n_count;
int rc;
rte_errno = 0;
if (rc != 0)
goto fail_action_set_spec_init;
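+
+	/*
+	 * Count the COUNT actions up front so that the action set can
+	 * size its counter array accordingly; whether this number is
+	 * actually supported is validated further below.
+	 */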
+ for (action = actions;
+ action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
+ if (action->type == RTE_FLOW_ACTION_TYPE_COUNT)
+ ++n_count;
+ }
+
if (spec_mae->ft_rule_type == SFC_FT_RULE_GROUP) {
/* JUMP rules don't decapsulate packets. GROUP rules do. */
rc = efx_mae_action_set_populate_decap(spec);
if (rc != 0)
goto fail_enforce_ft_decap;
+
+ if (n_count == 0 && sfc_mae_counter_stream_enabled(sa)) {
+ /*
+ * The user opted not to use action COUNT in this rule,
+ * but the counter should be enabled implicitly because
+ * packets hitting this rule contribute to the tunnel's
+ * total number of hits. See sfc_mae_counter_get().
+ */
+ rc = efx_mae_action_set_populate_count(spec);
+ if (rc != 0)
+ goto fail_enforce_ft_count;
+
+ n_count = 1;
+ }
}
/* Cleanup after previous encap. header bounce buffer usage. */
if (rc != 0)
goto fail_process_encap_header;
- n_count = efx_mae_action_set_get_nb_count(spec);
if (n_count > 1) {
rc = ENOTSUP;
sfc_err(sa, "too many count actions requested: %u", n_count);
rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
if (rc != 0)
goto fail_workaround_jump_delivery;
+
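+		/* The JUMP rule's counter reports hits for the whole tunnel. */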
+ counter_ft = spec_mae->ft;
break;
case SFC_FT_RULE_GROUP:
/*
		 * The user may have provided an action MARK of their own
		 * above, so don't check the return value here.
*/
(void)efx_mae_action_set_populate_mark(spec, 0);
+
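+		/* Hits on GROUP rules contribute to the tunnel's hit total. */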
+ ft_group_hit_counter = &spec_mae->ft->group_hit_counter;
break;
default:
SFC_ASSERT(B_FALSE);
return 0;
}
- rc = sfc_mae_action_set_add(sa, actions, spec, encap_header, n_count,
+ rc = sfc_mae_action_set_add(sa, actions, spec, encap_header,
+ ft_group_hit_counter, counter_ft, n_count,
&spec_mae->action_set);
if (rc != 0)
goto fail_action_set_add;
fail_rule_parse_action:
efx_mae_action_set_spec_fini(sa->nic, spec);
+fail_enforce_ft_count:
fail_enforce_ft_decap:
fail_action_set_spec_init:
if (rc > 0 && rte_errno == 0) {
goto fail_outer_rule_enable;
}
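+
+	/*
+	 * A JUMP rule's hit count is derived from the tunnel's GROUP hit
+	 * counter (see sfc_mae_counter_get()), so take a baseline snapshot
+	 * here: the rule must report hits accumulated since it was enabled.
+	 */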
+ if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
+ spec_mae->ft->reset_jump_hit_counter =
+ spec_mae->ft->group_hit_counter;
+ }
+
if (action_set == NULL) {
sfc_dbg(sa, "enabled flow=%p (no AR)", flow);
return 0;
for (i = 0; i < action_set->n_counters; i++) {
/*
* Get the first available counter of the flow rule if
- * counter ID is not specified.
+ * counter ID is not specified, provided that this
+ * counter is not an automatic (implicit) one.
*/
-		if (conf != NULL && action_set->counters[i].rte_id != conf->id)
+		if (action_set->counters[i].rte_id_valid == B_FALSE ||
+		    (conf != NULL &&
+		     action_set->counters[i].rte_id != conf->id))
continue;
return rte_flow_error_set(error, ENOENT,
RTE_FLOW_ERROR_TYPE_ACTION, action,
- "No such flow rule action count ID");
+ "no such flow rule action or such count ID");
}
int
&p->value.pkts_bytes.int128, __ATOMIC_RELAXED);
p->generation_count = generation_count;
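+	/* Link the counter to its tunnel's GROUP hit total, if any. */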
+ p->ft_group_hit_counter = counterp->ft_group_hit_counter;
+
/*
* The flag is set at the very end of add operation and reset
* at the beginning of delete operation. Release ordering is
__atomic_store(&p->value.pkts_bytes,
&cnt_val.pkts_bytes, __ATOMIC_RELAXED);
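+
+	/*
+	 * Non-atomic read-modify-write of the group hit counter is assumed
+	 * safe here: this update path is the only writer (counter updates
+	 * are processed by a single service core), and readers only need a
+	 * consistent snapshot, hence the relaxed atomic store.
+	 */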
+ if (p->ft_group_hit_counter != NULL) {
+ uint64_t ft_group_hit_counter;
+
+ ft_group_hit_counter = *p->ft_group_hit_counter + pkts;
+ __atomic_store_n(p->ft_group_hit_counter, ft_group_hit_counter,
+ __ATOMIC_RELAXED);
+ }
+
sfc_info(sa, "update MAE counter #%u: pkts+%" PRIu64 "=%" PRIu64
", bytes+%" PRIu64 "=%" PRIu64, mae_counter_id,
pkts, cnt_val.pkts, bytes, cnt_val.bytes);
const struct sfc_mae_counter_id *counter,
struct rte_flow_query_count *data)
{
+ struct sfc_flow_tunnel *ft = counter->ft;
+ uint64_t non_reset_jump_hit_counter;
struct sfc_mae_counter *p;
union sfc_pkts_bytes value;
__ATOMIC_RELAXED);
data->hits_set = 1;
- data->bytes_set = 1;
data->hits = value.pkts - p->reset.pkts;
- data->bytes = value.bytes - p->reset.bytes;
+
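+	/*
+	 * For a counter tied to a flow tunnel (JUMP rule), report the sum
+	 * of the rule's own hits and the tunnel's GROUP hits, minus the
+	 * baseline captured on enable or on the previous explicit reset.
+	 * Byte counts are not maintained for such counters.
+	 */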
+ if (ft != NULL) {
+ data->hits += ft->group_hit_counter;
+ non_reset_jump_hit_counter = data->hits;
+ data->hits -= ft->reset_jump_hit_counter;
+ } else {
+ data->bytes_set = 1;
+ data->bytes = value.bytes - p->reset.bytes;
+ }
if (data->reset != 0) {
- p->reset.pkts = value.pkts;
- p->reset.bytes = value.bytes;
+ if (ft != NULL) {
+ ft->reset_jump_hit_counter = non_reset_jump_hit_counter;
+ } else {
+ p->reset.pkts = value.pkts;
+ p->reset.bytes = value.bytes;
+ }
}
return 0;
}
+
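+/*
+ * MAE counters are only usable when the dedicated counter Rx queue is
+ * initialised and a service lcore is available to poll counter streams.
+ */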
+bool
+sfc_mae_counter_stream_enabled(struct sfc_adapter *sa)
+{
+ if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0 ||
+ sfc_get_service_lcore(SOCKET_ID_ANY) == RTE_MAX_LCORE)
+ return B_FALSE;
+ else
+ return B_TRUE;
+}