sfc_flow_cleanup_cb_t *cleanup;
sfc_flow_insert_cb_t *insert;
sfc_flow_remove_cb_t *remove;
+ sfc_flow_query_cb_t *query;
};
static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
.cleanup = NULL,
.insert = sfc_flow_filter_insert,
.remove = sfc_flow_filter_remove,
+ .query = NULL,
};
+/*
+ * Backend ops for flows backed by the MAE (match-action engine).
+ * Unlike the filter-based backend, MAE provides a query callback,
+ * so rte_flow_query() can be serviced for these flows.
+ */
static const struct sfc_flow_ops_by_spec sfc_flow_ops_mae = {
.cleanup = sfc_mae_flow_cleanup,
.insert = sfc_mae_flow_insert,
.remove = sfc_mae_flow_remove,
+ .query = sfc_mae_flow_query,
};
static const struct sfc_flow_ops_by_spec *
return -ret;
}
+/*
+ * Generic rte_flow .query entry point.
+ *
+ * Dispatches the query for one flow action to the backend (ops) bound to
+ * the flow's spec type. The adapter lock is held for the whole operation
+ * so the flow and its backend state cannot change underneath the query.
+ *
+ * Returns 0 on success; on failure, a value produced by
+ * rte_flow_error_set() with @error filled in.
+ */
+static int
+sfc_flow_query(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *action,
+ void *data,
+ struct rte_flow_error *error)
+{
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+ const struct sfc_flow_ops_by_spec *ops;
+ int ret;
+
+ sfc_adapter_lock(sa);
+
+ /* Not every backend implements query (e.g. the filter-based one). */
+ ops = sfc_flow_get_ops_by_spec(flow);
+ if (ops == NULL || ops->query == NULL) {
+ ret = rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "No backend to handle this flow");
+ goto fail_no_backend;
+ }
+
+ /* Hardware state (e.g. counters) is only meaningful when started. */
+ if (sa->state != SFC_ADAPTER_STARTED) {
+ ret = rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Can't query the flow: the adapter is not started");
+ goto fail_not_started;
+ }
+
+ ret = ops->query(dev, flow, action, data, error);
+ if (ret != 0)
+ goto fail_query;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+
+fail_query:
+fail_not_started:
+fail_no_backend:
+ sfc_adapter_unlock(sa);
+ return ret;
+}
+
static int
sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
struct rte_flow_error *error)
.create = sfc_flow_create,
.destroy = sfc_flow_destroy,
.flush = sfc_flow_flush,
- .query = NULL,
+ .query = sfc_flow_query,
.isolate = sfc_flow_isolate,
};
typedef int (sfc_flow_remove_cb_t)(struct sfc_adapter *sa,
struct rte_flow *flow);
+/*
+ * Backend callback to query one action of a flow rule.
+ * Signature mirrors the rte_flow .query operation; the caller
+ * (sfc_flow_query) holds the adapter lock when invoking it.
+ */
+typedef int (sfc_flow_query_cb_t)(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *action,
+ void *data,
+ struct rte_flow_error *error);
+
#ifdef __cplusplus
}
#endif
return 0;
}
+
+/*
+ * Handle a COUNT action query for a MAE-backed flow rule.
+ *
+ * If @action->conf carries a counter ID, look up that specific counter in
+ * the rule's action set; otherwise fall back to the rule's first counter.
+ * On a match, read the counter into @data (struct rte_flow_query_count).
+ *
+ * Errors (all via rte_flow_error_set):
+ *   EINVAL - the rule has no count actions, or the counter read failed;
+ *   ENOENT - a counter ID was given but no counter with that ID exists.
+ */
+static int
+sfc_mae_query_counter(struct sfc_adapter *sa,
+ struct sfc_flow_spec_mae *spec,
+ const struct rte_flow_action *action,
+ struct rte_flow_query_count *data,
+ struct rte_flow_error *error)
+{
+ struct sfc_mae_action_set *action_set = spec->action_set;
+ const struct rte_flow_action_count *conf = action->conf;
+ unsigned int i;
+ int rc;
+
+ if (action_set->n_counters == 0) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "Queried flow rule does not have count actions");
+ }
+
+ for (i = 0; i < action_set->n_counters; i++) {
+ /*
+ * Get the first available counter of the flow rule if
+ * counter ID is not specified.
+ */
+ if (conf != NULL && action_set->counters[i].rte_id != conf->id)
+ continue;
+
+ rc = sfc_mae_counter_get(&sa->mae.counter_registry.counters,
+ &action_set->counters[i], data);
+ if (rc != 0) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "Queried flow rule counter action is invalid");
+ }
+
+ return 0;
+ }
+
+ return rte_flow_error_set(error, ENOENT,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "No such flow rule action count ID");
+}
+
+/*
+ * MAE backend implementation of the flow query callback
+ * (sfc_flow_query_cb_t). Dispatches on the queried action type;
+ * only RTE_FLOW_ACTION_TYPE_COUNT is supported, everything else
+ * yields ENOTSUP.
+ */
+int
+sfc_mae_flow_query(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *action,
+ void *data,
+ struct rte_flow_error *error)
+{
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+ struct sfc_flow_spec *spec = &flow->spec;
+ struct sfc_flow_spec_mae *spec_mae = &spec->mae;
+
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ return sfc_mae_query_counter(sa, spec_mae, action,
+ data, error);
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Query for action of this type is not supported");
+ }
+}
sfc_flow_verify_cb_t sfc_mae_flow_verify;
sfc_flow_insert_cb_t sfc_mae_flow_insert;
sfc_flow_remove_cb_t sfc_mae_flow_remove;
+sfc_flow_query_cb_t sfc_mae_flow_query;
#ifdef __cplusplus
}
return rc;
}
+
+/*
+ * Read a MAE counter into @data for an rte_flow COUNT query.
+ *
+ * The packet and byte totals are loaded as a single 128-bit relaxed
+ * atomic (see comment below), then reported relative to the per-counter
+ * reset baseline. When @data->reset is set by the caller, the baseline
+ * is advanced to the just-read totals so subsequent queries start
+ * from zero.
+ *
+ * NOTE(review): the baseline (p->reset) update is a plain, non-atomic
+ * store pair — presumably concurrent queries of the same counter are
+ * serialized by the adapter lock taken in sfc_flow_query; confirm.
+ *
+ * Always returns 0.
+ */
+int
+sfc_mae_counter_get(struct sfc_mae_counters *counters,
+ const struct sfc_mae_counter_id *counter,
+ struct rte_flow_query_count *data)
+{
+ struct sfc_mae_counter *p;
+ union sfc_pkts_bytes value;
+
+ SFC_ASSERT(counter->mae_id.id < counters->n_mae_counters);
+ p = &counters->mae_counters[counter->mae_id.id];
+
+ /*
+ * Ordering is relaxed since it is the only operation on counter value.
+ * And it does not depend on different stores/loads in other threads.
+ * Paired with relaxed ordering in counter increment.
+ */
+ value.pkts_bytes.int128 = __atomic_load_n(&p->value.pkts_bytes.int128,
+ __ATOMIC_RELAXED);
+
+ data->hits_set = 1;
+ data->bytes_set = 1;
+ data->hits = value.pkts - p->reset.pkts;
+ data->bytes = value.bytes - p->reset.bytes;
+
+ if (data->reset != 0) {
+ p->reset.pkts = value.pkts;
+ p->reset.bytes = value.bytes;
+ }
+
+ return 0;
+}
struct sfc_mae_counter_id *counterp);
int sfc_mae_counter_disable(struct sfc_adapter *sa,
struct sfc_mae_counter_id *counter);
+/* Read @counter from @counters into @data (rte_flow COUNT query). */
+int sfc_mae_counter_get(struct sfc_mae_counters *counters,
+ const struct sfc_mae_counter_id *counter,
+ struct rte_flow_query_count *data);
int sfc_mae_counter_start(struct sfc_adapter *sa);
void sfc_mae_counter_stop(struct sfc_adapter *sa);