+ TAILQ_FOREACH(flow, list, next) {
+ unsigned int i;
+
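+ /* A drop flow is applied on the dedicated drop queue QP. */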
+ if (flow->drop) {
+ flow->drxq.ibv_flow =
+ ibv_create_flow(priv->flow_drop_queue->qp,
+ flow->drxq.ibv_attr);
+ if (!flow->drxq.ibv_flow) {
+ DEBUG("Flow %p cannot be applied",
+ (void *)flow);
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+ DEBUG("Flow %p applied", (void *)flow);
+ /* Next flow. */
+ continue;
+ }
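+ /* Get or create a hash Rx queue for each hash type configured on the flow. */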
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (!flow->frxq[i].ibv_attr)
+ continue;
+ flow->frxq[i].hrxq =
+ mlx5_priv_hrxq_get(priv, flow->rss_conf.rss_key,
+ flow->rss_conf.rss_key_len,
+ hash_rxq_init[i].hash_fields,
+ (*flow->queues),
+ flow->queues_n);
+ if (flow->frxq[i].hrxq)
+ goto flow_create;
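+ /* No existing hash Rx queue matches, create a new one. */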
+ flow->frxq[i].hrxq =
+ mlx5_priv_hrxq_new(priv, flow->rss_conf.rss_key,
+ flow->rss_conf.rss_key_len,
+ hash_rxq_init[i].hash_fields,
+ (*flow->queues),
+ flow->queues_n);
+ if (!flow->frxq[i].hrxq) {
+ DEBUG("Flow %p cannot be applied",
+ (void *)flow);
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+flow_create:
+ flow->frxq[i].ibv_flow =
+ ibv_create_flow(flow->frxq[i].hrxq->qp,
+ flow->frxq[i].ibv_attr);
+ if (!flow->frxq[i].ibv_flow) {
+ DEBUG("Flow %p cannot be applied",
+ (void *)flow);
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+ DEBUG("Flow %p applied", (void *)flow);
+ }
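+ /* The flow uses the MARK action: flag every Rx queue it targets. */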
+ if (!flow->mark)
+ continue;
+ for (i = 0; i != flow->queues_n; ++i)
+ (*priv->rxqs)[(*flow->queues)[i]]->mark = 1;
+ }
+ return 0;
+}
+
+/**
+ * Verify the flow list is empty.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * The number of flows not released.
+ */
+int
+priv_flow_verify(struct priv *priv)
+{
+ struct rte_flow *flow;
+ int ret = 0;
+
+ TAILQ_FOREACH(flow, &priv->flows, next) {
+ DEBUG("%p: flow %p still referenced", (void *)priv,
+ (void *)flow);
+ ++ret;
+ }
+ return ret;
+}
+
+/**
+ * Enable a control flow configured from the control plane.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param eth_spec
+ * An Ethernet flow spec to apply.
+ * @param eth_mask
+ * An Ethernet flow mask to apply.
+ * @param vlan_spec
+ * A VLAN flow spec to apply.
+ * @param vlan_mask
+ * A VLAN flow mask to apply.
+ *
+ * @return
+ * 0 on success, an errno value otherwise.
+ */
+int
+mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
+ struct rte_flow_item_eth *eth_spec,
+ struct rte_flow_item_eth *eth_mask,
+ struct rte_flow_item_vlan *vlan_spec,
+ struct rte_flow_item_vlan *vlan_mask)
+{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_flow_attr attr = {
+ .ingress = 1,
+ .priority = MLX5_CTRL_FLOW_PRIORITY,
+ };
+ struct rte_flow_item items[] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = eth_spec,
+ .last = NULL,
+ .mask = eth_mask,
+ },
+ {
+ .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
+ RTE_FLOW_ITEM_TYPE_END,
+ .spec = vlan_spec,
+ .last = NULL,
+ .mask = vlan_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ struct rte_flow_action actions[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_RSS,
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
+ struct rte_flow *flow;
+ struct rte_flow_error error;
+ unsigned int i;
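+ /*
+ * struct rte_flow_action_rss ends in a flexible queue[] array;
+ * overlay it with a local mirror that reserves room for up to
+ * RTE_MAX_QUEUES_PER_PORT entries on the stack.
+ */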
+ union {
+ struct rte_flow_action_rss rss;
+ struct {
+ const struct rte_eth_rss_conf *rss_conf;
+ uint16_t num;
+ uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
+ } local;
+ } action_rss;
+
+ if (!priv->reta_idx_n)
+ return EINVAL;
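+ /* Spread the flow over all queues from the RSS redirection table. */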
+ for (i = 0; i != priv->reta_idx_n; ++i)
+ action_rss.local.queue[i] = (*priv->reta_idx)[i];
+ action_rss.local.rss_conf = &priv->rss_conf;
+ action_rss.local.num = priv->reta_idx_n;
+ actions[0].conf = (const void *)&action_rss.rss;
+ flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items, actions,
+ &error);
+ if (!flow)
+ return rte_errno;
+ return 0;
+}
+
+/**
+ * Enable a control flow configured from the control plane.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param eth_spec
+ * An Ethernet flow spec to apply.
+ * @param eth_mask
+ * An Ethernet flow mask to apply.
+ *
+ * @return
+ * 0 on success, an errno value otherwise.
+ */
+int
+mlx5_ctrl_flow(struct rte_eth_dev *dev,
+ struct rte_flow_item_eth *eth_spec,
+ struct rte_flow_item_eth *eth_mask)
+{
+ return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
+}
+
+/**
+ * Destroy a flow.
+ *
+ * @see rte_flow_destroy()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ (void)error;
+ priv_lock(priv);
+ priv_flow_destroy(priv, &priv->flows, flow);
+ priv_unlock(priv);
+ return 0;
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @see rte_flow_flush()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ (void)error;
+ priv_lock(priv);
+ priv_flow_flush(priv, &priv->flows);
+ priv_unlock(priv);
+ return 0;
+}
+
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+/**
+ * Query flow counter.
+ *
+ * @param cs
+ * The counter set.
+ * @param counter_stats
+ * Counter stats kept since the last reset, used to compute deltas.
+ * @param query_count
+ * The rte_flow query count structure to fill.
+ * @param error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, an errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_query_count(struct ibv_counter_set *cs,
+ struct mlx5_flow_counter_stats *counter_stats,
+ struct rte_flow_query_count *query_count,
+ struct rte_flow_error *error)
+{
+ uint64_t counters[2];
+ struct ibv_query_counter_set_attr query_cs_attr = {
+ .cs = cs,
+ .query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
+ };
+ struct ibv_counter_set_data query_out = {
+ .out = counters,
+ .outlen = 2 * sizeof(uint64_t),
+ };
+ int res = ibv_query_counter_set(&query_cs_attr, &query_out);
+
+ if (res) {
+ rte_flow_error_set(error, -res,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot read counter");
+ return -res;
+ }
+ query_count->hits_set = 1;
+ query_count->bytes_set = 1;
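+ /* Counter set values are cumulative; report them relative to the last reset. */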
+ query_count->hits = counters[0] - counter_stats->hits;
+ query_count->bytes = counters[1] - counter_stats->bytes;
+ if (query_count->reset) {
+ counter_stats->hits = counters[0];
+ counter_stats->bytes = counters[1];
+ }
+ return 0;
+}
+
+/**
+ * Query a flow.
+ *
+ * @see rte_flow_query()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_query(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ enum rte_flow_action_type action __rte_unused,
+ void *data,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ int res = EINVAL;
+
+ priv_lock(priv);
+ if (flow->cs) {
+ res = priv_flow_query_count(flow->cs,
+ &flow->counter_stats,
+ (struct rte_flow_query_count *)data,
+ error);
+ } else {
+ rte_flow_error_set(error, res,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "no counter found for flow");
+ }
+ priv_unlock(priv);
+ return -res;
+}
+#endif
+
+/**
+ * Enable or disable isolated mode.
+ *
+ * @see rte_flow_isolate()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_isolate(struct rte_eth_dev *dev,
+ int enable,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ priv_lock(priv);
+ if (dev->data->dev_started) {
+ rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "port must be stopped first");
+ priv_unlock(priv);
+ return -rte_errno;
+ }
+ priv->isolated = !!enable;
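+ /* Isolated mode relies on a dedicated set of device operations. */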
+ if (enable)
+ priv->dev->dev_ops = &mlx5_dev_ops_isolate;
+ else
+ priv->dev->dev_ops = &mlx5_dev_ops;
+ priv_unlock(priv);
+ return 0;
+}
+
+/**
+ * Convert a flow director filter to a generic flow.
+ *
+ * @param priv
+ * Private structure.
+ * @param fdir_filter
+ * Flow director filter to add.
+ * @param attributes
+ * Generic flow parameters structure.
+ *
+ * @return
+ * 0 on success, a positive errno value otherwise.
+ */
+static int
+priv_fdir_filter_convert(struct priv *priv,
+ const struct rte_eth_fdir_filter *fdir_filter,
+ struct mlx5_fdir *attributes)
+{
+ const struct rte_eth_fdir_input *input = &fdir_filter->input;
+
+ /* Validate queue number. */
+ if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
+ ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
+ return EINVAL;
+ }
+ /* Validate the behavior. */
+ if (fdir_filter->action.behavior != RTE_ETH_FDIR_ACCEPT) {
+ ERROR("invalid behavior %d", fdir_filter->action.behavior);
+ return ENOTSUP;
+ }
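+ /* Build the generic flow skeleton: one ETH item and a QUEUE action. */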
+ attributes->attr.ingress = 1;
+ attributes->items[0] = (struct rte_flow_item) {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = &attributes->l2,
+ };
+ attributes->actions[0] = (struct rte_flow_action){
+ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
+ .conf = &attributes->queue,
+ };
+ attributes->queue.index = fdir_filter->action.rx_queue;
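+ /* Fill L3/L4 headers and items according to the flow type. */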
+ switch (fdir_filter->input.flow_type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ attributes->l3.ipv4.hdr = (struct ipv4_hdr){
+ .src_addr = input->flow.udp4_flow.ip.src_ip,
+ .dst_addr = input->flow.udp4_flow.ip.dst_ip,
+ .time_to_live = input->flow.udp4_flow.ip.ttl,
+ .type_of_service = input->flow.udp4_flow.ip.tos,
+ .next_proto_id = input->flow.udp4_flow.ip.proto,
+ };
+ attributes->l4.udp.hdr = (struct udp_hdr){
+ .src_port = input->flow.udp4_flow.src_port,
+ .dst_port = input->flow.udp4_flow.dst_port,
+ };
+ attributes->items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .spec = &attributes->l3,
+ };
+ attributes->items[2] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .spec = &attributes->l4,
+ };
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ attributes->l3.ipv4.hdr = (struct ipv4_hdr){
+ .src_addr = input->flow.tcp4_flow.ip.src_ip,
+ .dst_addr = input->flow.tcp4_flow.ip.dst_ip,
+ .time_to_live = input->flow.tcp4_flow.ip.ttl,
+ .type_of_service = input->flow.tcp4_flow.ip.tos,
+ .next_proto_id = input->flow.tcp4_flow.ip.proto,
+ };
+ attributes->l4.tcp.hdr = (struct tcp_hdr){
+ .src_port = input->flow.tcp4_flow.src_port,
+ .dst_port = input->flow.tcp4_flow.dst_port,
+ };
+ attributes->items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .spec = &attributes->l3,
+ };
+ attributes->items[2] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .spec = &attributes->l4,
+ };
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+ attributes->l3.ipv4.hdr = (struct ipv4_hdr){
+ .src_addr = input->flow.ip4_flow.src_ip,
+ .dst_addr = input->flow.ip4_flow.dst_ip,
+ .time_to_live = input->flow.ip4_flow.ttl,
+ .type_of_service = input->flow.ip4_flow.tos,
+ .next_proto_id = input->flow.ip4_flow.proto,
+ };
+ attributes->items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .spec = &attributes->l3,
+ };
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ attributes->l3.ipv6.hdr = (struct ipv6_hdr){
+ .hop_limits = input->flow.udp6_flow.ip.hop_limits,
+ .proto = input->flow.udp6_flow.ip.proto,
+ };
+ memcpy(attributes->l3.ipv6.hdr.src_addr,
+ input->flow.udp6_flow.ip.src_ip,
+ RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
+ memcpy(attributes->l3.ipv6.hdr.dst_addr,
+ input->flow.udp6_flow.ip.dst_ip,
+ RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
+ attributes->l4.udp.hdr = (struct udp_hdr){
+ .src_port = input->flow.udp6_flow.src_port,
+ .dst_port = input->flow.udp6_flow.dst_port,
+ };
+ attributes->items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .spec = &attributes->l3,
+ };
+ attributes->items[2] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .spec = &attributes->l4,
+ };
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ attributes->l3.ipv6.hdr = (struct ipv6_hdr){
+ .hop_limits = input->flow.tcp6_flow.ip.hop_limits,
+ .proto = input->flow.tcp6_flow.ip.proto,
+ };
+ memcpy(attributes->l3.ipv6.hdr.src_addr,
+ input->flow.tcp6_flow.ip.src_ip,
+ RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
+ memcpy(attributes->l3.ipv6.hdr.dst_addr,
+ input->flow.tcp6_flow.ip.dst_ip,
+ RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
+ attributes->l4.tcp.hdr = (struct tcp_hdr){
+ .src_port = input->flow.tcp6_flow.src_port,
+ .dst_port = input->flow.tcp6_flow.dst_port,
+ };
+ attributes->items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .spec = &attributes->l3,
+ };
+ attributes->items[2] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .spec = &attributes->l4,
+ };
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ attributes->l3.ipv6.hdr = (struct ipv6_hdr){
+ .hop_limits = input->flow.ipv6_flow.hop_limits,
+ .proto = input->flow.ipv6_flow.proto,
+ };
+ memcpy(attributes->l3.ipv6.hdr.src_addr,
+ input->flow.ipv6_flow.src_ip,
+ RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
+ memcpy(attributes->l3.ipv6.hdr.dst_addr,
+ input->flow.ipv6_flow.dst_ip,
+ RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
+ attributes->items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .spec = &attributes->l3,
+ };
+ break;
+ default:
+ ERROR("invalid flow type%d",
+ fdir_filter->input.flow_type);
+ return ENOTSUP;
+ }
+ return 0;
+}
+
+/**
+ * Add new flow director filter and store it in list.
+ *
+ * @param priv
+ * Private structure.
+ * @param fdir_filter
+ * Flow director filter to add.
+ *
+ * @return
+ * 0 on success, a positive errno value otherwise.
+ */
+static int
+priv_fdir_filter_add(struct priv *priv,
+ const struct rte_eth_fdir_filter *fdir_filter)
+{
+ struct mlx5_fdir attributes = {
+ .attr.group = 0,
+ };
+ struct mlx5_flow_parse parser = {
+ .layer = HASH_RXQ_ETH,
+ };
+ struct rte_flow_error error;
+ struct rte_flow *flow;
+ int ret;
+
+ ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes);