diff --git a/drivers/net/dpaa2/dpaa2_flow.c b/drivers/net/dpaa2/dpaa2_flow.c
index cff01c4..84fe37a 100644
--- a/drivers/net/dpaa2/dpaa2_flow.c
+++ b/drivers/net/dpaa2/dpaa2_flow.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2020 NXP
+ * Copyright 2018-2021 NXP
  */
 
 #include <sys/queue.h>
 int mc_l4_port_identification;
 
 static char *dpaa2_flow_control_log;
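+/* Rx flow ID used when a frame misses all FS entries of a traffic class;
+ * defaults to drop (DPNI_FS_MISS_DROP) and can be overridden through the
+ * DPAA2_FLOW_CONTROL_MISS_FLOW environment variable.
+ */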
+static uint16_t dpaa2_flow_miss_flow_id =
+       DPNI_FS_MISS_DROP;
 
-#define FIXED_ENTRY_SIZE 54
+#define FIXED_ENTRY_SIZE DPNI_MAX_KEY_SIZE
 
 enum flow_rule_ipaddr_type {
        FLOW_NONE_IPADDR,
@@ -81,14 +83,21 @@ static const
 enum rte_flow_action_type dpaa2_supported_action_type[] = {
        RTE_FLOW_ACTION_TYPE_END,
        RTE_FLOW_ACTION_TYPE_QUEUE,
+       RTE_FLOW_ACTION_TYPE_PHY_PORT,
+       RTE_FLOW_ACTION_TYPE_PORT_ID,
        RTE_FLOW_ACTION_TYPE_RSS
 };
 
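+/* Actions that can be programmed as an FS (flow steering) entry. */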
+static const
+enum rte_flow_action_type dpaa2_supported_fs_action_type[] = {
+       RTE_FLOW_ACTION_TYPE_QUEUE,
+       RTE_FLOW_ACTION_TYPE_PHY_PORT,
+       RTE_FLOW_ACTION_TYPE_PORT_ID
+};
+
 /* Max of enum rte_flow_item_type + 1, for both IPv4 and IPv6*/
 #define DPAA2_FLOW_ITEM_TYPE_GENERIC_IP (RTE_FLOW_ITEM_TYPE_META + 1)
 
-enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE;
-
 #ifndef __cplusplus
 static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
        .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
@@ -491,6 +500,42 @@ static int dpaa2_flow_extract_add(
        return 0;
 }
 
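+/* Build a key-extract profile that pulls 'size' bytes of raw frame data
+ * starting at offset 0, split into DPAA2_FLOW_MAX_KEY_SIZE chunks as required
+ * by the DPKG extract records. Raw extracts cannot be mixed with header
+ * extracts.
+ */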
+static int dpaa2_flow_extract_add_raw(struct dpaa2_key_extract *key_extract,
+                                     int size)
+{
+       struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
+       struct dpaa2_key_info *key_info = &key_extract->key_info;
+       int last_extract_size, index;
+
+       if (dpkg->num_extracts != 0 && dpkg->extracts[0].type !=
+           DPKG_EXTRACT_FROM_DATA) {
+               DPAA2_PMD_WARN("RAW extract cannot be combined with others");
+               return -1;
+       }
+
+       last_extract_size = (size % DPAA2_FLOW_MAX_KEY_SIZE);
+       dpkg->num_extracts = (size / DPAA2_FLOW_MAX_KEY_SIZE);
+       if (last_extract_size)
+               dpkg->num_extracts++;
+       else
+               last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE;
+
+       for (index = 0; index < dpkg->num_extracts; index++) {
+               dpkg->extracts[index].type = DPKG_EXTRACT_FROM_DATA;
+               if (index == dpkg->num_extracts - 1)
+                       dpkg->extracts[index].extract.from_data.size =
+                               last_extract_size;
+               else
+                       dpkg->extracts[index].extract.from_data.size =
+                               DPAA2_FLOW_MAX_KEY_SIZE;
+               dpkg->extracts[index].extract.from_data.offset =
+                       DPAA2_FLOW_MAX_KEY_SIZE * index;
+       }
+
+       key_info->key_total_size = size;
+       return 0;
+}
+
 /* Protocol discrimination.
  * Discriminate IPv4/IPv6/vLan by Eth type.
  * Discriminate UDP/TCP/ICMP by next proto of IP.
@@ -672,6 +717,18 @@ dpaa2_flow_rule_data_set(
        return 0;
 }
 
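+/* Copy the raw spec/mask bytes straight into the rule's key/mask buffers;
+ * the caller ensures both are 'size' bytes long.
+ */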
+static inline int
+dpaa2_flow_rule_data_set_raw(struct dpni_rule_cfg *rule,
+                            const void *key, const void *mask, int size)
+{
+       int offset = 0;
+
+       memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
+       memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
+
+       return 0;
+}
+
 static inline int
 _dpaa2_flow_rule_move_ipaddr_tail(
        struct dpaa2_key_extract *key_extract,
@@ -2812,6 +2869,96 @@ dpaa2_configure_flow_gre(struct rte_flow *flow,
        return 0;
 }
 
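+/* Translate a RAW pattern item into QoS and FS rule data. Only an absolute
+ * match on the first 'length' bytes of the frame is supported; spec and mask
+ * must have the same length.
+ */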
+static int
+dpaa2_configure_flow_raw(struct rte_flow *flow,
+                        struct rte_eth_dev *dev,
+                        const struct rte_flow_attr *attr,
+                        const struct rte_flow_item *pattern,
+                        const struct rte_flow_action actions[] __rte_unused,
+                        struct rte_flow_error *error __rte_unused,
+                        int *device_configured)
+{
+       struct dpaa2_dev_priv *priv = dev->data->dev_private;
+       const struct rte_flow_item_raw *spec = pattern->spec;
+       const struct rte_flow_item_raw *mask = pattern->mask;
+       int prev_key_size =
+               priv->extract.qos_key_extract.key_info.key_total_size;
+       int local_cfg = 0, ret;
+       uint32_t group;
+
+       /* Need both spec and mask */
+       if (!spec || !mask) {
+               DPAA2_PMD_ERR("spec or mask not present.");
+               return -EINVAL;
+       }
+       /* Only an absolute match from offset 0 is supported;
+        * relative, search and limit are not.
+        */
+       if (spec->relative || spec->offset != 0 ||
+           spec->search || spec->limit) {
+               DPAA2_PMD_ERR("relative/search/limit and non-zero offset not supported.");
+               return -EINVAL;
+       }
+       /* Spec len and mask len should be same */
+       if (spec->length != mask->length) {
+               DPAA2_PMD_ERR("Spec len and mask len mismatch.");
+               return -EINVAL;
+       }
+
+       /* Get traffic class index and flow id to be configured */
+       group = attr->group;
+       flow->tc_id = group;
+       flow->tc_index = attr->priority;
+
+       if (prev_key_size <= spec->length) {
+               ret = dpaa2_flow_extract_add_raw(&priv->extract.qos_key_extract,
+                                                spec->length);
+               if (ret) {
+                       DPAA2_PMD_ERR("QoS Extract RAW add failed.");
+                       return -1;
+               }
+               local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+
+               ret = dpaa2_flow_extract_add_raw(
+                                       &priv->extract.tc_key_extract[group],
+                                       spec->length);
+               if (ret) {
+                       DPAA2_PMD_ERR("FS Extract RAW add failed.");
+                       return -1;
+               }
+               local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+       }
+
+       ret = dpaa2_flow_rule_data_set_raw(&flow->qos_rule, spec->pattern,
+                                          mask->pattern, spec->length);
+       if (ret) {
+               DPAA2_PMD_ERR("QoS RAW rule data set failed");
+               return -1;
+       }
+
+       ret = dpaa2_flow_rule_data_set_raw(&flow->fs_rule, spec->pattern,
+                                          mask->pattern, spec->length);
+       if (ret) {
+               DPAA2_PMD_ERR("FS RAW rule data set failed");
+               return -1;
+       }
+
+       (*device_configured) |= local_cfg;
+
+       return 0;
+}
+
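+/* Return 1 if the action type can be programmed into an FS entry
+ * (queue assignment or redirection to another DPAA2 port), 0 otherwise.
+ */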
+static inline int
+dpaa2_fs_action_supported(enum rte_flow_action_type action)
+{
+       int i;
+
+       for (i = 0; i < (int)RTE_DIM(dpaa2_supported_fs_action_type); i++) {
+               if (action == dpaa2_supported_fs_action_type[i])
+                       return 1;
+       }
+
+       return 0;
+}
+
 /* The existing QoS/FS entry with IP address(es)
  * needs update after
  * new extract(s) are inserted before IP
@@ -2990,7 +3137,7 @@ dpaa2_flow_entry_update(
                        }
                }
 
-               if (curr->action != RTE_FLOW_ACTION_TYPE_QUEUE) {
+               if (!dpaa2_fs_action_supported(curr->action)) {
                        curr = LIST_NEXT(curr, next);
                        continue;
                }
@@ -3128,6 +3275,43 @@ dpaa2_flow_verify_attr(
        return 0;
 }
 
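+/* Resolve the destination ethdev for a PHY_PORT/PORT_ID action. Returns NULL
+ * when the port index is invalid or the target is not a DPAA2 device, since
+ * frames can only be redirected to another DPNI's Tx queue.
+ */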
+static inline struct rte_eth_dev *
+dpaa2_flow_redirect_dev(struct dpaa2_dev_priv *priv,
+       const struct rte_flow_action *action)
+{
+       const struct rte_flow_action_phy_port *phy_port;
+       const struct rte_flow_action_port_id *port_id;
+       int idx = -1;
+       struct rte_eth_dev *dest_dev;
+
+       if (action->type == RTE_FLOW_ACTION_TYPE_PHY_PORT) {
+               phy_port = (const struct rte_flow_action_phy_port *)
+                                       action->conf;
+               if (!phy_port->original)
+                       idx = phy_port->index;
+       } else if (action->type == RTE_FLOW_ACTION_TYPE_PORT_ID) {
+               port_id = (const struct rte_flow_action_port_id *)
+                                       action->conf;
+               if (!port_id->original)
+                       idx = port_id->id;
+       } else {
+               return NULL;
+       }
+
+       if (idx >= 0) {
+               if (!rte_eth_dev_is_valid_port(idx))
+                       return NULL;
+               dest_dev = &rte_eth_devices[idx];
+       } else {
+               dest_dev = priv->eth_dev;
+       }
+
+       if (!dpaa2_dev_is_dpaa2(dest_dev))
+               return NULL;
+
+       return dest_dev;
+}
+
 static inline int
 dpaa2_flow_verify_action(
        struct dpaa2_dev_priv *priv,
@@ -3153,6 +3337,13 @@ dpaa2_flow_verify_action(
                                return -1;
                        }
                        break;
+               case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+               case RTE_FLOW_ACTION_TYPE_PORT_ID:
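+                       /* The redirect target must resolve to a valid DPAA2 port. */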
+                       if (!dpaa2_flow_redirect_dev(priv, &actions[j])) {
+                               DPAA2_PMD_ERR("Invalid port ID in redirect action");
+                               return -ENOTSUP;
+                       }
+                       break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        rss_conf = (const struct rte_flow_action_rss *)
                                        (actions[j].conf);
@@ -3201,15 +3392,17 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
        const struct rte_flow_action_rss *rss_conf;
        int is_keycfg_configured = 0, end_of_list = 0;
        int ret = 0, i = 0, j = 0;
-       struct dpni_rx_tc_dist_cfg tc_cfg;
+       struct dpni_rx_dist_cfg tc_cfg;
        struct dpni_qos_tbl_cfg qos_cfg;
        struct dpni_fs_action_cfg action;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
-       struct dpaa2_queue *rxq;
+       struct dpaa2_queue *dest_q;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        size_t param;
        struct rte_flow *curr = LIST_FIRST(&priv->flows);
        uint16_t qos_index;
+       struct rte_eth_dev *dest_dev;
+       struct dpaa2_dev_priv *dest_priv;
 
        ret = dpaa2_flow_verify_attr(priv, attr);
        if (ret)
@@ -3295,6 +3488,16 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
                                return ret;
                        }
                        break;
+               case RTE_FLOW_ITEM_TYPE_RAW:
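+                       /* Exact match on spec->length bytes from frame offset 0. */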
+                       ret = dpaa2_configure_flow_raw(flow,
+                                                      dev, attr, &pattern[i],
+                                                      actions, error,
+                                                      &is_keycfg_configured);
+                       if (ret) {
+                               DPAA2_PMD_ERR("RAW flow configuration failed!");
+                               return ret;
+                       }
+                       break;
                case RTE_FLOW_ITEM_TYPE_END:
                        end_of_list = 1;
                        break; /*End of List*/
@@ -3311,12 +3514,31 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
        while (!end_of_list) {
                switch (actions[j].type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE:
-                       dest_queue =
-                               (const struct rte_flow_action_queue *)(actions[j].conf);
-                       rxq = priv->rx_vq[dest_queue->index];
-                       flow->action = RTE_FLOW_ACTION_TYPE_QUEUE;
+               case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+               case RTE_FLOW_ACTION_TYPE_PORT_ID:
                        memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
-                       action.flow_id = rxq->flow_id;
+                       flow->action = actions[j].type;
+
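+                       /* QUEUE steers to a local Rx queue; PHY_PORT/PORT_ID
+                        * redirect the frame to Tx queue 0 of the destination
+                        * DPNI via DPNI_FS_OPT_REDIRECT_TO_DPNI_TX.
+                        */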
+                       if (actions[j].type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+                               dest_queue = (const struct rte_flow_action_queue *)
+                                                               (actions[j].conf);
+                               dest_q = priv->rx_vq[dest_queue->index];
+                               action.flow_id = dest_q->flow_id;
+                       } else {
+                               dest_dev = dpaa2_flow_redirect_dev(priv,
+                                                                  &actions[j]);
+                               if (!dest_dev) {
+                                       DPAA2_PMD_ERR("Invalid destination device for redirection!");
+                                       return -1;
+                               }
+
+                               dest_priv = dest_dev->data->dev_private;
+                               dest_q = dest_priv->tx_vq[0];
+                               action.options =
+                                               DPNI_FS_OPT_REDIRECT_TO_DPNI_TX;
+                               action.redirect_obj_token = dest_priv->token;
+                               action.flow_id = dest_q->flow_id;
+                       }
 
                        /* Configure FS table first*/
                        if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
@@ -3330,20 +3552,29 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
                                        return -1;
                                }
 
-                               memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+                               memset(&tc_cfg, 0,
+                                       sizeof(struct dpni_rx_dist_cfg));
                                tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
-                               tc_cfg.dist_mode = DPNI_DIST_MODE_FS;
                                tc_cfg.key_cfg_iova =
                                        (uint64_t)priv->extract.tc_extract_param[flow->tc_id];
-                               tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
-                               tc_cfg.fs_cfg.keep_entries = true;
-                               ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
-                                                        priv->token,
-                                                        flow->tc_id, &tc_cfg);
+                               tc_cfg.tc = flow->tc_id;
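+                               /* Turn off hash distribution for this TC before
+                                * enabling exact-match (FS) distribution with
+                                * the configured miss flow ID.
+                                */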
+                               tc_cfg.enable = false;
+                               ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
+                                               priv->token, &tc_cfg);
                                if (ret < 0) {
                                        DPAA2_PMD_ERR(
-                                       "Distribution cannot be configured.(%d)"
-                                       , ret);
+                                               "TC hash cannot be disabled.(%d)",
+                                               ret);
+                                       return -1;
+                               }
+                               tc_cfg.enable = true;
+                               tc_cfg.fs_miss_flow_id = dpaa2_flow_miss_flow_id;
+                               ret = dpni_set_rx_fs_dist(dpni, CMD_PRI_LOW,
+                                                        priv->token, &tc_cfg);
+                               if (ret < 0) {
+                                       DPAA2_PMD_ERR(
+                                               "TC distribution cannot be configured.(%d)",
+                                               ret);
                                        return -1;
                                }
                        }
@@ -3508,18 +3739,16 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
                                return -1;
                        }
 
-                       memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+                       memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
                        tc_cfg.dist_size = rss_conf->queue_num;
-                       tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
                        tc_cfg.key_cfg_iova = (size_t)param;
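+                       /* Enable hash (RSS) distribution per TC; this replaces
+                        * the removed dpni_set_rx_tc_dist() call.
+                        */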
-                       tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
-
-                       ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
-                                                priv->token, flow->tc_id,
-                                                &tc_cfg);
+                       tc_cfg.enable = true;
+                       tc_cfg.tc = flow->tc_id;
+                       ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
+                                                priv->token, &tc_cfg);
                        if (ret < 0) {
                                DPAA2_PMD_ERR(
-                                       "RSS FS table cannot be configured: %d\n",
+                                       "RSS TC table cannot be configured: %d\n",
                                        ret);
                                rte_free((void *)param);
                                return -1;
@@ -3544,7 +3773,7 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
                                                         priv->token, &qos_cfg);
                                if (ret < 0) {
                                        DPAA2_PMD_ERR(
-                                       "Distribution can't be configured %d\n",
+                                       "RSS QoS dist can't be configured: %d\n",
                                        ret);
                                        return -1;
                                }
@@ -3761,6 +3990,20 @@ struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
        dpaa2_flow_control_log =
                getenv("DPAA2_FLOW_CONTROL_LOG");
 
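+       /* DPAA2_FLOW_CONTROL_MISS_FLOW selects the Rx flow ID that receives
+        * frames missing every FS entry; without it, missed frames are
+        * dropped (DPNI_FS_MISS_DROP).
+        */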
+       if (getenv("DPAA2_FLOW_CONTROL_MISS_FLOW")) {
+               struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+               dpaa2_flow_miss_flow_id =
+                       atoi(getenv("DPAA2_FLOW_CONTROL_MISS_FLOW"));
+               if (dpaa2_flow_miss_flow_id >= priv->dist_queues) {
+                       DPAA2_PMD_ERR(
+                               "Miss flow ID %d exceeds the max flow ID %d",
+                               dpaa2_flow_miss_flow_id,
+                               priv->dist_queues - 1);
+                       return NULL;
+               }
+       }
+
        flow = rte_zmalloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
        if (!flow) {
                DPAA2_PMD_ERR("Failure to allocate memory for flow");
@@ -3810,24 +4053,15 @@ struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
        flow->ipaddr_rule.fs_ipdst_offset =
                IP_ADDRESS_OFFSET_INVALID;
 
-       switch (dpaa2_filter_type) {
-       case RTE_ETH_FILTER_GENERIC:
-               ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
-                                            actions, error);
-               if (ret < 0) {
-                       if (error->type > RTE_FLOW_ERROR_TYPE_ACTION)
-                               rte_flow_error_set(error, EPERM,
-                                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                                               attr, "unknown");
-                       DPAA2_PMD_ERR(
-                       "Failure to create flow, return code (%d)", ret);
-                       goto creation_error;
-               }
-               break;
-       default:
-               DPAA2_PMD_ERR("Filter type (%d) not supported",
-               dpaa2_filter_type);
-               break;
+       ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
+                       actions, error);
+       if (ret < 0) {
+               if (error && error->type > RTE_FLOW_ERROR_TYPE_ACTION)
+                       rte_flow_error_set(error, EPERM,
+                                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                       attr, "unknown");
+               DPAA2_PMD_ERR("Failure to create flow, return code (%d)", ret);
+               goto creation_error;
        }
 
        return flow;
@@ -3854,6 +4088,8 @@ int dpaa2_flow_destroy(struct rte_eth_dev *dev,
 
        switch (flow->action) {
        case RTE_FLOW_ACTION_TYPE_QUEUE:
+       case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+       case RTE_FLOW_ACTION_TYPE_PORT_ID:
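+               /* Redirect flows are torn down the same way as queue flows. */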
                if (priv->num_rx_tc > 1) {
                        /* Remove entry from QoS table first */
                        ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,