diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 7cd5373..51d8fdd 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -17,6 +17,7 @@
 #include <rte_malloc.h>
 #include <rte_tailq.h>
 #include <rte_flow_driver.h>
+#include <rte_bitmap.h>
 
 #include "i40e_logs.h"
 #include "base/i40e_type.h"
@@ -144,6 +145,8 @@ const struct rte_flow_ops i40e_flow_ops = {
 
 static union i40e_filter_t cons_filter;
 static enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
+/* Internal pattern buffer with VOID items stripped, reused by
+ * i40e_flow_validate() to avoid a per-call allocation for small patterns.
+ */
+struct rte_flow_item g_items[32];
 
 /* Pattern matched ethertype filter */
 static enum rte_flow_item_type pattern_ethertype[] = {
@@ -2044,9 +2047,6 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        enum rte_flow_item_type item_type;
-       uint16_t outer_tpid;
-
-       outer_tpid = i40e_get_outer_vlan(dev);
 
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
@@ -2106,7 +2106,7 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                        if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
                            filter->ether_type == RTE_ETHER_TYPE_IPV6 ||
                            filter->ether_type == RTE_ETHER_TYPE_LLDP ||
-                           filter->ether_type == outer_tpid) {
+                           filter->ether_type == i40e_get_outer_vlan(dev)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
@@ -2608,7 +2608,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
        uint16_t flex_size;
        bool cfg_flex_pit = true;
        bool cfg_flex_msk = true;
-       uint16_t outer_tpid;
        uint16_t ether_type;
        uint32_t vtc_flow_cpu;
        bool outer_ip = true;
@@ -2617,7 +2616,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
        memset(off_arr, 0, sizeof(off_arr));
        memset(len_arr, 0, sizeof(len_arr));
        memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
-       outer_tpid = i40e_get_outer_vlan(dev);
        filter->input.flow_ext.customized_pctype = false;
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
@@ -2685,7 +2683,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                if (next_type == RTE_FLOW_ITEM_TYPE_VLAN ||
                                    ether_type == RTE_ETHER_TYPE_IPV4 ||
                                    ether_type == RTE_ETHER_TYPE_IPV6 ||
-                                   ether_type == outer_tpid) {
+                                   ether_type == i40e_get_outer_vlan(dev)) {
                                        rte_flow_error_set(error, EINVAL,
                                                     RTE_FLOW_ERROR_TYPE_ITEM,
                                                     item,
@@ -2729,7 +2727,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
                                if (ether_type == RTE_ETHER_TYPE_IPV4 ||
                                    ether_type == RTE_ETHER_TYPE_IPV6 ||
-                                   ether_type == outer_tpid) {
+                                   ether_type == i40e_get_outer_vlan(dev)) {
                                        rte_flow_error_set(error, EINVAL,
                                                     RTE_FLOW_ERROR_TYPE_ITEM,
                                                     item,
@@ -4853,11 +4851,12 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
        const struct rte_flow_action *act;
        const struct rte_flow_action_rss *rss;
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_queue_regions *info = &pf->queue_region;
        struct i40e_rte_flow_rss_conf *rss_config =
                        &filter->rss_conf;
        struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
-       uint16_t i, j, n, tmp, nb_types;
+       uint16_t i, j, n, m, tmp, nb_types;
        uint32_t index = 0;
        uint64_t hf_bit = 1;
 
@@ -4889,6 +4888,24 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
                        I40E_FILTER_PCTYPE_L2_PAYLOAD},
        };
 
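+       /* X722 classifies some flows with extra PCTYPEs (TCP SYN-no-ACK,
+        * unicast/multicast UDP); map the requested RSS types to them as
+        * well so queue regions also cover those flows.
+        */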
+       static const struct {
+               uint64_t rss_type;
+               enum i40e_filter_pctype pctype;
+       } pctype_match_table_x722[] = {
+               {ETH_RSS_NONFRAG_IPV4_TCP,
+                       I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK},
+               {ETH_RSS_NONFRAG_IPV4_UDP,
+                       I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP},
+               {ETH_RSS_NONFRAG_IPV4_UDP,
+                       I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP},
+               {ETH_RSS_NONFRAG_IPV6_TCP,
+                       I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK},
+               {ETH_RSS_NONFRAG_IPV6_UDP,
+                       I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP},
+               {ETH_RSS_NONFRAG_IPV6_UDP,
+                       I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP},
+       };
+
        NEXT_ITEM_OF_ACTION(act, actions, index);
        rss = act->conf;
 
@@ -4914,6 +4931,18 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
                                break;
                        }
                }
+
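+               /* On X722, also record the matching extra PCTYPEs in region 0
+                * so they are programmed together with the regular flow types.
+                */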
+               if (hw->mac.type == I40E_MAC_X722)
+                       for (j = 0; j < RTE_DIM(pctype_match_table_x722); j++) {
+                               if (rss->types &
+                                   pctype_match_table_x722[j].rss_type) {
+                                       m = conf_info->region[0].flowtype_num;
+                                       conf_info->region[0].hw_flowtype[m] =
+                                               pctype_match_table_x722[j].pctype;
+                                       conf_info->region[0].flowtype_num++;
+                                       conf_info->queue_region_number = 1;
+                               }
+                       }
        }
 
        /**
@@ -5011,9 +5040,9 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
                                        info->region[i].user_priority_num++;
                                }
 
-                               j = info->region[i].flowtype_num;
-                               tmp = conf_info->region[n].hw_flowtype[0];
-                               if (conf_info->region[n].flowtype_num) {
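+                               /* Copy every configured flow type for this
+                                * region, not only the first entry.
+                                */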
+                               for (m = 0; m < conf_info->region[n].flowtype_num; m++) {
+                                       j = info->region[i].flowtype_num;
+                                       tmp = conf_info->region[n].hw_flowtype[m];
                                        info->region[i].hw_flowtype[j] = tmp;
                                        info->region[i].flowtype_num++;
                                }
@@ -5026,9 +5055,9 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
                                        info->region[i].user_priority_num++;
                                }
 
-                               j = info->region[i].flowtype_num;
-                               tmp = conf_info->region[n].hw_flowtype[0];
-                               if (conf_info->region[n].flowtype_num) {
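+                               /* Likewise copy all configured flow types for
+                                * this region.
+                                */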
+                               for (m = 0; m < conf_info->region[n].flowtype_num; m++) {
+                                       j = info->region[i].flowtype_num;
+                                       tmp = conf_info->region[n].hw_flowtype[m];
                                        info->region[i].hw_flowtype[j] = tmp;
                                        info->region[i].flowtype_num++;
                                }
@@ -5263,7 +5292,6 @@ i40e_flow_validate(struct rte_eth_dev *dev,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }
-
        memset(&cons_filter, 0, sizeof(cons_filter));
 
        /* Get the non-void item of action */
@@ -5285,12 +5313,18 @@ i40e_flow_validate(struct rte_eth_dev *dev,
        }
        item_num++;
 
-       items = rte_zmalloc("i40e_pattern",
-                           item_num * sizeof(struct rte_flow_item), 0);
-       if (!items) {
-               rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
-                                  NULL, "No memory for PMD internal items.");
-               return -ENOMEM;
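+       /* Reuse the preallocated g_items buffer when the pattern fits;
+        * fall back to dynamic allocation for larger patterns.
+        */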
+       if (item_num <= ARRAY_SIZE(g_items)) {
+               items = g_items;
+       } else {
+               items = rte_zmalloc("i40e_pattern",
+                                   item_num * sizeof(struct rte_flow_item), 0);
+               if (!items) {
+                       rte_flow_error_set(error, ENOMEM,
+                                       RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+                                       NULL,
+                                       "No memory for PMD internal items.");
+                       return -ENOMEM;
+               }
        }
 
        i40e_pattern_skip_void_item(items, pattern);
@@ -5302,16 +5336,21 @@ i40e_flow_validate(struct rte_eth_dev *dev,
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           pattern, "Unsupported pattern");
-                       rte_free(items);
+
+                       if (items != g_items)
+                               rte_free(items);
                        return -rte_errno;
                }
+
                if (parse_filter)
                        ret = parse_filter(dev, attr, items, actions,
                                           error, &cons_filter);
+
                flag = true;
        } while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));
 
-       rte_free(items);
+       if (items != g_items)
+               rte_free(items);
 
        return ret;
 }
@@ -5324,21 +5363,33 @@ i40e_flow_create(struct rte_eth_dev *dev,
                 struct rte_flow_error *error)
 {
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-       struct rte_flow *flow;
+       struct rte_flow *flow = NULL;
+       struct i40e_fdir_info *fdir_info = &pf->fdir;
        int ret;
 
-       flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
-       if (!flow) {
-               rte_flow_error_set(error, ENOMEM,
-                                  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-                                  "Failed to allocate memory");
-               return flow;
-       }
-
        ret = i40e_flow_validate(dev, attr, pattern, actions, error);
        if (ret < 0)
                return NULL;
 
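+       /* FDIR flows are taken from the preallocated entry pool; other
+        * filter types allocate their rte_flow on demand.
+        */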
+       if (cons_filter_type == RTE_ETH_FILTER_FDIR) {
+               flow = i40e_fdir_entry_pool_get(fdir_info);
+               if (flow == NULL) {
+                       rte_flow_error_set(error, ENOBUFS,
+                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                          "Fdir space full");
+
+                       return flow;
+               }
+       } else {
+               flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
+               if (!flow) {
+                       rte_flow_error_set(error, ENOMEM,
+                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                                          "Failed to allocate memory");
+                       return flow;
+               }
+       }
+
        switch (cons_filter_type) {
        case RTE_ETH_FILTER_ETHERTYPE:
                ret = i40e_ethertype_filter_set(pf,
@@ -5350,7 +5401,7 @@ i40e_flow_create(struct rte_eth_dev *dev,
                break;
        case RTE_ETH_FILTER_FDIR:
                ret = i40e_flow_add_del_fdir_filter(dev,
-                                      &cons_filter.fdir_filter, 1);
+                              &cons_filter.fdir_filter, 1);
                if (ret)
                        goto free_flow;
                flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
@@ -5384,7 +5435,12 @@ free_flow:
        rte_flow_error_set(error, -ret,
                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                           "Failed to create flow.");
-       rte_free(flow);
+
+       if (cons_filter_type != RTE_ETH_FILTER_FDIR)
+               rte_free(flow);
+       else
+               i40e_fdir_entry_pool_put(fdir_info, flow);
+
        return NULL;
 }
 
@@ -5395,6 +5451,7 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
 {
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        enum rte_filter_type filter_type = flow->filter_type;
+       struct i40e_fdir_info *fdir_info = &pf->fdir;
        int ret = 0;
 
        switch (filter_type) {
@@ -5408,7 +5465,8 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
                break;
        case RTE_ETH_FILTER_FDIR:
                ret = i40e_flow_add_del_fdir_filter(dev,
-                      &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
+                               &((struct i40e_fdir_filter *)flow->rule)->fdir,
+                               0);
 
                /* If the last flow is destroyed, disable fdir. */
                if (!ret && TAILQ_EMPTY(&pf->fdir.fdir_list)) {
@@ -5428,7 +5486,11 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
 
        if (!ret) {
                TAILQ_REMOVE(&pf->flow_list, flow, node);
-               rte_free(flow);
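+               /* FDIR entries go back to the pool, other flows are freed. */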
+               if (filter_type == RTE_ETH_FILTER_FDIR)
+                       i40e_fdir_entry_pool_put(fdir_info, flow);
+               else
+                       rte_free(flow);
        } else
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
@@ -5582,6 +5644,7 @@ i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
        struct rte_flow *flow;
        void *temp;
        int ret;
+       uint32_t i = 0;
 
        ret = i40e_fdir_flush(dev);
        if (!ret) {
@@ -5597,10 +5660,24 @@ i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
                TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
                        if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
                                TAILQ_REMOVE(&pf->flow_list, flow, node);
-                               rte_free(flow);
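+                               /* The entry is reclaimed below when the FDIR
+                                * flow pool is reset, no per-flow free needed.
+                                */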
                        }
                }
 
+               /* Reset the flow pool: re-index entries and mark them all free. */
+               rte_bitmap_reset(fdir_info->fdir_flow_pool.bitmap);
+               for (i = 0; i < fdir_info->fdir_space_size; i++) {
+                       fdir_info->fdir_flow_pool.pool[i].idx = i;
+                       rte_bitmap_set(fdir_info->fdir_flow_pool.bitmap, i);
+               }
+
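+               /* Restore the pool counters and clear the filter array. */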
+               fdir_info->fdir_actual_cnt = 0;
+               fdir_info->fdir_guarantee_free_space =
+                       fdir_info->fdir_guarantee_total_space;
+               memset(fdir_info->fdir_filter_array, 0,
+                       sizeof(struct i40e_fdir_filter) *
+                       I40E_MAX_FDIR_FILTER_NUM);
+
                for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
                     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
                        pf->fdir.inset_flag[pctype] = 0;