net/txgbe: support VF MTU update
[dpdk.git] / drivers / net / txgbe / txgbe_flow.c
index 7d50861..57a4f2e 100644 (file)
@@ -129,6 +129,9 @@ const struct rte_flow_action *next_no_void_action(
  * END
  * other members in mask and spec should set to 0x00.
  * item->last should be NULL.
+ *
+ * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
+ *
  */
 static int
 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
@@ -177,6 +180,43 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
        memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
        memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
 
+#ifdef RTE_LIB_SECURITY
+       /**
+        *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
+        */
+       act = next_no_void_action(actions, NULL);
+       if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
+               const void *conf = act->conf;
+               /* check if the next not void item is END */
+               act = next_no_void_action(actions, act);
+               if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+                       memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ACTION,
+                               act, "Not supported action.");
+                       return -rte_errno;
+               }
+
+               /* get the IP pattern */
+               item = next_no_void_pattern(pattern, NULL);
+               while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+                               item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+                       if (item->last ||
+                                       item->type == RTE_FLOW_ITEM_TYPE_END) {
+                               rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ITEM,
+                                       item, "IP pattern missing.");
+                               return -rte_errno;
+                       }
+                       item = next_no_void_pattern(pattern, item);
+               }
+
+               filter->proto = IPPROTO_ESP;
+               return txgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
+                                       item->type == RTE_FLOW_ITEM_TYPE_IPV6);
+       }
+#endif
+
        /* the first not void item can be MAC or IPv4 */
        item = next_no_void_pattern(pattern, NULL);
 
@@ -547,6 +587,12 @@ txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
        if (ret)
                return ret;
 
+#ifdef RTE_LIB_SECURITY
+       /* ESP flow not really a flow */
+       if (filter->proto == IPPROTO_ESP)
+               return 0;
+#endif
+
        /* txgbe doesn't support tcp flags */
        if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
@@ -2537,6 +2583,16 @@ txgbe_parse_rss_filter(struct rte_eth_dev *dev,
        return 0;
 }
 
+/* Remove the currently configured RSS flow filter, if any. */
+static void
+txgbe_clear_rss_filter(struct rte_eth_dev *dev)
+{
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+       if (filter_info->rss_info.conf.queue_num)
+               txgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
+}
+
 void
 txgbe_filterlist_init(void)
 {
@@ -2549,6 +2605,68 @@ txgbe_filterlist_init(void)
        TAILQ_INIT(&txgbe_flow_list);
 }
 
+void
+txgbe_filterlist_flush(void)
+{
+       struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
+       struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
+       struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
+       struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+       struct txgbe_fdir_rule_ele *fdir_rule_ptr;
+       struct txgbe_flow_mem *txgbe_flow_mem_ptr;
+       struct txgbe_rss_conf_ele *rss_filter_ptr;
+
+       while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
+               TAILQ_REMOVE(&filter_ntuple_list,
+                                ntuple_filter_ptr,
+                                entries);
+               rte_free(ntuple_filter_ptr);
+       }
+
+       while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
+               TAILQ_REMOVE(&filter_ethertype_list,
+                                ethertype_filter_ptr,
+                                entries);
+               rte_free(ethertype_filter_ptr);
+       }
+
+       while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
+               TAILQ_REMOVE(&filter_syn_list,
+                                syn_filter_ptr,
+                                entries);
+               rte_free(syn_filter_ptr);
+       }
+
+       while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
+               TAILQ_REMOVE(&filter_l2_tunnel_list,
+                                l2_tn_filter_ptr,
+                                entries);
+               rte_free(l2_tn_filter_ptr);
+       }
+
+       while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
+               TAILQ_REMOVE(&filter_fdir_list,
+                                fdir_rule_ptr,
+                                entries);
+               rte_free(fdir_rule_ptr);
+       }
+
+       while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
+               TAILQ_REMOVE(&filter_rss_list,
+                                rss_filter_ptr,
+                                entries);
+               rte_free(rss_filter_ptr);
+       }
+
+       while ((txgbe_flow_mem_ptr = TAILQ_FIRST(&txgbe_flow_list))) {
+               TAILQ_REMOVE(&txgbe_flow_list,
+                                txgbe_flow_mem_ptr,
+                                entries);
+               rte_free(txgbe_flow_mem_ptr->flow);
+               rte_free(txgbe_flow_mem_ptr);
+       }
+}
+
 /**
  * Create or destroy a flow rule.
  * Theorically one rule can match more than one filters.
@@ -2600,6 +2718,12 @@ txgbe_flow_create(struct rte_eth_dev *dev,
        ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
                        actions, &ntuple_filter, error);
 
+#ifdef RTE_LIB_SECURITY
+       /* ESP flow not really a flow */
+       if (ntuple_filter.proto == IPPROTO_ESP)
+               return flow;
+#endif
+
        if (!ret) {
                ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
                if (!ret) {
@@ -2996,7 +3120,29 @@ txgbe_flow_flush(struct rte_eth_dev *dev,
 {
        int ret = 0;
 
-       return ret;
+       txgbe_clear_all_ntuple_filter(dev);
+       txgbe_clear_all_ethertype_filter(dev);
+       txgbe_clear_syn_filter(dev);
+
+       ret = txgbe_clear_all_fdir_filter(dev);
+       if (ret < 0) {
+               rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+                                       NULL, "Failed to flush rule");
+               return ret;
+       }
+
+       ret = txgbe_clear_all_l2_tn_filter(dev);
+       if (ret < 0) {
+               rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+                                       NULL, "Failed to flush rule");
+               return ret;
+       }
+
+       txgbe_clear_rss_filter(dev);
+
+       txgbe_filterlist_flush();
+
+       return 0;
 }
 
 const struct rte_flow_ops txgbe_flow_ops = {