net/igc: support flow API
author Alvin Zhang <alvinx.zhang@intel.com>
Wed, 15 Apr 2020 08:48:10 +0000 (16:48 +0800)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Tue, 21 Apr 2020 11:57:08 +0000 (13:57 +0200)
The following flow types are supported:
ether-type filter, 2-tuple filter, SYN filter, RSS.
Documentation is updated accordingly.

Signed-off-by: Alvin Zhang <alvinx.zhang@intel.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
12 files changed:
doc/guides/nics/features/igc.ini
doc/guides/nics/igc.rst
drivers/net/igc/Makefile
drivers/net/igc/igc_ethdev.c
drivers/net/igc/igc_ethdev.h
drivers/net/igc/igc_filter.c [new file with mode: 0644]
drivers/net/igc/igc_filter.h [new file with mode: 0644]
drivers/net/igc/igc_flow.c [new file with mode: 0644]
drivers/net/igc/igc_flow.h [new file with mode: 0644]
drivers/net/igc/igc_txrx.c
drivers/net/igc/igc_txrx.h
drivers/net/igc/meson.build

index 5bc901f..09300eb 100644 (file)
@@ -32,6 +32,7 @@ RSS key update       = Y
 RSS reta update      = Y
 VLAN filter          = Y
 VLAN offload         = Y
+Flow API             = P
 Linux UIO            = Y
 Linux VFIO           = Y
 x86-64               = Y
index 389c780..4c5e626 100644 (file)
@@ -75,3 +75,47 @@ outer VLAN to 0x9100:
    testpmd> vlan set strip off 0
    testpmd> vlan set extend on 0
    testpmd> vlan set outer tpid 0x9100 0
+
+
+Flow Director
+~~~~~~~~~~~~~
+
+The Flow Director works in receive mode to identify specific flows or sets of flows and route
+them to specific queues.
+
+The Flow Director filters include the following types:
+
+- ether-type filter
+- 2-tuple filter (destination L4 protocol and destination L4 port)
+- TCP SYN filter
+- RSS filter
+
+Start ``testpmd``:
+
+.. code-block:: console
+
+   ./testpmd -l 4-8 -- -i --rxq=4 --txq=4 --pkt-filter-mode=perfect --disable-rss
+
+Add a rule to direct packets with ``ether-type=0x801`` to queue 1:
+
+.. code-block:: console
+
+   testpmd> flow create 0 ingress pattern eth type is 0x801 / end actions queue index 1 / end
+
+Add a rule to direct packets with ``ip-protocol=0x6 (TCP)`` and destination ``tcp_port=0x80`` to queue 1:
+
+.. code-block:: console
+
+   testpmd> flow create 0 ingress pattern eth / ipv4 proto is 6 / tcp dst is 0x80 / end actions queue index 1 / end
+
+Add a rule to direct TCP packets (``ip-protocol=0x6``) with the SYN flag set to queue 1:
+
+.. code-block:: console
+
+   testpmd> flow create 0 ingress pattern tcp flags spec 0x02 flags mask 0x02 / end actions queue index 1 / end
+
+Add a rule to enable ipv4-udp RSS:
+
+.. code-block:: console
+
+   testpmd> flow create 0 ingress pattern end actions rss types ipv4-udp end / end
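For reference, the same kind of ether-type rule can also be installed programmatically through the generic rte_flow API rather than testpmd. The snippet below is an illustrative sketch only (not part of this patch); it assumes port 0 is configured and started with at least two Rx queues:

    #include <rte_flow.h>

    /* Illustrative sketch: direct ether-type 0x0801 traffic on port 0 to
     * queue 1, equivalent to the first testpmd example above. */
    static struct rte_flow *
    add_ethertype_rule(uint16_t port_id, struct rte_flow_error *error)
    {
            struct rte_flow_attr attr = { .ingress = 1 };
            struct rte_flow_item_eth eth_spec = { .type = RTE_BE16(0x0801) };
            struct rte_flow_item_eth eth_mask = { .type = RTE_BE16(0xffff) };
            struct rte_flow_item pattern[] = {
                    { .type = RTE_FLOW_ITEM_TYPE_ETH,
                      .spec = &eth_spec, .mask = &eth_mask },
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            struct rte_flow_action_queue queue = { .index = 1 };
            struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };

            /* the PMD parses this into an igc ether-type filter */
            return rte_flow_create(port_id, &attr, pattern, actions, error);
    }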
index c162c51..d6d7959 100644 (file)
@@ -34,5 +34,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_phy.c
 SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_logs.c
 SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_txrx.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_flow.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
index af910b9..16d98c6 100644 (file)
@@ -15,6 +15,8 @@
 
 #include "igc_logs.h"
 #include "igc_txrx.h"
+#include "igc_filter.h"
+#include "igc_flow.h"
 
 #define IGC_INTEL_VENDOR_ID            0x8086
 
@@ -299,6 +301,7 @@ static const struct eth_dev_ops eth_igc_ops = {
        .vlan_offload_set       = eth_igc_vlan_offload_set,
        .vlan_tpid_set          = eth_igc_vlan_tpid_set,
        .vlan_strip_queue_set   = eth_igc_vlan_strip_queue_set,
+       .filter_ctrl            = eth_igc_filter_ctrl,
 };
 
 /*
@@ -1181,6 +1184,9 @@ eth_igc_close(struct rte_eth_dev *dev)
        if (!adapter->stopped)
                eth_igc_stop(dev);
 
+       igc_flow_flush(dev, NULL);
+       igc_clear_all_filter(dev);
+
        igc_intr_other_disable(dev);
        do {
                int ret = rte_intr_callback_unregister(intr_handle,
@@ -1348,6 +1354,8 @@ eth_igc_dev_init(struct rte_eth_dev *dev)
                igc->rxq_stats_map[i] = -1;
        }
 
+       igc_flow_init(dev);
+       igc_clear_all_filter(dev);
        return 0;
 
 err_late:
index 4b84127..a09debf 100644 (file)
@@ -90,6 +90,19 @@ extern "C" {
        ETH_RSS_IPV6_TCP_EX        | \
        ETH_RSS_IPV6_UDP_EX)
 
+#define IGC_MAX_ETQF_FILTERS           3       /* etqf(3) is used for 1588 */
+#define IGC_ETQF_FILTER_1588           3
+#define IGC_ETQF_QUEUE_SHIFT           16
+#define IGC_ETQF_QUEUE_MASK            (7u << IGC_ETQF_QUEUE_SHIFT)
+
+#define IGC_MAX_NTUPLE_FILTERS         8
+#define IGC_NTUPLE_MAX_PRI             7
+
+#define IGC_SYN_FILTER_ENABLE          0x01    /* syn filter enable field */
+#define IGC_SYN_FILTER_QUEUE_SHIFT     1       /* syn filter queue field */
+#define IGC_SYN_FILTER_QUEUE   0x0000000E      /* syn filter queue field */
+#define IGC_RFCTL_SYNQFP       0x00080000      /* SYNQFP in RFCTL register */
+
 /* structure for interrupt relative data */
 struct igc_interrupt {
        uint32_t flags;
@@ -125,6 +138,79 @@ struct igc_vfta {
        uint32_t vfta[IGC_VFTA_SIZE];
 };
 
+/* ethertype filter structure */
+struct igc_ethertype_filter {
+       uint16_t ether_type;
+       uint16_t queue;
+};
+
+/* Structure of ntuple filter info. */
+struct igc_ntuple_info {
+       uint16_t dst_port;
+       uint8_t proto;          /* l4 protocol. */
+
+       /*
+        * A packet that matches the above 2-tuple and has any of these
+        * TCP flag bits set will hit this filter.
+        */
+       uint8_t tcp_flags;
+
+       /*
+        * seven levels (001b-111b), 111b is highest, used when more than one
+        * filter matches.
+        */
+       uint8_t priority;
+       uint8_t dst_port_mask:1, /* if mask is 1b, do compare dst port. */
+               proto_mask:1;    /* if mask is 1b, do compare protocol. */
+};
+
+/* Structure of n-tuple filter */
+struct igc_ntuple_filter {
+       RTE_STD_C11
+       union {
+               uint64_t hash_val;
+               struct igc_ntuple_info tuple_info;
+       };
+
+       uint8_t queue;
+};
+
+/* Structure of TCP SYN filter */
+struct igc_syn_filter {
+       uint8_t queue;
+
+       uint8_t hig_pri:1,      /* 1 - higher priority than other filters, */
+                               /* 0 - lower priority. */
+               enable:1;       /* 1-enable; 0-disable */
+};
+
+/* Structure to store RTE flow RSS configure. */
+struct igc_rss_filter {
+       struct rte_flow_action_rss conf; /* RSS parameters. */
+       uint8_t key[IGC_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */
+       uint16_t queue[IGC_RSS_RDT_SIZD]; /* Queue indices to use. */
+       uint8_t enable; /* 1-enabled, 0-disabled */
+};
+
+/* Feature filter types */
+enum igc_filter_type {
+       IGC_FILTER_TYPE_ETHERTYPE,
+       IGC_FILTER_TYPE_NTUPLE,
+       IGC_FILTER_TYPE_SYN,
+       IGC_FILTER_TYPE_HASH
+};
+
+/* Structure to store flow */
+struct rte_flow {
+       TAILQ_ENTRY(rte_flow) node;
+       enum igc_filter_type filter_type;
+       RTE_STD_C11
+       char filter[0];         /* filter data */
+};
+
+/* Flow list header */
+TAILQ_HEAD(igc_flow_list, rte_flow);
+
 /*
  * Structure to store private data for each driver instance (for each port).
  */
@@ -138,6 +224,12 @@ struct igc_adapter {
        struct igc_interrupt    intr;
        struct igc_vfta shadow_vfta;
        bool            stopped;
+
+       struct igc_ethertype_filter ethertype_filters[IGC_MAX_ETQF_FILTERS];
+       struct igc_ntuple_filter ntuple_filters[IGC_MAX_NTUPLE_FILTERS];
+       struct igc_syn_filter syn_filter;
+       struct igc_rss_filter rss_filter;
+       struct igc_flow_list flow_list;
 };
 
 #define IGC_DEV_PRIVATE(_dev)  ((_dev)->data->dev_private)
@@ -157,6 +249,12 @@ struct igc_adapter {
 #define IGC_DEV_PRIVATE_VFTA(_dev) \
        (&((struct igc_adapter *)(_dev)->data->dev_private)->shadow_vfta)
 
+#define IGC_DEV_PRIVATE_RSS_FILTER(_dev) \
+       (&((struct igc_adapter *)(_dev)->data->dev_private)->rss_filter)
+
+#define IGC_DEV_PRIVATE_FLOW_LIST(_dev) \
+       (&((struct igc_adapter *)(_dev)->data->dev_private)->flow_list)
+
 static inline void
 igc_read_reg_check_set_bits(struct igc_hw *hw, uint32_t reg, uint32_t bits)
 {
diff --git a/drivers/net/igc/igc_filter.c b/drivers/net/igc/igc_filter.c
new file mode 100644 (file)
index 0000000..836621d
--- /dev/null
@@ -0,0 +1,392 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2020 Intel Corporation
+ */
+
+#include "rte_malloc.h"
+#include "igc_logs.h"
+#include "igc_txrx.h"
+#include "igc_filter.h"
+#include "igc_flow.h"
+
+/*
+ * igc_ethertype_filter_lookup - lookup ether-type filter
+ *
+ * @igc, IGC filter pointer
+ * @ethertype, ethernet type
+ * @empty, a place to store the index of an empty entry if the item is not
+ *  found; it is >= 0 if an empty entry exists, otherwise -1.
+ *  The empty parameter is only valid if the function returns -1.
+ *
+ * Return value
+ * >= 0, item index of the ether-type filter
+ * -1, the item was not found
+ */
+static inline int
+igc_ethertype_filter_lookup(const struct igc_adapter *igc,
+                       uint16_t ethertype, int *empty)
+{
+       int i = 0;
+
+       if (empty) {
+               /* set to an invalid value */
+               *empty = -1;
+
+               /* search the filters array */
+               for (; i < IGC_MAX_ETQF_FILTERS; i++) {
+                       if (igc->ethertype_filters[i].ether_type == ethertype)
+                               return i;
+                       if (igc->ethertype_filters[i].ether_type == 0) {
+                               /* get empty entry */
+                               *empty = i;
+                               i++;
+                               break;
+                       }
+               }
+       }
+
+       /* search the rest of filters */
+       for (; i < IGC_MAX_ETQF_FILTERS; i++) {
+               if (igc->ethertype_filters[i].ether_type == ethertype)
+                       return i;       /* filter found, return its index */
+       }
+
+       return -1;
+}
+
+int
+igc_del_ethertype_filter(struct rte_eth_dev *dev,
+                       const struct igc_ethertype_filter *filter)
+{
+       struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+       struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
+       int ret;
+
+       if (filter->ether_type == 0) {
+               PMD_DRV_LOG(ERR, "Ethertype 0 is not supported");
+               return -EINVAL;
+       }
+
+       ret = igc_ethertype_filter_lookup(igc, filter->ether_type, NULL);
+       if (ret < 0) {
+               /* not found */
+               PMD_DRV_LOG(ERR,
+                       "Ethertype (0x%04x) filter doesn't exist",
+                       filter->ether_type);
+               return -ENOENT;
+       }
+
+       igc->ethertype_filters[ret].ether_type = 0;
+
+       IGC_WRITE_REG(hw, IGC_ETQF(ret), 0);
+       IGC_WRITE_FLUSH(hw);
+       return 0;
+}
+
+int
+igc_add_ethertype_filter(struct rte_eth_dev *dev,
+                       const struct igc_ethertype_filter *filter)
+{
+       struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+       struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
+       uint32_t etqf;
+       int ret, empty;
+
+       if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+               filter->ether_type == RTE_ETHER_TYPE_IPV6 ||
+               filter->ether_type == 0) {
+               PMD_DRV_LOG(ERR,
+                       "Unsupported ether_type(0x%04x) in ethertype filter",
+                       filter->ether_type);
+               return -EINVAL;
+       }
+
+       ret = igc_ethertype_filter_lookup(igc, filter->ether_type, &empty);
+       if (ret >= 0) {
+               PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
+                               filter->ether_type);
+               return -EEXIST;
+       }
+
+       if (empty < 0) {
+               PMD_DRV_LOG(ERR, "no ethertype filter entry.");
+               return -ENOSPC;
+       }
+       ret = empty;
+
+       etqf = filter->ether_type;
+       etqf |= IGC_ETQF_FILTER_ENABLE | IGC_ETQF_QUEUE_ENABLE;
+       etqf |= (uint32_t)filter->queue << IGC_ETQF_QUEUE_SHIFT;
+
+       memcpy(&igc->ethertype_filters[ret], filter, sizeof(*filter));
+
+       IGC_WRITE_REG(hw, IGC_ETQF(ret), etqf);
+       IGC_WRITE_FLUSH(hw);
+       return 0;
+}
+
+/* clear all the ether type filters */
+static void
+igc_clear_all_ethertype_filter(struct rte_eth_dev *dev)
+{
+       struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+       struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
+       int i;
+
+       for (i = 0; i < IGC_MAX_ETQF_FILTERS; i++)
+               IGC_WRITE_REG(hw, IGC_ETQF(i), 0);
+       IGC_WRITE_FLUSH(hw);
+
+       memset(&igc->ethertype_filters, 0, sizeof(igc->ethertype_filters));
+}
+
+/*
+ * igc_tuple_filter_lookup - lookup n-tuple filter
+ *
+ * @igc, igc filter pointer
+ * @ntuple, n-tuple filter pointer
+ * @empty, a place to store the index of an empty entry if the item is not
+ *  found; it is >= 0 if valid, otherwise -1 for no empty entry.
+ *  The value of empty is undefined if the function does not return -1.
+ *
+ * Return value
+ * >= 0, item index of the filter
+ * -1, the item was not found
+ */
+static int
+igc_tuple_filter_lookup(const struct igc_adapter *igc,
+                       const struct igc_ntuple_filter *ntuple,
+                       int *empty)
+{
+       int i = 0;
+
+       if (empty) {
+               /* set initial value */
+               *empty = -1;
+
+               /* search the filter array */
+               for (; i < IGC_MAX_NTUPLE_FILTERS; i++) {
+                       if (igc->ntuple_filters[i].hash_val) {
+                               /* compare the hash value */
+                               if (ntuple->hash_val ==
+                                       igc->ntuple_filters[i].hash_val)
+                                       /* filter found, return index */
+                                       return i;
+                       } else {
+                               /* get the empty entry */
+                               *empty = i;
+                               i++;
+                               break;
+                       }
+               }
+       }
+
+       /* search the rest of filters */
+       for (; i < IGC_MAX_NTUPLE_FILTERS; i++) {
+               if (ntuple->hash_val == igc->ntuple_filters[i].hash_val)
+                       /* filter found, return index */
+                       return i;
+       }
+
+       return -1;
+}
+
+/* Set hardware register values */
+static void
+igc_enable_tuple_filter(struct rte_eth_dev *dev,
+                       const struct igc_adapter *igc, uint8_t index)
+{
+       struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+       const struct igc_ntuple_filter *filter = &igc->ntuple_filters[index];
+       const struct igc_ntuple_info *info = &filter->tuple_info;
+       uint32_t ttqf, imir, imir_ext = IGC_IMIREXT_SIZE_BP;
+
+       imir = info->dst_port;
+       imir |= (uint32_t)info->priority << IGC_IMIR_PRIORITY_SHIFT;
+
+       /* 0b means do not compare. */
+       if (info->dst_port_mask == 0)
+               imir |= IGC_IMIR_PORT_BP;
+
+       ttqf = IGC_TTQF_DISABLE_MASK | IGC_TTQF_QUEUE_ENABLE;
+       ttqf |= (uint32_t)filter->queue << IGC_TTQF_QUEUE_SHIFT;
+       ttqf |= info->proto;
+
+       if (info->proto_mask)
+               ttqf &= ~IGC_TTQF_MASK_ENABLE;
+
+       /* TCP flags bits setting. */
+       if (info->tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) {
+               if (info->tcp_flags & RTE_TCP_URG_FLAG)
+                       imir_ext |= IGC_IMIREXT_CTRL_URG;
+               if (info->tcp_flags & RTE_TCP_ACK_FLAG)
+                       imir_ext |= IGC_IMIREXT_CTRL_ACK;
+               if (info->tcp_flags & RTE_TCP_PSH_FLAG)
+                       imir_ext |= IGC_IMIREXT_CTRL_PSH;
+               if (info->tcp_flags & RTE_TCP_RST_FLAG)
+                       imir_ext |= IGC_IMIREXT_CTRL_RST;
+               if (info->tcp_flags & RTE_TCP_SYN_FLAG)
+                       imir_ext |= IGC_IMIREXT_CTRL_SYN;
+               if (info->tcp_flags & RTE_TCP_FIN_FLAG)
+                       imir_ext |= IGC_IMIREXT_CTRL_FIN;
+       } else {
+               imir_ext |= IGC_IMIREXT_CTRL_BP;
+       }
+
+       IGC_WRITE_REG(hw, IGC_IMIR(index), imir);
+       IGC_WRITE_REG(hw, IGC_TTQF(index), ttqf);
+       IGC_WRITE_REG(hw, IGC_IMIREXT(index), imir_ext);
+       IGC_WRITE_FLUSH(hw);
+}
+
+/* Reset hardware register values */
+static void
+igc_disable_tuple_filter(struct rte_eth_dev *dev, uint8_t index)
+{
+       struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+
+       IGC_WRITE_REG(hw, IGC_TTQF(index), IGC_TTQF_DISABLE_MASK);
+       IGC_WRITE_REG(hw, IGC_IMIR(index), 0);
+       IGC_WRITE_REG(hw, IGC_IMIREXT(index), 0);
+       IGC_WRITE_FLUSH(hw);
+}
+
+int
+igc_add_ntuple_filter(struct rte_eth_dev *dev,
+               const struct igc_ntuple_filter *ntuple)
+{
+       struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
+       int ret, empty;
+
+       ret = igc_tuple_filter_lookup(igc, ntuple, &empty);
+       if (ret >= 0) {
+               PMD_DRV_LOG(ERR, "filter exists.");
+               return -EEXIST;
+       }
+
+       if (empty < 0) {
+               PMD_DRV_LOG(ERR, "no filter entry available.");
+               return -ENOSPC;
+       }
+
+       ret = empty;
+       memcpy(&igc->ntuple_filters[ret], ntuple, sizeof(*ntuple));
+       igc_enable_tuple_filter(dev, igc, (uint8_t)ret);
+       return 0;
+}
+
+int
+igc_del_ntuple_filter(struct rte_eth_dev *dev,
+               const struct igc_ntuple_filter *ntuple)
+{
+       struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
+       int ret;
+
+       ret = igc_tuple_filter_lookup(igc, ntuple, NULL);
+       if (ret < 0) {
+               PMD_DRV_LOG(ERR, "filter does not exist.");
+               return -ENOENT;
+       }
+
+       memset(&igc->ntuple_filters[ret], 0, sizeof(*ntuple));
+       igc_disable_tuple_filter(dev, (uint8_t)ret);
+       return 0;
+}
+
+/* Clear all the n-tuple filters */
+static void
+igc_clear_all_ntuple_filter(struct rte_eth_dev *dev)
+{
+       struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
+       int i;
+
+       for (i = 0; i < IGC_MAX_NTUPLE_FILTERS; i++)
+               igc_disable_tuple_filter(dev, i);
+
+       memset(&igc->ntuple_filters, 0, sizeof(igc->ntuple_filters));
+}
+
+int
+igc_set_syn_filter(struct rte_eth_dev *dev,
+               const struct igc_syn_filter *filter)
+{
+       struct igc_hw *hw;
+       struct igc_adapter *igc;
+       uint32_t synqf, rfctl;
+
+       if (filter->queue >= IGC_QUEUE_PAIRS_NUM) {
+               PMD_DRV_LOG(ERR, "out of range queue %u (max is %u)",
+                       filter->queue, IGC_QUEUE_PAIRS_NUM);
+               return -EINVAL;
+       }
+
+       igc = IGC_DEV_PRIVATE(dev);
+
+       if (igc->syn_filter.enable) {
+               PMD_DRV_LOG(ERR, "SYN filter is already enabled!");
+               return -EEXIST;
+       }
+
+       hw = IGC_DEV_PRIVATE_HW(dev);
+       synqf = (uint32_t)filter->queue << IGC_SYN_FILTER_QUEUE_SHIFT;
+       synqf |= IGC_SYN_FILTER_ENABLE;
+
+       rfctl = IGC_READ_REG(hw, IGC_RFCTL);
+       if (filter->hig_pri)
+               rfctl |= IGC_RFCTL_SYNQFP;
+       else
+               rfctl &= ~IGC_RFCTL_SYNQFP;
+
+       memcpy(&igc->syn_filter, filter, sizeof(igc->syn_filter));
+       igc->syn_filter.enable = 1;
+
+       IGC_WRITE_REG(hw, IGC_RFCTL, rfctl);
+       IGC_WRITE_REG(hw, IGC_SYNQF(0), synqf);
+       IGC_WRITE_FLUSH(hw);
+       return 0;
+}
+
+/* clear the SYN filter */
+void
+igc_clear_syn_filter(struct rte_eth_dev *dev)
+{
+       struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+       struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
+
+       IGC_WRITE_REG(hw, IGC_SYNQF(0), 0);
+       IGC_WRITE_FLUSH(hw);
+
+       memset(&igc->syn_filter, 0, sizeof(igc->syn_filter));
+}
+
+void
+igc_clear_all_filter(struct rte_eth_dev *dev)
+{
+       igc_clear_all_ethertype_filter(dev);
+       igc_clear_all_ntuple_filter(dev);
+       igc_clear_syn_filter(dev);
+       igc_clear_rss_filter(dev);
+}
+
+int
+eth_igc_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
+               enum rte_filter_op filter_op, void *arg)
+{
+       int ret = 0;
+
+       RTE_SET_USED(dev);
+
+       switch (filter_type) {
+       case RTE_ETH_FILTER_GENERIC:
+               if (filter_op != RTE_ETH_FILTER_GET)
+                       return -EINVAL;
+               *(const void **)arg = &igc_flow_ops;
+               break;
+       default:
+               PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+                                                       filter_type);
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
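eth_igc_filter_ctrl() above is not called by applications directly; it only hands the generic flow ops back to the ethdev layer. The sketch below (illustrative only, mirroring how the rte_flow library of this DPDK release dispatches, not driver code) shows how a call such as rte_flow_validate() or rte_flow_create() reaches igc_flow_ops:

    #include <errno.h>
    #include <rte_ethdev.h>
    #include <rte_eth_ctrl.h>
    #include <rte_flow.h>
    #include <rte_flow_driver.h>

    /* Illustrative sketch of the ethdev-layer lookup of the PMD's flow ops. */
    static const struct rte_flow_ops *
    flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
    {
            struct rte_eth_dev *dev = &rte_eth_devices[port_id];
            const struct rte_flow_ops *ops = NULL;

            /* eth_igc_filter_ctrl() answers this request with &igc_flow_ops */
            if (dev->dev_ops->filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
                                          RTE_ETH_FILTER_GET, &ops) != 0 ||
                ops == NULL) {
                    rte_flow_error_set(error, ENOSYS,
                                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                       "flow API not supported");
                    return NULL;
            }
            /* rte_flow_validate()/create()/destroy()/flush() then call ops->... */
            return ops;
    }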
diff --git a/drivers/net/igc/igc_filter.h b/drivers/net/igc/igc_filter.h
new file mode 100644 (file)
index 0000000..7995150
--- /dev/null
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2020 Intel Corporation
+ */
+
+#ifndef _IGC_FILTER_H_
+#define _IGC_FILTER_H_
+
+#include <rte_ethdev.h>
+#include <rte_ethdev_core.h>
+#include <rte_eth_ctrl.h>
+
+#include "igc_ethdev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int igc_add_ethertype_filter(struct rte_eth_dev *dev,
+               const struct igc_ethertype_filter *filter);
+int igc_del_ethertype_filter(struct rte_eth_dev *dev,
+               const struct igc_ethertype_filter *filter);
+int igc_add_ntuple_filter(struct rte_eth_dev *dev,
+               const struct igc_ntuple_filter *tuple);
+int igc_del_ntuple_filter(struct rte_eth_dev *dev,
+               const struct igc_ntuple_filter *tuple);
+int igc_set_syn_filter(struct rte_eth_dev *dev,
+               const struct igc_syn_filter *filter);
+void igc_clear_syn_filter(struct rte_eth_dev *dev);
+void igc_clear_all_filter(struct rte_eth_dev *dev);
+int
+eth_igc_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
+               enum rte_filter_op filter_op, void *arg);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _IGC_FILTER_H_ */
diff --git a/drivers/net/igc/igc_flow.c b/drivers/net/igc/igc_flow.c
new file mode 100644 (file)
index 0000000..1bb64d3
--- /dev/null
@@ -0,0 +1,917 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2020 Intel Corporation
+ */
+
+#include "rte_malloc.h"
+#include "igc_logs.h"
+#include "igc_txrx.h"
+#include "igc_filter.h"
+#include "igc_flow.h"
+
+/*******************************************************************************
+ * All Supported Rule Types
+ *
+ * Notes:
+ * `para` or `(para)`, the parameter must be set
+ * `[para]`, the parameter is optional
+ * `([para1][para2]...)`, all parameters are optional, but at least one of
+ *  them must be set
+ * `para1 | para2 | ...`, only one of the parameters can be set
+ *
+ * ether-type filter
+ * pattern: ETH(type)/END
+ * action: QUEUE/END
+ * attribute:
+ *
+ * n-tuple filter
+ * pattern: [ETH/]([IPv4(protocol)|IPv6(protocol)/][UDP(dst_port)|
+ *          TCP([dst_port],[flags])|SCTP(dst_port)/])END
+ * action: QUEUE/END
+ * attribute: [priority(0-7)]
+ *
+ * SYN filter
+ * pattern: [ETH/][IPv4|IPv6/]TCP(flags=SYN)/END
+ * action: QUEUE/END
+ * attribute: [priority(0,1)]
+ *
+ * RSS filter
+ * pattern:
+ * action: RSS/END
+ * attribute:
+ ******************************************************************************/
+
+/* Structure to store all filters */
+struct igc_all_filter {
+       struct igc_ethertype_filter ethertype;
+       struct igc_ntuple_filter ntuple;
+       struct igc_syn_filter syn;
+       struct igc_rss_filter rss;
+       uint32_t        mask;   /* see IGC_FILTER_MASK_* definition */
+};
+
+#define IGC_FILTER_MASK_ETHER          (1u << IGC_FILTER_TYPE_ETHERTYPE)
+#define IGC_FILTER_MASK_NTUPLE         (1u << IGC_FILTER_TYPE_NTUPLE)
+#define IGC_FILTER_MASK_TCP_SYN                (1u << IGC_FILTER_TYPE_SYN)
+#define IGC_FILTER_MASK_RSS            (1u << IGC_FILTER_TYPE_HASH)
+#define IGC_FILTER_MASK_ALL            (IGC_FILTER_MASK_ETHER |        \
+                                       IGC_FILTER_MASK_NTUPLE |        \
+                                       IGC_FILTER_MASK_TCP_SYN |       \
+                                       IGC_FILTER_MASK_RSS)
+
+#define IGC_SET_FILTER_MASK(_filter, _mask_bits)                       \
+                                       ((_filter)->mask &= (_mask_bits))
+
+#define IGC_IS_ALL_BITS_SET(_val)      ((_val) == (typeof(_val))~0)
+#define IGC_NOT_ALL_BITS_SET(_val)     ((_val) != (typeof(_val))~0)
+
+/* Parse rule attribute */
+static int
+igc_parse_attribute(const struct rte_flow_attr *attr,
+       struct igc_all_filter *filter, struct rte_flow_error *error)
+{
+       if (!attr)
+               return 0;
+
+       if (attr->group)
+               return rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
+                               "Not supported");
+
+       if (attr->egress)
+               return rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
+                               "Not supported");
+
+       if (attr->transfer)
+               return rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
+                               "Not supported");
+
+       if (!attr->ingress)
+               return rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
+                               "A rule must apply to ingress traffic");
+
+       if (attr->priority == 0)
+               return 0;
+
+       /* only n-tuple and SYN filter have priority level */
+       IGC_SET_FILTER_MASK(filter,
+               IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN);
+
+       if (IGC_IS_ALL_BITS_SET(attr->priority)) {
+               /* only SYN filter match this value */
+               IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_TCP_SYN);
+               filter->syn.hig_pri = 1;
+               return 0;
+       }
+
+       if (attr->priority > IGC_NTUPLE_MAX_PRI)
+               return rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
+                               "Priority value is invalid.");
+
+       if (attr->priority > 1) {
+               /* only n-tuple filter match this value */
+               IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);
+
+               /* get priority */
+               filter->ntuple.tuple_info.priority = (uint8_t)attr->priority;
+               return 0;
+       }
+
+       /* get priority */
+       filter->ntuple.tuple_info.priority = (uint8_t)attr->priority;
+       filter->syn.hig_pri = (uint8_t)attr->priority;
+
+       return 0;
+}
+
+/* function type of parse pattern */
+typedef int (*igc_pattern_parse)(const struct rte_flow_item *,
+               struct igc_all_filter *, struct rte_flow_error *);
+
+static int igc_parse_pattern_void(__rte_unused const struct rte_flow_item *item,
+               __rte_unused struct igc_all_filter *filter,
+               __rte_unused struct rte_flow_error *error);
+static int igc_parse_pattern_ether(const struct rte_flow_item *item,
+               struct igc_all_filter *filter, struct rte_flow_error *error);
+static int igc_parse_pattern_ip(const struct rte_flow_item *item,
+               struct igc_all_filter *filter, struct rte_flow_error *error);
+static int igc_parse_pattern_ipv6(const struct rte_flow_item *item,
+               struct igc_all_filter *filter, struct rte_flow_error *error);
+static int igc_parse_pattern_udp(const struct rte_flow_item *item,
+               struct igc_all_filter *filter, struct rte_flow_error *error);
+static int igc_parse_pattern_tcp(const struct rte_flow_item *item,
+               struct igc_all_filter *filter, struct rte_flow_error *error);
+
+static igc_pattern_parse pattern_parse_list[] = {
+               [RTE_FLOW_ITEM_TYPE_VOID] = igc_parse_pattern_void,
+               [RTE_FLOW_ITEM_TYPE_ETH] = igc_parse_pattern_ether,
+               [RTE_FLOW_ITEM_TYPE_IPV4] = igc_parse_pattern_ip,
+               [RTE_FLOW_ITEM_TYPE_IPV6] = igc_parse_pattern_ipv6,
+               [RTE_FLOW_ITEM_TYPE_UDP] = igc_parse_pattern_udp,
+               [RTE_FLOW_ITEM_TYPE_TCP] = igc_parse_pattern_tcp,
+};
+
+/* Parse rule patterns */
+static int
+igc_parse_patterns(const struct rte_flow_item patterns[],
+       struct igc_all_filter *filter, struct rte_flow_error *error)
+{
+       const struct rte_flow_item *item = patterns;
+
+       if (item == NULL) {
+               /* only RSS filter match this pattern */
+               IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_RSS);
+               return 0;
+       }
+
+       for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+               int ret;
+
+               if (item->type >= RTE_DIM(pattern_parse_list))
+                       return rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                       "Not supported");
+
+               if (item->last)
+                       return rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
+                                       "Range not supported");
+
+               /* check pattern format is valid */
+               if (!!item->spec ^ !!item->mask)
+                       return rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                       "Format error");
+
+               /* get the pattern type callback */
+               igc_pattern_parse parse_func =
+                               pattern_parse_list[item->type];
+               if (!parse_func)
+                       return rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                       "Not supported");
+
+               /* call the pattern type function */
+               ret = parse_func(item, filter, error);
+               if (ret)
+                       return ret;
+
+               /* if no filter match the pattern */
+               if (filter->mask == 0)
+                       return rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                       "Not supported");
+       }
+
+       return 0;
+}
+
+static int igc_parse_action_queue(struct rte_eth_dev *dev,
+               const struct rte_flow_action *act,
+               struct igc_all_filter *filter, struct rte_flow_error *error);
+static int igc_parse_action_rss(struct rte_eth_dev *dev,
+               const struct rte_flow_action *act,
+               struct igc_all_filter *filter, struct rte_flow_error *error);
+
+/* Parse flow actions */
+static int
+igc_parse_actions(struct rte_eth_dev *dev,
+               const struct rte_flow_action actions[],
+               struct igc_all_filter *filter,
+               struct rte_flow_error *error)
+{
+       const struct rte_flow_action *act = actions;
+       int ret;
+
+       if (act == NULL)
+               return rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ACTION_NUM, act,
+                               "Action is needed");
+
+       for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
+               switch (act->type) {
+               case RTE_FLOW_ACTION_TYPE_QUEUE:
+                       ret = igc_parse_action_queue(dev, act, filter, error);
+                       if (ret)
+                               return ret;
+                       break;
+               case RTE_FLOW_ACTION_TYPE_RSS:
+                       ret = igc_parse_action_rss(dev, act, filter, error);
+                       if (ret)
+                               return ret;
+                       break;
+               case RTE_FLOW_ACTION_TYPE_VOID:
+                       break;
+               default:
+                       return rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ACTION, act,
+                                       "Not supported");
+               }
+
+               /* if no filter match the action */
+               if (filter->mask == 0)
+                       return rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ACTION, act,
+                                       "Not supported");
+       }
+
+       return 0;
+}
+
+/* Parse a flow rule */
+static int
+igc_parse_flow(struct rte_eth_dev *dev,
+               const struct rte_flow_attr *attr,
+               const struct rte_flow_item patterns[],
+               const struct rte_flow_action actions[],
+               struct rte_flow_error *error,
+               struct igc_all_filter *filter)
+{
+       int ret;
+
+       /* clear all filters */
+       memset(filter, 0, sizeof(*filter));
+
+       /* set default filter mask */
+       filter->mask = IGC_FILTER_MASK_ALL;
+
+       ret = igc_parse_attribute(attr, filter, error);
+       if (ret)
+               return ret;
+
+       ret = igc_parse_patterns(patterns, filter, error);
+       if (ret)
+               return ret;
+
+       ret = igc_parse_actions(dev, actions, filter, error);
+       if (ret)
+               return ret;
+
+       /* if no or more than one filter matched this flow */
+       if (filter->mask == 0 || (filter->mask & (filter->mask - 1)))
+               return rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+                               "Flow can't be recognized");
+       return 0;
+}
+
+/* Parse pattern type of void */
+static int
+igc_parse_pattern_void(__rte_unused const struct rte_flow_item *item,
+               __rte_unused struct igc_all_filter *filter,
+               __rte_unused struct rte_flow_error *error)
+{
+       return 0;
+}
+
+/* Parse pattern type of ethernet header */
+static int
+igc_parse_pattern_ether(const struct rte_flow_item *item,
+               struct igc_all_filter *filter,
+               struct rte_flow_error *error)
+{
+       const struct rte_flow_item_eth *spec = item->spec;
+       const struct rte_flow_item_eth *mask = item->mask;
+       struct igc_ethertype_filter *ether;
+
+       if (mask == NULL) {
+               /* only n-tuple and SYN filter match the pattern */
+               IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE |
+                               IGC_FILTER_MASK_TCP_SYN);
+               return 0;
+       }
+
+       /* only the ether-type filter matches this pattern */
+       IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_ETHER);
+
+       /* destination and source MAC address are not supported */
+       if (!rte_is_zero_ether_addr(&mask->src) ||
+               !rte_is_zero_ether_addr(&mask->dst))
+               return rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
+                               "Only support ether-type");
+
+       /* ether-type mask bits must be all 1 */
+       if (IGC_NOT_ALL_BITS_SET(mask->type))
+               return rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
+                               "Ethernet type mask bits must be all 1");
+
+       ether = &filter->ethertype;
+
+       /* get ether-type */
+       ether->ether_type = rte_be_to_cpu_16(spec->type);
+
+       /* ether-type must not be IPv4, IPv6 or 0 */
+       if (ether->ether_type == RTE_ETHER_TYPE_IPV4 ||
+               ether->ether_type == RTE_ETHER_TYPE_IPV6 ||
+               ether->ether_type == 0)
+               return rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+                       "IPv4/IPv6/0 not supported by ethertype filter");
+       return 0;
+}
+
+/* Parse pattern type of IP */
+static int
+igc_parse_pattern_ip(const struct rte_flow_item *item,
+               struct igc_all_filter *filter,
+               struct rte_flow_error *error)
+{
+       const struct rte_flow_item_ipv4 *spec = item->spec;
+       const struct rte_flow_item_ipv4 *mask = item->mask;
+
+       if (mask == NULL) {
+               /* only n-tuple and SYN filter match this pattern */
+               IGC_SET_FILTER_MASK(filter,
+                       IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN);
+               return 0;
+       }
+
+       /* only n-tuple filter match this pattern */
+       IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);
+
+       /* only protocol is used */
+       if (mask->hdr.version_ihl ||
+               mask->hdr.type_of_service ||
+               mask->hdr.total_length ||
+               mask->hdr.packet_id ||
+               mask->hdr.fragment_offset ||
+               mask->hdr.time_to_live ||
+               mask->hdr.hdr_checksum ||
+               mask->hdr.dst_addr ||
+               mask->hdr.src_addr)
+               return rte_flow_error_set(error,
+                       EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
+                       "IPv4 only supports protocol");
+
+       if (mask->hdr.next_proto_id == 0)
+               return 0;
+
+       if (IGC_NOT_ALL_BITS_SET(mask->hdr.next_proto_id))
+               return rte_flow_error_set(error,
+                               EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
+                               "IPv4 protocol mask bits must be all 0 or 1");
+
+       /* get protocol type */
+       filter->ntuple.tuple_info.proto_mask = 1;
+       filter->ntuple.tuple_info.proto = spec->hdr.next_proto_id;
+       return 0;
+}
+
+/*
+ * Check whether the IPv6 address is all zeros.
+ * Return true if it is, false otherwise.
+ */
+static inline bool
+igc_is_zero_ipv6_addr(const void *ipv6_addr)
+{
+       const uint64_t *ddw = ipv6_addr;
+       return ddw[0] == 0 && ddw[1] == 0;
+}
+
+/* Parse pattern type of IPv6 */
+static int
+igc_parse_pattern_ipv6(const struct rte_flow_item *item,
+               struct igc_all_filter *filter,
+               struct rte_flow_error *error)
+{
+       const struct rte_flow_item_ipv6 *spec = item->spec;
+       const struct rte_flow_item_ipv6 *mask = item->mask;
+
+       if (mask == NULL) {
+               /* only n-tuple and syn filter match this pattern */
+               IGC_SET_FILTER_MASK(filter,
+                       IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN);
+               return 0;
+       }
+
+       /* only n-tuple filter match this pattern */
+       IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);
+
+       /* only protocol is used */
+       if (mask->hdr.vtc_flow ||
+               mask->hdr.payload_len ||
+               mask->hdr.hop_limits ||
+               !igc_is_zero_ipv6_addr(mask->hdr.src_addr) ||
+               !igc_is_zero_ipv6_addr(mask->hdr.dst_addr))
+               return rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM, item,
+                               "IPv6 only supports protocol");
+
+       if (mask->hdr.proto == 0)
+               return 0;
+
+       if (IGC_NOT_ALL_BITS_SET(mask->hdr.proto))
+               return rte_flow_error_set(error,
+                               EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
+                               "IPv6 protocol mask bits must be all 0 or 1");
+
+       /* get protocol type */
+       filter->ntuple.tuple_info.proto_mask = 1;
+       filter->ntuple.tuple_info.proto = spec->hdr.proto;
+
+       return 0;
+}
+
+/* Parse pattern type of UDP */
+static int
+igc_parse_pattern_udp(const struct rte_flow_item *item,
+               struct igc_all_filter *filter,
+               struct rte_flow_error *error)
+{
+       const struct rte_flow_item_udp *spec = item->spec;
+       const struct rte_flow_item_udp *mask = item->mask;
+
+       /* only n-tuple filter match this pattern */
+       IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);
+
+       if (mask == NULL)
+               return 0;
+
+       /* only destination port is used */
+       if (mask->hdr.dgram_len || mask->hdr.dgram_cksum || mask->hdr.src_port)
+               return rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
+                       "UDP only supports destination port");
+
+       if (mask->hdr.dst_port == 0)
+               return 0;
+
+       if (IGC_NOT_ALL_BITS_SET(mask->hdr.dst_port))
+               return rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
+                               "UDP port mask bits must be all 0 or 1");
+
+       /* get destination port info. */
+       filter->ntuple.tuple_info.dst_port_mask = 1;
+       filter->ntuple.tuple_info.dst_port = spec->hdr.dst_port;
+
+       return 0;
+}
+
+/* Parse pattern type of TCP */
+static int
+igc_parse_pattern_tcp(const struct rte_flow_item *item,
+               struct igc_all_filter *filter,
+               struct rte_flow_error *error)
+{
+       const struct rte_flow_item_tcp *spec = item->spec;
+       const struct rte_flow_item_tcp *mask = item->mask;
+       struct igc_ntuple_info *tuple_info = &filter->ntuple.tuple_info;
+
+       if (mask == NULL) {
+               /* only n-tuple filter match this pattern */
+               IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);
+               return 0;
+       }
+
+       /* only n-tuple and SYN filter match this pattern */
+       IGC_SET_FILTER_MASK(filter,
+                       IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN);
+
+       /* only destination port and TCP flags are used */
+       if (mask->hdr.sent_seq ||
+               mask->hdr.recv_ack ||
+               mask->hdr.data_off ||
+               mask->hdr.rx_win ||
+               mask->hdr.cksum ||
+               mask->hdr.tcp_urp ||
+               mask->hdr.src_port)
+               return rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
+                       "TCP only supports destination port and flags");
+
+       /* if destination port is used */
+       if (mask->hdr.dst_port) {
+               /* only n-tuple match this pattern */
+               IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);
+
+               if (IGC_NOT_ALL_BITS_SET(mask->hdr.dst_port))
+                       return rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
+                               "TCP port mask bits must be all 1");
+
+               /* get destination port info. */
+               tuple_info->dst_port = spec->hdr.dst_port;
+               tuple_info->dst_port_mask = 1;
+       }
+
+       /* if TCP flags are used */
+       if (mask->hdr.tcp_flags) {
+               if (IGC_IS_ALL_BITS_SET(mask->hdr.tcp_flags)) {
+                       /* only n-tuple match this pattern */
+                       IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);
+
+                       /* get TCP flags */
+                       tuple_info->tcp_flags = spec->hdr.tcp_flags;
+               } else if (mask->hdr.tcp_flags == RTE_TCP_SYN_FLAG) {
+                       /* only TCP SYN filter match this pattern */
+                       IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_TCP_SYN);
+               } else {
+                       /* no filter match this pattern */
+                       return rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
+                                       "TCP flags can't match");
+               }
+       } else {
+               /* only n-tuple match this pattern */
+               IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);
+       }
+
+       return 0;
+}
+
+static int
+igc_parse_action_queue(struct rte_eth_dev *dev,
+               const struct rte_flow_action *act,
+               struct igc_all_filter *filter,
+               struct rte_flow_error *error)
+{
+       uint16_t queue_idx;
+
+       if (act->conf == NULL)
+               return rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+                               "NULL pointer");
+
+       /* only ether-type, n-tuple, SYN filter match the action */
+       IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_ETHER |
+                       IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN);
+
+       /* get queue index */
+       queue_idx = ((const struct rte_flow_action_queue *)act->conf)->index;
+
+       /* check the queue index is valid */
+       if (queue_idx >= dev->data->nb_rx_queues)
+               return rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+                               "Queue id is invalid");
+
+       /* get queue info. */
+       filter->ethertype.queue = queue_idx;
+       filter->ntuple.queue = queue_idx;
+       filter->syn.queue = queue_idx;
+       return 0;
+}
+
+/* Parse action of RSS */
+static int
+igc_parse_action_rss(struct rte_eth_dev *dev,
+               const struct rte_flow_action *act,
+               struct igc_all_filter *filter,
+               struct rte_flow_error *error)
+{
+       const struct rte_flow_action_rss *rss = act->conf;
+       uint32_t i;
+
+       if (act->conf == NULL)
+               return rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+                               "NULL pointer");
+
+       /* only RSS match the action */
+       IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_RSS);
+
+       /* the queue count can't be zero and can't exceed the 128-entry RETA */
+       if (!rss || !rss->queue_num || rss->queue_num > IGC_RSS_RDT_SIZD)
+               return rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+                               "No valid queues");
+
+       /* queue index can't exceed max queue index */
+       for (i = 0; i < rss->queue_num; i++) {
+               if (rss->queue[i] >= dev->data->nb_rx_queues)
+                       return rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+                                       "Queue id is invalid");
+       }
+
+       /* only default RSS hash function is supported */
+       if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+               return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+                               "Only the default RSS hash function is supported");
+
+       if (rss->level)
+               return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+                               "Only RSS encapsulation level 0 is supported");
+
+       /* check key length is valid */
+       if (rss->key_len && rss->key_len != sizeof(filter->rss.key))
+               return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+                               "RSS hash key must be exactly 40 bytes");
+
+       /* get RSS info. */
+       igc_rss_conf_set(&filter->rss, rss);
+       return 0;
+}
+
+/**
+ * Allocate a rte_flow from the heap
+ * Return a pointer to the flow, or NULL on failure
+ **/
+static inline struct rte_flow *
+igc_alloc_flow(const void *filter, enum igc_filter_type type, uint inbytes)
+{
+       /* allocate memory, 8 bytes boundary aligned */
+       struct rte_flow *flow = rte_malloc("igc flow filter",
+                       sizeof(struct rte_flow) + inbytes, 8);
+       if (flow == NULL) {
+               PMD_DRV_LOG(ERR, "failed to allocate memory");
+               return NULL;
+       }
+
+       flow->filter_type = type;
+
+       /* copy filter data */
+       memcpy(flow->filter, filter, inbytes);
+       return flow;
+}
+
+/* Append a rte_flow to the list */
+static inline void
+igc_append_flow(struct igc_flow_list *list, struct rte_flow *flow)
+{
+       TAILQ_INSERT_TAIL(list, flow, node);
+}
+
+/**
+ * Remove the flow and free the flow buffer
+ * The caller should make sure the flow really exists in the list
+ **/
+static inline void
+igc_remove_flow(struct igc_flow_list *list, struct rte_flow *flow)
+{
+       TAILQ_REMOVE(list, flow, node);
+       rte_free(flow);
+}
+
+/* Check whether the flow is really in the list or not */
+static inline bool
+igc_is_flow_in_list(struct igc_flow_list *list, struct rte_flow *flow)
+{
+       struct rte_flow *it;
+
+       TAILQ_FOREACH(it, list, node) {
+               if (it == flow)
+                       return true;
+       }
+
+       return false;
+}
+
+/**
+ * Create a flow rule.
+ * Theoretically one rule can match more than one filter type.
+ * The first filter type it hits is used, so the order of the
+ * cases below matters.
+ **/
+static struct rte_flow *
+igc_flow_create(struct rte_eth_dev *dev,
+               const struct rte_flow_attr *attr,
+               const struct rte_flow_item patterns[],
+               const struct rte_flow_action actions[],
+               struct rte_flow_error *error)
+{
+       struct rte_flow *flow = NULL;
+       struct igc_all_filter filter;
+       int ret;
+
+       ret = igc_parse_flow(dev, attr, patterns, actions, error, &filter);
+       if (ret)
+               return NULL;
+       ret = -ENOMEM;
+
+       switch (filter.mask) {
+       case IGC_FILTER_MASK_ETHER:
+               flow = igc_alloc_flow(&filter.ethertype,
+                               IGC_FILTER_TYPE_ETHERTYPE,
+                               sizeof(filter.ethertype));
+               if (flow)
+                       ret = igc_add_ethertype_filter(dev, &filter.ethertype);
+               break;
+       case IGC_FILTER_MASK_NTUPLE:
+               /* Check n-tuple filter is valid */
+               if (filter.ntuple.tuple_info.dst_port_mask == 0 &&
+                       filter.ntuple.tuple_info.proto_mask == 0) {
+                       rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_NONE, NULL,
+                                       "Flow can't be recognized");
+                       return NULL;
+               }
+
+               flow = igc_alloc_flow(&filter.ntuple, IGC_FILTER_TYPE_NTUPLE,
+                               sizeof(filter.ntuple));
+               if (flow)
+                       ret = igc_add_ntuple_filter(dev, &filter.ntuple);
+               break;
+       case IGC_FILTER_MASK_TCP_SYN:
+               flow = igc_alloc_flow(&filter.syn, IGC_FILTER_TYPE_SYN,
+                               sizeof(filter.syn));
+               if (flow)
+                       ret = igc_set_syn_filter(dev, &filter.syn);
+               break;
+       case IGC_FILTER_MASK_RSS:
+               flow = igc_alloc_flow(&filter.rss, IGC_FILTER_TYPE_HASH,
+                               sizeof(filter.rss));
+               if (flow) {
+                       struct igc_rss_filter *rss =
+                                       (struct igc_rss_filter *)flow->filter;
+                       rss->conf.key = rss->key;
+                       rss->conf.queue = rss->queue;
+                       ret = igc_add_rss_filter(dev, &filter.rss);
+               }
+               break;
+       default:
+               rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_NONE, NULL,
+                               "Flow can't be recognized");
+               return NULL;
+       }
+
+       if (ret) {
+               /* check and free the memory */
+               if (flow)
+                       rte_free(flow);
+
+               rte_flow_error_set(error, -ret,
+                               RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                               "Failed to create flow.");
+               return NULL;
+       }
+
+       /* append the flow to the tail of the list */
+       igc_append_flow(IGC_DEV_PRIVATE_FLOW_LIST(dev), flow);
+       return flow;
+}
+
+/**
+ * Check if the flow rule is supported by the device.
+ * It only checks the format; it does not guarantee that the rule can be
+ * programmed into the HW, because there may not be enough room for it.
+ **/
+static int
+igc_flow_validate(struct rte_eth_dev *dev,
+               const struct rte_flow_attr *attr,
+               const struct rte_flow_item patterns[],
+               const struct rte_flow_action actions[],
+               struct rte_flow_error *error)
+{
+       struct igc_all_filter filter;
+       int ret;
+
+       ret = igc_parse_flow(dev, attr, patterns, actions, error, &filter);
+       if (ret)
+               return ret;
+
+       switch (filter.mask) {
+       case IGC_FILTER_MASK_NTUPLE:
+               /* Check n-tuple filter is valid */
+               if (filter.ntuple.tuple_info.dst_port_mask == 0 &&
+                       filter.ntuple.tuple_info.proto_mask == 0)
+                       return rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_NONE, NULL,
+                                       "Flow can't be recognized");
+               break;
+       }
+
+       return 0;
+}
+
+/**
+ * Disable a valid flow; the flow must not be NULL and must be
+ * linked in the device flow list.
+ **/
+static int
+igc_disable_flow(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+       int ret = 0;
+
+       switch (flow->filter_type) {
+       case IGC_FILTER_TYPE_ETHERTYPE:
+               ret = igc_del_ethertype_filter(dev,
+                       (struct igc_ethertype_filter *)&flow->filter);
+               break;
+       case IGC_FILTER_TYPE_NTUPLE:
+               ret = igc_del_ntuple_filter(dev,
+                               (struct igc_ntuple_filter *)&flow->filter);
+               break;
+       case IGC_FILTER_TYPE_SYN:
+               igc_clear_syn_filter(dev);
+               break;
+       case IGC_FILTER_TYPE_HASH:
+               ret = igc_del_rss_filter(dev);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Filter type (%d) not supported",
+                               flow->filter_type);
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+
+/* Destroy a flow rule */
+static int
+igc_flow_destroy(struct rte_eth_dev *dev,
+               struct rte_flow *flow,
+               struct rte_flow_error *error)
+{
+       struct igc_flow_list *list = IGC_DEV_PRIVATE_FLOW_LIST(dev);
+       int ret;
+
+       if (!flow) {
+               PMD_DRV_LOG(ERR, "NULL flow!");
+               return -EINVAL;
+       }
+
+       /* check that the flow was created by the IGC PMD */
+       if (!igc_is_flow_in_list(list, flow)) {
+               PMD_DRV_LOG(ERR, "Flow(%p) not found!", flow);
+               return -ENOENT;
+       }
+
+       ret = igc_disable_flow(dev, flow);
+       if (ret)
+               rte_flow_error_set(error, -ret,
+                               RTE_FLOW_ERROR_TYPE_HANDLE,
+                               NULL, "Failed to destroy flow");
+
+       igc_remove_flow(list, flow);
+       return ret;
+}
+
+/* Initialize the device flow list header */
+void
+igc_flow_init(struct rte_eth_dev *dev)
+{
+       TAILQ_INIT(IGC_DEV_PRIVATE_FLOW_LIST(dev));
+}
+
+/* Destroy all flows in the list and free their memory */
+int
+igc_flow_flush(struct rte_eth_dev *dev,
+               __rte_unused struct rte_flow_error *error)
+{
+       struct igc_flow_list *list = IGC_DEV_PRIVATE_FLOW_LIST(dev);
+       struct rte_flow *flow;
+
+       while ((flow = TAILQ_FIRST(list)) != NULL) {
+               igc_disable_flow(dev, flow);
+               igc_remove_flow(list, flow);
+       }
+
+       return 0;
+}
+
+const struct rte_flow_ops igc_flow_ops = {
+       .validate = igc_flow_validate,
+       .create = igc_flow_create,
+       .destroy = igc_flow_destroy,
+       .flush = igc_flow_flush,
+};
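As a usage note for the attribute parsing in this file: the rule priority selects between the two filter types that support it. Values 2-7 force the 2-tuple (n-tuple) filter, the all-ones priority forces the SYN filter with its high-priority bit set, and 0 or 1 leaves both candidates open (1 also marks the SYN filter high priority). For illustration (assuming testpmd's documented priority attribute syntax), a 2-tuple rule pinned to hardware priority 3 could be requested as:

   testpmd> flow create 0 priority 3 ingress pattern eth / ipv4 proto is 17 / udp dst is 53 / end actions queue index 2 / end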
diff --git a/drivers/net/igc/igc_flow.h b/drivers/net/igc/igc_flow.h
new file mode 100644 (file)
index 0000000..310b4bd
--- /dev/null
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2020 Intel Corporation
+ */
+
+#ifndef _IGC_FLOW_H_
+#define _IGC_FLOW_H_
+
+#include <rte_flow_driver.h>
+#include "igc_ethdev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern const struct rte_flow_ops igc_flow_ops;
+
+void igc_flow_init(struct rte_eth_dev *dev);
+int igc_flow_flush(struct rte_eth_dev *dev,
+               __rte_unused struct rte_flow_error *error);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _IGC_FLOW_H_ */
index 0bdc3f4..5b269b6 100644 (file)
@@ -836,7 +836,7 @@ static uint8_t default_rss_key[40] = {
        0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
 };
 
-static void
+void
 igc_rss_disable(struct rte_eth_dev *dev)
 {
        struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
@@ -917,6 +917,137 @@ igc_rss_configure(struct rte_eth_dev *dev)
        igc_hw_rss_hash_set(hw, &rss_conf);
 }
 
+int
+igc_del_rss_filter(struct rte_eth_dev *dev)
+{
+       struct igc_rss_filter *rss_filter = IGC_DEV_PRIVATE_RSS_FILTER(dev);
+
+       if (rss_filter->enable) {
+               /* recover default RSS configuration */
+               igc_rss_configure(dev);
+
+               /* disable RSS logic and clear filter data */
+               igc_rss_disable(dev);
+               memset(rss_filter, 0, sizeof(*rss_filter));
+               return 0;
+       }
+       PMD_DRV_LOG(ERR, "filter does not exist!");
+       return -ENOENT;
+}
+
+/* Initialize the filter structure from a rte_flow_action_rss structure */
+void
+igc_rss_conf_set(struct igc_rss_filter *out,
+               const struct rte_flow_action_rss *rss)
+{
+       out->conf.func = rss->func;
+       out->conf.level = rss->level;
+       out->conf.types = rss->types;
+
+       if (rss->key_len == sizeof(out->key)) {
+               memcpy(out->key, rss->key, rss->key_len);
+               out->conf.key = out->key;
+               out->conf.key_len = rss->key_len;
+       } else {
+               out->conf.key = NULL;
+               out->conf.key_len = 0;
+       }
+
+       if (rss->queue_num <= IGC_RSS_RDT_SIZD) {
+               memcpy(out->queue, rss->queue,
+                       sizeof(*out->queue) * rss->queue_num);
+               out->conf.queue = out->queue;
+               out->conf.queue_num = rss->queue_num;
+       } else {
+               out->conf.queue = NULL;
+               out->conf.queue_num = 0;
+       }
+}
+
+int
+igc_add_rss_filter(struct rte_eth_dev *dev, struct igc_rss_filter *rss)
+{
+       struct rte_eth_rss_conf rss_conf = {
+               .rss_key = rss->conf.key_len ?
+                       (void *)(uintptr_t)rss->conf.key : NULL,
+               .rss_key_len = rss->conf.key_len,
+               .rss_hf = rss->conf.types,
+       };
+       struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+       struct igc_rss_filter *rss_filter = IGC_DEV_PRIVATE_RSS_FILTER(dev);
+       uint32_t i, j;
+
+       /* check RSS type is valid */
+       if ((rss_conf.rss_hf & IGC_RSS_OFFLOAD_ALL) == 0) {
+               PMD_DRV_LOG(ERR,
+                       "RSS type (0x%" PRIx64 ") error! Only 0x%" PRIx64
+                       " is supported", rss_conf.rss_hf,
+                       (uint64_t)IGC_RSS_OFFLOAD_ALL);
+               return -EINVAL;
+       }
+
+       /* check queue count is not zero */
+       if (!rss->conf.queue_num) {
+               PMD_DRV_LOG(ERR, "Queue number should not be 0!");
+               return -EINVAL;
+       }
+
+       /* check queue id is valid */
+       for (i = 0; i < rss->conf.queue_num; i++)
+               if (rss->conf.queue[i] >= dev->data->nb_rx_queues) {
+                       PMD_DRV_LOG(ERR, "Queue id %u is invalid!",
+                                       rss->conf.queue[i]);
+                       return -EINVAL;
+               }
+
+       /* only support one filter */
+       if (rss_filter->enable) {
+               PMD_DRV_LOG(ERR, "Only one RSS filter is supported!");
+               return -ENOTSUP;
+       }
+       rss_filter->enable = 1;
+
+       igc_rss_conf_set(rss_filter, &rss->conf);
+
+       /* Fill in redirection table. */
+       for (i = 0, j = 0; i < IGC_RSS_RDT_SIZD; i++, j++) {
+               union igc_rss_reta_reg reta;
+               uint16_t q_idx, reta_idx;
+
+               if (j == rss->conf.queue_num)
+                       j = 0;
+               q_idx = rss->conf.queue[j];
+               reta_idx = i % sizeof(reta);
+               reta.bytes[reta_idx] = q_idx;
+               if (reta_idx == sizeof(reta) - 1)
+                       IGC_WRITE_REG_LE_VALUE(hw,
+                               IGC_RETA(i / sizeof(reta)), reta.dword);
+       }
+
+       if (rss_conf.rss_key == NULL)
+               rss_conf.rss_key = default_rss_key;
+       igc_hw_rss_hash_set(hw, &rss_conf);
+       return 0;
+}
+
+void
+igc_clear_rss_filter(struct rte_eth_dev *dev)
+{
+       struct igc_rss_filter *rss_filter = IGC_DEV_PRIVATE_RSS_FILTER(dev);
+
+       if (!rss_filter->enable) {
+               PMD_DRV_LOG(WARNING, "RSS filter not enabled!");
+               return;
+       }
+
+       /* recover default RSS configuration */
+       igc_rss_configure(dev);
+
+       /* disable RSS logic and clear filter data */
+       igc_rss_disable(dev);
+       memset(rss_filter, 0, sizeof(*rss_filter));
+}
+
 static int
 igc_dev_mq_rx_configure(struct rte_eth_dev *dev)
 {
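igc_add_rss_filter() above fills the redirection table round-robin from the queues given in the RSS action, packing four 8-bit entries into each 32-bit RETA register write. A standalone sketch of that fill pattern (illustrative only; the 128-entry table size and the 4-queue list are assumptions taken from the code above):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint16_t queues[] = { 0, 1, 2, 3 };   /* rss->conf.queue[] */
            const uint32_t nb_q = 4;                    /* rss->conf.queue_num */
            uint8_t reta[128];                          /* IGC_RSS_RDT_SIZD entries assumed */
            uint32_t i;

            /* round-robin queue assignment, as in the loop of igc_add_rss_filter() */
            for (i = 0; i < 128; i++)
                    reta[i] = (uint8_t)queues[i % nb_q];

            /* every four entries form one 32-bit RETA register value */
            for (i = 0; i < 128; i += 4)
                    printf("RETA[%2u] = %u %u %u %u\n", i / 4,
                           reta[i], reta[i + 1], reta[i + 2], reta[i + 3]);
            return 0;
    }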
index dbda56c..f2b2d75 100644 (file)
@@ -38,8 +38,14 @@ int eth_igc_tx_done_cleanup(void *txqueue, uint32_t free_cnt);
 
 int igc_rx_init(struct rte_eth_dev *dev);
 void igc_tx_init(struct rte_eth_dev *dev);
+void igc_rss_disable(struct rte_eth_dev *dev);
 void
 igc_hw_rss_hash_set(struct igc_hw *hw, struct rte_eth_rss_conf *rss_conf);
+int igc_del_rss_filter(struct rte_eth_dev *dev);
+void igc_rss_conf_set(struct igc_rss_filter *out,
+               const struct rte_flow_action_rss *rss);
+int igc_add_rss_filter(struct rte_eth_dev *dev, struct igc_rss_filter *rss);
+void igc_clear_rss_filter(struct rte_eth_dev *dev);
 void eth_igc_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_rxq_info *qinfo);
 void eth_igc_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
index e402f26..fba119c 100644 (file)
@@ -7,7 +7,9 @@ objs = [base_objs]
 sources = files(
        'igc_logs.c',
        'igc_ethdev.c',
-       'igc_txrx.c'
+       'igc_txrx.c',
+       'igc_filter.c',
+       'igc_flow.c'
 )
 
 includes += include_directories('base')