net/ice: enable flow director engine
author Beilei Xing <beilei.xing@intel.com>
Fri, 18 Oct 2019 11:15:54 +0000 (19:15 +0800)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Wed, 23 Oct 2019 14:43:10 +0000 (16:43 +0200)
Enable the flow director engine, covering both initialization and teardown:
 - Control VSI creation and release.
 - Queue pair allocation, setup and release.
 - Programming packet creation and release.
 - FDIR profile creation and release.
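
For context: the engine added below plugs into the ice PMD's generic flow
framework, so flow director rules are ultimately expressed through the
standard rte_flow API. A minimal, hypothetical sketch of the kind of rule
this infrastructure serves (rule creation itself lands in follow-up
patches; this commit only wires up init/teardown):

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Hypothetical example: steer IPv4/UDP packets with destination port 5000
 * to RX queue 3. The port id, UDP port and queue index are made up.
 */
static int
fdir_flow_example(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_udp udp_spec = {
		.hdr = { .dst_port = rte_cpu_to_be_16(5000) },
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr = { .dst_port = RTE_BE16(0xffff) },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err) ?
		0 : -1;
}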

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
Reviewed-by: Xiaolong Ye <xiaolong.ye@intel.com>
drivers/net/ice/Makefile
drivers/net/ice/ice_ethdev.c
drivers/net/ice/ice_ethdev.h
drivers/net/ice/ice_fdir_filter.c [new file with mode: 0644]
drivers/net/ice/ice_rxtx.c
drivers/net/ice/ice_rxtx.h
drivers/net/ice/meson.build

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index 4a279f1..f7e1852 100644
@@ -62,6 +62,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c
 endif
 
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_fdir_filter.c
 ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
        CC_AVX2_SUPPORT=1
 else
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index dc4d1e6..30ea3e8 100644
@@ -1417,10 +1417,20 @@ ice_pf_sw_init(struct rte_eth_dev *dev)
 
        ice_init_proto_xtr(dev);
 
+       if (hw->func_caps.fd_fltr_guar > 0 ||
+           hw->func_caps.fd_fltr_best_effort > 0) {
+               pf->flags |= ICE_FLAG_FDIR;
+               pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR;
+               pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps;
+       } else {
+               pf->fdir_nb_qps = 0;
+       }
+       pf->fdir_qp_offset = 0;
+
        return 0;
 }
 
-static struct ice_vsi *
+struct ice_vsi *
 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
 {
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
@@ -1432,6 +1442,7 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
        struct rte_ether_addr mac_addr;
        uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
        uint8_t tc_bitmap = 0x1;
+       uint16_t cfg;
 
        /* hw->num_lports = 1 in NIC mode */
        vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
@@ -1455,14 +1466,10 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
        pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
 
        memset(&vsi_ctx, 0, sizeof(vsi_ctx));
-       /* base_queue in used in queue mapping of VSI add/update command.
-        * Suppose vsi->base_queue is 0 now, don't consider SRIOV, VMDQ
-        * cases in the first stage. Only Main VSI.
-        */
-       vsi->base_queue = 0;
        switch (type) {
        case ICE_VSI_PF:
                vsi->nb_qps = pf->lan_nb_qps;
+               vsi->base_queue = 1;
                ice_vsi_config_default_rss(&vsi_ctx.info);
                vsi_ctx.alloc_from_pool = true;
                vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
@@ -1476,6 +1483,18 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
                vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
                vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
                                         ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
+
+               /* FDIR */
+               cfg = ICE_AQ_VSI_PROP_SECURITY_VALID |
+                       ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
+               vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
+               cfg = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
+               vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
+               vsi_ctx.info.max_fd_fltr_dedicated =
+                       rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);
+               vsi_ctx.info.max_fd_fltr_shared =
+                       rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort);
+
                /* Enable VLAN/UP trip */
                ret = ice_vsi_config_tc_queue_mapping(vsi,
                                                      &vsi_ctx.info,
@@ -1488,6 +1507,28 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
                        goto fail_mem;
                }
 
+               break;
+       case ICE_VSI_CTRL:
+               vsi->nb_qps = pf->fdir_nb_qps;
+               vsi->base_queue = ICE_FDIR_QUEUE_ID;
+               vsi_ctx.alloc_from_pool = true;
+               vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
+
+               cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
+               vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
+               cfg = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
+               vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
+               vsi_ctx.info.sw_id = hw->port_info->sw_id;
+               ret = ice_vsi_config_tc_queue_mapping(vsi,
+                                                     &vsi_ctx.info,
+                                                     ICE_DEFAULT_TCMAP);
+               if (ret) {
+                       PMD_INIT_LOG(ERR,
+                                    "tc queue mapping with vsi failed, "
+                                    "err = %d",
+                                    ret);
+                       goto fail_mem;
+               }
                break;
        default:
                /* for other types of VSI */
@@ -1506,6 +1547,14 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
                }
                vsi->msix_intr = ret;
                vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
+       } else if (type == ICE_VSI_CTRL) {
+               ret = ice_res_pool_alloc(&pf->msix_pool, 1);
+               if (ret < 0) {
+                       PMD_DRV_LOG(ERR, "VSI %d get heap failed %d",
+                                   vsi->vsi_id, ret);
+               }
+               vsi->msix_intr = ret;
+               vsi->nb_msix = 1;
        } else {
                vsi->msix_intr = 0;
                vsi->nb_msix = 0;
@@ -1521,20 +1570,22 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
        pf->vsis_allocated = vsi_ctx.vsis_allocd;
        pf->vsis_unallocated = vsi_ctx.vsis_unallocated;
 
-       /* MAC configuration */
-       rte_memcpy(pf->dev_addr.addr_bytes,
-                  hw->port_info->mac.perm_addr,
-                  ETH_ADDR_LEN);
+       if (type == ICE_VSI_PF) {
+               /* MAC configuration */
+               rte_memcpy(pf->dev_addr.addr_bytes,
+                          hw->port_info->mac.perm_addr,
+                          ETH_ADDR_LEN);
 
-       rte_memcpy(&mac_addr, &pf->dev_addr, RTE_ETHER_ADDR_LEN);
-       ret = ice_add_mac_filter(vsi, &mac_addr);
-       if (ret != ICE_SUCCESS)
-               PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
+               rte_memcpy(&mac_addr, &pf->dev_addr, RTE_ETHER_ADDR_LEN);
+               ret = ice_add_mac_filter(vsi, &mac_addr);
+               if (ret != ICE_SUCCESS)
+                       PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
 
-       rte_memcpy(&mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
-       ret = ice_add_mac_filter(vsi, &mac_addr);
-       if (ret != ICE_SUCCESS)
-               PMD_INIT_LOG(ERR, "Failed to add MAC filter");
+               rte_memcpy(&mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
+               ret = ice_add_mac_filter(vsi, &mac_addr);
+               if (ret != ICE_SUCCESS)
+                       PMD_INIT_LOG(ERR, "Failed to add MAC filter");
+       }
 
        /* At the beginning, only TC0. */
 * What we need here is the maximum number of TX queues.
@@ -1572,7 +1623,9 @@ ice_send_driver_ver(struct ice_hw *hw)
 static int
 ice_pf_setup(struct ice_pf *pf)
 {
+       struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_vsi *vsi;
+       uint16_t unused;
 
        /* Clear all stats counters */
        pf->offset_loaded = FALSE;
@@ -1581,6 +1634,13 @@ ice_pf_setup(struct ice_pf *pf)
        memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
        memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
 
+       /* force guaranteed filter pool for PF */
+       ice_alloc_fd_guar_item(hw, &unused,
+                              hw->func_caps.fd_fltr_guar);
+       /* force shared filter pool for PF */
+       ice_alloc_fd_shrd_item(hw, &unused,
+                              hw->func_caps.fd_fltr_best_effort);
+
        vsi = ice_setup_vsi(pf, ICE_VSI_PF);
        if (!vsi) {
                PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
@@ -2035,7 +2095,7 @@ err_init_mac:
        return ret;
 }
 
-static int
+int
 ice_release_vsi(struct ice_vsi *vsi)
 {
        struct ice_hw *hw;
@@ -2117,6 +2177,9 @@ ice_dev_stop(struct rte_eth_dev *dev)
        /* disable all queue interrupts */
        ice_vsi_disable_queues_intr(main_vsi);
 
+       if (pf->fdir.fdir_vsi)
+               ice_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
+
        /* Clear all queues and release mbufs */
        ice_clear_queues(dev);
 
@@ -2456,6 +2519,12 @@ ice_rxq_intr_setup(struct rte_eth_dev *dev)
        /* Enable interrupts for all the queues */
        ice_vsi_enable_queues_intr(vsi);
 
+       /* Enable FDIR MSIX interrupt */
+       if (pf->fdir.fdir_vsi) {
+               ice_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
+               ice_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
+       }
+
        rte_intr_enable(intr_handle);
 
        return 0;
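
Taken together, the ice_pf_sw_init() and ice_setup_vsi() changes above
partition the PF's queue space: one queue pair is reserved for flow
director programming and the main VSI's LAN queues now start at
base_queue 1. A rough sketch of the resulting layout, assuming
ICE_DEFAULT_QP_NUM_FDIR is 1 and ICE_FDIR_QUEUE_ID is 0 (neither define
appears in the hunks shown):

#include <stdbool.h>
#include <stdint.h>

/* Sketch of the PF queue partition after this patch; the macro values
 * assumed above are not taken from this diff.
 */
static void
example_qp_partition(uint16_t lan_nb_qp_max, bool fdir_supported)
{
	uint16_t fdir_nb_qps = fdir_supported ? 1 : 0;
	uint16_t lan_nb_qps = lan_nb_qp_max - fdir_nb_qps;

	/* queue 0              : FDIR programming queue pair
	 *                        (control VSI, base_queue = ICE_FDIR_QUEUE_ID)
	 * queues 1..lan_nb_qps : LAN data queues (main VSI, base_queue = 1)
	 */
	(void)lan_nb_qps;
}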
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 99eb2e2..222fc08 100644
@@ -247,6 +247,17 @@ TAILQ_HEAD(ice_flow_list, rte_flow);
 struct ice_flow_parser_node;
 TAILQ_HEAD(ice_parser_list, ice_flow_parser_node);
 
+/**
+ * A structure holding flow director (FDIR) related information.
+ */
+struct ice_fdir_info {
+       struct ice_vsi *fdir_vsi;     /* pointer to fdir VSI structure */
+       struct ice_tx_queue *txq;
+       struct ice_rx_queue *rxq;
+       void *prg_pkt;                 /* memory for fdir programming packet */
+       uint64_t dma_addr;             /* physical address of packet memory */
+};
+
 struct ice_pf {
        struct ice_adapter *adapter; /* The adapter this PF associate to */
        struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -267,6 +278,9 @@ struct ice_pf {
        uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
        uint16_t base_queue; /* The base queue pairs index  in the device */
        uint8_t *proto_xtr; /* Protocol extraction type for all queues */
+       uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
+       uint16_t fdir_qp_offset;
+       struct ice_fdir_info fdir; /* flow director info */
        struct ice_hw_port_stats stats_offset;
        struct ice_hw_port_stats stats;
        /* internal packet statistics, it should be excluded from the total */
@@ -353,6 +367,11 @@ struct ice_vsi_vlan_pvid_info {
 #define ICE_PF_TO_ETH_DEV(pf) \
        (((struct ice_pf *)pf)->adapter->eth_dev)
 
+struct ice_vsi *
+ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type);
+int
+ice_release_vsi(struct ice_vsi *vsi);
+
 static inline int
 ice_align_floor(int n)
 {
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
new file mode 100644
index 0000000..0fb3054
--- /dev/null
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -0,0 +1,225 @@
+#include <stdio.h>
+#include <rte_flow.h>
+#include "base/ice_fdir.h"
+#include "base/ice_flow.h"
+#include "base/ice_type.h"
+#include "ice_ethdev.h"
+#include "ice_rxtx.h"
+#include "ice_generic_flow.h"
+
+static const struct rte_memzone *
+ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
+{
+       return rte_memzone_reserve_aligned(name, len, socket_id,
+                                          RTE_MEMZONE_IOVA_CONTIG,
+                                          ICE_RING_BASE_ALIGN);
+}
+
+#define ICE_FDIR_MZ_NAME       "FDIR_MEMZONE"
+
+static int
+ice_fdir_prof_alloc(struct ice_hw *hw)
+{
+       enum ice_fltr_ptype ptype, fltr_ptype;
+
+       if (!hw->fdir_prof) {
+               hw->fdir_prof = (struct ice_fd_hw_prof **)
+                       ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
+                                  sizeof(*hw->fdir_prof));
+               if (!hw->fdir_prof)
+                       return -ENOMEM;
+       }
+       for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+            ptype < ICE_FLTR_PTYPE_MAX;
+            ptype++) {
+               if (!hw->fdir_prof[ptype]) {
+                       hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
+                               ice_malloc(hw, sizeof(**hw->fdir_prof));
+                       if (!hw->fdir_prof[ptype])
+                               goto fail_mem;
+               }
+       }
+       return 0;
+
+fail_mem:
+       for (fltr_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+            fltr_ptype < ptype;
+            fltr_ptype++)
+               rte_free(hw->fdir_prof[fltr_ptype]);
+       rte_free(hw->fdir_prof);
+       return -ENOMEM;
+}
+
+/*
+ * ice_fdir_setup - reserve and initialize the Flow Director resources
+ * @pf: board private structure
+ */
+static int
+ice_fdir_setup(struct ice_pf *pf)
+{
+       struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
+       struct ice_hw *hw = ICE_PF_TO_HW(pf);
+       const struct rte_memzone *mz = NULL;
+       char z_name[RTE_MEMZONE_NAMESIZE];
+       struct ice_vsi *vsi;
+       int err = ICE_SUCCESS;
+
+       if ((pf->flags & ICE_FLAG_FDIR) == 0) {
+               PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
+               return -ENOTSUP;
+       }
+
+       PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
+                   " fd_fltr_best_effort = %u.",
+                   hw->func_caps.fd_fltr_guar,
+                   hw->func_caps.fd_fltr_best_effort);
+
+       if (pf->fdir.fdir_vsi) {
+               PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
+               return ICE_SUCCESS;
+       }
+
+       /* make new FDIR VSI */
+       vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
+       if (!vsi) {
+               PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
+               return -EINVAL;
+       }
+       pf->fdir.fdir_vsi = vsi;
+
+       /* FDIR TX queue setup */
+       err = ice_fdir_setup_tx_resources(pf);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
+               goto fail_setup_tx;
+       }
+
+       /* FDIR RX queue setup */
+       err = ice_fdir_setup_rx_resources(pf);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
+               goto fail_setup_rx;
+       }
+
+       err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
+               goto fail_mem;
+       }
+
+       err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
+               goto fail_mem;
+       }
+
+       /* reserve memory for the fdir programming packet */
+       snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
+                ICE_FDIR_MZ_NAME,
+                eth_dev->data->port_id);
+       mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
+       if (!mz) {
+               PMD_DRV_LOG(ERR, "Cannot init memzone for "
+                           "flow director program packet.");
+               err = -ENOMEM;
+               goto fail_mem;
+       }
+       pf->fdir.prg_pkt = mz->addr;
+       pf->fdir.dma_addr = mz->iova;
+
+       err = ice_fdir_prof_alloc(hw);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Cannot allocate memory for "
+                           "flow director profile.");
+               err = -ENOMEM;
+               goto fail_mem;
+       }
+
+       PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
+                   vsi->base_queue);
+       return ICE_SUCCESS;
+
+fail_mem:
+       ice_rx_queue_release(pf->fdir.rxq);
+       pf->fdir.rxq = NULL;
+fail_setup_rx:
+       ice_tx_queue_release(pf->fdir.txq);
+       pf->fdir.txq = NULL;
+fail_setup_tx:
+       ice_release_vsi(vsi);
+       pf->fdir.fdir_vsi = NULL;
+       return err;
+}
+
+static void
+ice_fdir_prof_free(struct ice_hw *hw)
+{
+       enum ice_fltr_ptype ptype;
+
+       for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+            ptype < ICE_FLTR_PTYPE_MAX;
+            ptype++)
+               rte_free(hw->fdir_prof[ptype]);
+
+       rte_free(hw->fdir_prof);
+}
+
+/*
+ * ice_fdir_teardown - release the Flow Director resources
+ * @pf: board private structure
+ */
+static void
+ice_fdir_teardown(struct ice_pf *pf)
+{
+       struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
+       struct ice_hw *hw = ICE_PF_TO_HW(pf);
+       struct ice_vsi *vsi;
+       int err;
+
+       vsi = pf->fdir.fdir_vsi;
+       if (!vsi)
+               return;
+
+       err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
+       if (err)
+               PMD_DRV_LOG(ERR, "Failed to stop TX queue.");
+
+       err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
+       if (err)
+               PMD_DRV_LOG(ERR, "Failed to stop RX queue.");
+
+       ice_tx_queue_release(pf->fdir.txq);
+       pf->fdir.txq = NULL;
+       ice_rx_queue_release(pf->fdir.rxq);
+       pf->fdir.rxq = NULL;
+       ice_release_vsi(vsi);
+       pf->fdir.fdir_vsi = NULL;
+       ice_fdir_prof_free(hw);
+}
+
+static int
+ice_fdir_init(struct ice_adapter *ad)
+{
+       struct ice_pf *pf = &ad->pf;
+
+       return ice_fdir_setup(pf);
+}
+
+static void
+ice_fdir_uninit(struct ice_adapter *ad)
+{
+       struct ice_pf *pf = &ad->pf;
+
+       ice_fdir_teardown(pf);
+}
+
+static struct ice_flow_engine ice_fdir_engine = {
+       .init = ice_fdir_init,
+       .uninit = ice_fdir_uninit,
+       .type = ICE_FLOW_ENGINE_FDIR,
+};
+
+RTE_INIT(ice_fdir_engine_register)
+{
+       ice_register_flow_engine(&ice_fdir_engine);
+}
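
The RTE_INIT constructor above registers the engine with the ice generic
flow framework at load time; device initialization is then expected to
walk the registered engines and call each ->init hook. A hedged sketch of
that dispatch (the real list handling lives in ice_generic_flow.c; the
list head and linkage member names here are assumptions):

#include <sys/queue.h>

/* Sketch only: 'engine_list' and the 'node' linkage are assumed names. */
static int
flow_engines_init(struct ice_adapter *ad)
{
	struct ice_flow_engine *engine;

	TAILQ_FOREACH(engine, &engine_list, node) {
		if (engine->init && engine->init(ad) < 0)
			return -1;	/* engine failed to initialize */
	}
	return 0;
}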
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index c07aa4b..d4c801f 100644
@@ -511,6 +511,179 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
        return 0;
 }
 
+static enum ice_status
+ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
+{
+       struct ice_vsi *vsi = rxq->vsi;
+       struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+       uint32_t rxdid = ICE_RXDID_COMMS_GENERIC;
+       struct ice_rlan_ctx rx_ctx;
+       enum ice_status err;
+       uint32_t regval;
+
+       rxq->rx_hdr_len = 0;
+       rxq->rx_buf_len = 1024;
+
+       memset(&rx_ctx, 0, sizeof(rx_ctx));
+
+       rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
+       rx_ctx.qlen = rxq->nb_rx_desc;
+       rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
+       rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
+       rx_ctx.dtype = 0; /* No Header Split mode */
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+       rx_ctx.dsize = 1; /* 32B descriptors */
+#endif
+       rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
+       /* TPH: Transaction Layer Packet (TLP) processing hints */
+       rx_ctx.tphrdesc_ena = 1;
+       rx_ctx.tphwdesc_ena = 1;
+       rx_ctx.tphdata_ena = 1;
+       rx_ctx.tphhead_ena = 1;
+       /* Low Receive Queue Threshold, defined in units of 64 descriptors.
+        * When the number of free descriptors drops below lrxqthresh,
+        * an immediate interrupt is triggered.
+        */
+       rx_ctx.lrxqthresh = 2;
+       /* Default to 32-byte descriptors; extract the VLAN tag to L2TAG2 (1st) */
+       rx_ctx.l2tsel = 1;
+       rx_ctx.showiv = 0;
+       rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
+
+       /* Enable Flexible Descriptors in the queue context which
+        * allows this driver to select a specific receive descriptor format
+        */
+       regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
+               QRXFLXP_CNTXT_RXDID_IDX_M;
+
+       /* Increase the context priority to pick up the profile ID;
+        * the default is 0x01; setting it to 0x03 ensures the profile
+        * is programmed even if the previous context has the same priority.
+        */
+       regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+               QRXFLXP_CNTXT_RXDID_PRIO_M;
+
+       ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
+
+       err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
+                           rxq->queue_id);
+               return -EINVAL;
+       }
+       err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
+                           rxq->queue_id);
+               return -EINVAL;
+       }
+
+       rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
+
+       /* Init the Rx tail register */
+       ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+
+       return 0;
+}
+
+int
+ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+       struct ice_rx_queue *rxq;
+       int err;
+       struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+       PMD_INIT_FUNC_TRACE();
+
+       rxq = pf->fdir.rxq;
+       if (!rxq || !rxq->q_set) {
+               PMD_DRV_LOG(ERR, "FDIR RX queue %u not available or setup",
+                           rx_queue_id);
+               return -EINVAL;
+       }
+
+       err = ice_fdir_program_hw_rx_queue(rxq);
+       if (err) {
+               PMD_DRV_LOG(ERR, "fail to program FDIR RX queue %u",
+                           rx_queue_id);
+               return -EIO;
+       }
+
+       /* Init the RX tail register. */
+       ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+
+       err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
+                           rx_queue_id);
+
+               ice_reset_rx_queue(rxq);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int
+ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+       struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct ice_tx_queue *txq;
+       int err;
+       struct ice_vsi *vsi;
+       struct ice_hw *hw;
+       struct ice_aqc_add_tx_qgrp txq_elem;
+       struct ice_tlan_ctx tx_ctx;
+
+       PMD_INIT_FUNC_TRACE();
+
+       txq = pf->fdir.txq;
+       if (!txq || !txq->q_set) {
+               PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or setup",
+                           tx_queue_id);
+               return -EINVAL;
+       }
+
+       vsi = txq->vsi;
+       hw = ICE_VSI_TO_HW(vsi);
+
+       memset(&txq_elem, 0, sizeof(txq_elem));
+       memset(&tx_ctx, 0, sizeof(tx_ctx));
+       txq_elem.num_txqs = 1;
+       txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
+
+       tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
+       tx_ctx.qlen = txq->nb_tx_desc;
+       tx_ctx.pf_num = hw->pf_id;
+       tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
+       tx_ctx.src_vsi = vsi->vsi_id;
+       tx_ctx.port_num = hw->port_info->lport;
+       tx_ctx.tso_ena = 1; /* tso enable */
+       tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
+       tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
+
+       ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
+                   ice_tlan_ctx_info);
+
+       txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
+
+       /* Init the Tx tail register */
+       ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
+
+       /* Fix me, we assume TC always 0 here */
+       err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
+                             &txq_elem, sizeof(txq_elem), NULL);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
+               return -EIO;
+       }
+       /* store the schedule node id */
+       txq->q_teid = txq_elem.txqs[0].q_teid;
+
+       return 0;
+}
+
 /* Free all mbufs for descriptors in tx queue */
 static void
 _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
@@ -616,6 +789,63 @@ ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
        return 0;
 }
 
+int
+ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+       struct ice_rx_queue *rxq;
+       int err;
+       struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+       rxq = pf->fdir.rxq;
+
+       err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
+                           rx_queue_id);
+               return -EINVAL;
+       }
+       ice_rx_queue_release_mbufs(rxq);
+
+       return 0;
+}
+
+int
+ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+       struct ice_tx_queue *txq;
+       struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct ice_vsi *vsi = pf->main_vsi;
+       enum ice_status status;
+       uint16_t q_ids[1];
+       uint32_t q_teids[1];
+       uint16_t q_handle = tx_queue_id;
+
+       txq = pf->fdir.txq;
+       if (!txq) {
+               PMD_DRV_LOG(ERR, "TX queue %u is not available",
+                           tx_queue_id);
+               return -EINVAL;
+       }
+       vsi = txq->vsi;
+
+       q_ids[0] = txq->reg_idx;
+       q_teids[0] = txq->q_teid;
+
+       /* Fix me, we assume TC always 0 here */
+       status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
+                                q_ids, q_teids, ICE_NO_RESET, 0, NULL);
+       if (status != ICE_SUCCESS) {
+               PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
+               return -EINVAL;
+       }
+
+       ice_tx_queue_release_mbufs(txq);
+
+       return 0;
+}
+
 int
 ice_rx_queue_setup(struct rte_eth_dev *dev,
                   uint16_t queue_idx,
@@ -1131,6 +1361,11 @@ ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
                xtr->type = ice_rxdid_to_proto_xtr_type(desc->rxdid);
                xtr->magic = PROTO_XTR_MAGIC_ID;
        }
+
+       if (desc->flow_id != 0xFFFFFFFF) {
+               mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+               mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+       }
 #endif
 }
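
With this change, whenever the flexible RX descriptor carries a flow ID
other than the 0xFFFFFFFF sentinel, the mbuf is flagged so an application
can read back the matched rule ID through the standard mbuf fields, e.g.:

/* Application-side use of the flags set above (standard mbuf API). */
if (mb->ol_flags & PKT_RX_FDIR_ID)
	printf("packet matched FDIR rule id %u\n", mb->hash.fdir.hi);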
 
@@ -1684,6 +1919,128 @@ ice_free_queues(struct rte_eth_dev *dev)
        dev->data->nb_tx_queues = 0;
 }
 
+#define ICE_FDIR_NUM_TX_DESC  ICE_MIN_RING_DESC
+#define ICE_FDIR_NUM_RX_DESC  ICE_MIN_RING_DESC
+
+int
+ice_fdir_setup_tx_resources(struct ice_pf *pf)
+{
+       struct ice_tx_queue *txq;
+       const struct rte_memzone *tz = NULL;
+       uint32_t ring_size;
+       struct rte_eth_dev *dev;
+
+       if (!pf) {
+               PMD_DRV_LOG(ERR, "PF is not available");
+               return -EINVAL;
+       }
+
+       dev = pf->adapter->eth_dev;
+
+       /* Allocate the TX queue data structure. */
+       txq = rte_zmalloc_socket("ice fdir tx queue",
+                                sizeof(struct ice_tx_queue),
+                                RTE_CACHE_LINE_SIZE,
+                                SOCKET_ID_ANY);
+       if (!txq) {
+               PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+                           "tx queue structure.");
+               return -ENOMEM;
+       }
+
+       /* Allocate TX hardware ring descriptors. */
+       ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
+       ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
+
+       tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
+                                     ICE_FDIR_QUEUE_ID, ring_size,
+                                     ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
+       if (!tz) {
+               ice_tx_queue_release(txq);
+               PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
+               return -ENOMEM;
+       }
+
+       txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
+       txq->queue_id = ICE_FDIR_QUEUE_ID;
+       txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
+       txq->vsi = pf->fdir.fdir_vsi;
+
+       txq->tx_ring_dma = tz->iova;
+       txq->tx_ring = (struct ice_tx_desc *)tz->addr;
+       /*
+        * No need to allocate a software ring or reset it for the FDIR
+        * programming queue; just mark the queue as configured.
+        */
+       txq->q_set = TRUE;
+       pf->fdir.txq = txq;
+
+       txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
+
+       return ICE_SUCCESS;
+}
+
+int
+ice_fdir_setup_rx_resources(struct ice_pf *pf)
+{
+       struct ice_rx_queue *rxq;
+       const struct rte_memzone *rz = NULL;
+       uint32_t ring_size;
+       struct rte_eth_dev *dev;
+
+       if (!pf) {
+               PMD_DRV_LOG(ERR, "PF is not available");
+               return -EINVAL;
+       }
+
+       dev = pf->adapter->eth_dev;
+
+       /* Allocate the RX queue data structure. */
+       rxq = rte_zmalloc_socket("ice fdir rx queue",
+                                sizeof(struct ice_rx_queue),
+                                RTE_CACHE_LINE_SIZE,
+                                SOCKET_ID_ANY);
+       if (!rxq) {
+               PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+                           "rx queue structure.");
+               return -ENOMEM;
+       }
+
+       /* Allocate RX hardware ring descriptors. */
+       ring_size = sizeof(union ice_rx_flex_desc) * ICE_FDIR_NUM_RX_DESC;
+       ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
+
+       rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
+                                     ICE_FDIR_QUEUE_ID, ring_size,
+                                     ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
+       if (!rz) {
+               ice_rx_queue_release(rxq);
+               PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
+               return -ENOMEM;
+       }
+
+       rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
+       rxq->queue_id = ICE_FDIR_QUEUE_ID;
+       rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
+       rxq->vsi = pf->fdir.fdir_vsi;
+
+       rxq->rx_ring_dma = rz->iova;
+       memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
+              sizeof(union ice_rx_flex_desc));
+       rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
+
+       /*
+        * No need to allocate a software ring or reset it for the FDIR
+        * RX queue; just mark the queue as configured.
+        */
+       rxq->q_set = TRUE;
+       pf->fdir.rxq = rxq;
+
+       rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
+
+       return ICE_SUCCESS;
+}
+
 uint16_t
 ice_recv_pkts(void *rx_queue,
              struct rte_mbuf **rx_pkts,
@@ -3180,3 +3537,49 @@ ice_set_default_ptype_table(struct rte_eth_dev *dev)
        for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
                ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
 }
+
+#define ICE_FDIR_MAX_WAIT_US 10000
+
+int
+ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
+{
+       struct ice_tx_queue *txq = pf->fdir.txq;
+       volatile struct ice_fltr_desc *fdirdp;
+       volatile struct ice_tx_desc *txdp;
+       uint32_t td_cmd;
+       uint16_t i;
+
+       fdirdp = (volatile struct ice_fltr_desc *)
+               (&txq->tx_ring[txq->tx_tail]);
+       fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
+       fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
+
+       txdp = &txq->tx_ring[txq->tx_tail + 1];
+       txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
+       td_cmd = ICE_TX_DESC_CMD_EOP |
+               ICE_TX_DESC_CMD_RS  |
+               ICE_TX_DESC_CMD_DUMMY;
+
+       txdp->cmd_type_offset_bsz =
+               ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
+
+       txq->tx_tail += 2;
+       if (txq->tx_tail >= txq->nb_tx_desc)
+               txq->tx_tail = 0;
+       /* Update the tx tail register */
+       ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+       for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
+               if ((txdp->cmd_type_offset_bsz &
+                    rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
+                   rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
+                       break;
+               rte_delay_us(1);
+       }
+       if (i >= ICE_FDIR_MAX_WAIT_US) {
+               PMD_DRV_LOG(ERR,
+                           "Failed to program FDIR filter: timed out waiting for DD on the TX queue.");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
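
ice_fdir_programming() consumes two descriptors per operation: the filter
programming descriptor itself, followed by a dummy TX descriptor pointing
at the ICE_FDIR_PKT_LEN-byte programming packet, after which it busy-polls
up to ICE_FDIR_MAX_WAIT_US for the DD bit. A hypothetical caller, ahead of
the actual filter create/destroy support added in later patches, would
look roughly like:

#include <string.h>

/* Sketch only: the descriptor encoding is omitted; a real caller must
 * fill both the programming packet and the ice_fltr_desc fields per the
 * hardware spec.
 */
static int
fdir_program_example(struct ice_pf *pf)
{
	struct ice_fltr_desc desc;

	/* 1. Build a template packet for the flow type being programmed. */
	memset(pf->fdir.prg_pkt, 0, ICE_FDIR_PKT_LEN);

	/* 2. Encode the filter programming descriptor (encoding omitted). */
	memset(&desc, 0, sizeof(desc));

	/* 3. Submit on the FDIR TX queue and poll for completion. */
	return ice_fdir_programming(pf, &desc);
}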
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 31c53d5..5de6189 100644
@@ -37,6 +37,8 @@
 #define ICE_TX_MAX_FREE_BUF_SZ      64
 #define ICE_DESCS_PER_LOOP          4
 
+#define ICE_FDIR_PKT_LEN       512
+
 typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
 typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
 
@@ -149,10 +151,16 @@ int ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 void ice_rx_queue_release(void *rxq);
 void ice_tx_queue_release(void *txq);
 void ice_clear_queues(struct rte_eth_dev *dev);
 void ice_free_queues(struct rte_eth_dev *dev);
+int ice_fdir_setup_tx_resources(struct ice_pf *pf);
+int ice_fdir_setup_rx_resources(struct ice_pf *pf);
 uint16_t ice_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                       uint16_t nb_pkts);
 uint16_t ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -194,4 +202,5 @@ uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue,
                                          uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
                                uint16_t nb_pkts);
+int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc);
 #endif /* _ICE_RXTX_H_ */
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 6828170..908a2ca 100644
@@ -10,7 +10,8 @@ sources = files(
        'ice_ethdev.c',
        'ice_rxtx.c',
        'ice_switch_filter.c',
-       'ice_generic_flow.c'
+       'ice_generic_flow.c',
+       'ice_fdir_filter.c'
        )
 
 deps += ['hash']