net/ice: initialize and update RSS based on user config
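
With this change the PMD derives its RSS flow configuration from the rss_hf
bits supplied by the application instead of programming a fixed set:
ice_dev_configure() now calls ice_init_rss(), which programs only the enabled
hash types through the new ice_rss_hash_set() helper, and the
rss_hash_update ops reuses the same helper at run time. Below is a minimal,
illustrative sketch of the application side (not part of this patch); the
helper name, port id and queue counts are arbitrary example values.

#include <rte_ethdev.h>

/* Hypothetical helper, for illustration only: enable RSS for a subset of
 * flow types at configure time and change the set later at run time.
 */
static int
example_enable_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
        struct rte_eth_conf conf = {
                .rxmode = {
                        .mq_mode = ETH_MQ_RX_RSS,
                },
                .rx_adv_conf = {
                        .rss_conf = {
                                /* Only these hash types are programmed now */
                                .rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6 |
                                          ETH_RSS_NONFRAG_IPV4_TCP |
                                          ETH_RSS_NONFRAG_IPV6_UDP,
                        },
                },
        };
        struct rte_eth_rss_conf update = {
                /* Run-time update of the enabled hash types */
                .rss_hf = ETH_RSS_NONFRAG_IPV4_UDP,
        };
        int ret;

        /* ice_dev_configure() -> ice_init_rss() -> ice_rss_hash_set() */
        ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
        if (ret < 0)
                return ret;

        /* ice_rss_hash_update() now also goes through ice_rss_hash_set() */
        return rte_eth_dev_rss_hash_update(port_id, &update);
}
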
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index b29cb45..7db8b35 100644
 /* devargs */
 #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
 #define ICE_PIPELINE_MODE_SUPPORT_ARG  "pipeline-mode-support"
+#define ICE_FLOW_MARK_SUPPORT_ARG      "flow-mark-support"
 #define ICE_PROTO_XTR_ARG         "proto_xtr"
 
 static const char * const ice_valid_args[] = {
        ICE_SAFE_MODE_SUPPORT_ARG,
        ICE_PIPELINE_MODE_SUPPORT_ARG,
+       ICE_FLOW_MARK_SUPPORT_ARG,
        ICE_PROTO_XTR_ARG,
        NULL
 };
@@ -65,15 +67,8 @@ static struct proto_xtr_ol_flag ice_proto_xtr_ol_flag_params[] = {
 
 #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
 
-/* DDP package search path */
-#define ICE_PKG_FILE_DEFAULT "/lib/firmware/intel/ice/ddp/ice.pkg"
-#define ICE_PKG_FILE_UPDATES "/lib/firmware/updates/intel/ice/ddp/ice.pkg"
-#define ICE_PKG_FILE_SEARCH_PATH_DEFAULT "/lib/firmware/intel/ice/ddp/"
-#define ICE_PKG_FILE_SEARCH_PATH_UPDATES "/lib/firmware/updates/intel/ice/ddp/"
-
 #define ICE_OS_DEFAULT_PKG_NAME                "ICE OS Default Package"
 #define ICE_COMMS_PKG_NAME                     "ICE COMMS Package"
-#define ICE_MAX_PKG_FILENAME_SIZE   256
 #define ICE_MAX_RES_DESC_NUM        1024
 
 int ice_logtype_init;
@@ -161,6 +156,16 @@ static const struct rte_pci_id pci_id_ice_map[] = {
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP) },
+       { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE) },
+       { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP) },
+       { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP) },
+       { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T) },
+       { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII) },
+       { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
+       { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP) },
+       { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T) },
+       { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE) },
+       { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP) },
        { .vendor_id = 0, /* sentinel */ },
 };
 
@@ -218,6 +223,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
        .filter_ctrl                  = ice_dev_filter_ctrl,
        .udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
+       .tx_done_cleanup              = ice_tx_done_cleanup,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -868,7 +874,7 @@ ice_add_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
                ret = -ENOMEM;
                goto DONE;
        }
-       rte_memcpy(&f->mac_info.mac_addr, mac_addr, ETH_ADDR_LEN);
+       rte_ether_addr_copy(mac_addr, &f->mac_info.mac_addr);
        TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
        vsi->mac_num++;
 
@@ -1571,7 +1577,7 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
                cfg = ICE_AQ_VSI_PROP_SECURITY_VALID |
                        ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
                vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
-               cfg = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
+               cfg = ICE_AQ_VSI_FD_ENABLE;
                vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
                vsi_ctx.info.max_fd_fltr_dedicated =
                        rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);
@@ -1599,9 +1605,10 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
 
                cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
                vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
-               cfg = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
+               cfg = ICE_AQ_VSI_FD_PROG_ENABLE;
                vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
                vsi_ctx.info.sw_id = hw->port_info->sw_id;
+               vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
                ret = ice_vsi_config_tc_queue_mapping(vsi,
                                                      &vsi_ctx.info,
                                                      ICE_DEFAULT_TCMAP);
@@ -1655,16 +1662,16 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
 
        if (type == ICE_VSI_PF) {
                /* MAC configuration */
-               rte_memcpy(pf->dev_addr.addr_bytes,
-                          hw->port_info->mac.perm_addr,
-                          ETH_ADDR_LEN);
+               rte_ether_addr_copy((struct rte_ether_addr *)
+                                       hw->port_info->mac.perm_addr,
+                                   &pf->dev_addr);
 
-               rte_memcpy(&mac_addr, &pf->dev_addr, RTE_ETHER_ADDR_LEN);
+               rte_ether_addr_copy(&pf->dev_addr, &mac_addr);
                ret = ice_add_mac_filter(vsi, &mac_addr);
                if (ret != ICE_SUCCESS)
                        PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
 
-               rte_memcpy(&mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
+               rte_ether_addr_copy(&broadcast, &mac_addr);
                ret = ice_add_mac_filter(vsi, &mac_addr);
                if (ret != ICE_SUCCESS)
                        PMD_INIT_LOG(ERR, "Failed to add MAC filter");
@@ -1711,7 +1718,7 @@ ice_pf_setup(struct ice_pf *pf)
        uint16_t unused;
 
        /* Clear all stats counters */
-       pf->offset_loaded = FALSE;
+       pf->offset_loaded = false;
        memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
        memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
        memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
@@ -1824,7 +1831,7 @@ fail_dsn:
        return 0;
 }
 
-static enum ice_pkg_type
+enum ice_pkg_type
 ice_load_pkg_type(struct ice_hw *hw)
 {
        enum ice_pkg_type package_type;
@@ -1987,6 +1994,13 @@ static int ice_parse_devargs(struct rte_eth_dev *dev)
 
        ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG,
                                 &parse_bool, &ad->devargs.pipe_mode_support);
+       if (ret)
+               goto bail;
+
+       ret = rte_kvargs_process(kvlist, ICE_FLOW_MARK_SUPPORT_ARG,
+                                &parse_bool, &ad->devargs.flow_mark_support);
+       if (ret)
+               goto bail;
 
 bail:
        rte_kvargs_free(kvlist);
@@ -2218,16 +2232,16 @@ ice_dev_init(struct rte_eth_dev *dev)
        vsi = pf->main_vsi;
 
        /* Disable double vlan by default */
-       ice_vsi_config_double_vlan(vsi, FALSE);
+       ice_vsi_config_double_vlan(vsi, false);
 
-       ret = ice_aq_stop_lldp(hw, TRUE, FALSE, NULL);
+       ret = ice_aq_stop_lldp(hw, true, false, NULL);
        if (ret != ICE_SUCCESS)
                PMD_INIT_LOG(DEBUG, "lldp has already stopped\n");
-       ret = ice_init_dcb(hw, TRUE);
+       ret = ice_init_dcb(hw, true);
        if (ret != ICE_SUCCESS)
                PMD_INIT_LOG(DEBUG, "Failed to init DCB\n");
        /* Forward LLDP packets to default VSI */
-       ret = ice_vsi_config_sw_lldp(vsi, TRUE);
+       ret = ice_vsi_config_sw_lldp(vsi, true);
        if (ret != ICE_SUCCESS)
                PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n");
        /* register callback func to eal lib */
@@ -2242,10 +2256,12 @@ ice_dev_init(struct rte_eth_dev *dev)
        /* get base queue pairs index  in the device */
        ice_base_queue_get(pf);
 
-       ret = ice_flow_init(ad);
-       if (ret) {
-               PMD_INIT_LOG(ERR, "Failed to initialize flow");
-               return ret;
+       if (!ad->is_safe_mode) {
+               ret = ice_flow_init(ad);
+               if (ret) {
+                       PMD_INIT_LOG(ERR, "Failed to initialize flow");
+                       return ret;
+               }
        }
 
        ret = ice_reset_fxp_resource(hw);
@@ -2299,7 +2315,7 @@ ice_release_vsi(struct ice_vsi *vsi)
        return 0;
 }
 
-static void
+void
 ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
 {
        struct rte_eth_dev *dev = vsi->adapter->eth_dev;
@@ -2352,13 +2368,10 @@ ice_dev_stop(struct rte_eth_dev *dev)
        /* disable all queue interrupts */
        ice_vsi_disable_queues_intr(main_vsi);
 
-       if (pf->fdir.fdir_vsi)
-               ice_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
-
-       /* Clear all queues and release mbufs */
-       ice_clear_queues(dev);
-
-       ice_dev_set_link_down(dev);
+       if (pf->init_link_up)
+               ice_dev_set_link_up(dev);
+       else
+               ice_dev_set_link_down(dev);
 
        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
@@ -2389,7 +2402,8 @@ ice_dev_close(struct rte_eth_dev *dev)
 
        ice_dev_stop(dev);
 
-       ice_flow_uninit(ad);
+       if (!ad->is_safe_mode)
+               ice_flow_uninit(ad);
 
        /* release all queue resource */
        ice_free_queues(dev);
@@ -2427,21 +2441,98 @@ ice_dev_uninit(struct rte_eth_dev *dev)
        return 0;
 }
 
-static int
-ice_dev_configure(struct rte_eth_dev *dev)
+static void
+ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 {
-       struct ice_adapter *ad =
-               ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-
-       /* Initialize to TRUE. If any of Rx queues doesn't meet the
-        * bulk allocation or vector Rx preconditions we will reset it.
-        */
-       ad->rx_bulk_alloc_allowed = true;
-       ad->tx_simple_allowed = true;
-
-       dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+       struct ice_hw *hw = ICE_PF_TO_HW(pf);
+       struct ice_vsi *vsi = pf->main_vsi;
+       int ret;
 
-       return 0;
+       /* Configure RSS for IPv4 with src/dst addr as input set */
+       if (rss_hf & ETH_RSS_IPV4) {
+               ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
+                                     ICE_FLOW_SEG_HDR_IPV4 |
+                                     ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+               if (ret)
+                       PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d",
+                                   __func__, ret);
+       }
+
+       /* Configure RSS for IPv6 with src/dst addr as input set */
+       if (rss_hf & ETH_RSS_IPV6) {
+               ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
+                                     ICE_FLOW_SEG_HDR_IPV6 |
+                                     ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+               if (ret)
+                       PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d",
+                                   __func__, ret);
+       }
+
+       /* Configure RSS for udp4 with src/dst addr and port as input set */
+       if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+               ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
+                                     ICE_FLOW_SEG_HDR_UDP |
+                                     ICE_FLOW_SEG_HDR_IPV4 |
+                                     ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+               if (ret)
+                       PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d",
+                                   __func__, ret);
+       }
+
+       /* Configure RSS for udp6 with src/dst addr and port as input set */
+       if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+               ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
+                                     ICE_FLOW_SEG_HDR_UDP |
+                                     ICE_FLOW_SEG_HDR_IPV6 |
+                                     ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+               if (ret)
+                       PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d",
+                                   __func__, ret);
+       }
+
+       /* Configure RSS for tcp4 with src/dst addr and port as input set */
+       if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+               ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
+                                     ICE_FLOW_SEG_HDR_TCP |
+                                     ICE_FLOW_SEG_HDR_IPV4 |
+                                     ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+               if (ret)
+                       PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d",
+                                   __func__, ret);
+       }
+
+       /* Configure RSS for tcp6 with src/dst addr and port as input set */
+       if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+               ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
+                                     ICE_FLOW_SEG_HDR_TCP |
+                                     ICE_FLOW_SEG_HDR_IPV6 |
+                                     ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+               if (ret)
+                       PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d",
+                                   __func__, ret);
+       }
+
+       /* Configure RSS for sctp4 with src/dst addr and port as input set */
+       if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+               ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
+                                     ICE_FLOW_SEG_HDR_SCTP |
+                                     ICE_FLOW_SEG_HDR_IPV4 |
+                                     ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+               if (ret)
+                       PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
+                                   __func__, ret);
+       }
+
+       /* Configure RSS for sctp6 with src/dst addr and port as input set */
+       if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+               ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
+                                     ICE_FLOW_SEG_HDR_SCTP |
+                                     ICE_FLOW_SEG_HDR_IPV6 |
+                                     ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+               if (ret)
+                       PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
+                                   __func__, ret);
+       }
 }
 
 static int ice_init_rss(struct ice_pf *pf)
@@ -2504,72 +2595,34 @@ static int ice_init_rss(struct ice_pf *pf)
                (1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
        ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);
 
-       /* configure RSS for IPv4 with input set IPv4 src/dst */
-       ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
-                             ICE_FLOW_SEG_HDR_IPV4, 0);
-       if (ret)
-               PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d", __func__, ret);
-
-       /* configure RSS for IPv6 with input set IPv6 src/dst */
-       ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
-                             ICE_FLOW_SEG_HDR_IPV6, 0);
-       if (ret)
-               PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d", __func__, ret);
-
-       /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
-       ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
-                             ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6, 0);
-       if (ret)
-               PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d", __func__, ret);
-
-       /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
-       ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
-                             ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6, 0);
-       if (ret)
-               PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d", __func__, ret);
-
-       /* configure RSS for sctp6 with input set IPv6 src/dst */
-       ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
-                             ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6, 0);
-       if (ret)
-               PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
-                               __func__, ret);
+       /* RSS hash configuration */
+       ice_rss_hash_set(pf, rss_conf->rss_hf);
 
-       /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
-       ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
-                             ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4, 0);
-       if (ret)
-               PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d", __func__, ret);
+       return 0;
+}
 
-       /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
-       ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
-                             ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4, 0);
-       if (ret)
-               PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d", __func__, ret);
+static int
+ice_dev_configure(struct rte_eth_dev *dev)
+{
+       struct ice_adapter *ad =
+               ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       int ret;
 
-       /* configure RSS for sctp4 with input set IP src/dst */
-       ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
-                             ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4, 0);
-       if (ret)
-               PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
-                               __func__, ret);
+       /* Initialize to TRUE. If any of Rx queues doesn't meet the
+        * bulk allocation or vector Rx preconditions we will reset it.
+        */
+       ad->rx_bulk_alloc_allowed = true;
+       ad->tx_simple_allowed = true;
 
-       /* configure RSS for gtpu with input set TEID */
-       ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_GTP_U_IPV4_TEID,
-                               ICE_FLOW_SEG_HDR_GTPU_IP, 0);
-       if (ret)
-               PMD_DRV_LOG(ERR, "%s GTPU_TEID rss flow fail %d",
-                               __func__, ret);
+       if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+               dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
 
-       /**
-        * configure RSS for pppoe/pppod with input set
-        * Source MAC and Session ID
-        */
-       ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_PPPOE_SESS_ID_ETH,
-                               ICE_FLOW_SEG_HDR_PPPOE, 0);
-       if (ret)
-               PMD_DRV_LOG(ERR, "%s PPPoE/PPPoD_SessionID rss flow fail %d",
-                               __func__, ret);
+       ret = ice_init_rss(pf);
+       if (ret) {
+               PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
+               return ret;
+       }
 
        return 0;
 }
@@ -2585,9 +2638,9 @@ __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
        for (i = 0; i < nb_queue; i++) {
                /*do actual bind*/
                val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
-                     (0 < QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
+                     (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
                val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
-                        (0 < QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
+                        (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
 
                PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
                            base_queue + i, msix_vect);
@@ -2598,7 +2651,7 @@ __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
        }
 }
 
-static void
+void
 ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
 {
        struct rte_eth_dev *dev = vsi->adapter->eth_dev;
@@ -2651,7 +2704,7 @@ ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
        }
 }
 
-static void
+void
 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
 {
        struct rte_eth_dev *dev = vsi->adapter->eth_dev;
@@ -2721,17 +2774,32 @@ ice_rxq_intr_setup(struct rte_eth_dev *dev)
        /* Enable interrupts for all the queues */
        ice_vsi_enable_queues_intr(vsi);
 
-       /* Enable FDIR MSIX interrupt */
-       if (pf->fdir.fdir_vsi) {
-               ice_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
-               ice_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
-       }
-
        rte_intr_enable(intr_handle);
 
        return 0;
 }
 
+static void
+ice_get_init_link_status(struct rte_eth_dev *dev)
+{
+       struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
+       struct ice_link_status link_status;
+       int ret;
+
+       ret = ice_aq_get_link_info(hw->port_info, enable_lse,
+                                  &link_status, NULL);
+       if (ret != ICE_SUCCESS) {
+               PMD_DRV_LOG(ERR, "Failed to get link info");
+               pf->init_link_up = false;
+               return;
+       }
+
+       if (link_status.link_info & ICE_AQ_LINK_UP)
+               pf->init_link_up = true;
+}
+
 static int
 ice_dev_start(struct rte_eth_dev *dev)
 {
@@ -2762,12 +2830,6 @@ ice_dev_start(struct rte_eth_dev *dev)
                }
        }
 
-       ret = ice_init_rss(pf);
-       if (ret) {
-               PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
-               goto rx_err;
-       }
-
        ice_set_rx_function(dev);
        ice_set_tx_function(dev);
 
@@ -2802,6 +2864,8 @@ ice_dev_start(struct rte_eth_dev *dev)
        if (ret != ICE_SUCCESS)
                PMD_DRV_LOG(WARNING, "Fail to set phy mask");
 
+       ice_get_init_link_status(dev);
+
        ice_dev_set_link_up(dev);
 
        /* Call get_link_info aq commond to enable/disable LSE */
@@ -3136,7 +3200,7 @@ ice_force_phys_link_state(struct ice_hw *hw, bool link_up)
        cfg.phy_type_low = pcaps->phy_type_low;
        cfg.phy_type_high = pcaps->phy_type_high;
        cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
-       cfg.low_power_ctrl = pcaps->low_power_ctrl;
+       cfg.low_power_ctrl_an = pcaps->low_power_ctrl_an;
        cfg.eee_cap = pcaps->eee_cap;
        cfg.eeer_value = pcaps->eeer_value;
        cfg.link_fec_opt = pcaps->link_fec_options;
@@ -3234,7 +3298,7 @@ static int ice_macaddr_set(struct rte_eth_dev *dev,
                PMD_DRV_LOG(ERR, "Failed to add mac filter");
                return -EIO;
        }
-       memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
+       rte_ether_addr_copy(mac_addr, &pf->dev_addr);
 
        flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
        ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
@@ -3412,23 +3476,23 @@ ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
        rxmode = &dev->data->dev_conf.rxmode;
        if (mask & ETH_VLAN_FILTER_MASK) {
                if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-                       ice_vsi_config_vlan_filter(vsi, TRUE);
+                       ice_vsi_config_vlan_filter(vsi, true);
                else
-                       ice_vsi_config_vlan_filter(vsi, FALSE);
+                       ice_vsi_config_vlan_filter(vsi, false);
        }
 
        if (mask & ETH_VLAN_STRIP_MASK) {
                if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-                       ice_vsi_config_vlan_stripping(vsi, TRUE);
+                       ice_vsi_config_vlan_stripping(vsi, true);
                else
-                       ice_vsi_config_vlan_stripping(vsi, FALSE);
+                       ice_vsi_config_vlan_stripping(vsi, false);
        }
 
        if (mask & ETH_VLAN_EXTEND_MASK) {
                if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
-                       ice_vsi_config_double_vlan(vsi, TRUE);
+                       ice_vsi_config_double_vlan(vsi, true);
                else
-                       ice_vsi_config_double_vlan(vsi, FALSE);
+                       ice_vsi_config_double_vlan(vsi, false);
        }
 
        return 0;
@@ -3653,7 +3717,12 @@ ice_rss_hash_update(struct rte_eth_dev *dev,
        if (status)
                return status;
 
-       /* TODO: hash enable config, ice_add_rss_cfg */
+       if (rss_conf->rss_hf == 0)
+               return 0;
+
+       /* RSS hash configuration */
+       ice_rss_hash_set(pf, rss_conf->rss_hf);
+
        return 0;
 }
 
@@ -3812,21 +3881,19 @@ static int
 ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
 {
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       u32 full_ver;
        u8 ver, patch;
        u16 build;
        int ret;
 
-       full_ver = hw->nvm.oem_ver;
-       ver = (u8)(full_ver >> 24);
-       build = (u16)((full_ver >> 8) & 0xffff);
-       patch = (u8)(full_ver & 0xff);
+       ver = hw->nvm.orom.major;
+       patch = hw->nvm.orom.patch;
+       build = hw->nvm.orom.build;
 
        ret = snprintf(fw_version, fw_size,
-                       "%d.%d%d 0x%08x %d.%d.%d",
-                       ((hw->nvm.ver >> 12) & 0xf),
-                       ((hw->nvm.ver >> 4) & 0xff),
-                       (hw->nvm.ver & 0xf), hw->nvm.eetrack,
+                       "%d.%d 0x%08x %d.%d.%d",
+                       hw->nvm.major_ver,
+                       hw->nvm.minor_ver,
+                       hw->nvm.eetrack,
                        ver, build, patch);
 
        /* add the size of '\0' */
@@ -4541,7 +4608,8 @@ RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(net_ice,
                              ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp>"
                              ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>"
-                             ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>");
+                             ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>"
+                             ICE_FLOW_MARK_SUPPORT_ARG "=<0|1>");
 
 RTE_INIT(ice_init_log)
 {