build: explicitly enable sse4 for meson
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index febcdfe..de189da 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
 /* devargs */
 #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
 #define ICE_PIPELINE_MODE_SUPPORT_ARG  "pipeline-mode-support"
+#define ICE_FLOW_MARK_SUPPORT_ARG      "flow-mark-support"
 #define ICE_PROTO_XTR_ARG         "proto_xtr"
 
 static const char * const ice_valid_args[] = {
        ICE_SAFE_MODE_SUPPORT_ARG,
        ICE_PIPELINE_MODE_SUPPORT_ARG,
+       ICE_FLOW_MARK_SUPPORT_ARG,
        ICE_PROTO_XTR_ARG,
        NULL
 };
@@ -1987,6 +1989,13 @@ static int ice_parse_devargs(struct rte_eth_dev *dev)
 
        ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG,
                                 &parse_bool, &ad->devargs.pipe_mode_support);
+       if (ret)
+               goto bail;
+
+       ret = rte_kvargs_process(kvlist, ICE_FLOW_MARK_SUPPORT_ARG,
+                                &parse_bool, &ad->devargs.flow_mark_support);
+       if (ret)
+               goto bail;
 
 bail:
        rte_kvargs_free(kvlist);
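
For context, rte_kvargs_process() invokes a handler with the arg_handler_t signature (key, value, opaque pointer) for each occurrence of the key; the parse_bool helper referenced above is outside this hunk. A minimal sketch of such a handler, assuming the devargs fields it fills are plain ints, could look like this:

```c
/* Sketch only: a handler compatible with rte_kvargs_process().
 * The real parse_bool in ice_ethdev.c is not shown in this diff and
 * may differ; the int target type is an assumption.
 */
#include <stdlib.h>

static int
parse_bool(const char *key, const char *value, void *args)
{
	int *target = args;
	char *end = NULL;
	unsigned long num;

	(void)key;

	num = strtoul(value, &end, 10);
	if (end == value || (num != 0 && num != 1))
		return -1;	/* only "0" or "1" are accepted */

	*target = (int)num;
	return 0;
}
```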
@@ -2242,10 +2251,12 @@ ice_dev_init(struct rte_eth_dev *dev)
        /* get base queue pairs index  in the device */
        ice_base_queue_get(pf);
 
-       ret = ice_flow_init(ad);
-       if (ret) {
-               PMD_INIT_LOG(ERR, "Failed to initialize flow");
-               return ret;
+       if (!ad->is_safe_mode) {
+               ret = ice_flow_init(ad);
+               if (ret) {
+                       PMD_INIT_LOG(ERR, "Failed to initialize flow");
+                       return ret;
+               }
        }
 
        ret = ice_reset_fxp_resource(hw);
@@ -2299,7 +2310,7 @@ ice_release_vsi(struct ice_vsi *vsi)
        return 0;
 }
 
-static void
+void
 ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
 {
        struct rte_eth_dev *dev = vsi->adapter->eth_dev;
@@ -2352,13 +2363,13 @@ ice_dev_stop(struct rte_eth_dev *dev)
        /* disable all queue interrupts */
        ice_vsi_disable_queues_intr(main_vsi);
 
-       if (pf->fdir.fdir_vsi)
-               ice_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
-
        /* Clear all queues and release mbufs */
        ice_clear_queues(dev);
 
-       ice_dev_set_link_down(dev);
+       if (pf->init_link_up)
+               ice_dev_set_link_up(dev);
+       else
+               ice_dev_set_link_down(dev);
 
        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
@@ -2389,7 +2400,8 @@ ice_dev_close(struct rte_eth_dev *dev)
 
        ice_dev_stop(dev);
 
-       ice_flow_uninit(ad);
+       if (!ad->is_safe_mode)
+               ice_flow_uninit(ad);
 
        /* release all queue resource */
        ice_free_queues(dev);
@@ -2439,6 +2451,9 @@ ice_dev_configure(struct rte_eth_dev *dev)
        ad->rx_bulk_alloc_allowed = true;
        ad->tx_simple_allowed = true;
 
+       if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+               dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
        return 0;
 }
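
The lines added here follow the usual RSS hash offload convention: when the application selects an RSS multi-queue mode, the PMD turns DEV_RX_OFFLOAD_RSS_HASH on implicitly. A minimal application-side configuration that exercises this path, with a placeholder port id, queue counts, and hash functions, might be:

```c
/* Sketch only: application configuration that sets ETH_MQ_RX_RSS_FLAG,
 * using the DEV_RX_OFFLOAD_* / ETH_MQ_* names of this DPDK era.
 * Port id, queue counts and rss_hf values are placeholders.
 */
#include <rte_ethdev.h>

static int
configure_port_with_rss(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,	/* implies ETH_MQ_RX_RSS_FLAG */
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
			},
		},
	};

	/* 4 RX / 4 TX queues chosen arbitrarily for the example. */
	return rte_eth_dev_configure(port_id, 4, 4, &conf);
}
```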
 
@@ -2596,7 +2611,7 @@ __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
        }
 }
 
-static void
+void
 ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
 {
        struct rte_eth_dev *dev = vsi->adapter->eth_dev;
@@ -2649,7 +2664,7 @@ ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
        }
 }
 
-static void
+void
 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
 {
        struct rte_eth_dev *dev = vsi->adapter->eth_dev;
@@ -2719,17 +2734,32 @@ ice_rxq_intr_setup(struct rte_eth_dev *dev)
        /* Enable interrupts for all the queues */
        ice_vsi_enable_queues_intr(vsi);
 
-       /* Enable FDIR MSIX interrupt */
-       if (pf->fdir.fdir_vsi) {
-               ice_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
-               ice_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
-       }
-
        rte_intr_enable(intr_handle);
 
        return 0;
 }
 
+static void
+ice_get_init_link_status(struct rte_eth_dev *dev)
+{
+       struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
+       struct ice_link_status link_status;
+       int ret;
+
+       ret = ice_aq_get_link_info(hw->port_info, enable_lse,
+                                  &link_status, NULL);
+       if (ret != ICE_SUCCESS) {
+               PMD_DRV_LOG(ERR, "Failed to get link info");
+               pf->init_link_up = false;
+               return;
+       }
+
+       if (link_status.link_info & ICE_AQ_LINK_UP)
+               pf->init_link_up = true;
+}
+
 static int
 ice_dev_start(struct rte_eth_dev *dev)
 {
@@ -2800,6 +2830,8 @@ ice_dev_start(struct rte_eth_dev *dev)
        if (ret != ICE_SUCCESS)
                PMD_DRV_LOG(WARNING, "Fail to set phy mask");
 
+       ice_get_init_link_status(dev);
+
        ice_dev_set_link_up(dev);
 
        /* Call get_link_info aq commond to enable/disable LSE */
@@ -2891,7 +2923,8 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                        DEV_RX_OFFLOAD_TCP_CKSUM |
                        DEV_RX_OFFLOAD_QINQ_STRIP |
                        DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-                       DEV_RX_OFFLOAD_VLAN_EXTEND;
+                       DEV_RX_OFFLOAD_VLAN_EXTEND |
+                       DEV_RX_OFFLOAD_RSS_HASH;
                dev_info->tx_offload_capa |=
                        DEV_TX_OFFLOAD_QINQ_INSERT |
                        DEV_TX_OFFLOAD_IPV4_CKSUM |
@@ -4538,7 +4571,8 @@ RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(net_ice,
                              ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp>"
                              ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>"
-                             ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>");
+                             ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>"
+                             ICE_FLOW_MARK_SUPPORT_ARG "=<0|1>");
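
The new devarg is supplied the same way as safe-mode-support and pipeline-mode-support, appended to the device's devargs string. A minimal sketch of EAL initialization that passes it, assuming the 19.11/20.02-era -w (PCI whitelist) EAL option and a placeholder PCI address:

```c
/* Sketch only: passing the new devarg at EAL init time.
 * "0000:18:00.0" is a placeholder PCI address; -w was the device
 * whitelist option in this DPDK era (later renamed -a).
 */
#include <rte_eal.h>
#include <stdlib.h>

int
main(void)
{
	char *argv[] = {
		"app",
		"-w", "0000:18:00.0,flow-mark-support=1",
		NULL
	};
	int argc = 3;

	if (rte_eal_init(argc, argv) < 0)
		exit(EXIT_FAILURE);

	/* ... application setup ... */
	return 0;
}
```

With testpmd, the same devargs string can simply be appended to the PCI address given after -w on the command line.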
 
 RTE_INIT(ice_init_log)
 {