net/ice: add E825C/E824S device IDs, TM ops registration, and queue-mapping/RSS/EEPROM/VXLAN fixes
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 6d85e42..4c658ce 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -196,9 +196,27 @@ static const struct rte_pci_id pci_id_ice_map[] = {
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SFP) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_10G_BASE_T) },
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SGMII) },
+       { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E824S) },
+       { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_BACKPLANE) },
+       { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_QSFP) },
+       { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_SFP) },
+       { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_1GBE) },
+       { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825X) },
        { .vendor_id = 0, /* sentinel */ },
 };
 
+static int
+ice_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+               void *arg)
+{
+       if (!arg)
+               return -EINVAL;
+
+       *(const void **)arg = &ice_tm_ops;
+
+       return 0;
+}
+
 static const struct eth_dev_ops ice_eth_dev_ops = {
        .dev_configure                = ice_dev_configure,
        .dev_start                    = ice_dev_start,
@@ -261,6 +279,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
        .timesync_read_time           = ice_timesync_read_time,
        .timesync_write_time          = ice_timesync_write_time,
        .timesync_disable             = ice_timesync_disable,
+       .tm_ops_get                   = ice_tm_ops_get,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -802,7 +821,7 @@ ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
                                struct ice_aqc_vsi_props *info,
                                uint8_t enabled_tcmap)
 {
-       uint16_t bsf, qp_idx;
+       uint16_t fls, qp_idx;
 
        /* default tc 0 now. Multi-TC supporting need to be done later.
         * Configure TC and queue mapping parameters, for enabled TC,
@@ -813,16 +832,32 @@ ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
                return -ENOTSUP;
        }
 
-       vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
-       bsf = rte_bsf32(vsi->nb_qps);
-       /* Adjust the queue number to actual queues that can be applied */
-       vsi->nb_qps = 0x1 << bsf;
+       /* vector 0 is reserved and 1 vector for ctrl vsi */
+       if (vsi->adapter->hw.func_caps.common_cap.num_msix_vectors < 2)
+               vsi->nb_qps = 0;
+       else
+               vsi->nb_qps = RTE_MIN
+                       ((uint16_t)vsi->adapter->hw.func_caps.common_cap.num_msix_vectors - 2,
+                       RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC));
+
+       /* nb_qps(hex)  -> fls */
+       /* 0000         -> 0 */
+       /* 0001         -> 0 */
+       /* 0002         -> 1 */
+       /* 0003 ~ 0004  -> 2 */
+       /* 0005 ~ 0008  -> 3 */
+       /* 0009 ~ 0010  -> 4 */
+       /* 0011 ~ 0020  -> 5 */
+       /* 0021 ~ 0040  -> 6 */
+       /* 0041 ~ 0080  -> 7 */
+       /* 0081 ~ 0100  -> 8 */
+       fls = (vsi->nb_qps == 0) ? 0 : rte_fls_u32(vsi->nb_qps - 1);
 
        qp_idx = 0;
        /* Set tc and queue mapping with VSI */
        info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
                                                ICE_AQ_VSI_TC_Q_OFFSET_S) |
-                                              (bsf << ICE_AQ_VSI_TC_Q_NUM_S));
+                                              (fls << ICE_AQ_VSI_TC_Q_NUM_S));
 
        /* Associate queue number with VSI */
        info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
@@ -2306,6 +2341,9 @@ ice_dev_init(struct rte_eth_dev *dev)
        /* Initialize RSS context for gtpu_eh */
        ice_rss_ctx_init(pf);
 
+       /* Initialize TM configuration */
+       ice_tm_conf_init(dev);
+
        if (!ad->is_safe_mode) {
                ret = ice_flow_init(ad);
                if (ret) {
@@ -2486,6 +2524,9 @@ ice_dev_close(struct rte_eth_dev *dev)
        rte_free(pf->proto_xtr);
        pf->proto_xtr = NULL;
 
+       /* Uninit TM configuration */
+       ice_tm_conf_uninit(dev);
+
        if (ad->devargs.pps_out_ena) {
                ICE_WRITE_REG(hw, GLTSYN_AUX_OUT(pin_idx, timer), 0);
                ICE_WRITE_REG(hw, GLTSYN_CLKO(pin_idx, timer), 0);
@@ -3241,7 +3282,8 @@ static int ice_init_rss(struct ice_pf *pf)
                           RTE_MIN(rss_conf->rss_key_len,
                                   vsi->rss_key_size));
 
-       rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
+       rte_memcpy(key.standard_rss_key, vsi->rss_key,
+               RTE_MIN(sizeof(key.standard_rss_key), vsi->rss_key_size));
        ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
        if (ret)
                goto out;
@@ -3610,7 +3652,7 @@ ice_dev_start(struct rte_eth_dev *dev)
        ice_dev_set_link_up(dev);
 
        /* Call get_link_info aq command to enable/disable LSE */
-       ice_link_update(dev, 0);
+       ice_link_update(dev, 1);
 
        pf->adapter_stopped = false;
 
@@ -5016,14 +5058,14 @@ ice_get_module_eeprom(struct rte_eth_dev *dev,
 #define I2C_USLEEP_MAX_TIME 2500
        uint8_t value[SFF_READ_BLOCK_SIZE] = {0};
        uint8_t addr = ICE_I2C_EEPROM_DEV_ADDR;
-       uint8_t *data = info->data;
+       uint8_t *data = NULL;
        enum ice_status status;
        bool is_sfp = false;
        uint32_t i, j;
        uint32_t offset = 0;
        uint8_t page = 0;
 
-       if (!info || !info->length || !data)
+       if (!info || !info->length || !info->data)
                return -EINVAL;
 
        status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, value, 1, 0,
@@ -5034,6 +5076,7 @@ ice_get_module_eeprom(struct rte_eth_dev *dev,
        if (value[0] == ICE_MODULE_TYPE_SFP)
                is_sfp = true;
 
+       data = info->data;
        memset(data, 0, info->length);
        for (i = 0; i < info->length; i += SFF_READ_BLOCK_SIZE) {
                offset = i + info->offset;
@@ -5614,6 +5657,8 @@ ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 {
        int ret = 0;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ice_adapter *ad =
+               ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 
        if (udp_tunnel == NULL)
                return -EINVAL;
@@ -5621,6 +5666,9 @@ ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
        switch (udp_tunnel->prot_type) {
        case RTE_ETH_TUNNEL_TYPE_VXLAN:
                ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
+               if (!ret && ad->psr != NULL)
+                       ice_parser_vxlan_tunnel_set(ad->psr,
+                                       udp_tunnel->udp_port, true);
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
@@ -5638,6 +5686,8 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 {
        int ret = 0;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ice_adapter *ad =
+               ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 
        if (udp_tunnel == NULL)
                return -EINVAL;
@@ -5645,6 +5695,9 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
        switch (udp_tunnel->prot_type) {
        case RTE_ETH_TUNNEL_TYPE_VXLAN:
                ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
+               if (!ret && ad->psr != NULL)
+                       ice_parser_vxlan_tunnel_set(ad->psr,
+                                       udp_tunnel->udp_port, false);
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");