diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 293df88..7ffd7e7 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -386,6 +386,7 @@ static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
                                      struct rte_ether_addr *mac_addr);
 
 static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static void i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size);
 
 static int i40e_ethertype_filter_convert(
        const struct rte_eth_ethertype_filter *input,
@@ -1440,7 +1441,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
        }
        i40e_set_default_ptype_table(dev);
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       intr_handle = &pci_dev->intr_handle;
+       intr_handle = pci_dev->intr_handle;
 
        rte_eth_copy_pci_info(dev, pci_dev);
 
@@ -1709,11 +1710,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
         */
        i40e_add_tx_flow_control_drop_filter(pf);
 
-       /* Set the max frame size to 0x2600 by default,
-        * in case other drivers changed the default value.
-        */
-       i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL);
-
        /* initialize RSS rule list */
        TAILQ_INIT(&pf->rss_config_list);
 
@@ -1781,10 +1777,8 @@ i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
 
        ethertype_rule = &pf->ethertype;
        /* Remove all ethertype filter rules and hash */
-       if (ethertype_rule->hash_map)
-               rte_free(ethertype_rule->hash_map);
-       if (ethertype_rule->hash_table)
-               rte_hash_free(ethertype_rule->hash_table);
+       rte_free(ethertype_rule->hash_map);
+       rte_hash_free(ethertype_rule->hash_table);
 
        while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
                TAILQ_REMOVE(&ethertype_rule->ethertype_list,
@@ -1801,10 +1795,8 @@ i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
 
        tunnel_rule = &pf->tunnel;
        /* Remove all tunnel director rules and hash */
-       if (tunnel_rule->hash_map)
-               rte_free(tunnel_rule->hash_map);
-       if (tunnel_rule->hash_table)
-               rte_hash_free(tunnel_rule->hash_table);
+       rte_free(tunnel_rule->hash_map);
+       rte_hash_free(tunnel_rule->hash_table);
 
        while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
                TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
@@ -1833,16 +1825,11 @@ i40e_fdir_memory_cleanup(struct i40e_pf *pf)
        fdir_info = &pf->fdir;
 
        /* flow director memory cleanup */
-       if (fdir_info->hash_map)
-               rte_free(fdir_info->hash_map);
-       if (fdir_info->hash_table)
-               rte_hash_free(fdir_info->hash_table);
-       if (fdir_info->fdir_flow_pool.bitmap)
-               rte_free(fdir_info->fdir_flow_pool.bitmap);
-       if (fdir_info->fdir_flow_pool.pool)
-               rte_free(fdir_info->fdir_flow_pool.pool);
-       if (fdir_info->fdir_filter_array)
-               rte_free(fdir_info->fdir_filter_array);
+       rte_free(fdir_info->hash_map);
+       rte_hash_free(fdir_info->hash_table);
+       rte_free(fdir_info->fdir_flow_pool.bitmap);
+       rte_free(fdir_info->fdir_flow_pool.pool);
+       rte_free(fdir_info->fdir_filter_array);
 }
 
 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
@@ -1922,7 +1909,7 @@ i40e_dev_configure(struct rte_eth_dev *dev)
                goto err;
 
        /* VMDQ setup.
-        *  General PMD driver call sequence are NIC init, configure,
+        *  General PMD call sequence are NIC init, configure,
         *  rx/tx_queue_setup and dev_start. In rx/tx_queue_setup() function, it
         *  will try to lookup the VSI that specific queue belongs to if VMDQ
         *  applicable. So, VMDQ setting has to be done before
@@ -1972,7 +1959,7 @@ i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
 {
        struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        uint16_t msix_vect = vsi->msix_intr;
        uint16_t i;
@@ -2088,10 +2075,11 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
 {
        struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        uint16_t msix_vect = vsi->msix_intr;
-       uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
+       uint16_t nb_msix = RTE_MIN(vsi->nb_msix,
+                                  rte_intr_nb_efd_get(intr_handle));
        uint16_t queue_idx = 0;
        int record = 0;
        int i;
@@ -2141,8 +2129,8 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
                                               vsi->nb_used_qps - i,
                                               itr_idx);
                        for (; !!record && i < vsi->nb_used_qps; i++)
-                               intr_handle->intr_vec[queue_idx + i] =
-                                       msix_vect;
+                               rte_intr_vec_list_index_set(intr_handle,
+                                               queue_idx + i, msix_vect);
                        break;
                }
                /* 1:1 queue/msix_vect mapping */
@@ -2150,7 +2138,9 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
                                       vsi->base_queue + i, 1,
                                       itr_idx);
                if (!!record)
-                       intr_handle->intr_vec[queue_idx + i] = msix_vect;
+                       if (rte_intr_vec_list_index_set(intr_handle,
+                                               queue_idx + i, msix_vect))
+                               return -rte_errno;
 
                msix_vect++;
                nb_msix--;
@@ -2164,7 +2154,7 @@ i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
 {
        struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
        uint16_t msix_intr, i;
@@ -2191,7 +2181,7 @@ i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
 {
        struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
        uint16_t msix_intr, i;
@@ -2357,10 +2347,11 @@ i40e_dev_start(struct rte_eth_dev *dev)
        struct i40e_vsi *main_vsi = pf->main_vsi;
        int ret, i;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        uint32_t intr_vector = 0;
        struct i40e_vsi *vsi;
        uint16_t nb_rxq, nb_txq;
+       uint16_t max_frame_size;
 
        hw->adapter_stopped = 0;
 
@@ -2375,12 +2366,9 @@ i40e_dev_start(struct rte_eth_dev *dev)
                        return ret;
        }
 
-       if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
-               intr_handle->intr_vec =
-                       rte_zmalloc("intr_vec",
-                                   dev->data->nb_rx_queues * sizeof(int),
-                                   0);
-               if (!intr_handle->intr_vec) {
+       if (rte_intr_dp_is_en(intr_handle)) {
+               if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
+                                                  dev->data->nb_rx_queues)) {
                        PMD_INIT_LOG(ERR,
                                "Failed to allocate %d rx_queues intr_vec",
                                dev->data->nb_rx_queues);
@@ -2483,7 +2471,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
                if (ret != I40E_SUCCESS)
                        PMD_DRV_LOG(WARNING, "Fail to set phy mask");
 
-               /* Call get_link_info aq commond to enable/disable LSE */
+               /* Call get_link_info aq command to enable/disable LSE */
                i40e_dev_link_update(dev, 0);
        }
 
@@ -2502,6 +2490,9 @@ i40e_dev_start(struct rte_eth_dev *dev)
                            "please call hierarchy_commit() "
                            "before starting the port");
 
+       max_frame_size = dev->data->mtu + I40E_ETH_OVERHEAD;
+       i40e_set_mac_max_frame(dev, max_frame_size);
+
        return I40E_SUCCESS;
 
 tx_err:
@@ -2521,7 +2512,7 @@ i40e_dev_stop(struct rte_eth_dev *dev)
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *main_vsi = pf->main_vsi;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        int i;
 
        if (hw->adapter_stopped == 1)
@@ -2562,10 +2553,9 @@ i40e_dev_stop(struct rte_eth_dev *dev)
 
        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
-       if (intr_handle->intr_vec) {
-               rte_free(intr_handle->intr_vec);
-               intr_handle->intr_vec = NULL;
-       }
+
+       /* Cleanup vector list */
+       rte_intr_vec_list_free(intr_handle);
 
        /* reset hierarchy commit */
        pf->tm_conf.committed = false;
@@ -2584,7 +2574,7 @@ i40e_dev_close(struct rte_eth_dev *dev)
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct i40e_filter_control_settings settings;
        struct rte_flow *p_flow;
        uint32_t reg;
@@ -2849,6 +2839,9 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
        return i40e_phy_conf_link(hw, abilities, speed, false);
 }
 
+#define CHECK_INTERVAL             100  /* 100ms */
+#define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
+
 static __rte_always_inline void
 update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
 {
@@ -2915,8 +2908,6 @@ static __rte_always_inline void
 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
        bool enable_lse, int wait_to_complete)
 {
-#define CHECK_INTERVAL             100  /* 100ms */
-#define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
        uint32_t rep_cnt = MAX_REPEAT_TIME;
        struct i40e_link_status link_status;
        int status;
@@ -3556,7 +3547,7 @@ static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
                count++;
        }
 
-       /* Get individiual stats from i40e_hw_port struct */
+       /* Get individual stats from i40e_hw_port struct */
        for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
                strlcpy(xstats_names[count].name,
                        rte_i40e_hw_port_strings[i].name,
@@ -3614,7 +3605,7 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                count++;
        }
 
-       /* Get individiual stats from i40e_hw_port struct */
+       /* Get individual stats from i40e_hw_port struct */
        for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
                xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
                        rte_i40e_hw_port_strings[i].offset);
@@ -3747,10 +3738,12 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
                RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
                RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+               RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
                dev_info->tx_queue_offload_capa;
        dev_info->dev_capa =
                RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
                RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
        dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
                                                sizeof(uint32_t);
@@ -5543,7 +5536,7 @@ i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
                                        &ets_sla_config, NULL);
        if (ret != I40E_SUCCESS) {
                PMD_DRV_LOG(ERR,
-                       "VSI failed to get TC bandwdith configuration %u",
+                       "VSI failed to get TC bandwidth configuration %u",
                        hw->aq.asq_last_status);
                return ret;
        }
@@ -6718,6 +6711,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
                        if (!ret)
                                rte_eth_dev_callback_process(dev,
                                        RTE_ETH_EVENT_INTR_LSC, NULL);
+
                        break;
                default:
                        PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
@@ -6821,7 +6815,7 @@ i40e_handle_mdd_event(struct rte_eth_dev *dev)
  * @param handle
  *  Pointer to interrupt handle.
  * @param param
- *  The address of parameter (struct rte_eth_dev *) regsitered before.
+ *  The address of parameter (struct rte_eth_dev *) registered before.
  *
  * @return
  *  void
@@ -9718,7 +9712,7 @@ i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
        return 0;
 }
 
-/* Check if there exists the ehtertype filter */
+/* Check if there exists the ethertype filter */
 struct i40e_ethertype_filter *
 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
                                const struct i40e_ethertype_filter_input *input)
@@ -11068,11 +11062,11 @@ static int
 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t msix_intr;
 
-       msix_intr = intr_handle->intr_vec[queue_id];
+       msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);
        if (msix_intr == I40E_MISC_VEC_ID)
                I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
                               I40E_PFINT_DYN_CTL0_INTENA_MASK |
@@ -11087,7 +11081,7 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
                               I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
 
        I40E_WRITE_FLUSH(hw);
-       rte_intr_ack(&pci_dev->intr_handle);
+       rte_intr_ack(pci_dev->intr_handle);
 
        return 0;
 }
@@ -11096,11 +11090,11 @@ static int
 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t msix_intr;
 
-       msix_intr = intr_handle->intr_vec[queue_id];
+       msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);
        if (msix_intr == I40E_MISC_VEC_ID)
                I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
                               I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
@@ -12102,6 +12096,35 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
        return ret;
 }
 
+static void
+i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size)
+{
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t rep_cnt = MAX_REPEAT_TIME;
+       struct rte_eth_link link;
+       enum i40e_status_code status;
+       bool can_be_set = true;
+
+       /* I40E_MEDIA_TYPE_BASET link up can be ignored */
+       if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET) {
+               do {
+                       update_link_reg(hw, &link);
+                       if (link.link_status)
+                               break;
+                       rte_delay_ms(CHECK_INTERVAL);
+               } while (--rep_cnt);
+               can_be_set = !!link.link_status;
+       }
+
+       if (can_be_set) {
+               status = i40e_aq_set_mac_config(hw, size, TRUE, 0, false, NULL);
+               if (status != I40E_SUCCESS)
+                       PMD_DRV_LOG(ERR, "Failed to set max frame size at port level");
+       } else {
+               PMD_DRV_LOG(ERR, "Set max frame size at port level not applicable on link down");
+       }
+}
+
 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_init, init, NOTICE);
 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_driver, driver, NOTICE);
 #ifdef RTE_ETHDEV_DEBUG_RX