struct rte_ether_addr *mac_addr);
static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static void i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size);
static int i40e_ethertype_filter_convert(
const struct rte_eth_ethertype_filter *input,
*/
i40e_add_tx_flow_control_drop_filter(pf);
- /* Set the max frame size to 0x2600 by default,
- * in case other drivers changed the default value.
- */
- i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL);
-
/* initialize RSS rule list */
TAILQ_INIT(&pf->rss_config_list);
ethertype_rule = &pf->ethertype;
/* Remove all ethertype filter rules and hash */
- if (ethertype_rule->hash_map)
- rte_free(ethertype_rule->hash_map);
- if (ethertype_rule->hash_table)
- rte_hash_free(ethertype_rule->hash_table);
+ rte_free(ethertype_rule->hash_map);
+ rte_hash_free(ethertype_rule->hash_table);
while ((p_ethertype = TAILQ_FIRST(ðertype_rule->ethertype_list))) {
TAILQ_REMOVE(ðertype_rule->ethertype_list,
tunnel_rule = &pf->tunnel;
/* Remove all tunnel director rules and hash */
- if (tunnel_rule->hash_map)
- rte_free(tunnel_rule->hash_map);
- if (tunnel_rule->hash_table)
- rte_hash_free(tunnel_rule->hash_table);
+ rte_free(tunnel_rule->hash_map);
+ rte_hash_free(tunnel_rule->hash_table);
while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
fdir_info = &pf->fdir;
/* flow director memory cleanup */
- if (fdir_info->hash_map)
- rte_free(fdir_info->hash_map);
- if (fdir_info->hash_table)
- rte_hash_free(fdir_info->hash_table);
- if (fdir_info->fdir_flow_pool.bitmap)
- rte_free(fdir_info->fdir_flow_pool.bitmap);
- if (fdir_info->fdir_flow_pool.pool)
- rte_free(fdir_info->fdir_flow_pool.pool);
- if (fdir_info->fdir_filter_array)
- rte_free(fdir_info->fdir_filter_array);
+ rte_free(fdir_info->hash_map);
+ rte_hash_free(fdir_info->hash_table);
+ rte_free(fdir_info->fdir_flow_pool.bitmap);
+ rte_free(fdir_info->fdir_flow_pool.pool);
+ rte_free(fdir_info->fdir_filter_array);
}
void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
goto err;
/* VMDQ setup.
- * General PMD driver call sequence are NIC init, configure,
+ * General PMD call sequence are NIC init, configure,
* rx/tx_queue_setup and dev_start. In rx/tx_queue_setup() function, it
* will try to lookup the VSI that specific queue belongs to if VMDQ
* applicable. So, VMDQ setting has to be done before
uint32_t intr_vector = 0;
struct i40e_vsi *vsi;
uint16_t nb_rxq, nb_txq;
+ uint16_t max_frame_size;
hw->adapter_stopped = 0;
if (ret != I40E_SUCCESS)
PMD_DRV_LOG(WARNING, "Fail to set phy mask");
- /* Call get_link_info aq commond to enable/disable LSE */
+ /* Call get_link_info aq command to enable/disable LSE */
i40e_dev_link_update(dev, 0);
}
"please call hierarchy_commit() "
"before starting the port");
+ max_frame_size = dev->data->mtu + I40E_ETH_OVERHEAD;
+ i40e_set_mac_max_frame(dev, max_frame_size);
+
return I40E_SUCCESS;
tx_err:
return i40e_phy_conf_link(hw, abilities, speed, false);
}
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
+
static __rte_always_inline void
update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
{
update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
bool enable_lse, int wait_to_complete)
{
-#define CHECK_INTERVAL 100 /* 100ms */
-#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
uint32_t rep_cnt = MAX_REPEAT_TIME;
struct i40e_link_status link_status;
int status;
count++;
}
- /* Get individiual stats from i40e_hw_port struct */
+ /* Get individual stats from i40e_hw_port struct */
for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
strlcpy(xstats_names[count].name,
rte_i40e_hw_port_strings[i].name,
count++;
}
- /* Get individiual stats from i40e_hw_port struct */
+ /* Get individual stats from i40e_hw_port struct */
for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
rte_i40e_hw_port_strings[i].offset);
&ets_sla_config, NULL);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR,
- "VSI failed to get TC bandwdith configuration %u",
+ "VSI failed to get TC bandwidth configuration %u",
hw->aq.asq_last_status);
return ret;
}
if (!ret)
rte_eth_dev_callback_process(dev,
RTE_ETH_EVENT_INTR_LSC, NULL);
+
break;
default:
PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
return 0;
}
-/* Check if there exists the ehtertype filter */
+/* Check if there exists the ethertype filter */
struct i40e_ethertype_filter *
i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
const struct i40e_ethertype_filter_input *input)
return ret;
}
+/* Program the port-level max frame size via the MAC-config admin-queue
+ * command, waiting briefly for the link to come up first.
+ *
+ * Polls the link status register every CHECK_INTERVAL ms for up to
+ * MAX_REPEAT_TIME attempts. If the link never comes up, the command is
+ * skipped and an error is logged (per the else branch, the port-level
+ * setting is "not applicable on link down").
+ *
+ * @param dev   Ethernet device whose port is being configured.
+ * @param size  Max frame size in bytes (caller passes MTU + I40E_ETH_OVERHEAD).
+ */
+static void
+i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rep_cnt = MAX_REPEAT_TIME;
+ struct rte_eth_link link;
+ enum i40e_status_code status;
+
+ /* Busy-wait (with ms sleeps) until the PHY reports link-up or the
+  * retry budget is exhausted; update_link_reg() fills `link` on the
+  * first iteration, so it is never read uninitialized.
+  */
+ do {
+  update_link_reg(hw, &link);
+  if (link.link_status)
+   break;
+
+  rte_delay_ms(CHECK_INTERVAL);
+ } while (--rep_cnt);
+
+ if (link.link_status) {
+  /* NOTE(review): trailing args are (crc_en=TRUE, pacing=0,
+   * auto_drop_blocking_packets=false, cmd_details=NULL); the call
+   * removed elsewhere in this patch passed (TRUE, false, 0) —
+   * numerically equivalent, but confirm the ordering against the
+   * i40e_aq_set_mac_config() prototype in the shared base code.
+   */
+  status = i40e_aq_set_mac_config(hw, size, TRUE, 0, false, NULL);
+  if (status != I40E_SUCCESS)
+   PMD_DRV_LOG(ERR, "Failed to set max frame size at port level");
+ } else {
+  PMD_DRV_LOG(ERR, "Set max frame size at port level not applicable on link down");
+ }
+}
+
+
RTE_LOG_REGISTER_SUFFIX(i40e_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(i40e_logtype_driver, driver, NOTICE);
#ifdef RTE_ETHDEV_DEBUG_RX