diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index 62865c5fd9..c09b9b990c 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -44,6 +44,7 @@ #define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver" #define ETH_I40E_QUEUE_NUM_PER_VF_ARG "queue-num-per-vf" #define ETH_I40E_USE_LATEST_VEC "use-latest-supported-vec" +#define ETH_I40E_VF_MSG_CFG "vf_msg_cfg" #define I40E_CLEAR_PXE_WAIT_MS 200 @@ -223,10 +224,10 @@ static int i40e_dev_start(struct rte_eth_dev *dev); static void i40e_dev_stop(struct rte_eth_dev *dev); static void i40e_dev_close(struct rte_eth_dev *dev); static int i40e_dev_reset(struct rte_eth_dev *dev); -static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev); -static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev); -static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev); -static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev); +static int i40e_dev_promiscuous_enable(struct rte_eth_dev *dev); +static int i40e_dev_promiscuous_disable(struct rte_eth_dev *dev); +static int i40e_dev_allmulticast_enable(struct rte_eth_dev *dev); +static int i40e_dev_allmulticast_disable(struct rte_eth_dev *dev); static int i40e_dev_set_link_up(struct rte_eth_dev *dev); static int i40e_dev_set_link_down(struct rte_eth_dev *dev); static int i40e_dev_stats_get(struct rte_eth_dev *dev, @@ -236,15 +237,11 @@ static int i40e_dev_xstats_get(struct rte_eth_dev *dev, static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names, unsigned limit); -static void i40e_dev_stats_reset(struct rte_eth_dev *dev); -static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev, - uint16_t queue_id, - uint8_t stat_idx, - uint8_t is_rx); +static int i40e_dev_stats_reset(struct rte_eth_dev *dev); static int i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size); -static void i40e_dev_info_get(struct rte_eth_dev *dev, - struct rte_eth_dev_info *dev_info); +static int i40e_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); static int i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); @@ -265,7 +262,7 @@ static int i40e_flow_ctrl_set(struct rte_eth_dev *dev, static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf); static int i40e_macaddr_add(struct rte_eth_dev *dev, - struct ether_addr *mac_addr, + struct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool); static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index); @@ -379,7 +376,7 @@ static int i40e_get_module_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *info); static int i40e_set_default_mac_addr(struct rte_eth_dev *dev, - struct ether_addr *mac_addr); + struct rte_ether_addr *mac_addr); static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); @@ -403,6 +400,15 @@ static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev); int i40e_logtype_init; int i40e_logtype_driver; +#ifdef RTE_LIBRTE_I40E_DEBUG_RX +int i40e_logtype_rx; +#endif +#ifdef RTE_LIBRTE_I40E_DEBUG_TX +int i40e_logtype_tx; +#endif +#ifdef RTE_LIBRTE_I40E_DEBUG_TX_FREE +int i40e_logtype_tx_free; +#endif static 
const char *const valid_keys[] = { ETH_I40E_FLOATING_VEB_ARG, @@ -410,6 +416,7 @@ static const char *const valid_keys[] = { ETH_I40E_SUPPORT_MULTI_DRIVER, ETH_I40E_QUEUE_NUM_PER_VF_ARG, ETH_I40E_USE_LATEST_VEC, + ETH_I40E_VF_MSG_CFG, NULL}; static const struct rte_pci_id pci_id_i40e_map[] = { @@ -433,6 +440,11 @@ static const struct rte_pci_id pci_id_i40e_map[] = { { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) }, { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) }, { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X710_N3000) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_XXV710_N3000) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP) }, { .vendor_id = 0, /* sentinel */ }, }; @@ -454,7 +466,6 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .xstats_get_names = i40e_dev_xstats_get_names, .stats_reset = i40e_dev_stats_reset, .xstats_reset = i40e_dev_stats_reset, - .queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set, .fw_version_get = i40e_fw_version_get, .dev_infos_get = i40e_dev_info_get, .dev_supported_ptypes_get = i40e_dev_supported_ptypes_get, @@ -493,6 +504,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .filter_ctrl = i40e_dev_filter_ctrl, .rxq_info_get = i40e_rxq_info_get, .txq_info_get = i40e_txq_info_get, + .rx_burst_mode_get = i40e_rx_burst_mode_get, + .tx_burst_mode_get = i40e_tx_burst_mode_get, .mirror_rule_set = i40e_mirror_rule_set, .mirror_rule_reset = i40e_mirror_rule_reset, .timesync_enable = i40e_timesync_enable, @@ -511,6 +524,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .mac_addr_set = i40e_set_default_mac_addr, .mtu_set = i40e_dev_mtu_set, .tm_ops_get = i40e_tm_ops_get, + .tx_done_cleanup = i40e_tx_done_cleanup, }; /* store statistics names and its offset in stats structure */ @@ -523,13 +537,13 @@ static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = { {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)}, {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)}, {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)}, - {"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)}, + {"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)}, {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats, rx_unknown_protocol)}, {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)}, {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)}, {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)}, - {"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)}, + {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)}, }; #define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \ @@ -687,19 +701,19 @@ static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev) ethdev = rte_eth_dev_allocated(pci_dev->device.name); if (!ethdev) - return -ENODEV; - + return 0; if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) - return rte_eth_dev_destroy(ethdev, i40e_vf_representor_uninit); + return rte_eth_dev_pci_generic_remove(pci_dev, + i40e_vf_representor_uninit); else - return rte_eth_dev_destroy(ethdev, eth_i40e_dev_uninit); + return rte_eth_dev_pci_generic_remove(pci_dev, + eth_i40e_dev_uninit); } static struct rte_pci_driver rte_i40e_pmd = { 
.id_table = pci_id_i40e_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | - RTE_PCI_DRV_IOVA_AS_VA, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, .probe = eth_i40e_pci_probe, .remove = eth_i40e_pci_remove, }; @@ -1095,6 +1109,7 @@ i40e_init_customized_info(struct i40e_pf *pf) } pf->gtp_support = false; + pf->esp_support = false; } void @@ -1208,11 +1223,9 @@ i40e_parse_latest_vec_handler(__rte_unused const char *key, const char *value, void *opaque) { - struct i40e_adapter *ad; + struct i40e_adapter *ad = opaque; int use_latest_vec; - ad = (struct i40e_adapter *)opaque; - use_latest_vec = atoi(value); if (use_latest_vec != 0 && use_latest_vec != 1) @@ -1261,6 +1274,73 @@ i40e_use_latest_vec(struct rte_eth_dev *dev) return 0; } +static int +read_vf_msg_config(__rte_unused const char *key, + const char *value, + void *opaque) +{ + struct i40e_vf_msg_cfg *cfg = opaque; + + if (sscanf(value, "%u@%u:%u", &cfg->max_msg, &cfg->period, + &cfg->ignore_second) != 3) { + memset(cfg, 0, sizeof(*cfg)); + PMD_DRV_LOG(ERR, "format error! example: " + "%s=60@120:180", ETH_I40E_VF_MSG_CFG); + return -EINVAL; + } + + /* + * If the message validation function been enabled, the 'period' + * and 'ignore_second' must greater than 0. + */ + if (cfg->max_msg && (!cfg->period || !cfg->ignore_second)) { + memset(cfg, 0, sizeof(*cfg)); + PMD_DRV_LOG(ERR, "%s error! the second and third" + " number must be greater than 0!", + ETH_I40E_VF_MSG_CFG); + return -EINVAL; + } + + return 0; +} + +static int +i40e_parse_vf_msg_config(struct rte_eth_dev *dev, + struct i40e_vf_msg_cfg *msg_cfg) +{ + struct rte_kvargs *kvlist; + int kvargs_count; + int ret = 0; + + memset(msg_cfg, 0, sizeof(*msg_cfg)); + + if (!dev->device->devargs) + return ret; + + kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys); + if (!kvlist) + return -EINVAL; + + kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_VF_MSG_CFG); + if (!kvargs_count) + goto free_end; + + if (kvargs_count > 1) { + PMD_DRV_LOG(ERR, "More than one argument \"%s\"!", + ETH_I40E_VF_MSG_CFG); + ret = -EINVAL; + goto free_end; + } + + if (rte_kvargs_process(kvlist, ETH_I40E_VF_MSG_CFG, + read_vf_msg_config, msg_cfg) < 0) + ret = -EINVAL; + +free_end: + rte_kvargs_free(kvlist); + return ret; +} + #define I40E_ALARM_INTERVAL 50000 /* us */ static int @@ -1317,6 +1397,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) hw->adapter_stopped = 0; hw->adapter_closed = 0; + /* Init switch device pointer */ + hw->switch_dev = NULL; + /* * Switch Tag value should not be identical to either the First Tag * or Second Tag values. 
So set something other than common Ethertype @@ -1333,6 +1416,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) return -EIO; } + i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg); /* Check if need to support multi-driver */ i40e_support_multi_driver(dev); /* Check if users want the latest supported vec path */ @@ -1362,6 +1446,10 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret); return -EIO; } + /* Firmware of SFP x722 does not support adminq option */ + if (hw->device_id == I40E_DEV_ID_SFP_X722) + hw->flags &= ~I40E_HW_FLAG_802_1AD_CAPABLE; + PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x", hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.api_maj_ver, hw->aq.api_min_ver, @@ -1466,8 +1554,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) goto err_get_mac_addr; } /* Copy the permanent MAC address */ - ether_addr_copy((struct ether_addr *) hw->mac.addr, - (struct ether_addr *) hw->mac.perm_addr); + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, + (struct rte_ether_addr *)hw->mac.perm_addr); /* Disable flow control */ hw->fc.requested_mode = I40E_FC_NONE; @@ -1476,7 +1564,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) /* Set the global registers with default ether type value */ if (!pf->support_multi_driver) { ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, - ETHER_TYPE_VLAN); + RTE_ETHER_TYPE_VLAN); if (ret != I40E_SUCCESS) { PMD_INIT_LOG(ERR, "Failed to set the default outer " @@ -1507,9 +1595,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) } if (!vsi->max_macaddrs) - len = ETHER_ADDR_LEN; + len = RTE_ETHER_ADDR_LEN; else - len = ETHER_ADDR_LEN * vsi->max_macaddrs; + len = RTE_ETHER_ADDR_LEN * vsi->max_macaddrs; /* Should be after VSI initialized */ dev->data->mac_addrs = rte_zmalloc("i40e", len, 0); @@ -1518,9 +1606,14 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) "Failed to allocated memory for storing mac address"); goto err_mac_alloc; } - ether_addr_copy((struct ether_addr *)hw->mac.perm_addr, + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr, &dev->data->mac_addrs[0]); + /* Pass the information to the rte_eth_dev_close() that it should also + * release the private port resources. + */ + dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + /* Init dcb to sw mode by default */ ret = i40e_dcb_init_configure(dev, TRUE); if (ret != I40E_SUCCESS) { @@ -1558,7 +1651,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) /* Set the max frame size to 0x2600 by default, * in case other drivers changed the default value. 
*/ - i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL); + i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL); /* initialize mirror rule list */ TAILQ_INIT(&pf->mirror_list); @@ -1599,6 +1692,7 @@ err_init_tunnel_filter_list: rte_free(pf->ethertype.hash_map); err_init_ethtype_filter_list: rte_free(dev->data->mac_addrs); + dev->data->mac_addrs = NULL; err_mac_alloc: i40e_vsi_release(pf->main_vsi); err_setup_pf_switch: @@ -1689,85 +1783,18 @@ void i40e_flex_payload_reg_set_default(struct i40e_hw *hw) static int eth_i40e_dev_uninit(struct rte_eth_dev *dev) { - struct i40e_pf *pf; - struct rte_pci_device *pci_dev; - struct rte_intr_handle *intr_handle; struct i40e_hw *hw; - struct i40e_filter_control_settings settings; - struct rte_flow *p_flow; - int ret; - uint8_t aq_fail = 0; - int retries = 0; PMD_INIT_FUNC_TRACE(); if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; - pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - pci_dev = RTE_ETH_DEV_TO_PCI(dev); - intr_handle = &pci_dev->intr_handle; - - ret = rte_eth_switch_domain_free(pf->switch_domain_id); - if (ret) - PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret); if (hw->adapter_closed == 0) i40e_dev_close(dev); - dev->dev_ops = NULL; - dev->rx_pkt_burst = NULL; - dev->tx_pkt_burst = NULL; - - /* Clear PXE mode */ - i40e_clear_pxe_mode(hw); - - /* Unconfigure filter control */ - memset(&settings, 0, sizeof(settings)); - ret = i40e_set_filter_control(hw, &settings); - if (ret) - PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d", - ret); - - /* Disable flow control */ - hw->fc.requested_mode = I40E_FC_NONE; - i40e_set_fc(hw, &aq_fail, TRUE); - - /* uninitialize pf host driver */ - i40e_pf_host_uninit(dev); - - /* disable uio intr before callback unregister */ - rte_intr_disable(intr_handle); - - /* unregister callback func to eal lib */ - do { - ret = rte_intr_callback_unregister(intr_handle, - i40e_dev_interrupt_handler, dev); - if (ret >= 0) { - break; - } else if (ret != -EAGAIN) { - PMD_INIT_LOG(ERR, - "intr callback unregister failed: %d", - ret); - return ret; - } - i40e_msec_delay(500); - } while (retries++ < 5); - - i40e_rm_ethtype_filter_list(pf); - i40e_rm_tunnel_filter_list(pf); - i40e_rm_fdir_filter_list(pf); - - /* Remove all flows */ - while ((p_flow = TAILQ_FIRST(&pf->flow_list))) { - TAILQ_REMOVE(&pf->flow_list, p_flow, node); - rte_free(p_flow); - } - - /* Remove all Traffic Manager configuration */ - i40e_tm_conf_uninit(dev); - return 0; } @@ -1793,6 +1820,9 @@ i40e_dev_configure(struct rte_eth_dev *dev) ad->tx_simple_allowed = true; ad->tx_vec_allowed = true; + if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; + /* Only legacy filter API needs the following fdir config. So when the * legacy filter API is deprecated, the following codes should also be * removed. 
@@ -2215,6 +2245,9 @@ i40e_apply_link_speed(struct rte_eth_dev *dev) struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_eth_conf *conf = &dev->data->dev_conf; + abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK | + I40E_AQ_PHY_LINK_ENABLED; + if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) { conf->link_speeds = ETH_LINK_SPEED_40G | ETH_LINK_SPEED_25G | @@ -2222,11 +2255,12 @@ i40e_apply_link_speed(struct rte_eth_dev *dev) ETH_LINK_SPEED_10G | ETH_LINK_SPEED_1G | ETH_LINK_SPEED_100M; + + abilities |= I40E_AQ_PHY_AN_ENABLED; + } else { + abilities &= ~I40E_AQ_PHY_AN_ENABLED; } speed = i40e_parse_link_speeds(conf->link_speeds); - abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK | - I40E_AQ_PHY_AN_ENABLED | - I40E_AQ_PHY_LINK_ENABLED; return i40e_phy_conf_link(hw, abilities, speed, true); } @@ -2245,13 +2279,6 @@ i40e_dev_start(struct rte_eth_dev *dev) hw->adapter_stopped = 0; - if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) { - PMD_INIT_LOG(ERR, - "Invalid link_speeds for port %u, autonegotiation disabled", - dev->data->port_id); - return -EINVAL; - } - rte_intr_disable(intr_handle); if ((rte_intr_cap_multiple(intr_handle) || @@ -2465,12 +2492,21 @@ i40e_dev_close(struct rte_eth_dev *dev) struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_mirror_rule *p_mirror; + struct i40e_filter_control_settings settings; + struct rte_flow *p_flow; uint32_t reg; int i; int ret; + uint8_t aq_fail = 0; + int retries = 0; PMD_INIT_FUNC_TRACE(); + ret = rte_eth_switch_domain_free(pf->switch_domain_id); + if (ret) + PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret); + + i40e_dev_stop(dev); /* Remove all mirror rules */ @@ -2535,6 +2571,53 @@ i40e_dev_close(struct rte_eth_dev *dev) (reg | I40E_PFGEN_CTRL_PFSWR_MASK)); I40E_WRITE_FLUSH(hw); + dev->dev_ops = NULL; + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + + /* Clear PXE mode */ + i40e_clear_pxe_mode(hw); + + /* Unconfigure filter control */ + memset(&settings, 0, sizeof(settings)); + ret = i40e_set_filter_control(hw, &settings); + if (ret) + PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d", + ret); + + /* Disable flow control */ + hw->fc.requested_mode = I40E_FC_NONE; + i40e_set_fc(hw, &aq_fail, TRUE); + + /* uninitialize pf host driver */ + i40e_pf_host_uninit(dev); + + do { + ret = rte_intr_callback_unregister(intr_handle, + i40e_dev_interrupt_handler, dev); + if (ret >= 0 || ret == -ENOENT) { + break; + } else if (ret != -EAGAIN) { + PMD_INIT_LOG(ERR, + "intr callback unregister failed: %d", + ret); + } + i40e_msec_delay(500); + } while (retries++ < 5); + + i40e_rm_ethtype_filter_list(pf); + i40e_rm_tunnel_filter_list(pf); + i40e_rm_fdir_filter_list(pf); + + /* Remove all flows */ + while ((p_flow = TAILQ_FIRST(&pf->flow_list))) { + TAILQ_REMOVE(&pf->flow_list, p_flow, node); + rte_free(p_flow); + } + + /* Remove all Traffic Manager configuration */ + i40e_tm_conf_uninit(dev); + hw->adapter_closed = 1; } @@ -2564,7 +2647,7 @@ i40e_dev_reset(struct rte_eth_dev *dev) return ret; } -static void +static int i40e_dev_promiscuous_enable(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -2574,17 +2657,25 @@ i40e_dev_promiscuous_enable(struct rte_eth_dev *dev) status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, true, NULL, true); - if (status != I40E_SUCCESS) + if (status != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous"); + return -EAGAIN; 
+ } status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL); - if (status != I40E_SUCCESS) + if (status != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous"); + /* Rollback unicast promiscuous mode */ + i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, + false, NULL, true); + return -EAGAIN; + } + return 0; } -static void +static int i40e_dev_promiscuous_disable(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -2594,20 +2685,29 @@ i40e_dev_promiscuous_disable(struct rte_eth_dev *dev) status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, false, NULL, true); - if (status != I40E_SUCCESS) + if (status != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous"); + return -EAGAIN; + } /* must remain in all_multicast mode */ if (dev->data->all_multicast == 1) - return; + return 0; status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, false, NULL); - if (status != I40E_SUCCESS) + if (status != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous"); + /* Rollback unicast promiscuous mode */ + i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, + true, NULL, true); + return -EAGAIN; + } + + return 0; } -static void +static int i40e_dev_allmulticast_enable(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -2616,11 +2716,15 @@ i40e_dev_allmulticast_enable(struct rte_eth_dev *dev) int ret; ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL); - if (ret != I40E_SUCCESS) + if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous"); + return -EAGAIN; + } + + return 0; } -static void +static int i40e_dev_allmulticast_disable(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -2629,12 +2733,16 @@ i40e_dev_allmulticast_disable(struct rte_eth_dev *dev) int ret; if (dev->data->promiscuous == 1) - return; /* must remain in all_multicast mode */ + return 0; /* must remain in all_multicast mode */ ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, FALSE, NULL); - if (ret != I40E_SUCCESS) + if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous"); + return -EAGAIN; + } + + return 0; } /* @@ -2670,11 +2778,11 @@ update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link) #define I40E_PRTMAC_MACC 0x001E24E0 #define I40E_REG_MACC_25GB 0x00020000 #define I40E_REG_SPEED_MASK 0x38000000 -#define I40E_REG_SPEED_100MB 0x00000000 -#define I40E_REG_SPEED_1GB 0x08000000 -#define I40E_REG_SPEED_10GB 0x10000000 -#define I40E_REG_SPEED_20GB 0x20000000 -#define I40E_REG_SPEED_25_40GB 0x18000000 +#define I40E_REG_SPEED_0 0x00000000 +#define I40E_REG_SPEED_1 0x08000000 +#define I40E_REG_SPEED_2 0x10000000 +#define I40E_REG_SPEED_3 0x18000000 +#define I40E_REG_SPEED_4 0x20000000 uint32_t link_speed; uint32_t reg_val; @@ -2688,26 +2796,35 @@ update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link) /* Parse the link status */ switch (link_speed) { - case I40E_REG_SPEED_100MB: + case I40E_REG_SPEED_0: link->link_speed = ETH_SPEED_NUM_100M; break; - case I40E_REG_SPEED_1GB: + case I40E_REG_SPEED_1: link->link_speed = ETH_SPEED_NUM_1G; break; - case I40E_REG_SPEED_10GB: - link->link_speed = ETH_SPEED_NUM_10G; - break; - case I40E_REG_SPEED_20GB: - link->link_speed = ETH_SPEED_NUM_20G; + case I40E_REG_SPEED_2: + if (hw->mac.type == I40E_MAC_X722) + link->link_speed = ETH_SPEED_NUM_2_5G; + else + 
link->link_speed = ETH_SPEED_NUM_10G; break; - case I40E_REG_SPEED_25_40GB: - reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC); + case I40E_REG_SPEED_3: + if (hw->mac.type == I40E_MAC_X722) { + link->link_speed = ETH_SPEED_NUM_5G; + } else { + reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC); - if (reg_val & I40E_REG_MACC_25GB) - link->link_speed = ETH_SPEED_NUM_25G; + if (reg_val & I40E_REG_MACC_25GB) + link->link_speed = ETH_SPEED_NUM_25G; + else + link->link_speed = ETH_SPEED_NUM_40G; + } + break; + case I40E_REG_SPEED_4: + if (hw->mac.type == I40E_MAC_X722) + link->link_speed = ETH_SPEED_NUM_10G; else - link->link_speed = ETH_SPEED_NUM_40G; - + link->link_speed = ETH_SPEED_NUM_20G; break; default: PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed); @@ -2734,7 +2851,7 @@ update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link, status = i40e_aq_get_link_info(hw, enable_lse, &link_status, NULL); if (unlikely(status != I40E_SUCCESS)) { - link->link_speed = ETH_SPEED_NUM_100M; + link->link_speed = ETH_SPEED_NUM_NONE; link->link_duplex = ETH_LINK_FULL_DUPLEX; PMD_DRV_LOG(ERR, "Failed to get link info"); return; @@ -2768,7 +2885,7 @@ update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link, link->link_speed = ETH_SPEED_NUM_40G; break; default: - link->link_speed = ETH_SPEED_NUM_100M; + link->link_speed = ETH_SPEED_NUM_NONE; break; } } @@ -2794,6 +2911,9 @@ i40e_dev_link_update(struct rte_eth_dev *dev, else update_link_aq(hw, &link, enable_lse, wait_to_complete); + if (hw->switch_dev) + rte_eth_linkstatus_get(hw->switch_dev, &link); + ret = rte_eth_linkstatus_set(dev, &link); i40e_notify_all_vfs_link_status(dev); @@ -2823,7 +2943,7 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi) &nes->rx_broadcast); /* exclude CRC bytes */ nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast + - nes->rx_broadcast) * ETHER_CRC_LEN; + nes->rx_broadcast) * RTE_ETHER_CRC_LEN; i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded, &oes->rx_discards, &nes->rx_discards); @@ -2923,7 +3043,7 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) /* exclude CRC size */ pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast + pf->internal_stats.rx_multicast + - pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN; + pf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN; /* Get statistics of struct i40e_eth_stats */ i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port), @@ -2943,10 +3063,11 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) pf->offset_loaded, &os->eth.rx_broadcast, &ns->eth.rx_broadcast); /* Workaround: CRC size should not be included in byte statistics, - * so subtract ETHER_CRC_LEN from the byte counter for each rx packet. + * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx + * packet. 
*/ ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast + - ns->eth.rx_broadcast) * ETHER_CRC_LEN; + ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN; /* exclude internal rx bytes * Workaround: it is possible I40E_GLV_GORCH[H/L] is updated before @@ -3000,7 +3121,7 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) pf->offset_loaded, &os->eth.tx_broadcast, &ns->eth.tx_broadcast); ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast + - ns->eth.tx_broadcast) * ETHER_CRC_LEN; + ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN; /* exclude internal tx bytes * Workaround: it is possible I40E_GLV_GOTCH[H/L] is updated before @@ -3286,7 +3407,7 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) } /* Reset the statistics */ -static void +static int i40e_dev_stats_reset(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -3299,6 +3420,8 @@ i40e_dev_stats_reset(struct rte_eth_dev *dev) /* read the stats, reading current register values into offset */ i40e_read_stats_registers(pf, hw); + + return 0; } static uint32_t @@ -3420,17 +3543,6 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, return count; } -static int -i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev, - __rte_unused uint16_t queue_id, - __rte_unused uint8_t stat_idx, - __rte_unused uint8_t is_rx) -{ - PMD_INIT_FUNC_TRACE(); - - return -ENOSYS; -} - static int i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) { @@ -3484,7 +3596,7 @@ i40e_need_stop_lldp(struct rte_eth_dev *dev) return false; } -static void +static int i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -3499,7 +3611,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_mac_addrs = vsi->max_macaddrs; dev_info->max_vfs = pci_dev->max_vfs; dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD; - dev_info->min_mtu = ETHER_MIN_MTU; + dev_info->min_mtu = RTE_ETHER_MIN_MTU; dev_info->rx_queue_offload_capa = 0; dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | @@ -3512,7 +3624,8 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_RX_OFFLOAD_SCATTER | DEV_RX_OFFLOAD_VLAN_EXTEND | DEV_RX_OFFLOAD_VLAN_FILTER | - DEV_RX_OFFLOAD_JUMBO_FRAME; + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_RSS_HASH; dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE; dev_info->tx_offload_capa = @@ -3621,6 +3734,8 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) } dev_info->default_rxportconf.burst_size = 32; dev_info->default_txportconf.burst_size = 32; + + return 0; } static int @@ -3744,6 +3859,11 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask) struct i40e_vsi *vsi = pf->main_vsi; struct rte_eth_rxmode *rxmode; + if (mask & ETH_QINQ_STRIP_MASK) { + PMD_DRV_LOG(ERR, "Strip qinq is not supported."); + return -ENOTSUP; + } + rxmode = &dev->data->dev_conf.rxmode; if (mask & ETH_VLAN_FILTER_MASK) { if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) @@ -3765,9 +3885,9 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask) i40e_vsi_config_double_vlan(vsi, TRUE); /* Set global registers with default ethertype. 
*/ i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, - ETHER_TYPE_VLAN); + RTE_ETHER_TYPE_VLAN); i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER, - ETHER_TYPE_VLAN); + RTE_ETHER_TYPE_VLAN); } else i40e_vsi_config_double_vlan(vsi, FALSE); @@ -4000,7 +4120,7 @@ i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev, /* Add a MAC address, and update filters */ static int i40e_macaddr_add(struct rte_eth_dev *dev, - struct ether_addr *mac_addr, + struct rte_ether_addr *mac_addr, __rte_unused uint32_t index, uint32_t pool) { @@ -4025,7 +4145,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev, return -EINVAL; } - rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN); + rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN); if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; else @@ -4051,7 +4171,7 @@ i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index) struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_vsi *vsi; struct rte_eth_dev_data *data = dev->data; - struct ether_addr *macaddr; + struct rte_ether_addr *macaddr; int ret; uint32_t i; uint64_t pool_sel; @@ -4092,8 +4212,8 @@ i40e_vf_mac_filter_set(struct i40e_pf *pf, { struct i40e_hw *hw; struct i40e_mac_filter_info mac_filter; - struct ether_addr old_mac; - struct ether_addr *new_mac; + struct rte_ether_addr old_mac; + struct rte_ether_addr *new_mac; struct i40e_pf_vf *vf = NULL; uint16_t vf_id; int ret; @@ -4111,7 +4231,7 @@ i40e_vf_mac_filter_set(struct i40e_pf *pf, new_mac = &filter->mac_addr; - if (is_zero_ether_addr(new_mac)) { + if (rte_is_zero_ether_addr(new_mac)) { PMD_DRV_LOG(ERR, "Invalid ethernet address."); return -EINVAL; } @@ -4124,17 +4244,17 @@ i40e_vf_mac_filter_set(struct i40e_pf *pf, } vf = &pf->vfs[vf_id]; - if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) { + if (add && rte_is_same_ether_addr(new_mac, &pf->dev_addr)) { PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address."); return -EINVAL; } if (add) { - rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN); + rte_memcpy(&old_mac, hw->mac.addr, RTE_ETHER_ADDR_LEN); rte_memcpy(hw->mac.addr, new_mac->addr_bytes, - ETHER_ADDR_LEN); + RTE_ETHER_ADDR_LEN); rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr, - ETHER_ADDR_LEN); + RTE_ETHER_ADDR_LEN); mac_filter.filter_type = filter->filter_type; ret = i40e_vsi_add_mac(vf->vsi, &mac_filter); @@ -4142,10 +4262,10 @@ i40e_vf_mac_filter_set(struct i40e_pf *pf, PMD_DRV_LOG(ERR, "Failed to add MAC filter."); return -1; } - ether_addr_copy(new_mac, &pf->dev_addr); + rte_ether_addr_copy(new_mac, &pf->dev_addr); } else { rte_memcpy(hw->mac.addr, hw->mac.perm_addr, - ETHER_ADDR_LEN); + RTE_ETHER_ADDR_LEN); ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr); if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Failed to delete MAC filter."); @@ -4153,8 +4273,8 @@ i40e_vf_mac_filter_set(struct i40e_pf *pf, } /* Clear device address as it has been removed */ - if (is_same_ether_addr(&(pf->dev_addr), new_mac)) - memset(&pf->dev_addr, 0, sizeof(struct ether_addr)); + if (rte_is_same_ether_addr(&pf->dev_addr, new_mac)) + memset(&pf->dev_addr, 0, sizeof(struct rte_ether_addr)); } return 0; @@ -5331,7 +5451,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi) ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL); if (ret != I40E_SUCCESS) { struct i40e_mac_filter *f; - struct ether_addr *mac; + struct rte_ether_addr *mac; PMD_DRV_LOG(DEBUG, "Cannot remove the default macvlan filter"); @@ -5351,7 +5471,7 @@ 
i40e_update_default_filter_setting(struct i40e_vsi *vsi) return ret; } rte_memcpy(&filter.mac_addr, - (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN); + (struct rte_ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN); filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; return i40e_vsi_add_mac(vsi, &filter); } @@ -5469,7 +5589,7 @@ i40e_vsi_setup(struct i40e_pf *pf, struct i40e_mac_filter_info filter; int ret; struct i40e_vsi_context ctxt; - struct ether_addr broadcast = + struct rte_ether_addr broadcast = {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}; if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV && @@ -5813,7 +5933,7 @@ i40e_vsi_setup(struct i40e_pf *pf, } /* MAC/VLAN configuration */ - rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN); + rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN); filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; ret = i40e_vsi_add_mac(vsi, &filter); @@ -6641,6 +6761,92 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) rte_free(info.msg_buf); } +static void +i40e_handle_mdd_event(struct rte_eth_dev *dev) +{ +#define I40E_MDD_CLEAR32 0xFFFFFFFF +#define I40E_MDD_CLEAR16 0xFFFF + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + bool mdd_detected = false; + struct i40e_pf_vf *vf; + uint32_t reg; + int i; + + /* find what triggered the MDD event */ + reg = I40E_READ_REG(hw, I40E_GL_MDET_TX); + if (reg & I40E_GL_MDET_TX_VALID_MASK) { + uint8_t pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> + I40E_GL_MDET_TX_PF_NUM_SHIFT; + uint16_t vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> + I40E_GL_MDET_TX_VF_NUM_SHIFT; + uint8_t event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> + I40E_GL_MDET_TX_EVENT_SHIFT; + uint16_t queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >> + I40E_GL_MDET_TX_QUEUE_SHIFT) - + hw->func_caps.base_queue; + PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on TX " + "queue %d PF number 0x%02x VF number 0x%02x device %s\n", + event, queue, pf_num, vf_num, dev->data->name); + I40E_WRITE_REG(hw, I40E_GL_MDET_TX, I40E_MDD_CLEAR32); + mdd_detected = true; + } + reg = I40E_READ_REG(hw, I40E_GL_MDET_RX); + if (reg & I40E_GL_MDET_RX_VALID_MASK) { + uint8_t func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> + I40E_GL_MDET_RX_FUNCTION_SHIFT; + uint8_t event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> + I40E_GL_MDET_RX_EVENT_SHIFT; + uint16_t queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >> + I40E_GL_MDET_RX_QUEUE_SHIFT) - + hw->func_caps.base_queue; + + PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on RX " + "queue %d of function 0x%02x device %s\n", + event, queue, func, dev->data->name); + I40E_WRITE_REG(hw, I40E_GL_MDET_RX, I40E_MDD_CLEAR32); + mdd_detected = true; + } + + if (mdd_detected) { + reg = I40E_READ_REG(hw, I40E_PF_MDET_TX); + if (reg & I40E_PF_MDET_TX_VALID_MASK) { + I40E_WRITE_REG(hw, I40E_PF_MDET_TX, I40E_MDD_CLEAR16); + PMD_DRV_LOG(WARNING, "TX driver issue detected on PF\n"); + } + reg = I40E_READ_REG(hw, I40E_PF_MDET_RX); + if (reg & I40E_PF_MDET_RX_VALID_MASK) { + I40E_WRITE_REG(hw, I40E_PF_MDET_RX, + I40E_MDD_CLEAR16); + PMD_DRV_LOG(WARNING, "RX driver issue detected on PF\n"); + } + } + + /* see if one of the VFs needs its hand slapped */ + for (i = 0; i < pf->vf_num && mdd_detected; i++) { + vf = &pf->vfs[i]; + reg = I40E_READ_REG(hw, I40E_VP_MDET_TX(i)); + if (reg & I40E_VP_MDET_TX_VALID_MASK) { + I40E_WRITE_REG(hw, I40E_VP_MDET_TX(i), + I40E_MDD_CLEAR16); + vf->num_mdd_events++; + PMD_DRV_LOG(WARNING, "TX 
driver issue detected on VF %d %-" + PRIu64 "times\n", + i, vf->num_mdd_events); + } + + reg = I40E_READ_REG(hw, I40E_VP_MDET_RX(i)); + if (reg & I40E_VP_MDET_RX_VALID_MASK) { + I40E_WRITE_REG(hw, I40E_VP_MDET_RX(i), + I40E_MDD_CLEAR16); + vf->num_mdd_events++; + PMD_DRV_LOG(WARNING, "RX driver issue detected on VF %d %-" + PRIu64 "times\n", + i, vf->num_mdd_events); + } + } +} + /** * Interrupt handler triggered by NIC for handling * specific interrupt. @@ -6673,8 +6879,10 @@ i40e_dev_interrupt_handler(void *param) } if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK) PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error"); - if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) + if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { PMD_DRV_LOG(ERR, "ICR0: malicious programming detected"); + i40e_handle_mdd_event(dev); + } if (icr0 & I40E_PFINT_ICR0_GRST_MASK) PMD_DRV_LOG(INFO, "ICR0: global reset requested"); if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) @@ -6718,8 +6926,10 @@ i40e_dev_alarm_handler(void *param) goto done; if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK) PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error"); - if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) + if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { PMD_DRV_LOG(ERR, "ICR0: malicious programming detected"); + i40e_handle_mdd_event(dev); + } if (icr0 & I40E_PFINT_ICR0_GRST_MASK) PMD_DRV_LOG(INFO, "ICR0: global reset requested"); if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) @@ -6896,12 +7106,12 @@ DONE: /* Find out specific MAC filter */ static struct i40e_mac_filter * i40e_find_mac_filter(struct i40e_vsi *vsi, - struct ether_addr *macaddr) + struct rte_ether_addr *macaddr) { struct i40e_mac_filter *f; TAILQ_FOREACH(f, &vsi->mac_list, next) { - if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr)) + if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr)) return f; } @@ -6980,7 +7190,7 @@ i40e_set_vlan_filter(struct i40e_vsi *vsi, int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi, struct i40e_macvlan_filter *mv_f, - int num, struct ether_addr *addr) + int num, struct rte_ether_addr *addr) { int i; uint32_t j, k; @@ -7099,7 +7309,7 @@ i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan) int mac_num; int ret = I40E_SUCCESS; - if (!vsi || vlan > ETHER_MAX_VLAN_ID) + if (!vsi || vlan > RTE_ETHER_MAX_VLAN_ID) return I40E_ERR_PARAM; /* If it's already set, just return */ @@ -7150,7 +7360,7 @@ i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan) * Vlan 0 is the generic filter for untagged packets * and can't be removed. 
*/ - if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID) + if (!vsi || vlan == 0 || vlan > RTE_ETHER_MAX_VLAN_ID) return I40E_ERR_PARAM; /* If can't find it, just return */ @@ -7274,7 +7484,7 @@ DONE: } int -i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr) +i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr) { struct i40e_mac_filter *f; struct i40e_macvlan_filter *mv_f; @@ -7574,10 +7784,12 @@ i40e_tunnel_filter_convert( struct i40e_aqc_cloud_filters_element_bb *cld_filter, struct i40e_tunnel_filter *tunnel_filter) { - ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac, - (struct ether_addr *)&tunnel_filter->input.outer_mac); - ether_addr_copy((struct ether_addr *)&cld_filter->element.inner_mac, - (struct ether_addr *)&tunnel_filter->input.inner_mac); + rte_ether_addr_copy((struct rte_ether_addr *) + &cld_filter->element.outer_mac, + (struct rte_ether_addr *)&tunnel_filter->input.outer_mac); + rte_ether_addr_copy((struct rte_ether_addr *) + &cld_filter->element.inner_mac, + (struct rte_ether_addr *)&tunnel_filter->input.inner_mac); tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan; if ((rte_le_to_cpu_16(cld_filter->element.flags) & I40E_AQC_ADD_CLOUD_FLAGS_IPV6) == @@ -7685,10 +7897,10 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, } pfilter = cld_filter; - ether_addr_copy(&tunnel_filter->outer_mac, - (struct ether_addr *)&pfilter->element.outer_mac); - ether_addr_copy(&tunnel_filter->inner_mac, - (struct ether_addr *)&pfilter->element.inner_mac); + rte_ether_addr_copy(&tunnel_filter->outer_mac, + (struct rte_ether_addr *)&pfilter->element.outer_mac); + rte_ether_addr_copy(&tunnel_filter->inner_mac, + (struct rte_ether_addr *)&pfilter->element.inner_mac); pfilter->element.inner_vlan = rte_cpu_to_le_16(tunnel_filter->inner_vlan); @@ -8132,10 +8344,10 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, } pfilter = cld_filter; - ether_addr_copy(&tunnel_filter->outer_mac, - (struct ether_addr *)&pfilter->element.outer_mac); - ether_addr_copy(&tunnel_filter->inner_mac, - (struct ether_addr *)&pfilter->element.inner_mac); + rte_ether_addr_copy(&tunnel_filter->outer_mac, + (struct rte_ether_addr *)&pfilter->element.outer_mac); + rte_ether_addr_copy(&tunnel_filter->inner_mac, + (struct rte_ether_addr *)&pfilter->element.inner_mac); pfilter->element.inner_vlan = rte_cpu_to_le_16(tunnel_filter->inner_vlan); @@ -8375,7 +8587,7 @@ static int i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port, int udp_type) { int idx, ret; - uint8_t filter_idx; + uint8_t filter_idx = 0; struct i40e_hw *hw = I40E_PF_TO_HW(pf); idx = i40e_get_vxlan_port_idx(pf, port); @@ -8558,7 +8770,9 @@ i40e_pf_config_rss(struct i40e_pf *pf) num); if (num == 0) { - PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS"); + PMD_INIT_LOG(ERR, + "No PF queues are configured to enable RSS for port %u", + pf->dev_data->port_id); return -ENOTSUP; } @@ -8609,19 +8823,19 @@ i40e_tunnel_filter_param_check(struct i40e_pf *pf, return -EINVAL; } - if (filter->inner_vlan > ETHER_MAX_VLAN_ID) { + if (filter->inner_vlan > RTE_ETHER_MAX_VLAN_ID) { PMD_DRV_LOG(ERR, "Invalid inner VLAN ID"); return -EINVAL; } if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) && - (is_zero_ether_addr(&filter->outer_mac))) { + (rte_is_zero_ether_addr(&filter->outer_mac))) { PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address"); return -EINVAL; } if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) && - (is_zero_ether_addr(&filter->inner_mac))) { + 
(rte_is_zero_ether_addr(&filter->inner_mac))) { PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address"); return -EINVAL; } @@ -9889,7 +10103,8 @@ static int i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input, struct i40e_ethertype_filter *filter) { - rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN); + rte_memcpy(&filter->input.mac_addr, &input->mac_addr, + RTE_ETHER_ADDR_LEN); filter->input.ether_type = input->ether_type; filter->flags = input->flags; filter->queue = input->queue; @@ -9981,14 +10196,14 @@ i40e_ethertype_filter_set(struct i40e_pf *pf, PMD_DRV_LOG(ERR, "Invalid queue ID"); return -EINVAL; } - if (filter->ether_type == ETHER_TYPE_IPv4 || - filter->ether_type == ETHER_TYPE_IPv6) { + if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || + filter->ether_type == RTE_ETHER_TYPE_IPV6) { PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in control packet filter.", filter->ether_type); return -EINVAL; } - if (filter->ether_type == ETHER_TYPE_VLAN) + if (filter->ether_type == RTE_ETHER_TYPE_VLAN) PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is not supported."); @@ -10289,6 +10504,7 @@ i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value) { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) }, { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) }, { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) }, + { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_X722) }, { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) }, { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) }, @@ -10827,8 +11043,7 @@ static void i40e_start_timecounters(struct rte_eth_dev *dev) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct i40e_adapter *adapter = - (struct i40e_adapter *)dev->data->dev_private; + struct i40e_adapter *adapter = dev->data->dev_private; struct rte_eth_link link; uint32_t tsync_inc_l; uint32_t tsync_inc_h; @@ -10880,8 +11095,7 @@ i40e_start_timecounters(struct rte_eth_dev *dev) static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) { - struct i40e_adapter *adapter = - (struct i40e_adapter *)dev->data->dev_private; + struct i40e_adapter *adapter = dev->data->dev_private; adapter->systime_tc.nsec += delta; adapter->rx_tstamp_tc.nsec += delta; @@ -10894,8 +11108,7 @@ static int i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) { uint64_t ns; - struct i40e_adapter *adapter = - (struct i40e_adapter *)dev->data->dev_private; + struct i40e_adapter *adapter = dev->data->dev_private; ns = rte_timespec_to_ns(ts); @@ -10911,8 +11124,7 @@ static int i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) { uint64_t ns, systime_cycles; - struct i40e_adapter *adapter = - (struct i40e_adapter *)dev->data->dev_private; + struct i40e_adapter *adapter = dev->data->dev_private; systime_cycles = i40e_read_systime_cyclecounter(dev); ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); @@ -10988,9 +11200,7 @@ i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev, struct timespec *timestamp, uint32_t flags) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct i40e_adapter *adapter = - (struct i40e_adapter *)dev->data->dev_private; - + struct i40e_adapter *adapter = dev->data->dev_private; uint32_t sync_status; uint32_t index = flags & 0x03; uint64_t rx_tstamp_cycles; @@ -11012,9 +11222,7 @@ i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev, struct timespec *timestamp) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct 
i40e_adapter *adapter = - (struct i40e_adapter *)dev->data->dev_private; - + struct i40e_adapter *adapter = dev->data->dev_private; uint32_t sync_status; uint64_t tx_tstamp_cycles; uint64_t ns; @@ -11463,12 +11671,12 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) */ if (sw_dcb == TRUE) { if (i40e_need_stop_lldp(dev)) { - ret = i40e_aq_stop_lldp(hw, TRUE, NULL); + ret = i40e_aq_stop_lldp(hw, TRUE, TRUE, NULL); if (ret != I40E_SUCCESS) PMD_INIT_LOG(DEBUG, "Failed to stop lldp"); } - ret = i40e_init_dcb(hw); + ret = i40e_init_dcb(hw, true); /* If lldp agent is stopped, the return value from * i40e_init_dcb we expect is failure with I40E_AQ_RC_EPERM * adminq status. Otherwise, it should return success. @@ -11512,11 +11720,11 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) return -ENOTSUP; } } else { - ret = i40e_aq_start_lldp(hw, NULL); + ret = i40e_aq_start_lldp(hw, true, NULL); if (ret != I40E_SUCCESS) PMD_INIT_LOG(DEBUG, "Failed to start lldp"); - ret = i40e_init_dcb(hw); + ret = i40e_init_dcb(hw, true); if (!ret) { if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) { PMD_INIT_LOG(ERR, @@ -11655,7 +11863,7 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) I40E_PFINT_DYN_CTLN_ITR_INDX_MASK); I40E_WRITE_FLUSH(hw); - rte_intr_enable(&pci_dev->intr_handle); + rte_intr_ack(&pci_dev->intr_handle); return 0; } @@ -11939,7 +12147,7 @@ static int i40e_get_module_eeprom(struct rte_eth_dev *dev, } static int i40e_set_default_mac_addr(struct rte_eth_dev *dev, - struct ether_addr *mac_addr) + struct rte_ether_addr *mac_addr) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -11948,13 +12156,14 @@ static int i40e_set_default_mac_addr(struct rte_eth_dev *dev, struct i40e_mac_filter *f; int ret; - if (!is_valid_assigned_ether_addr(mac_addr)) { + if (!rte_is_valid_assigned_ether_addr(mac_addr)) { PMD_DRV_LOG(ERR, "Tried to set invalid MAC address."); return -EINVAL; } TAILQ_FOREACH(f, &vsi->mac_list, next) { - if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr)) + if (rte_is_same_ether_addr(&pf->dev_addr, + &f->mac_info.mac_addr)) break; } @@ -11996,7 +12205,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) int ret = 0; /* check if mtu is within the allowed range */ - if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX)) + if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX) return -EINVAL; /* mtu setting is forbidden if port is start */ @@ -12006,7 +12215,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) return -EBUSY; } - if (frame_size > ETHER_MAX_LEN) + if (frame_size > RTE_ETHER_MAX_LEN) dev_data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; else @@ -12072,10 +12281,12 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf) vsi = vf->vsi; } memset(&cld_filter, 0, sizeof(cld_filter)); - ether_addr_copy((struct ether_addr *)&f->input.outer_mac, - (struct ether_addr *)&cld_filter.element.outer_mac); - ether_addr_copy((struct ether_addr *)&f->input.inner_mac, - (struct ether_addr *)&cld_filter.element.inner_mac); + rte_ether_addr_copy((struct rte_ether_addr *) + &f->input.outer_mac, + (struct rte_ether_addr *)&cld_filter.element.outer_mac); + rte_ether_addr_copy((struct rte_ether_addr *) + &f->input.inner_mac, + (struct rte_ether_addr *)&cld_filter.element.inner_mac); cld_filter.element.inner_vlan = f->input.inner_vlan; cld_filter.element.flags = f->input.flags; cld_filter.element.tenant_id = 
f->input.tenant_id; @@ -12123,7 +12334,7 @@ i40e_filter_restore(struct i40e_pf *pf) i40e_rss_filter_restore(pf); } -static bool +bool is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) { if (strcmp(dev->device->driver->name, drv->driver.name)) @@ -12218,6 +12429,7 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg, } } name[strlen(name) - 1] = '\0'; + PMD_DRV_LOG(INFO, "name = %s\n", name); if (!strcmp(name, "GTPC")) new_pctype = i40e_find_customized_pctype(pf, @@ -12234,6 +12446,38 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg, new_pctype = i40e_find_customized_pctype(pf, I40E_CUSTOMIZED_GTPU); + else if (!strcmp(name, "IPV4_L2TPV3")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_IPV4_L2TPV3); + else if (!strcmp(name, "IPV6_L2TPV3")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_IPV6_L2TPV3); + else if (!strcmp(name, "IPV4_ESP")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_ESP_IPV4); + else if (!strcmp(name, "IPV6_ESP")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_ESP_IPV6); + else if (!strcmp(name, "IPV4_UDP_ESP")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_ESP_IPV4_UDP); + else if (!strcmp(name, "IPV6_UDP_ESP")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_ESP_IPV6_UDP); + else if (!strcmp(name, "IPV4_AH")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_AH_IPV4); + else if (!strcmp(name, "IPV6_AH")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_AH_IPV6); if (new_pctype) { if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) { new_pctype->pctype = pctype_value; @@ -12329,6 +12573,7 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg, continue; memset(name, 0, sizeof(name)); strcpy(name, proto[n].name); + PMD_DRV_LOG(INFO, "name = %s\n", name); if (!strncasecmp(name, "PPPOE", 5)) ptype_mapping[i].sw_ptype |= RTE_PTYPE_L2_ETHER_PPPOE; @@ -12422,12 +12667,17 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg, ptype_mapping[i].sw_ptype |= RTE_PTYPE_TUNNEL_GTPU; in_tunnel = true; + } else if (!strncasecmp(name, "ESP", 3)) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_TUNNEL_ESP; + in_tunnel = true; } else if (!strncasecmp(name, "GRENAT", 6)) { ptype_mapping[i].sw_ptype |= RTE_PTYPE_TUNNEL_GRENAT; in_tunnel = true; } else if (!strncasecmp(name, "L2TPV2CTL", 9) || - !strncasecmp(name, "L2TPV2", 6)) { + !strncasecmp(name, "L2TPV2", 6) || + !strncasecmp(name, "L2TPV3", 6)) { ptype_mapping[i].sw_ptype |= RTE_PTYPE_TUNNEL_L2TP; in_tunnel = true; @@ -12441,7 +12691,7 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg, ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping, ptype_num, 0); if (ret) - PMD_DRV_LOG(ERR, "Failed to update mapping table."); + PMD_DRV_LOG(ERR, "Failed to update ptype mapping table."); rte_free(ptype_mapping); rte_free(ptype); @@ -12506,6 +12756,17 @@ i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg, } } + /* Check if ESP is supported. 
*/ + for (i = 0; i < proto_num; i++) { + if (!strncmp(proto[i].name, "ESP", 3)) { + if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) + pf->esp_support = true; + else + pf->esp_support = false; + break; + } + } + /* Update customized pctype info */ ret = i40e_update_customized_pctype(dev, pkg, pkg_size, proto_num, proto, op); @@ -12723,7 +12984,9 @@ i40e_config_rss_filter(struct i40e_pf *pf, num); if (num == 0) { - PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS"); + PMD_DRV_LOG(ERR, + "No PF queues are configured to enable RSS for port %u", + pf->dev_data->port_id); return -ENOTSUP; } @@ -12772,6 +13035,24 @@ RTE_INIT(i40e_init_log) i40e_logtype_driver = rte_log_register("pmd.net.i40e.driver"); if (i40e_logtype_driver >= 0) rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE); + +#ifdef RTE_LIBRTE_I40E_DEBUG_RX + i40e_logtype_rx = rte_log_register("pmd.net.i40e.rx"); + if (i40e_logtype_rx >= 0) + rte_log_set_level(i40e_logtype_rx, RTE_LOG_DEBUG); +#endif + +#ifdef RTE_LIBRTE_I40E_DEBUG_TX + i40e_logtype_tx = rte_log_register("pmd.net.i40e.tx"); + if (i40e_logtype_tx >= 0) + rte_log_set_level(i40e_logtype_tx, RTE_LOG_DEBUG); +#endif + +#ifdef RTE_LIBRTE_I40E_DEBUG_TX_FREE + i40e_logtype_tx_free = rte_log_register("pmd.net.i40e.tx_free"); + if (i40e_logtype_tx_free >= 0) + rte_log_set_level(i40e_logtype_tx_free, RTE_LOG_DEBUG); +#endif } RTE_PMD_REGISTER_PARAM_STRING(net_i40e,