-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
*/
#include <stdio.h>
#include <inttypes.h>
#include <assert.h>
+#include <rte_common.h>
#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
-#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_hash_crc.h>
+#include <rte_bitmap.h>
#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "i40e_regs.h"
+#include "rte_pmd_i40e.h"
#define ETH_I40E_FLOATING_VEB_ARG "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list"
+#define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver"
+#define ETH_I40E_QUEUE_NUM_PER_VF_ARG "queue-num-per-vf"
+#define ETH_I40E_USE_LATEST_VEC "use-latest-supported-vec"
+#define ETH_I40E_VF_MSG_CFG "vf_msg_cfg"
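+/*
+ * Devargs usage sketch (hypothetical BDF; value formats follow the
+ * parsers below: 0/1 for the flag arguments, and a "max@period:ignore"
+ * triple for vf_msg_cfg), e.g.:
+ *   0000:02:00.0,support-multi-driver=1,use-latest-supported-vec=1,vf_msg_cfg=60@120:180
+ */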
#define I40E_CLEAR_PXE_WAIT_MS 200
+#define I40E_VSI_TSR_QINQ_STRIP 0x4010
+#define I40E_VSI_TSR(_i) (0x00050800 + ((_i) * 4))
/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM 128
#define I40E_PRTTSYN_TSYNTYPE 0x0e000000
#define I40E_CYCLECOUNTER_MASK 0xffffffffffffffffULL
-#define I40E_MAX_PERCENT 100
-#define I40E_DEFAULT_DCB_APP_NUM 1
-#define I40E_DEFAULT_DCB_APP_PRIO 3
-
/**
* Below are values for writing un-exposed registers suggested
* by silicon experts
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
-static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
static int i40e_dev_reset(struct rte_eth_dev *dev);
-static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
-static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
-static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
-static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static int i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static int i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static int i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
-static void i40e_dev_stats_get(struct rte_eth_dev *dev,
+static int i40e_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned n);
static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
unsigned limit);
-static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
-static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
- uint16_t queue_id,
- uint8_t stat_idx,
- uint8_t is_rx);
+static int i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_fw_version_get(struct rte_eth_dev *dev,
char *fw_version, size_t fw_size);
-static void i40e_dev_info_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info);
+static int i40e_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id,
int on);
static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
enum rte_vlan_type vlan_type,
uint16_t tpid);
-static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
uint16_t queue,
int on);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_pfc_conf *pfc_conf);
static int i40e_macaddr_add(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr,
+ struct rte_ether_addr *mac_addr,
uint32_t index,
uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
-static int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
bool offset_loaded, uint64_t *offset, uint64_t *stat);
uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(void *param);
+static void i40e_dev_alarm_handler(void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
struct i40e_vsi *vsi);
-static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
struct i40e_macvlan_filter *mv_f,
static int i40e_get_eeprom(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *eeprom);
-static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr);
+static int i40e_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo);
+static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info);
+
+static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr);
static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
struct i40e_ethertype_filter *filter);
static int i40e_tunnel_filter_convert(
- struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
+ struct i40e_aqc_cloud_filters_element_bb *cld_filter,
struct i40e_tunnel_filter *tunnel_filter);
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
struct i40e_tunnel_filter *tunnel_filter);
static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
+static int i40e_pf_config_rss(struct i40e_pf *pf);
-int i40e_logtype_init;
-int i40e_logtype_driver;
+static const char *const valid_keys[] = {
+ ETH_I40E_FLOATING_VEB_ARG,
+ ETH_I40E_FLOATING_VEB_LIST_ARG,
+ ETH_I40E_SUPPORT_MULTI_DRIVER,
+ ETH_I40E_QUEUE_NUM_PER_VF_ARG,
+ ETH_I40E_USE_LATEST_VEC,
+ ETH_I40E_VF_MSG_CFG,
+ NULL};
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X710_N3000) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_XXV710_N3000) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP) },
{ .vendor_id = 0, /* sentinel */ },
};
.xstats_get_names = i40e_dev_xstats_get_names,
.stats_reset = i40e_dev_stats_reset,
.xstats_reset = i40e_dev_stats_reset,
- .queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
.fw_version_get = i40e_fw_version_get,
.dev_infos_get = i40e_dev_info_get,
.dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
.filter_ctrl = i40e_dev_filter_ctrl,
.rxq_info_get = i40e_rxq_info_get,
.txq_info_get = i40e_txq_info_get,
+ .rx_burst_mode_get = i40e_rx_burst_mode_get,
+ .tx_burst_mode_get = i40e_tx_burst_mode_get,
.mirror_rule_set = i40e_mirror_rule_set,
.mirror_rule_reset = i40e_mirror_rule_reset,
.timesync_enable = i40e_timesync_enable,
.get_reg = i40e_get_regs,
.get_eeprom_length = i40e_get_eeprom_length,
.get_eeprom = i40e_get_eeprom,
+ .get_module_info = i40e_get_module_info,
+ .get_module_eeprom = i40e_get_module_eeprom,
.mac_addr_set = i40e_set_default_mac_addr,
.mtu_set = i40e_dev_mtu_set,
.tm_ops_get = i40e_tm_ops_get,
+ .tx_done_cleanup = i40e_tx_done_cleanup,
};
/* store statistics names and their offsets in the stats structure */
{"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
{"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
{"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
- {"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
+ {"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
rx_unknown_protocol)},
{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
- {"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
+ {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
};
#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
sizeof(rte_i40e_txq_prio_strings[0]))
-static int eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+static int
+eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_probe(pci_dev,
- sizeof(struct i40e_adapter), eth_i40e_dev_init);
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
+ int i, retval;
+
+ if (pci_dev->device.devargs) {
+ retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
+ ð_da);
+ if (retval)
+ return retval;
+ }
+
+ retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
+ sizeof(struct i40e_adapter),
+ eth_dev_pci_specific_init, pci_dev,
+ eth_i40e_dev_init, NULL);
+
+ if (retval || eth_da.nb_representor_ports < 1)
+ return retval;
+
+ /* probe VF representor ports */
+ struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(
+ pci_dev->device.name);
+
+ if (pf_ethdev == NULL)
+ return -ENODEV;
+
+ for (i = 0; i < eth_da.nb_representor_ports; i++) {
+ struct i40e_vf_representor representor = {
+ .vf_id = eth_da.representor_ports[i],
+ .switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
+ pf_ethdev->data->dev_private)->switch_domain_id,
+ .adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
+ pf_ethdev->data->dev_private)
+ };
+
+ /* representor port name: net_<pci bdf>_representor_<vf id> */
+ snprintf(name, sizeof(name), "net_%s_representor_%d",
+ pci_dev->device.name, eth_da.representor_ports[i]);
+
+ retval = rte_eth_dev_create(&pci_dev->device, name,
+ sizeof(struct i40e_vf_representor), NULL, NULL,
+ i40e_vf_representor_init, &representor);
+
+ if (retval)
+ PMD_DRV_LOG(ERR, "failed to create i40e vf "
+ "representor %s.", name);
+ }
+
+ return 0;
}
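+/*
+ * Naming sketch (hypothetical BDF): with devargs requesting VF
+ * representors, e.g. "representor=[0-1]", probing 0000:02:00.0 creates
+ * the PF port plus one extra port per listed VF, named by the
+ * snprintf() above, e.g. "net_0000:02:00.0_representor_0".
+ */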
static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_remove(pci_dev, eth_i40e_dev_uninit);
+ struct rte_eth_dev *ethdev;
+
+ ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (!ethdev)
+ return 0;
+
+ if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
+ return rte_eth_dev_pci_generic_remove(pci_dev,
+ i40e_vf_representor_uninit);
+ else
+ return rte_eth_dev_pci_generic_remove(pci_dev,
+ eth_i40e_dev_uninit);
}
static struct rte_pci_driver rte_i40e_pmd = {
.remove = eth_i40e_pci_remove,
};
-static inline int
-rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-static inline int
-rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
+static inline void
+i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr,
+ uint32_t reg_val)
{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
+ uint32_t ori_reg_val;
+ struct rte_eth_dev *dev;
- return 0;
+ ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
+ dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+ i40e_write_rx_ctl(hw, reg_addr, reg_val);
+ if (ori_reg_val != reg_val)
+ PMD_DRV_LOG(WARNING,
+ "i40e device %s changed global register [0x%08x]."
+ " original: 0x%08x, new: 0x%08x",
+ dev->device->name, reg_addr, ori_reg_val, reg_val);
}
RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
/*
- * Initialize registers for flexible payload, which should be set by NVM.
- * This should be removed from code once it is fixed in NVM.
+ * Initialize registers for parsing the packet type of QinQ.
+ * This should be removed once a proper configuration API is
+ * added to avoid configuration conflicts between ports of the
+ * same device.
*/
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);
- I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
- I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);
-
- /* Initialize registers for parsing packet type of QinQ */
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
- I40E_WRITE_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
+}
+
+static inline void i40e_config_automask(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t val;
+
+ /* INTENA flag is not auto-cleared for interrupt */
+ val = I40E_READ_REG(hw, I40E_GLINT_CTL);
+ val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
+ I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
+
+ /* If multi-driver support is enabled, the PF will use INT0. */
+ if (!pf->support_multi_driver)
+ val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;
+
+ I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
}
#define I40E_FLOW_CONTROL_ETHERTYPE 0x8808
if (devargs == NULL)
return;
- kvlist = rte_kvargs_parse(devargs->args, NULL);
+ kvlist = rte_kvargs_parse(devargs->args, valid_keys);
if (kvlist == NULL)
return;
if (devargs == NULL)
return 0;
- kvlist = rte_kvargs_parse(devargs->args, NULL);
+ kvlist = rte_kvargs_parse(devargs->args, valid_keys);
if (kvlist == NULL)
return 0;
i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
struct i40e_fdir_info *fdir_info = &pf->fdir;
char fdir_hash_name[RTE_HASH_NAMESIZE];
+ uint32_t alloc = hw->func_caps.fd_filters_guaranteed;
+ uint32_t best = hw->func_caps.fd_filters_best_effort;
+ struct rte_bitmap *bmp = NULL;
+ uint32_t bmp_size;
+ void *mem = NULL;
+ uint32_t i = 0;
int ret;
struct rte_hash_parameters fdir_hash_params = {
.name = fdir_hash_name,
.entries = I40E_MAX_FDIR_FILTER_NUM,
- .key_len = sizeof(struct rte_eth_fdir_input),
+ .key_len = sizeof(struct i40e_fdir_input),
.hash_func = rte_hash_crc,
.hash_func_init_val = 0,
.socket_id = rte_socket_id(),
PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
return -EINVAL;
}
+
fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
sizeof(struct i40e_fdir_filter *) *
I40E_MAX_FDIR_FILTER_NUM,
ret = -ENOMEM;
goto err_fdir_hash_map_alloc;
}
+
+ fdir_info->fdir_filter_array = rte_zmalloc("fdir_filter",
+ sizeof(struct i40e_fdir_filter) *
+ I40E_MAX_FDIR_FILTER_NUM,
+ 0);
+
+ if (!fdir_info->fdir_filter_array) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir filter array!");
+ ret = -ENOMEM;
+ goto err_fdir_filter_array_alloc;
+ }
+
+ fdir_info->fdir_space_size = alloc + best;
+ fdir_info->fdir_actual_cnt = 0;
+ fdir_info->fdir_guarantee_total_space = alloc;
+ fdir_info->fdir_guarantee_free_space =
+ fdir_info->fdir_guarantee_total_space;
+
+ PMD_DRV_LOG(INFO, "FDIR guaranteed space: %u, best-effort space: %u.", alloc, best);
+
+ fdir_info->fdir_flow_pool.pool =
+ rte_zmalloc("i40e_fdir_entry",
+ sizeof(struct i40e_fdir_entry) *
+ fdir_info->fdir_space_size,
+ 0);
+
+ if (!fdir_info->fdir_flow_pool.pool) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for bitmap flow!");
+ ret = -ENOMEM;
+ goto err_fdir_bitmap_flow_alloc;
+ }
+
+ for (i = 0; i < fdir_info->fdir_space_size; i++)
+ fdir_info->fdir_flow_pool.pool[i].idx = i;
+
+ bmp_size =
+ rte_bitmap_get_memory_footprint(fdir_info->fdir_space_size);
+ mem = rte_zmalloc("fdir_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
+ if (mem == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir bitmap!");
+ ret = -ENOMEM;
+ goto err_fdir_mem_alloc;
+ }
+ bmp = rte_bitmap_init(fdir_info->fdir_space_size, mem, bmp_size);
+ if (bmp == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to initialize fdir bitmap!");
+ ret = -ENOMEM;
+ goto err_fdir_bmp_alloc;
+ }
+ for (i = 0; i < fdir_info->fdir_space_size; i++)
+ rte_bitmap_set(bmp, i);
+
+ fdir_info->fdir_flow_pool.bitmap = bmp;
+
return 0;
+err_fdir_bmp_alloc:
+ rte_free(mem);
+err_fdir_mem_alloc:
+ rte_free(fdir_info->fdir_flow_pool.pool);
+err_fdir_bitmap_flow_alloc:
+ rte_free(fdir_info->fdir_filter_array);
+err_fdir_filter_array_alloc:
+ rte_free(fdir_info->hash_map);
err_fdir_hash_map_alloc:
rte_hash_free(fdir_info->hash_table);
return ret;
}
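+/*
+ * Allocation sketch for the flow pool built above (illustrative, not
+ * part of this file's hunks), assuming the bitmap semantics set at
+ * init time: a set bit marks a free entry. A pool-get helper would
+ * scan for a set bit, clear it, and return the matching entry:
+ *
+ *	uint64_t slab = 0;
+ *	uint32_t pos = 0;
+ *	if (!rte_bitmap_scan(fdir_info->fdir_flow_pool.bitmap, &pos, &slab))
+ *		return NULL;	// no free entry left
+ *	pos += __builtin_ctzll(slab);
+ *	rte_bitmap_clear(fdir_info->fdir_flow_pool.bitmap, pos);
+ *	return &fdir_info->fdir_flow_pool.pool[pos];
+ */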
+static void
+i40e_init_customized_info(struct i40e_pf *pf)
+{
+ int i;
+
+ /* Initialize customized pctype */
+ for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
+ pf->customized_pctype[i].index = i;
+ pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
+ pf->customized_pctype[i].valid = false;
+ }
+
+ pf->gtp_support = false;
+ pf->esp_support = false;
+}
+
+static void
+i40e_init_filter_invalidation(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ uint32_t glqf_ctl_reg = 0;
+
+ glqf_ctl_reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
+ if (!pf->support_multi_driver) {
+ fdir_info->fdir_invalprio = 1;
+ glqf_ctl_reg |= I40E_GLQF_CTL_INVALPRIO_MASK;
+ PMD_DRV_LOG(INFO, "FDIR INVALPRIO set to guaranteed first");
+ i40e_write_rx_ctl(hw, I40E_GLQF_CTL, glqf_ctl_reg);
+ } else {
+ if (glqf_ctl_reg & I40E_GLQF_CTL_INVALPRIO_MASK) {
+ fdir_info->fdir_invalprio = 1;
+ PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: guaranteed first");
+ } else {
+ fdir_info->fdir_invalprio = 0;
+ PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: shared first");
+ }
+ }
+}
+
+void
+i40e_init_queue_region_conf(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_queue_regions *info = &pf->queue_region;
+ uint16_t i;
+
+ for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
+ i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);
+
+ memset(info, 0, sizeof(struct i40e_queue_regions));
+}
+
+static int
+i40e_parse_multi_drv_handler(__rte_unused const char *key,
+ const char *value,
+ void *opaque)
+{
+ struct i40e_pf *pf;
+ unsigned long support_multi_driver;
+ char *end;
+
+ pf = (struct i40e_pf *)opaque;
+
+ errno = 0;
+ support_multi_driver = strtoul(value, &end, 10);
+ if (errno != 0 || end == value || *end != 0) {
+ PMD_DRV_LOG(WARNING, "Wrong global configuration");
+ return -(EINVAL);
+ }
+
+ if (support_multi_driver == 1 || support_multi_driver == 0)
+ pf->support_multi_driver = (bool)support_multi_driver;
+ else
+ PMD_DRV_LOG(WARNING, "%s must be 1 or 0, "
+ "enabling global configuration by default.",
+ ETH_I40E_SUPPORT_MULTI_DRIVER);
+ return 0;
+}
+
+static int
+i40e_support_multi_driver(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_kvargs *kvlist;
+ int kvargs_count;
+
+ /* Enable global configuration by default */
+ pf->support_multi_driver = false;
+
+ if (!dev->device->devargs)
+ return 0;
+
+ kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
+ if (!kvlist)
+ return -EINVAL;
+
+ kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER);
+ if (!kvargs_count) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+
+ if (kvargs_count > 1)
+ PMD_DRV_LOG(WARNING, "More than one argument \"%s\"; only "
+ "the first invalid or last valid one is used!",
+ ETH_I40E_SUPPORT_MULTI_DRIVER);
+
+ if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
+ i40e_parse_multi_drv_handler, pf) < 0) {
+ rte_kvargs_free(kvlist);
+ return -EINVAL;
+ }
+
+ rte_kvargs_free(kvlist);
+ return 0;
+}
+
+static int
+i40e_aq_debug_write_global_register(struct i40e_hw *hw,
+ uint32_t reg_addr, uint64_t reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ uint64_t ori_reg_val;
+ struct rte_eth_dev *dev;
+ int ret;
+
+ ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Fail to debug read from 0x%08x",
+ reg_addr);
+ return -EIO;
+ }
+ dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+
+ if (ori_reg_val != reg_val)
+ PMD_DRV_LOG(WARNING,
+ "i40e device %s changed global register [0x%08x]."
+ " original: 0x%"PRIx64", after: 0x%"PRIx64,
+ dev->device->name, reg_addr, ori_reg_val, reg_val);
+
+ return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
+}
+
+static int
+i40e_parse_latest_vec_handler(__rte_unused const char *key,
+ const char *value,
+ void *opaque)
+{
+ struct i40e_adapter *ad = opaque;
+ int use_latest_vec;
+
+ use_latest_vec = atoi(value);
+
+ if (use_latest_vec != 0 && use_latest_vec != 1)
+ PMD_DRV_LOG(WARNING, "Value should be 0 or 1, set it as 1!");
+
+ ad->use_latest_vec = (uint8_t)use_latest_vec;
+
+ return 0;
+}
+
+static int
+i40e_use_latest_vec(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct rte_kvargs *kvlist;
+ int kvargs_count;
+
+ ad->use_latest_vec = false;
+
+ if (!dev->device->devargs)
+ return 0;
+
+ kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
+ if (!kvlist)
+ return -EINVAL;
+
+ kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_USE_LATEST_VEC);
+ if (!kvargs_count) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+
+ if (kvargs_count > 1)
+ PMD_DRV_LOG(WARNING, "More than one argument \"%s\"; only "
+ "the first invalid or last valid one is used!",
+ ETH_I40E_USE_LATEST_VEC);
+
+ if (rte_kvargs_process(kvlist, ETH_I40E_USE_LATEST_VEC,
+ i40e_parse_latest_vec_handler, ad) < 0) {
+ rte_kvargs_free(kvlist);
+ return -EINVAL;
+ }
+
+ rte_kvargs_free(kvlist);
+ return 0;
+}
+
+static int
+read_vf_msg_config(__rte_unused const char *key,
+ const char *value,
+ void *opaque)
+{
+ struct i40e_vf_msg_cfg *cfg = opaque;
+
+ if (sscanf(value, "%u@%u:%u", &cfg->max_msg, &cfg->period,
+ &cfg->ignore_second) != 3) {
+ memset(cfg, 0, sizeof(*cfg));
+ PMD_DRV_LOG(ERR, "format error! example: "
+ "%s=60@120:180", ETH_I40E_VF_MSG_CFG);
+ return -EINVAL;
+ }
+
+ /*
+ * If the message validation function has been enabled, both
+ * 'period' and 'ignore_second' must be greater than 0.
+ */
+ if (cfg->max_msg && (!cfg->period || !cfg->ignore_second)) {
+ memset(cfg, 0, sizeof(*cfg));
+ PMD_DRV_LOG(ERR, "%s error! The second and third"
+ " numbers must be greater than 0!",
+ ETH_I40E_VF_MSG_CFG);
+ return -EINVAL;
+ }
+
+ return 0;
+}
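+/*
+ * Parse sketch: for a devargs value of "60@120:180", the sscanf()
+ * above yields cfg->max_msg = 60, cfg->period = 120 and
+ * cfg->ignore_second = 180.
+ */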
+
+static int
+i40e_parse_vf_msg_config(struct rte_eth_dev *dev,
+ struct i40e_vf_msg_cfg *msg_cfg)
+{
+ struct rte_kvargs *kvlist;
+ int kvargs_count;
+ int ret = 0;
+
+ memset(msg_cfg, 0, sizeof(*msg_cfg));
+
+ if (!dev->device->devargs)
+ return ret;
+
+ kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
+ if (!kvlist)
+ return -EINVAL;
+
+ kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_VF_MSG_CFG);
+ if (!kvargs_count)
+ goto free_end;
+
+ if (kvargs_count > 1) {
+ PMD_DRV_LOG(ERR, "More than one argument \"%s\"!",
+ ETH_I40E_VF_MSG_CFG);
+ ret = -EINVAL;
+ goto free_end;
+ }
+
+ if (rte_kvargs_process(kvlist, ETH_I40E_VF_MSG_CFG,
+ read_vf_msg_config, msg_cfg) < 0)
+ ret = -EINVAL;
+
+free_end:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+#define I40E_ALARM_INTERVAL 50000 /* us */
+
static int
-eth_i40e_dev_init(struct rte_eth_dev *dev)
+eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
{
struct rte_pci_device *pci_dev;
struct rte_intr_handle *intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *vsi;
int ret;
- uint32_t len;
+ uint32_t len, val;
uint8_t aq_fail = 0;
PMD_INIT_FUNC_TRACE();
return 0;
}
i40e_set_default_ptype_table(dev);
- i40e_set_default_pctype_table(dev);
pci_dev = RTE_ETH_DEV_TO_PCI(dev);
intr_handle = &pci_dev->intr_handle;
rte_eth_copy_pci_info(dev, pci_dev);
- dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
pf->adapter->eth_dev = dev;
hw->bus.device = pci_dev->addr.devid;
hw->bus.func = pci_dev->addr.function;
hw->adapter_stopped = 0;
+ hw->adapter_closed = 0;
+
+ /* Init switch device pointer */
+ hw->switch_dev = NULL;
+
+ /*
+ * Switch Tag value should not be identical to either the First Tag
+ * or Second Tag values. So set something other than common Ethertype
+ * for internal switching.
+ */
+ hw->switch_tag = 0xffff;
+
+ val = I40E_READ_REG(hw, I40E_GL_FWSTS);
+ if (val & I40E_GL_FWSTS_FWS1B_MASK) {
+ PMD_INIT_LOG(ERR, "\nERROR: "
+ "Firmware recovery mode detected. Limiting functionality.\n"
+ "Refer to the Intel(R) Ethernet Adapters and Devices "
+ "User Guide for details on firmware recovery mode.");
+ return -EIO;
+ }
+
+ i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg);
+ /* Check whether multi-driver support is needed */
+ i40e_support_multi_driver(dev);
+ /* Check if users want the latest supported vec path */
+ i40e_use_latest_vec(dev);
/* Make sure all is clean before doing PF reset */
i40e_clear_hw(hw);
- /* Initialize the hardware */
- i40e_hw_init(dev);
-
/* Reset here to make sure all is clean for each PF */
ret = i40e_pf_reset(hw);
if (ret) {
return ret;
}
- /*
- * To work around the NVM issue, initialize registers
- * for flexible payload and packet type of QinQ by
- * software. It should be removed once issues are fixed
- * in NVM.
- */
- i40e_GLQF_reg_init(hw);
-
- /* Initialize the input set for filters (hash and fd) to default value */
- i40e_filter_input_set_init(pf);
-
/* Initialize the parameters for adminq */
i40e_init_adminq_parameter(hw);
ret = i40e_init_adminq(hw);
PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
return -EIO;
}
+ /* Firmware of SFP x722 does not support adminq option */
+ if (hw->device_id == I40E_DEV_ID_SFP_X722)
+ hw->flags &= ~I40E_HW_FLAG_802_1AD_CAPABLE;
+
PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
hw->aq.api_maj_ver, hw->aq.api_min_ver,
((hw->nvm.version >> 4) & 0xff),
(hw->nvm.version & 0xf), hw->nvm.eetrack);
+ /* Initialize the hardware */
+ i40e_hw_init(dev);
+
+ i40e_config_automask(pf);
+
+ i40e_set_default_pctype_table(dev);
+
+ /*
+ * To work around the NVM issue, initialize the registers
+ * for the QinQ packet type in software. This should be
+ * removed once the issue is fixed in the NVM.
+ */
+ if (!pf->support_multi_driver)
+ i40e_GLQF_reg_init(hw);
+
+ /* Initialize the input set for filters (hash and fd) to default value */
+ i40e_filter_input_set_init(pf);
+
/* initialise the L3_MAP register */
- ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40),
- 0x00000028, NULL);
- if (ret)
- PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d", ret);
+ if (!pf->support_multi_driver) {
+ ret = i40e_aq_debug_write_global_register(hw,
+ I40E_GLQF_L3_MAP(40),
+ 0x00000028, NULL);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
+ ret);
+ PMD_INIT_LOG(DEBUG,
+ "Global register 0x%08x is changed to 0x28",
+ I40E_GLQF_L3_MAP(40));
+ }
/* Need the special FW version to support floating VEB */
config_floating_veb(dev);
goto err_get_mac_addr;
}
/* Copy the permanent MAC address */
- ether_addr_copy((struct ether_addr *) hw->mac.addr,
- (struct ether_addr *) hw->mac.perm_addr);
+ rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
+ (struct rte_ether_addr *)hw->mac.perm_addr);
/* Disable flow control */
hw->fc.requested_mode = I40E_FC_NONE;
i40e_set_fc(hw, &aq_fail, TRUE);
/* Set the global registers with default ether type value */
- ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN);
- if (ret != I40E_SUCCESS) {
- PMD_INIT_LOG(ERR,
- "Failed to set the default outer VLAN ether type");
- goto err_setup_pf_switch;
+ if (!pf->support_multi_driver) {
+ ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+ RTE_ETHER_TYPE_VLAN);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR,
+ "Failed to set the default outer "
+ "VLAN ether type");
+ goto err_setup_pf_switch;
+ }
}
/* PF setup, which includes VSI setup */
goto err_setup_pf_switch;
}
- /* reset all stats of the device, including pf and main vsi */
- i40e_dev_stats_reset(dev);
-
vsi = pf->main_vsi;
/* Disable double vlan by default */
}
if (!vsi->max_macaddrs)
- len = ETHER_ADDR_LEN;
+ len = RTE_ETHER_ADDR_LEN;
else
- len = ETHER_ADDR_LEN * vsi->max_macaddrs;
+ len = RTE_ETHER_ADDR_LEN * vsi->max_macaddrs;
/* Should be after VSI initialized */
dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
"Failed to allocate memory for storing mac address");
goto err_mac_alloc;
}
- ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
+ rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
&dev->data->mac_addrs[0]);
+ /* Pass the information to the rte_eth_dev_close() that it should also
+ * release the private port resources.
+ */
+ dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+
/* Init dcb to sw mode by default */
ret = i40e_dcb_init_configure(dev, TRUE);
if (ret != I40E_SUCCESS) {
/* enable uio intr after callback register */
rte_intr_enable(intr_handle);
+
+ /* By default disable flexible payload in global configuration */
+ if (!pf->support_multi_driver)
+ i40e_flex_payload_reg_set_default(hw);
+
/*
* Add an ethertype filter to drop all flow control frames transmitted
* from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
/* Set the max frame size to 0x2600 by default,
* in case other drivers changed the default value.
*/
- i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);
+ i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL);
/* initialize mirror rule list */
TAILQ_INIT(&pf->mirror_list);
+ /* initialize RSS rule list */
+ TAILQ_INIT(&pf->rss_config_list);
+
/* initialize Traffic Manager configuration */
i40e_tm_conf_init(dev);
+ /* Initialize customized information */
+ i40e_init_customized_info(pf);
+
+ /* Initialize the filter invalidation configuration */
+ i40e_init_filter_invalidation(pf);
+
ret = i40e_init_ethtype_filter_list(dev);
if (ret < 0)
goto err_init_ethtype_filter_list;
if (ret < 0)
goto err_init_fdir_filter_list;
+ /* initialize queue region configuration */
+ i40e_init_queue_region_conf(dev);
+
+ /* initialize RSS configuration from rte_flow */
+ memset(&pf->rss_info, 0,
+ sizeof(struct i40e_rte_flow_rss_conf));
+
+ /* reset all stats of the device, including pf and main vsi */
+ i40e_dev_stats_reset(dev);
+
return 0;
err_init_fdir_filter_list:
rte_free(pf->ethertype.hash_map);
err_init_ethtype_filter_list:
rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
err_mac_alloc:
i40e_vsi_release(pf->main_vsi);
err_setup_pf_switch:
struct i40e_fdir_info *fdir_info;
fdir_info = &pf->fdir;
- /* Remove all flow director rules and hash */
+
+ /* Remove all flow director rules */
+ while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list)))
+ TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
+}
+
+static void
+i40e_fdir_memory_cleanup(struct i40e_pf *pf)
+{
+ struct i40e_fdir_info *fdir_info;
+
+ fdir_info = &pf->fdir;
+
+ /* flow director memory cleanup */
if (fdir_info->hash_map)
rte_free(fdir_info->hash_map);
if (fdir_info->hash_table)
rte_hash_free(fdir_info->hash_table);
+ if (fdir_info->fdir_flow_pool.bitmap)
+ rte_bitmap_free(fdir_info->fdir_flow_pool.bitmap);
+ if (fdir_info->fdir_flow_pool.pool)
+ rte_free(fdir_info->fdir_flow_pool.pool);
+ if (fdir_info->fdir_filter_array)
+ rte_free(fdir_info->fdir_filter_array);
+}
- while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
- TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
- rte_free(p_fdir);
- }
+void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
+{
+ /*
+ * Disable flexible payload by default for the
+ * corresponding L2/L3/L4 layers.
+ */
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
}
static int
eth_i40e_dev_uninit(struct rte_eth_dev *dev)
{
- struct i40e_pf *pf;
- struct rte_pci_device *pci_dev;
- struct rte_intr_handle *intr_handle;
struct i40e_hw *hw;
- struct i40e_filter_control_settings settings;
- struct rte_flow *p_flow;
- int ret;
- uint8_t aq_fail = 0;
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- intr_handle = &pci_dev->intr_handle;
- if (hw->adapter_stopped == 0)
+ if (hw->adapter_closed == 0)
i40e_dev_close(dev);
- dev->dev_ops = NULL;
- dev->rx_pkt_burst = NULL;
- dev->tx_pkt_burst = NULL;
-
- /* Clear PXE mode */
- i40e_clear_pxe_mode(hw);
-
- /* Unconfigure filter control */
- memset(&settings, 0, sizeof(settings));
- ret = i40e_set_filter_control(hw, &settings);
- if (ret)
- PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
- ret);
-
- /* Disable flow control */
- hw->fc.requested_mode = I40E_FC_NONE;
- i40e_set_fc(hw, &aq_fail, TRUE);
-
- /* uninitialize pf host driver */
- i40e_pf_host_uninit(dev);
-
- rte_free(dev->data->mac_addrs);
- dev->data->mac_addrs = NULL;
-
- /* disable uio intr before callback unregister */
- rte_intr_disable(intr_handle);
-
- /* register callback func to eal lib */
- rte_intr_callback_unregister(intr_handle,
- i40e_dev_interrupt_handler, dev);
-
- i40e_rm_ethtype_filter_list(pf);
- i40e_rm_tunnel_filter_list(pf);
- i40e_rm_fdir_filter_list(pf);
-
- /* Remove all flows */
- while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
- TAILQ_REMOVE(&pf->flow_list, p_flow, node);
- rte_free(p_flow);
- }
-
- /* Remove all Traffic Manager configuration */
- i40e_tm_conf_uninit(dev);
-
return 0;
}
ad->tx_simple_allowed = true;
ad->tx_vec_allowed = true;
+ if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
+ /* Only the legacy filter API needs the following fdir config, so when
+ * the legacy filter API is deprecated, the following code should also
+ * be removed.
+ */
if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
ret = i40e_fdir_setup(pf);
if (ret != I40E_SUCCESS) {
goto err;
/* VMDQ setup.
- * Needs to move VMDQ setting out of i40e_pf_config_mq_rx() as VMDQ and
- * RSS setting have different requirements.
* General PMD driver call sequence are NIC init, configure,
* rx/tx_queue_setup and dev_start. In rx/tx_queue_setup() function, it
* will try to lookup the VSI that specific queue belongs to if VMDQ
rte_free(pf->vmdq);
pf->vmdq = NULL;
err:
- /* need to release fdir resource if exists */
+ /* Need to release the fdir resource if it exists.
+ * Only the legacy filter API needs the following fdir config, so when
+ * the legacy filter API is deprecated, the following code should also
+ * be removed.
+ */
i40e_fdir_teardown(pf);
return ret;
}
int i;
uint32_t val;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
/* Bind all RX queues to allocated MSIX interrupt */
for (i = 0; i < nb_queue; i++) {
/* Write first RX queue to Link list register as the head element */
if (vsi->type != I40E_VSI_SRIOV) {
uint16_t interval =
- i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+ i40e_calc_itr_interval(1, pf->support_multi_driver);
if (msix_vect == I40E_MISC_VEC_ID) {
I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
I40E_WRITE_FLUSH(hw);
}
-void
+int
i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
uint16_t queue_idx = 0;
int record = 0;
- uint32_t val;
int i;
for (i = 0; i < vsi->nb_qps; i++) {
I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
}
- /* INTENA flag is not auto-cleared for interrupt */
- val = I40E_READ_REG(hw, I40E_GLINT_CTL);
- val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
- I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK |
- I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
- I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
-
/* VF bind interrupt */
if (vsi->type == I40E_VSI_SRIOV) {
+ if (vsi->nb_msix == 0) {
+ PMD_DRV_LOG(ERR, "No msix resource");
+ return -EINVAL;
+ }
__vsi_queues_bind_intr(vsi, msix_vect,
vsi->base_queue, vsi->nb_qps,
itr_idx);
- return;
+ return 0;
}
/* PF & VMDq bind interrupt */
}
for (i = 0; i < vsi->nb_used_qps; i++) {
- if (nb_msix <= 1) {
+ if (vsi->nb_msix == 0) {
+ PMD_DRV_LOG(ERR, "No msix resource");
+ return -EINVAL;
+ } else if (nb_msix <= 1) {
if (!rte_intr_allow_others(intr_handle))
/* allow to share MISC_VEC_ID */
msix_vect = I40E_MISC_VEC_ID;
msix_vect++;
nb_msix--;
}
+
+ return 0;
}
-static void
+void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
- uint16_t interval = i40e_calc_itr_interval(\
- RTE_LIBRTE_I40E_ITR_INTERVAL);
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
uint16_t msix_intr, i;
- if (rte_intr_allow_others(intr_handle))
+ if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
for (i = 0; i < vsi->nb_msix; i++) {
msix_intr = vsi->msix_intr + i;
I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
- (interval <<
- I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
}
else
I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
I40E_PFINT_DYN_CTL0_INTENA_MASK |
I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
- (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) |
- (interval <<
- I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT));
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
I40E_WRITE_FLUSH(hw);
}
-static void
+void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
uint16_t msix_intr, i;
- if (rte_intr_allow_others(intr_handle))
+ if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
for (i = 0; i < vsi->nb_msix; i++) {
msix_intr = vsi->msix_intr + i;
I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
- 0);
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
}
else
- I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
I40E_WRITE_FLUSH(hw);
}
struct i40e_aq_get_phy_abilities_resp phy_ab;
struct i40e_aq_set_phy_config phy_conf;
enum i40e_aq_phy_type cnt;
+ uint8_t avail_speed;
uint32_t phy_type_mask = 0;
const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
I40E_AQ_PHY_FLAG_PAUSE_RX |
I40E_AQ_PHY_FLAG_PAUSE_RX |
I40E_AQ_PHY_FLAG_LOW_POWER;
- const uint8_t advt = I40E_LINK_SPEED_40GB |
- I40E_LINK_SPEED_25GB |
- I40E_LINK_SPEED_10GB |
- I40E_LINK_SPEED_1GB |
- I40E_LINK_SPEED_100MB;
int ret = -ENOTSUP;
+ /* Get the PHY capabilities of the available speeds. */
+ status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
+ NULL);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d",
+ status);
+ return ret;
+ }
+ avail_speed = phy_ab.link_speed;
+ /* Get the current PHY config. */
status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
NULL);
- if (status)
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d",
+ status);
return ret;
+ }
- /* If link already up, no need to set up again */
- if (is_up && phy_ab.phy_type != 0)
+ /* If the link needs to go up, is in autoneg mode, and the speed is
+ * already valid, there is no need to set it up again.
+ */
+ if (is_up && phy_ab.phy_type != 0 &&
+ abilities & I40E_AQ_PHY_AN_ENABLED &&
+ phy_ab.link_speed != 0)
return I40E_SUCCESS;
memset(&phy_conf, 0, sizeof(phy_conf));
abilities &= ~mask;
abilities |= phy_ab.abilities & mask;
- /* update ablities and speed */
- if (abilities & I40E_AQ_PHY_AN_ENABLED)
- phy_conf.link_speed = advt;
- else
- phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed;
-
phy_conf.abilities = abilities;
+ /* If the link needs to go up but the forced speed is not supported,
+ * warn users and configure the default available speeds.
+ */
+ if (is_up && !(force_speed & avail_speed)) {
+ PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!");
+ phy_conf.link_speed = avail_speed;
+ } else {
+ phy_conf.link_speed = is_up ? force_speed : avail_speed;
+ }
-
- /* To enable link, phy_type mask needs to include each type */
- for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++)
+ /* PHY type mask needs to include each type except PHY type extension */
+ for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
phy_type_mask |= 1 << cnt;
/* use get_phy_abilities_resp value for the rest */
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_conf *conf = &dev->data->dev_conf;
- speed = i40e_parse_link_speeds(conf->link_speeds);
- abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
- if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
+ abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
+ I40E_AQ_PHY_LINK_ENABLED;
+
+ if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
+ conf->link_speeds = ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_25G |
+ ETH_LINK_SPEED_20G |
+ ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_100M;
+
abilities |= I40E_AQ_PHY_AN_ENABLED;
- abilities |= I40E_AQ_PHY_LINK_ENABLED;
+ } else {
+ abilities &= ~I40E_AQ_PHY_AN_ENABLED;
+ }
+ speed = i40e_parse_link_speeds(conf->link_speeds);
return i40e_phy_conf_link(hw, abilities, speed, true);
}
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
struct i40e_vsi *vsi;
+ uint16_t nb_rxq, nb_txq;
hw->adapter_stopped = 0;
- if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
- PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; autonegotiation disabled",
- dev->data->port_id);
- return -EINVAL;
- }
-
rte_intr_disable(intr_handle);
if ((rte_intr_cap_multiple(intr_handle) ||
ret = i40e_dev_rxtx_init(pf);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
- goto err_up;
+ return ret;
}
/* Map queues with MSIX interrupt */
main_vsi->nb_used_qps = dev->data->nb_rx_queues -
pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
- i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
+ ret = i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
+ if (ret < 0)
+ return ret;
i40e_vsi_enable_queues_intr(main_vsi);
/* Map VMDQ VSI queues with MSIX interrupt */
for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
- i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
- I40E_ITR_INDEX_DEFAULT);
+ ret = i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
+ I40E_ITR_INDEX_DEFAULT);
+ if (ret < 0)
+ return ret;
i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
}
- /* enable FDIR MSIX interrupt */
- if (pf->fdir.fdir_vsi) {
- i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi,
- I40E_ITR_INDEX_NONE);
- i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
+ /* Enable all queues which have been configured */
+ for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
+ ret = i40e_dev_rx_queue_start(dev, nb_rxq);
+ if (ret)
+ goto rx_err;
}
- /* Enable all queues which have been configured */
- ret = i40e_dev_switch_queues(pf, TRUE);
- if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to enable VSI");
- goto err_up;
+ for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
+ ret = i40e_dev_tx_queue_start(dev, nb_txq);
+ if (ret)
+ goto tx_err;
}
/* Enable receiving broadcast packets */
}
}
- /* Apply link configure */
- if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_40G)) {
- PMD_DRV_LOG(ERR, "Invalid link setting");
- goto err_up;
+ /* Enable mac loopback mode */
+ if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
+ dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
+ ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "failed to set loopback link");
+ goto tx_err;
+ }
}
+
+ /* Apply link configure */
ret = i40e_apply_link_speed(dev);
if (I40E_SUCCESS != ret) {
PMD_DRV_LOG(ERR, "Fail to apply link setting");
- goto err_up;
+ goto tx_err;
}
if (!rte_intr_allow_others(intr_handle)) {
i40e_dev_link_update(dev, 0);
}
- /* enable uio intr after callback register */
- rte_intr_enable(intr_handle);
+ if (dev->data->dev_conf.intr_conf.rxq == 0) {
+ rte_eal_alarm_set(I40E_ALARM_INTERVAL,
+ i40e_dev_alarm_handler, dev);
+ } else {
+ /* enable uio intr after callback register */
+ rte_intr_enable(intr_handle);
+ }
i40e_filter_restore(pf);
return I40E_SUCCESS;
-err_up:
- i40e_dev_switch_queues(pf, FALSE);
- i40e_dev_clear_queues(dev);
+tx_err:
+ for (i = 0; i < nb_txq; i++)
+ i40e_dev_tx_queue_stop(dev, i);
+rx_err:
+ for (i = 0; i < nb_rxq; i++)
+ i40e_dev_rx_queue_stop(dev, i);
return ret;
}
if (hw->adapter_stopped == 1)
return;
+
+ if (dev->data->dev_conf.intr_conf.rxq == 0) {
+ rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev);
+ rte_intr_enable(intr_handle);
+ }
+
/* Disable all queues */
- i40e_dev_switch_queues(pf, FALSE);
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ i40e_dev_tx_queue_stop(dev, i);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ i40e_dev_rx_queue_stop(dev, i);
/* un-map queues with interrupt registers */
i40e_vsi_disable_queues_intr(main_vsi);
i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
}
- if (pf->fdir.fdir_vsi) {
- i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
- i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
- }
/* Clear all queues and release memory */
i40e_dev_clear_queues(dev);
pf->tm_conf.committed = false;
hw->adapter_stopped = 1;
+
+ pf->adapter->rss_reta_updated = 0;
}
static void
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_mirror_rule *p_mirror;
+ struct i40e_filter_control_settings settings;
+ struct rte_flow *p_flow;
uint32_t reg;
int i;
int ret;
+ uint8_t aq_fail = 0;
+ int retries = 0;
PMD_INIT_FUNC_TRACE();
+ ret = rte_eth_switch_domain_free(pf->switch_domain_id);
+ if (ret)
+ PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
+
i40e_dev_stop(dev);
/* Remove all mirror rules */
i40e_pf_disable_irq0(hw);
rte_intr_disable(intr_handle);
+ /*
+ * Only legacy filter API needs the following fdir config. So when the
+ * legacy filter API is deprecated, the following code should also be
+ * removed.
+ */
+ i40e_fdir_teardown(pf);
+
/* shutdown and destroy the HMC */
i40e_shutdown_lan_hmc(hw);
pf->vmdq = NULL;
/* release all the existing VSIs and VEBs */
- i40e_fdir_teardown(pf);
i40e_vsi_release(pf->main_vsi);
/* shutdown the adminq */
i40e_res_pool_destroy(&pf->qp_pool);
i40e_res_pool_destroy(&pf->msix_pool);
+ /* Disable flexible payload in global configuration */
+ if (!pf->support_multi_driver)
+ i40e_flex_payload_reg_set_default(hw);
+
/* force a PF reset to clean anything leftover */
reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
I40E_WRITE_FLUSH(hw);
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+
+ /* Clear PXE mode */
+ i40e_clear_pxe_mode(hw);
+
+ /* Unconfigure filter control */
+ memset(&settings, 0, sizeof(settings));
+ ret = i40e_set_filter_control(hw, &settings);
+ if (ret)
+ PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
+ ret);
+
+ /* Disable flow control */
+ hw->fc.requested_mode = I40E_FC_NONE;
+ i40e_set_fc(hw, &aq_fail, TRUE);
+
+ /* uninitialize pf host driver */
+ i40e_pf_host_uninit(dev);
+
+ do {
+ ret = rte_intr_callback_unregister(intr_handle,
+ i40e_dev_interrupt_handler, dev);
+ if (ret >= 0 || ret == -ENOENT) {
+ break;
+ } else if (ret != -EAGAIN) {
+ PMD_INIT_LOG(ERR,
+ "intr callback unregister failed: %d",
+ ret);
+ }
+ i40e_msec_delay(500);
+ } while (retries++ < 5);
+
+ i40e_rm_ethtype_filter_list(pf);
+ i40e_rm_tunnel_filter_list(pf);
+ i40e_rm_fdir_filter_list(pf);
+
+ /* Remove all flows */
+ while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+ TAILQ_REMOVE(&pf->flow_list, p_flow, node);
+ /* Do not free FDIR flows since they are statically allocated */
+ if (p_flow->filter_type != RTE_ETH_FILTER_FDIR)
+ rte_free(p_flow);
+ }
+
+ /* release the statically allocated fdir memory */
+ i40e_fdir_memory_cleanup(pf);
+
+ /* Remove all Traffic Manager configuration */
+ i40e_tm_conf_uninit(dev);
+
+ hw->adapter_closed = 1;
}
/*
if (ret)
return ret;
- ret = eth_i40e_dev_init(dev);
+ ret = eth_i40e_dev_init(dev, NULL);
return ret;
}
-static void
+static int
i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
true, NULL, true);
- if (status != I40E_SUCCESS)
+ if (status != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
+ return -EAGAIN;
+ }
status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
TRUE, NULL);
- if (status != I40E_SUCCESS)
+ if (status != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
+ /* Rollback unicast promiscuous mode */
+ i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
+ false, NULL, true);
+ return -EAGAIN;
+ }
+ return 0;
}
-static void
+static int
i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
false, NULL, true);
- if (status != I40E_SUCCESS)
+ if (status != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
+ return -EAGAIN;
+ }
+
+ /* must remain in all_multicast mode */
+ if (dev->data->all_multicast == 1)
+ return 0;
status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
false, NULL);
- if (status != I40E_SUCCESS)
+ if (status != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
+ /* Rollback unicast promiscuous mode */
+ i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
+ true, NULL, true);
+ return -EAGAIN;
+ }
+
+ return 0;
}
-static void
+static int
i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
int ret;
ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
- if (ret != I40E_SUCCESS)
+ if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
+ return -EAGAIN;
+ }
+
+ return 0;
}
-static void
+static int
i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
int ret;
if (dev->data->promiscuous == 1)
- return; /* must remain in all_multicast mode */
+ return 0; /* must remain in all_multicast mode */
ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
vsi->seid, FALSE, NULL);
- if (ret != I40E_SUCCESS)
+ if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
+ return -EAGAIN;
+ }
+
+ return 0;
}
/*
return i40e_phy_conf_link(hw, abilities, speed, false);
}
-int
-i40e_dev_link_update(struct rte_eth_dev *dev,
- int wait_to_complete)
-{
-#define CHECK_INTERVAL 100 /* 100ms */
-#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct i40e_link_status link_status;
- struct rte_eth_link link, old;
- int status;
- unsigned rep_cnt = MAX_REPEAT_TIME;
- bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
-
- memset(&link, 0, sizeof(link));
- memset(&old, 0, sizeof(old));
- memset(&link_status, 0, sizeof(link_status));
- rte_i40e_dev_atomic_read_link_status(dev, &old);
+static __rte_always_inline void
+update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
+{
+/* Link status registers and values */
+#define I40E_PRTMAC_LINKSTA 0x001E2420
+#define I40E_REG_LINK_UP 0x40000080
+#define I40E_PRTMAC_MACC 0x001E24E0
+#define I40E_REG_MACC_25GB 0x00020000
+#define I40E_REG_SPEED_MASK 0x38000000
+#define I40E_REG_SPEED_0 0x00000000
+#define I40E_REG_SPEED_1 0x08000000
+#define I40E_REG_SPEED_2 0x10000000
+#define I40E_REG_SPEED_3 0x18000000
+#define I40E_REG_SPEED_4 0x20000000
+ uint32_t link_speed;
+ uint32_t reg_val;
+
+ reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
+ link_speed = reg_val & I40E_REG_SPEED_MASK;
+ reg_val &= I40E_REG_LINK_UP;
+ link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
+
+ if (unlikely(link->link_status == 0))
+ return;
- do {
- /* Get link status information from hardware */
- status = i40e_aq_get_link_info(hw, enable_lse,
+ /* Parse the link status */
+ switch (link_speed) {
+ case I40E_REG_SPEED_0:
+ link->link_speed = ETH_SPEED_NUM_100M;
+ break;
+ case I40E_REG_SPEED_1:
+ link->link_speed = ETH_SPEED_NUM_1G;
+ break;
+ case I40E_REG_SPEED_2:
+ if (hw->mac.type == I40E_MAC_X722)
+ link->link_speed = ETH_SPEED_NUM_2_5G;
+ else
+ link->link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case I40E_REG_SPEED_3:
+ if (hw->mac.type == I40E_MAC_X722) {
+ link->link_speed = ETH_SPEED_NUM_5G;
+ } else {
+ reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
+
+ if (reg_val & I40E_REG_MACC_25GB)
+ link->link_speed = ETH_SPEED_NUM_25G;
+ else
+ link->link_speed = ETH_SPEED_NUM_40G;
+ }
+ break;
+ case I40E_REG_SPEED_4:
+ if (hw->mac.type == I40E_MAC_X722)
+ link->link_speed = ETH_SPEED_NUM_10G;
+ else
+ link->link_speed = ETH_SPEED_NUM_20G;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
+ break;
+ }
+}
+
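+/* Slow path of link update: query the firmware with the Get Link Info
+ * adminq command, polling up to MAX_REPEAT_TIME * CHECK_INTERVAL ms when
+ * the caller asked to wait for link-up.
+ */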
+static __rte_always_inline void
+update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
+ bool enable_lse, int wait_to_complete)
+{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
+ uint32_t rep_cnt = MAX_REPEAT_TIME;
+ struct i40e_link_status link_status;
+ int status;
+
+ memset(&link_status, 0, sizeof(link_status));
+
+ do {
+ memset(&link_status, 0, sizeof(link_status));
+
+ /* Get link status information from hardware */
+ status = i40e_aq_get_link_info(hw, enable_lse,
&link_status, NULL);
- if (status != I40E_SUCCESS) {
- link.link_speed = ETH_SPEED_NUM_100M;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ if (unlikely(status != I40E_SUCCESS)) {
+ link->link_speed = ETH_SPEED_NUM_NONE;
+ link->link_duplex = ETH_LINK_FULL_DUPLEX;
PMD_DRV_LOG(ERR, "Failed to get link info");
- goto out;
+ return;
}
- link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
- if (!wait_to_complete || link.link_status)
+ link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
+ if (!wait_to_complete || link->link_status)
break;
rte_delay_ms(CHECK_INTERVAL);
} while (--rep_cnt);
- if (!link.link_status)
- goto out;
-
- /* i40e uses full duplex only */
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
-
/* Parse the link status */
switch (link_status.link_speed) {
case I40E_LINK_SPEED_100MB:
- link.link_speed = ETH_SPEED_NUM_100M;
+ link->link_speed = ETH_SPEED_NUM_100M;
break;
case I40E_LINK_SPEED_1GB:
- link.link_speed = ETH_SPEED_NUM_1G;
+ link->link_speed = ETH_SPEED_NUM_1G;
break;
case I40E_LINK_SPEED_10GB:
- link.link_speed = ETH_SPEED_NUM_10G;
+ link->link_speed = ETH_SPEED_NUM_10G;
break;
case I40E_LINK_SPEED_20GB:
- link.link_speed = ETH_SPEED_NUM_20G;
+ link->link_speed = ETH_SPEED_NUM_20G;
break;
case I40E_LINK_SPEED_25GB:
- link.link_speed = ETH_SPEED_NUM_25G;
+ link->link_speed = ETH_SPEED_NUM_25G;
break;
case I40E_LINK_SPEED_40GB:
- link.link_speed = ETH_SPEED_NUM_40G;
+ link->link_speed = ETH_SPEED_NUM_40G;
break;
default:
- link.link_speed = ETH_SPEED_NUM_100M;
+ link->link_speed = ETH_SPEED_NUM_NONE;
break;
}
+}
+
+int
+i40e_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_link link;
+ bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
+ int ret;
+
+ memset(&link, 0, sizeof(link));
+ /* i40e uses full duplex only */
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
-out:
- rte_i40e_dev_atomic_write_link_status(dev, &link);
- if (link.link_status == old.link_status)
- return -1;
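+	/* Use the cheap register read unless the caller asked to wait for
+	 * link-up or LSC interrupts are enabled; both need the adminq query.
+	 */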
+ if (!wait_to_complete && !enable_lse)
+ update_link_reg(hw, &link);
+ else
+ update_link_aq(hw, &link, enable_lse, wait_to_complete);
+
+ if (hw->switch_dev)
+ rte_eth_linkstatus_get(hw->switch_dev, &link);
+ ret = rte_eth_linkstatus_set(dev, &link);
i40e_notify_all_vfs_link_status(dev);
- return 0;
+ return ret;
}
/* Get all the statistics of a VSI */
&nes->rx_broadcast);
/* exclude CRC bytes */
nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
- nes->rx_broadcast) * ETHER_CRC_LEN;
+ nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
&oes->rx_discards, &nes->rx_discards);
pf->offset_loaded,
&pf->internal_stats_offset.rx_broadcast,
&pf->internal_stats.rx_broadcast);
+ /* Get total internal tx packet count */
+ i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
+ I40E_GLV_UPTCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.tx_unicast,
+ &pf->internal_stats.tx_unicast);
+ i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
+ I40E_GLV_MPTCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.tx_multicast,
+ &pf->internal_stats.tx_multicast);
+ i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
+ I40E_GLV_BPTCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.tx_broadcast,
+ &pf->internal_stats.tx_broadcast);
/* exclude CRC size */
pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
pf->internal_stats.rx_multicast +
- pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;
+ pf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN;
/* Get statistics of struct i40e_eth_stats */
i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
pf->offset_loaded, &os->eth.rx_broadcast,
&ns->eth.rx_broadcast);
/* Workaround: CRC size should not be included in byte statistics,
- * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
+ * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
+ * packet.
*/
ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
- ns->eth.rx_broadcast) * ETHER_CRC_LEN;
+ ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
- /* Workaround: it is possible I40E_GLV_GORCH[H/L] is updated before
- * I40E_GLPRT_GORCH[H/L], so there is a small window that cause negtive
+ /* exclude internal rx bytes
+ * Workaround: it is possible I40E_GLV_GORCH[H/L] is updated before
+	 * I40E_GLPRT_GORCH[H/L], so there is a small window that causes a
+	 * negative value.
+	 * The same applies to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L] and
+	 * I40E_GLV_BPRC[H/L].
*/
if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
ns->eth.rx_bytes = 0;
- /* exlude internal rx bytes */
else
ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
+ if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
+ ns->eth.rx_unicast = 0;
+ else
+ ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
+
+ if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
+ ns->eth.rx_multicast = 0;
+ else
+ ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
+
+ if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
+ ns->eth.rx_broadcast = 0;
+ else
+ ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
+
i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
pf->offset_loaded, &os->eth.rx_discards,
&ns->eth.rx_discards);
pf->offset_loaded, &os->eth.tx_broadcast,
&ns->eth.tx_broadcast);
ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
- ns->eth.tx_broadcast) * ETHER_CRC_LEN;
+ ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
- /* exclude internal tx bytes */
+ /* exclude internal tx bytes
+ * Workaround: it is possible I40E_GLV_GOTCH[H/L] is updated before
+	 * I40E_GLPRT_GOTCH[H/L], so there is a small window that causes a
+	 * negative value.
+	 * The same applies to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L] and
+	 * I40E_GLV_BPTC[H/L].
+ */
if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
ns->eth.tx_bytes = 0;
else
ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
+ if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
+ ns->eth.tx_unicast = 0;
+ else
+ ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
+
+ if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
+ ns->eth.tx_multicast = 0;
+ else
+ ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
+
+ if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
+ ns->eth.tx_broadcast = 0;
+ else
+ ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
+
/* GLPRT_TEPC not supported */
/* additional port specific stats */
}
/* Get all statistics of a port */
-static void
+static int
i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
+ struct i40e_vsi *vsi;
unsigned i;
/* call read registers - updates values, now write them to struct */
i40e_read_stats_registers(pf, hw);
- stats->ipackets = ns->eth.rx_unicast +
- ns->eth.rx_multicast +
- ns->eth.rx_broadcast -
- ns->eth.rx_discards -
+ stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
+ pf->main_vsi->eth_stats.rx_multicast +
+ pf->main_vsi->eth_stats.rx_broadcast -
pf->main_vsi->eth_stats.rx_discards;
stats->opackets = ns->eth.tx_unicast +
ns->eth.tx_multicast +
ns->eth.tx_broadcast;
- stats->ibytes = ns->eth.rx_bytes;
+ stats->ibytes = pf->main_vsi->eth_stats.rx_bytes;
stats->obytes = ns->eth.tx_bytes;
stats->oerrors = ns->eth.tx_errors +
pf->main_vsi->eth_stats.tx_errors;
ns->rx_length_errors + ns->rx_undersize +
ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
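+	/* Fold the per-VF VSI counters into the port-level totals */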
+ if (pf->vfs) {
+ for (i = 0; i < pf->vf_num; i++) {
+ vsi = pf->vfs[i].vsi;
+ i40e_update_vsi_stats(vsi);
+
+ stats->ipackets += (vsi->eth_stats.rx_unicast +
+ vsi->eth_stats.rx_multicast +
+ vsi->eth_stats.rx_broadcast -
+ vsi->eth_stats.rx_discards);
+ stats->ibytes += vsi->eth_stats.rx_bytes;
+ stats->oerrors += vsi->eth_stats.tx_errors;
+ stats->imissed += vsi->eth_stats.rx_discards;
+ }
+ }
+
PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes);
PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
ns->checksum_error);
PMD_DRV_LOG(DEBUG, "fdir_match: %"PRIu64"", ns->fd_sb_match);
PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
+ return 0;
}
/* Reset the statistics */
-static void
+static int
i40e_dev_stats_reset(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
/* read the stats, reading current register values into offset */
i40e_read_stats_registers(pf, hw);
+
+ return 0;
}
static uint32_t
/* Get stats from i40e_eth_stats struct */
for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
- snprintf(xstats_names[count].name,
- sizeof(xstats_names[count].name),
- "%s", rte_i40e_stats_strings[i].name);
+ strlcpy(xstats_names[count].name,
+ rte_i40e_stats_strings[i].name,
+ sizeof(xstats_names[count].name));
count++;
}
	/* Get individual stats from i40e_hw_port struct */
for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
- snprintf(xstats_names[count].name,
- sizeof(xstats_names[count].name),
- "%s", rte_i40e_hw_port_strings[i].name);
+ strlcpy(xstats_names[count].name,
+ rte_i40e_hw_port_strings[i].name,
+ sizeof(xstats_names[count].name));
count++;
}
return count;
}
-static int
-i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
- __rte_unused uint16_t queue_id,
- __rte_unused uint8_t stat_idx,
- __rte_unused uint8_t is_rx)
-{
- PMD_INIT_FUNC_TRACE();
-
- return -ENOSYS;
-}
-
static int
i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
return 0;
}
-static void
+/*
+ * When using NVM 6.01 (for X710/XL710/XXV710) or 3.33 (for X722) or later,
+ * the Rx data path does not hang if FW LLDP is stopped.
+ * Return true if LLDP needs to be stopped;
+ * return false if LLDP cannot be disabled without blocking the Rx data path.
+ */
+static bool
+i40e_need_stop_lldp(struct rte_eth_dev *dev)
+{
+ double nvm_ver;
+ char ver_str[64] = {0};
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ i40e_fw_version_get(dev, ver_str, 64);
+ nvm_ver = atof(ver_str);
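+	/* e.g. a version string beginning with "6.01" parses to 6.01 here */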
+ if ((hw->mac.type == I40E_MAC_X722 ||
+ hw->mac.type == I40E_MAC_X722_VF) &&
+ ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(3.33 * 1000)))
+ return true;
+ else if ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(6.01 * 1000))
+ return true;
+
+ return false;
+}
+
+static int
i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = vsi->nb_qps;
dev_info->max_tx_queues = vsi->nb_qps;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->max_mac_addrs = vsi->max_macaddrs;
dev_info->max_vfs = pci_dev->max_vfs;
+ dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_RSS_HASH;
+
+ dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO |
DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ dev_info->tx_queue_offload_capa;
+ dev_info->dev_capa =
+ RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
+ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+
dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
sizeof(uint32_t);
dev_info->reta_size = pf->hash_lut_size;
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
},
.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
+ .offloads = 0,
};
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
dev_info->max_tx_queues += dev_info->vmdq_queue_num;
}
- if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types))
+ if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
/* For XL710 */
dev_info->speed_capa = ETH_LINK_SPEED_40G;
- else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
+ dev_info->default_rxportconf.nb_queues = 2;
+ dev_info->default_txportconf.nb_queues = 2;
+ if (dev->data->nb_rx_queues == 1)
+ dev_info->default_rxportconf.ring_size = 2048;
+ else
+ dev_info->default_rxportconf.ring_size = 1024;
+ if (dev->data->nb_tx_queues == 1)
+ dev_info->default_txportconf.ring_size = 1024;
+ else
+ dev_info->default_txportconf.ring_size = 512;
+
+ } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
/* For XXV710 */
dev_info->speed_capa = ETH_LINK_SPEED_25G;
- else
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ dev_info->default_rxportconf.ring_size = 256;
+ dev_info->default_txportconf.ring_size = 256;
+ } else {
/* For X710 */
dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
+ dev_info->default_rxportconf.ring_size = 512;
+ dev_info->default_txportconf.ring_size = 256;
+ } else {
+ dev_info->default_rxportconf.ring_size = 256;
+ dev_info->default_txportconf.ring_size = 256;
+ }
+ }
+ dev_info->default_rxportconf.burst_size = 32;
+ dev_info->default_txportconf.burst_size = 32;
+
+ return 0;
}
static int
return 0;
}
- ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
+ ret = i40e_aq_debug_write_global_register(hw,
+ I40E_GL_SWT_L2TAGCTRL(reg_id),
reg_w, NULL);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR,
return -EIO;
}
PMD_DRV_LOG(DEBUG,
- "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]",
- reg_w, reg_id);
+ "Global register 0x%08x is changed with value 0x%08x",
+ I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
return 0;
}
uint16_t tpid)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
int ret = 0;
if ((vlan_type != ETH_VLAN_TYPE_INNER &&
"Unsupported vlan type.");
return -EINVAL;
}
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
+ return -ENOTSUP;
+ }
+
	/* 802.1ad frame support was added in NVM API 1.7 */
if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
if (qinq) {
if (vlan_type == ETH_VLAN_TYPE_OUTER)
hw->second_tag = rte_cpu_to_le_16(tpid);
}
- ret = i40e_aq_set_switch_config(hw, 0, 0, NULL);
+ ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR,
"Set switch config failed aq_err: %d",
return ret;
}
-static void
+/* Configure outer VLAN stripping on or off in QinQ mode */
+static int
+i40e_vsi_config_outer_vlan_stripping(struct i40e_vsi *vsi, bool on)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int ret = I40E_SUCCESS;
+ uint32_t reg;
+
+ if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
+ PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
+ return -EINVAL;
+ }
+
+ /* Configure for outer VLAN RX stripping */
+ reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
+
+ if (on)
+ reg |= I40E_VSI_TSR_QINQ_STRIP;
+ else
+ reg &= ~I40E_VSI_TSR_QINQ_STRIP;
+
+ ret = i40e_aq_debug_write_register(hw,
+ I40E_VSI_TSR(vsi->vsi_id),
+ reg, NULL);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
+ vsi->vsi_id);
+ return I40E_ERR_CONFIG;
+ }
+
+ return ret;
+}
+
+static int
i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
if (mask & ETH_VLAN_FILTER_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
i40e_vsi_config_vlan_filter(vsi, TRUE);
else
i40e_vsi_config_vlan_filter(vsi, FALSE);
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40e_vsi_config_vlan_stripping(vsi, TRUE);
else
i40e_vsi_config_vlan_stripping(vsi, FALSE);
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
i40e_vsi_config_double_vlan(vsi, TRUE);
/* Set global registers with default ethertype. */
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
- ETHER_TYPE_VLAN);
+ RTE_ETHER_TYPE_VLAN);
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
- ETHER_TYPE_VLAN);
+ RTE_ETHER_TYPE_VLAN);
}
else
i40e_vsi_config_double_vlan(vsi, FALSE);
}
+
+ if (mask & ETH_QINQ_STRIP_MASK) {
+ /* Enable or disable outer VLAN stripping */
+ if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+ i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
+ else
+ i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
+ }
+
+ return 0;
}
static void
I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
}
- /* config the water marker both based on the packets and bytes */
- I40E_WRITE_REG(hw, I40E_GLRPB_PHW,
- (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
- << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
- I40E_WRITE_REG(hw, I40E_GLRPB_PLW,
- (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
- << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
- I40E_WRITE_REG(hw, I40E_GLRPB_GHW,
- pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
- << I40E_KILOSHIFT);
- I40E_WRITE_REG(hw, I40E_GLRPB_GLW,
- pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
- << I40E_KILOSHIFT);
+ if (!pf->support_multi_driver) {
+		/* configure the watermark both in packets and in bytes */
+ I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
+ (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
+ I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
+ (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
+ I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
+ pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT);
+ I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
+ pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT);
+ } else {
+ PMD_DRV_LOG(ERR,
+ "Water marker configuration is not supported.");
+ }
I40E_WRITE_FLUSH(hw);
/* Add a MAC address, and update filters */
static int
i40e_macaddr_add(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr,
+ struct rte_ether_addr *mac_addr,
__rte_unused uint32_t index,
uint32_t pool)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_mac_filter_info mac_filter;
struct i40e_vsi *vsi;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
int ret;
/* If VMDQ not enabled or configured, return */
return -EINVAL;
}
- rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
else
mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *vsi;
struct rte_eth_dev_data *data = dev->data;
- struct ether_addr *macaddr;
+ struct rte_ether_addr *macaddr;
int ret;
uint32_t i;
uint64_t pool_sel;
{
struct i40e_hw *hw;
struct i40e_mac_filter_info mac_filter;
- struct ether_addr old_mac;
- struct ether_addr *new_mac;
+ struct rte_ether_addr old_mac;
+ struct rte_ether_addr *new_mac;
struct i40e_pf_vf *vf = NULL;
uint16_t vf_id;
int ret;
new_mac = &filter->mac_addr;
- if (is_zero_ether_addr(new_mac)) {
+ if (rte_is_zero_ether_addr(new_mac)) {
PMD_DRV_LOG(ERR, "Invalid ethernet address.");
return -EINVAL;
}
}
vf = &pf->vfs[vf_id];
- if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
+ if (add && rte_is_same_ether_addr(new_mac, &pf->dev_addr)) {
PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
return -EINVAL;
}
if (add) {
- rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
+ rte_memcpy(&old_mac, hw->mac.addr, RTE_ETHER_ADDR_LEN);
rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
mac_filter.filter_type = filter->filter_type;
ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
return -1;
}
- ether_addr_copy(new_mac, &pf->dev_addr);
+ rte_ether_addr_copy(new_mac, &pf->dev_addr);
} else {
rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
}
/* Clear device address as it has been removed */
- if (is_same_ether_addr(&(pf->dev_addr), new_mac))
- memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
+ if (rte_is_same_ether_addr(&pf->dev_addr, new_mac))
+ memset(&pf->dev_addr, 0, sizeof(struct rte_ether_addr));
}
return 0;
{
struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint32_t reg;
int ret;
if (!lut)
return -EINVAL;
if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
- ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
+ ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id,
+ vsi->type != I40E_VSI_SRIOV,
lut, lut_size);
if (ret) {
PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
uint32_t *lut_dw = (uint32_t *)lut;
uint16_t i, lut_size_dw = lut_size / 4;
- for (i = 0; i < lut_size_dw; i++)
- lut_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HLUT(i));
+ if (vsi->type == I40E_VSI_SRIOV) {
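+		/* VF VSIs read the lookup table from the per-VF
+		 * VFQF_HLUT1 registers; the PF uses PFQF_HLUT.
+		 */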
+			for (i = 0; i < lut_size_dw; i++) {
+ reg = I40E_VFQF_HLUT1(i, vsi->user_param);
+ lut_dw[i] = i40e_read_rx_ctl(hw, reg);
+ }
+ } else {
+ for (i = 0; i < lut_size_dw; i++)
+ lut_dw[i] = I40E_READ_REG(hw,
+ I40E_PFQF_HLUT(i));
+ }
}
return 0;
}
-static int
+int
i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
struct i40e_pf *pf;
hw = I40E_VSI_TO_HW(vsi);
if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
- ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
+ ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
+ vsi->type != I40E_VSI_SRIOV,
lut, lut_size);
if (ret) {
PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
uint32_t *lut_dw = (uint32_t *)lut;
uint16_t i, lut_size_dw = lut_size / 4;
- for (i = 0; i < lut_size_dw; i++)
- I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
+ if (vsi->type == I40E_VSI_SRIOV) {
+ for (i = 0; i < lut_size_dw; i++)
+ I40E_WRITE_REG(
+ hw,
+ I40E_VFQF_HLUT1(i, vsi->user_param),
+ lut_dw[i]);
+ } else {
+ for (i = 0; i < lut_size_dw; i++)
+ I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
+ lut_dw[i]);
+ }
I40E_WRITE_FLUSH(hw);
}
}
ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
+ pf->adapter->rss_reta_updated = 1;
+
out:
rte_free(lut);
* @alignment: what to align the allocation to
**/
enum i40e_status_code
-i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+i40e_allocate_dma_mem_d(__rte_unused struct i40e_hw *hw,
struct i40e_dma_mem *mem,
u64 size,
u32 alignment)
return I40E_ERR_PARAM;
snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
- mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
- alignment, RTE_PGSIZE_2M);
+ mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
if (!mz)
return I40E_ERR_NO_MEMORY;
mem->size = size;
mem->va = mz->addr;
- mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+ mem->pa = mz->iova;
mem->zone = (const void *)mz;
PMD_DRV_LOG(DEBUG,
"memzone %s allocated with physical address: %"PRIu64,
* @mem: ptr to mem struct to free
**/
enum i40e_status_code
-i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+i40e_free_dma_mem_d(__rte_unused struct i40e_hw *hw,
struct i40e_dma_mem *mem)
{
if (!mem)
* @size: size of memory requested
**/
enum i40e_status_code
-i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+i40e_allocate_virt_mem_d(__rte_unused struct i40e_hw *hw,
struct i40e_virt_mem *mem,
u32 size)
{
* @mem: pointer to mem struct to free
**/
enum i40e_status_code
-i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+i40e_free_virt_mem_d(__rte_unused struct i40e_hw *hw,
struct i40e_virt_mem *mem)
{
if (!mem)
}
void
-i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
+i40e_destroy_spinlock_d(__rte_unused struct i40e_spinlock *sp)
{
return;
}
return ret;
}
+#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF 4
+
+static int i40e_pf_parse_vf_queue_number_handler(const char *key,
+ const char *value,
+ void *opaque)
+{
+ struct i40e_pf *pf;
+ unsigned long num;
+ char *end;
+
+ pf = (struct i40e_pf *)opaque;
+ RTE_SET_USED(key);
+
+ errno = 0;
+ num = strtoul(value, &end, 0);
+ if (errno != 0 || end == value || *end != 0) {
+ PMD_DRV_LOG(WARNING, "Wrong VF queue number = %s, Now it is "
+ "kept the value = %hu", value, pf->vf_nb_qp_max);
+ return -(EINVAL);
+ }
+
+ if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
+ pf->vf_nb_qp_max = (uint16_t)num;
+ else
+		/* return 0 so that a later valid occurrence of the same
+		 * argument can still take effect
+		 */
+		PMD_DRV_LOG(WARNING, "Wrong VF queue number = %lu, it must "
+			    "be a power of 2 and no greater than 16! Keeping "
+			    "the current value = %hu", num, pf->vf_nb_qp_max);
+
+ return 0;
+}
+
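+/* Parse the optional "queue-num-per-vf" devarg and set the per-VF queue
+ * maximum; fall back to the 4-queue default when absent or invalid.
+ */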
+static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_kvargs *kvlist;
+ int kvargs_count;
+
+ /* set default queue number per VF as 4 */
+ pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
+
+ if (dev->device->devargs == NULL)
+ return 0;
+
+ kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
+ if (kvlist == NULL)
+ return -(EINVAL);
+
+ kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG);
+ if (!kvargs_count) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+
+ if (kvargs_count > 1)
+ PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
+ "the first invalid or last valid one is used !",
+ ETH_I40E_QUEUE_NUM_PER_VF_ARG);
+
+ rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG,
+ i40e_pf_parse_vf_queue_number_handler, pf);
+
+ rte_kvargs_free(kvlist);
+
+ return 0;
+}
+
static int
i40e_pf_parameter_init(struct rte_eth_dev *dev)
{
PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
return -EINVAL;
}
+
+ i40e_pf_config_vf_rxq_number(dev);
+
/* Add the parameter init for LFC */
pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
pf->max_num_vsi = hw->func_caps.num_vsis;
pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
- pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
/* FDir queue/VSI allocation */
pf->fdir_qp_offset = 0;
pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
pf->flags |= I40E_FLAG_SRIOV;
- pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
+ pf->vf_nb_qps = pf->vf_nb_qp_max;
pf->vf_num = pci_dev->max_vfs;
PMD_DRV_LOG(DEBUG,
"%u VF VSIs, %u queues per VF VSI, in total %u queues",
{
struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
uint32_t pool_offset;
+ uint16_t len;
int insert;
if (pool == NULL) {
}
insert = 0;
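+	/* Cache the length now: valid_entry may be freed below once it is
+	 * merged with a neighbouring entry.
+	 */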
+ len = valid_entry->len;
/* Try to merge with next one*/
if (next != NULL) {
/* Merge with next one */
- if (valid_entry->base + valid_entry->len == next->base) {
+ if (valid_entry->base + len == next->base) {
next->base = valid_entry->base;
- next->len += valid_entry->len;
+ next->len += len;
rte_free(valid_entry);
valid_entry = next;
insert = 1;
if (prev != NULL) {
/* Merge with previous one */
if (prev->base + prev->len == valid_entry->base) {
- prev->len += valid_entry->len;
+ prev->len += len;
/* If it merge with next one, remove next node */
if (insert == 1) {
LIST_REMOVE(valid_entry, next);
rte_free(valid_entry);
+ valid_entry = NULL;
} else {
rte_free(valid_entry);
+ valid_entry = NULL;
insert = 1;
}
}
LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
}
- pool->num_free += valid_entry->len;
- pool->num_alloc -= valid_entry->len;
+ pool->num_free += len;
+ pool->num_alloc -= len;
return 0;
}
ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
if (ret != I40E_SUCCESS) {
struct i40e_mac_filter *f;
- struct ether_addr *mac;
+ struct rte_ether_addr *mac;
PMD_DRV_LOG(DEBUG,
"Cannot remove the default macvlan filter");
return ret;
}
rte_memcpy(&filter.mac_addr,
- (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
+ (struct rte_ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
return i40e_vsi_add_mac(vsi, &filter);
}
int ret;
/* Use the FW API if FW >= v5.0 */
- if (hw->aq.fw_maj_ver < 5) {
+ if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
return;
}
struct i40e_mac_filter_info filter;
int ret;
struct i40e_vsi_context ctxt;
- struct ether_addr broadcast =
+ struct rte_ether_addr broadcast =
{.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
/* VF has MSIX interrupt in VF range, don't allocate here */
if (type == I40E_VSI_MAIN) {
- ret = i40e_res_pool_alloc(&pf->msix_pool,
- RTE_MIN(vsi->nb_qps,
- RTE_MAX_RXTX_INTR_VEC_ID));
- if (ret < 0) {
- PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d",
- vsi->seid, ret);
- goto fail_queue_alloc;
+ if (pf->support_multi_driver) {
+			/* When multi-driver support is enabled, INT0 must be
+			 * used instead of allocating from the MSIX pool. The
+			 * MSIX pool starts at INT1, so it is safe to simply
+			 * set msix_intr to 0 and nb_msix to 1 without calling
+			 * i40e_res_pool_alloc.
+ */
+ vsi->msix_intr = 0;
+ vsi->nb_msix = 1;
+ } else {
+ ret = i40e_res_pool_alloc(&pf->msix_pool,
+ RTE_MIN(vsi->nb_qps,
+ RTE_MAX_RXTX_INTR_VEC_ID));
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "VSI MAIN %d get heap failed %d",
+ vsi->seid, ret);
+ goto fail_queue_alloc;
+ }
+ vsi->msix_intr = ret;
+ vsi->nb_msix = RTE_MIN(vsi->nb_qps,
+ RTE_MAX_RXTX_INTR_VEC_ID);
}
- vsi->msix_intr = ret;
- vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
} else if (type != I40E_VSI_SRIOV) {
ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
if (ret < 0) {
PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
- goto fail_queue_alloc;
+ if (type != I40E_VSI_FDIR)
+ goto fail_queue_alloc;
+ vsi->msix_intr = 0;
+ vsi->nb_msix = 0;
+ } else {
+ vsi->msix_intr = ret;
+ vsi->nb_msix = 1;
}
- vsi->msix_intr = ret;
- vsi->nb_msix = 1;
} else {
vsi->msix_intr = 0;
vsi->nb_msix = 0;
ctxt.flags = I40E_AQ_VSI_TYPE_VF;
/* Use the VEB configuration if FW >= v5.0 */
- if (hw->aq.fw_maj_ver >= 5) {
+ if (hw->aq.fw_maj_ver >= 5 || hw->mac.type == I40E_MAC_X722) {
/* Configure switch ID */
ctxt.info.valid_sections |=
rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
}
/* MAC/VLAN configuration */
- rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
+ rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
ret = i40e_vsi_add_mac(vsi, &filter);
int mask = 0;
/* Apply vlan offload setting */
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
- i40e_vlan_offload_set(dev, mask);
-
- /* Apply double-vlan setting, not implemented yet */
+ mask = ETH_VLAN_STRIP_MASK |
+ ETH_QINQ_STRIP_MASK |
+ ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK;
+ ret = i40e_vlan_offload_set(dev, mask);
+ if (ret) {
+ PMD_DRV_LOG(INFO, "Failed to update vlan offload");
+ return ret;
+ }
/* Apply pvid setting */
ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
return ret;
}
+
+ ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
+ if (ret)
+ PMD_INIT_LOG(WARNING,
+ "failed to allocate switch domain for device %d", ret);
+
if (pf->flags & I40E_FLAG_FDIR) {
/* make queue allocated first, let FDIR use queue pair 0*/
ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
return I40E_SUCCESS;
}
-/* Swith on or off the tx queues */
-static int
-i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
-{
- struct rte_eth_dev_data *dev_data = pf->dev_data;
- struct i40e_tx_queue *txq;
- struct rte_eth_dev *dev = pf->adapter->eth_dev;
- uint16_t i;
- int ret;
-
- for (i = 0; i < dev_data->nb_tx_queues; i++) {
- txq = dev_data->tx_queues[i];
- /* Don't operate the queue if not configured or
- * if starting only per queue */
- if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
- continue;
- if (on)
- ret = i40e_dev_tx_queue_start(dev, i);
- else
- ret = i40e_dev_tx_queue_stop(dev, i);
- if ( ret != I40E_SUCCESS)
- return ret;
- }
-
- return I40E_SUCCESS;
-}
-
int
i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
{
return I40E_SUCCESS;
}
-/* Switch on or off the rx queues */
+
+/* Initialize VSI for TX */
static int
-i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
+i40e_dev_tx_init(struct i40e_pf *pf)
{
- struct rte_eth_dev_data *dev_data = pf->dev_data;
- struct i40e_rx_queue *rxq;
- struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct rte_eth_dev_data *data = pf->dev_data;
uint16_t i;
- int ret;
+ uint32_t ret = I40E_SUCCESS;
+ struct i40e_tx_queue *txq;
- for (i = 0; i < dev_data->nb_rx_queues; i++) {
- rxq = dev_data->rx_queues[i];
- /* Don't operate the queue if not configured or
- * if starting only per queue */
- if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
+ for (i = 0; i < data->nb_tx_queues; i++) {
+ txq = data->tx_queues[i];
+ if (!txq || !txq->q_set)
continue;
- if (on)
- ret = i40e_dev_rx_queue_start(dev, i);
- else
- ret = i40e_dev_rx_queue_stop(dev, i);
+ ret = i40e_tx_queue_init(txq);
if (ret != I40E_SUCCESS)
- return ret;
+ break;
}
+ if (ret == I40E_SUCCESS)
+ i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
+ ->eth_dev);
- return I40E_SUCCESS;
-}
-
-/* Switch on or off all the rx/tx queues */
-int
-i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
-{
- int ret;
-
- if (on) {
- /* enable rx queues before enabling tx queues */
- ret = i40e_dev_switch_rx_queues(pf, on);
- if (ret) {
- PMD_DRV_LOG(ERR, "Failed to switch rx queues");
- return ret;
- }
- ret = i40e_dev_switch_tx_queues(pf, on);
- } else {
- /* Stop tx queues before stopping rx queues */
- ret = i40e_dev_switch_tx_queues(pf, on);
- if (ret) {
- PMD_DRV_LOG(ERR, "Failed to switch tx queues");
- return ret;
- }
- ret = i40e_dev_switch_rx_queues(pf, on);
- }
-
- return ret;
-}
-
-/* Initialize VSI for TX */
-static int
-i40e_dev_tx_init(struct i40e_pf *pf)
-{
- struct rte_eth_dev_data *data = pf->dev_data;
- uint16_t i;
- uint32_t ret = I40E_SUCCESS;
- struct i40e_tx_queue *txq;
-
- for (i = 0; i < data->nb_tx_queues; i++) {
- txq = data->tx_queues[i];
- if (!txq || !txq->q_set)
- continue;
- ret = i40e_tx_queue_init(txq);
- if (ret != I40E_SUCCESS)
- break;
- }
- if (ret == I40E_SUCCESS)
- i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
- ->eth_dev);
-
- return ret;
+ return ret;
}
/* Initialize VSI for RX */
uint16_t i;
struct i40e_rx_queue *rxq;
- i40e_pf_config_mq_rx(pf);
+ i40e_pf_config_rss(pf);
for (i = 0; i < data->nb_rx_queues; i++) {
rxq = data->rx_queues[i];
if (!rxq || !rxq->q_set)
i40e_pf_disable_irq0(struct i40e_hw *hw)
{
/* Disable all interrupt types */
- I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
I40E_WRITE_FLUSH(hw);
}
ret = i40e_dev_link_update(dev, 0);
if (!ret)
_rte_eth_dev_callback_process(dev,
- RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
+ RTE_ETH_EVENT_INTR_LSC, NULL);
break;
default:
PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
rte_free(info.msg_buf);
}
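+/* Decode a Malicious Driver Detection (MDD) event: report which TX/RX
+ * queue and which PF/VF triggered it, then clear the detection registers.
+ */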
+static void
+i40e_handle_mdd_event(struct rte_eth_dev *dev)
+{
+#define I40E_MDD_CLEAR32 0xFFFFFFFF
+#define I40E_MDD_CLEAR16 0xFFFF
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ bool mdd_detected = false;
+ struct i40e_pf_vf *vf;
+ uint32_t reg;
+ int i;
+
+ /* find what triggered the MDD event */
+ reg = I40E_READ_REG(hw, I40E_GL_MDET_TX);
+ if (reg & I40E_GL_MDET_TX_VALID_MASK) {
+ uint8_t pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
+ I40E_GL_MDET_TX_PF_NUM_SHIFT;
+ uint16_t vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
+ I40E_GL_MDET_TX_VF_NUM_SHIFT;
+ uint8_t event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
+ I40E_GL_MDET_TX_EVENT_SHIFT;
+ uint16_t queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
+ I40E_GL_MDET_TX_QUEUE_SHIFT) -
+ hw->func_caps.base_queue;
+ PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on TX "
+ "queue %d PF number 0x%02x VF number 0x%02x device %s\n",
+ event, queue, pf_num, vf_num, dev->data->name);
+ I40E_WRITE_REG(hw, I40E_GL_MDET_TX, I40E_MDD_CLEAR32);
+ mdd_detected = true;
+ }
+ reg = I40E_READ_REG(hw, I40E_GL_MDET_RX);
+ if (reg & I40E_GL_MDET_RX_VALID_MASK) {
+ uint8_t func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
+ I40E_GL_MDET_RX_FUNCTION_SHIFT;
+ uint8_t event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
+ I40E_GL_MDET_RX_EVENT_SHIFT;
+ uint16_t queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
+ I40E_GL_MDET_RX_QUEUE_SHIFT) -
+ hw->func_caps.base_queue;
+
+ PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on RX "
+ "queue %d of function 0x%02x device %s\n",
+ event, queue, func, dev->data->name);
+ I40E_WRITE_REG(hw, I40E_GL_MDET_RX, I40E_MDD_CLEAR32);
+ mdd_detected = true;
+ }
+
+ if (mdd_detected) {
+ reg = I40E_READ_REG(hw, I40E_PF_MDET_TX);
+ if (reg & I40E_PF_MDET_TX_VALID_MASK) {
+ I40E_WRITE_REG(hw, I40E_PF_MDET_TX, I40E_MDD_CLEAR16);
+ PMD_DRV_LOG(WARNING, "TX driver issue detected on PF\n");
+ }
+ reg = I40E_READ_REG(hw, I40E_PF_MDET_RX);
+ if (reg & I40E_PF_MDET_RX_VALID_MASK) {
+ I40E_WRITE_REG(hw, I40E_PF_MDET_RX,
+ I40E_MDD_CLEAR16);
+ PMD_DRV_LOG(WARNING, "RX driver issue detected on PF\n");
+ }
+ }
+
+ /* see if one of the VFs needs its hand slapped */
+ for (i = 0; i < pf->vf_num && mdd_detected; i++) {
+ vf = &pf->vfs[i];
+ reg = I40E_READ_REG(hw, I40E_VP_MDET_TX(i));
+ if (reg & I40E_VP_MDET_TX_VALID_MASK) {
+ I40E_WRITE_REG(hw, I40E_VP_MDET_TX(i),
+ I40E_MDD_CLEAR16);
+ vf->num_mdd_events++;
+ PMD_DRV_LOG(WARNING, "TX driver issue detected on VF %d %-"
+ PRIu64 "times\n",
+ i, vf->num_mdd_events);
+ }
+
+ reg = I40E_READ_REG(hw, I40E_VP_MDET_RX(i));
+ if (reg & I40E_VP_MDET_RX_VALID_MASK) {
+ I40E_WRITE_REG(hw, I40E_VP_MDET_RX(i),
+ I40E_MDD_CLEAR16);
+ vf->num_mdd_events++;
+ PMD_DRV_LOG(WARNING, "RX driver issue detected on VF %d %-"
+ PRIu64 "times\n",
+ i, vf->num_mdd_events);
+ }
+ }
+}
+
/**
* Interrupt handler triggered by NIC for handling
* specific interrupt.
}
if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
- if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
+ if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
+ PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
+ i40e_handle_mdd_event(dev);
+ }
+ if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
+ PMD_DRV_LOG(INFO, "ICR0: global reset requested");
+ if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
+ PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
+ if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
+ PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
+ if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: HMC error");
+ if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
+
+ if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
+ PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
+ i40e_dev_handle_vfr_event(dev);
+ }
+ if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
+ PMD_DRV_LOG(INFO, "ICR0: adminq event");
+ i40e_dev_handle_aq_msg(dev);
+ }
+
+done:
+ /* Enable interrupt */
+ i40e_pf_enable_irq0(hw);
+}
+
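+/* Periodic alarm callback: poll ICR0 for interrupt causes with IRQ0
+ * masked, handle them, then re-arm the alarm.
+ */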
+static void
+i40e_dev_alarm_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t icr0;
+
+ /* Disable interrupt */
+ i40e_pf_disable_irq0(hw);
+
+ /* read out interrupt causes */
+ icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
+
+ /* No interrupt event indicated */
+ if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK))
+ goto done;
+ if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
+ if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
+ i40e_handle_mdd_event(dev);
+ }
if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
PMD_DRV_LOG(INFO, "ICR0: global reset requested");
if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
done:
/* Enable interrupt */
i40e_pf_enable_irq0(hw);
- rte_intr_enable(dev->intr_handle);
+ rte_eal_alarm_set(I40E_ALARM_INTERVAL,
+ i40e_dev_alarm_handler, dev);
}
int
/* Find out specific MAC filter */
static struct i40e_mac_filter *
i40e_find_mac_filter(struct i40e_vsi *vsi,
- struct ether_addr *macaddr)
+ struct rte_ether_addr *macaddr)
{
struct i40e_mac_filter *f;
TAILQ_FOREACH(f, &vsi->mac_list, next) {
- if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
+ if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
return f;
}
int
i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
struct i40e_macvlan_filter *mv_f,
- int num, struct ether_addr *addr)
+ int num, struct rte_ether_addr *addr)
{
int i;
uint32_t j, k;
int mac_num;
int ret = I40E_SUCCESS;
- if (!vsi || vlan > ETHER_MAX_VLAN_ID)
+ if (!vsi || vlan > RTE_ETHER_MAX_VLAN_ID)
return I40E_ERR_PARAM;
/* If it's already set, just return */
* Vlan 0 is the generic filter for untagged packets
* and can't be removed.
*/
- if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
+ if (!vsi || vlan == 0 || vlan > RTE_ETHER_MAX_VLAN_ID)
return I40E_ERR_PARAM;
/* If can't find it, just return */
}
int
-i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
+i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr)
{
struct i40e_mac_filter *f;
struct i40e_macvlan_filter *mv_f;
I40E_WRITE_FLUSH(hw);
}
-static int
+int
i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
{
struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
+ I40E_VFQF_HKEY_MAX_INDEX :
+ I40E_PFQF_HKEY_MAX_INDEX;
int ret = 0;
if (!key || key_len == 0) {
PMD_DRV_LOG(DEBUG, "No key to be configured");
return 0;
- } else if (key_len != (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ } else if (key_len != (key_idx + 1) *
sizeof(uint32_t)) {
PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
return -EINVAL;
uint32_t *hash_key = (uint32_t *)key;
uint16_t i;
- for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), hash_key[i]);
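+		/* VF VSIs program the hash key through the per-VF
+		 * VFQF_HKEY1 registers; the PF uses PFQF_HKEY.
+		 */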
+ if (vsi->type == I40E_VSI_SRIOV) {
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+ I40E_WRITE_REG(
+ hw,
+ I40E_VFQF_HKEY1(i, vsi->user_param),
+ hash_key[i]);
+
+ } else {
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+ I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
+ hash_key[i]);
+ }
I40E_WRITE_FLUSH(hw);
}
{
struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint32_t reg;
int ret;
if (!key || !key_len)
- return -EINVAL;
+ return 0;
if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
uint32_t *key_dw = (uint32_t *)key;
uint16_t i;
- for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- key_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
+ if (vsi->type == I40E_VSI_SRIOV) {
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
+ reg = I40E_VFQF_HKEY1(i, vsi->user_param);
+ key_dw[i] = i40e_read_rx_ctl(hw, reg);
+ }
+ *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ } else {
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
+ reg = I40E_PFQF_HKEY(i);
+ key_dw[i] = i40e_read_rx_ctl(hw, reg);
+ }
+ *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ }
}
- *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
-
return 0;
}
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint64_t hena;
+ int ret;
+
+ if (!rss_conf)
+ return -EINVAL;
- i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
+ ret = i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
&rss_conf->rss_key_len);
+ if (ret)
+ return ret;
hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
/* Convert tunnel filter structure */
static int
i40e_tunnel_filter_convert(
- struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
+ struct i40e_aqc_cloud_filters_element_bb *cld_filter,
struct i40e_tunnel_filter *tunnel_filter)
{
- ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac,
- (struct ether_addr *)&tunnel_filter->input.outer_mac);
- ether_addr_copy((struct ether_addr *)&cld_filter->element.inner_mac,
- (struct ether_addr *)&tunnel_filter->input.inner_mac);
+ rte_ether_addr_copy((struct rte_ether_addr *)
+ &cld_filter->element.outer_mac,
+ (struct rte_ether_addr *)&tunnel_filter->input.outer_mac);
+ rte_ether_addr_copy((struct rte_ether_addr *)
+ &cld_filter->element.inner_mac,
+ (struct rte_ether_addr *)&tunnel_filter->input.inner_mac);
tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
if ((rte_le_to_cpu_16(cld_filter->element.flags) &
I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
uint8_t add)
{
uint16_t ip_type;
- uint32_t ipv4_addr;
+ uint32_t ipv4_addr, ipv4_addr_le;
uint8_t i, tun_type = 0;
	/* internal variable to convert ipv6 byte order */
uint32_t convert_ipv6[4];
int val, ret = 0;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
struct i40e_vsi *vsi = pf->main_vsi;
- struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
- struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
+ struct i40e_aqc_cloud_filters_element_bb *cld_filter;
+ struct i40e_aqc_cloud_filters_element_bb *pfilter;
struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
struct i40e_tunnel_filter *tunnel, *node;
struct i40e_tunnel_filter check_filter; /* Check if filter exists */
}
pfilter = cld_filter;
- ether_addr_copy(&tunnel_filter->outer_mac,
- (struct ether_addr *)&pfilter->element.outer_mac);
- ether_addr_copy(&tunnel_filter->inner_mac,
- (struct ether_addr *)&pfilter->element.inner_mac);
+ rte_ether_addr_copy(&tunnel_filter->outer_mac,
+ (struct rte_ether_addr *)&pfilter->element.outer_mac);
+ rte_ether_addr_copy(&tunnel_filter->inner_mac,
+ (struct rte_ether_addr *)&pfilter->element.inner_mac);
pfilter->element.inner_vlan =
rte_cpu_to_le_16(tunnel_filter->inner_vlan);
if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
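+		/* Convert through a local variable: taking the address of
+		 * the rte_cpu_to_le_32() result (an rvalue) is not valid C.
+		 */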
+ ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
rte_memcpy(&pfilter->element.ipaddr.v4.data,
- &rte_cpu_to_le_32(ipv4_addr),
+ &ipv4_addr_le,
sizeof(pfilter->element.ipaddr.v4.data));
} else {
ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
case RTE_TUNNEL_TYPE_IP_IN_GRE:
tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
break;
+ case RTE_TUNNEL_TYPE_VXLAN_GPE:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE;
+ break;
default:
/* Other tunnel types is not supported. */
PMD_DRV_LOG(ERR, "tunnel type is not supported.");
node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
if (add && node) {
PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+ rte_free(cld_filter);
return -EINVAL;
}
if (!add && !node) {
PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+ rte_free(cld_filter);
return -EINVAL;
}
vsi->seid, &cld_filter->element, 1);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+ rte_free(cld_filter);
return -ENOTSUP;
}
tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+ if (tunnel == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+ rte_free(cld_filter);
+ return -ENOMEM;
+ }
+
rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+ if (ret < 0)
+ rte_free(tunnel);
} else {
- ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+ ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
&cld_filter->element, 1);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+ rte_free(cld_filter);
return -ENOTSUP;
}
ret = i40e_sw_tunnel_filter_del(pf, &node->input);
#define I40E_TR_GRE_KEY_MASK 0x400
#define I40E_TR_GRE_KEY_WITH_XSUM_MASK 0x800
#define I40E_TR_GRE_NO_KEY_MASK 0x8000
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0 0x49
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0 0x41
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0 0x80
+#define I40E_DIRECTION_INGRESS_KEY 0x8000
+#define I40E_TR_L4_TYPE_TCP 0x2
+#define I40E_TR_L4_TYPE_UDP 0x4
+#define I40E_TR_L4_TYPE_SCTP 0x8
static enum
i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
enum i40e_status_code status = I40E_SUCCESS;
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
+ return I40E_NOT_SUPPORTED;
+ }
+
memset(&filter_replace, 0,
sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
memset(&filter_replace_buf, 0,
/* create L1 filter */
filter_replace.old_filter_type =
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
- filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+ filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
filter_replace.tr_bit = 0;
/* Prepare the buffer, 3 entries */
status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
+ if (!status && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+
return status;
}
struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
enum i40e_status_code status = I40E_SUCCESS;
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
+ return I40E_NOT_SUPPORTED;
+ }
+
/* For MPLSoUDP */
memset(&filter_replace, 0,
sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
I40E_AQC_MIRROR_CLOUD_FILTER;
filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
filter_replace.new_filter_type =
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+ I40E_AQC_ADD_CLOUD_FILTER_0X11;
/* Prepare the buffer, 2 entries */
filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
filter_replace_buf.data[0] |=
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
- filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
filter_replace_buf.data[4] |=
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
if (status < 0)
return status;
+ if (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
/* For MPLSoGRE */
memset(&filter_replace, 0,
I40E_AQC_MIRROR_CLOUD_FILTER;
filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
filter_replace.new_filter_type =
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+ I40E_AQC_ADD_CLOUD_FILTER_0X12;
/* Prepare the buffer, 2 entries */
filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
filter_replace_buf.data[0] |=
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
- filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
filter_replace_buf.data[4] |=
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
+ if (!status && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+
return status;
}
-int
-i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
- struct i40e_tunnel_filter_conf *tunnel_filter,
- uint8_t add)
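+/* Swap the default L1 cloud filter types for GTP-aware ones (0x12 for
+ * GTP-C, 0x13 for GTP-U) via the adminq replace-cloud-filters command.
+ */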
+static enum i40e_status_code
+i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
{
- uint16_t ip_type;
- uint32_t ipv4_addr;
- uint8_t i, tun_type = 0;
- /* internal variable to convert ipv6 byte order */
- uint32_t convert_ipv6[4];
- int val, ret = 0;
- struct i40e_pf_vf *vf = NULL;
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- struct i40e_vsi *vsi;
- struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
- struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
- struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
- struct i40e_tunnel_filter *tunnel, *node;
- struct i40e_tunnel_filter check_filter; /* Check if filter exists */
- uint32_t teid_le;
- bool big_buffer = 0;
-
- cld_filter = rte_zmalloc("tunnel_filter",
- sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
- 0);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+ enum i40e_status_code status = I40E_SUCCESS;
- if (cld_filter == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc memory.");
- return -ENOMEM;
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
+ return I40E_NOT_SUPPORTED;
}
- pfilter = cld_filter;
- ether_addr_copy(&tunnel_filter->outer_mac,
- (struct ether_addr *)&pfilter->element.outer_mac);
- ether_addr_copy(&tunnel_filter->inner_mac,
- (struct ether_addr *)&pfilter->element.inner_mac);
+ /* For GTP-C */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+ /* create L1 filter */
+ filter_replace.old_filter_type =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
+ filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
+ filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[2] = 0xFF;
+ filter_replace_buf.data[3] = 0xFF;
+ filter_replace_buf.data[4] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[6] = 0xFF;
+ filter_replace_buf.data[7] = 0xFF;
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (status < 0)
+ return status;
+ if (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+
+ /* for GTP-U */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+ /* create L1 filter */
+ filter_replace.old_filter_type =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
+ filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
+ filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[2] = 0xFF;
+ filter_replace_buf.data[3] = 0xFF;
+ filter_replace_buf.data[4] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[6] = 0xFF;
+ filter_replace_buf.data[7] = 0xFF;
+
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (!status && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
- pfilter->element.inner_vlan =
- rte_cpu_to_le_16(tunnel_filter->inner_vlan);
- if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
- ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
- ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
- rte_memcpy(&pfilter->element.ipaddr.v4.data,
- &rte_cpu_to_le_32(ipv4_addr),
- sizeof(pfilter->element.ipaddr.v4.data));
- } else {
- ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
- for (i = 0; i < 4; i++) {
- convert_ipv6[i] =
- rte_cpu_to_le_32(rte_be_to_cpu_32(
- tunnel_filter->ip_addr.ipv6_addr[i]));
- }
- rte_memcpy(&pfilter->element.ipaddr.v6.data,
- &convert_ipv6,
- sizeof(pfilter->element.ipaddr.v6.data));
+ return status;
+}
+
+static enum i40e_status_code
+i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
+{
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
+ return I40E_NOT_SUPPORTED;
}
- /* check tunneled type */
- switch (tunnel_filter->tunnel_type) {
- case I40E_TUNNEL_TYPE_VXLAN:
- tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
- break;
- case I40E_TUNNEL_TYPE_NVGRE:
- tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
- break;
- case I40E_TUNNEL_TYPE_IP_IN_GRE:
- tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
- break;
- case I40E_TUNNEL_TYPE_MPLSoUDP:
- if (!pf->mpls_replace_flag) {
- i40e_replace_mpls_l1_filter(pf);
- i40e_replace_mpls_cloud_filter(pf);
- pf->mpls_replace_flag = 1;
- }
- teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
- pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
- teid_le >> 4;
- pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
- (teid_le & 0xF) << 12;
- pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
+ /* for GTP-C */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+ filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
+ filter_replace.new_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_0X11;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (status < 0)
+ return status;
+ if (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+
+ /* for GTP-U */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+ filter_replace.old_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
+ filter_replace.new_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_0X12;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (!status && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+
+ return status;
+}
+
+static enum i40e_status_code
+i40e_replace_port_l1_filter(struct i40e_pf *pf,
+ enum i40e_l4_port_type l4_port_type)
+{
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+ /* create L1 filter */
+ if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
+ filter_replace.old_filter_type =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
+ filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
+ filter_replace_buf.data[8] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_SRC_PORT;
+ } else {
+ filter_replace.old_filter_type =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
+ filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X10;
+ filter_replace_buf.data[8] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_DST_PORT;
+ }
+
+ filter_replace.tr_bit = 0;
+ /* Prepare the buffer, 3 entries */
+ filter_replace_buf.data[0] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[2] = 0x00;
+ filter_replace_buf.data[3] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0;
+ filter_replace_buf.data[4] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[5] = 0x00;
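+ /* Let the L1 filter match UDP, TCP and SCTP alike; each
+ * cloud rule later selects a single L4 type through its
+ * general fields.
+ */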
+ filter_replace_buf.data[6] = I40E_TR_L4_TYPE_UDP |
+ I40E_TR_L4_TYPE_TCP |
+ I40E_TR_L4_TYPE_SCTP;
+ filter_replace_buf.data[7] = 0x00;
+ filter_replace_buf.data[8] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[9] = 0x00;
+ filter_replace_buf.data[10] = 0xFF;
+ filter_replace_buf.data[11] = 0xFF;
+
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (!status && filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+
+ return status;
+}
+
+static enum i40e_status_code
+i40e_replace_port_cloud_filter(struct i40e_pf *pf,
+ enum i40e_l4_port_type l4_port_type)
+{
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
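+ /* Repurpose the inner-IP (source port) or outer-IP
+ * (destination port) cloud filter slot for the new L4 port
+ * filter type.
+ */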
+ if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
+ filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
+ filter_replace.new_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_0X11;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X11;
+ } else {
+ filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
+ filter_replace.new_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_0X10;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
+ }
+
+ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+ filter_replace.tr_bit = 0;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+
+ if (!status && filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+
+ return status;
+}
+
+int
+i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_conf *tunnel_filter,
+ uint8_t add)
+{
+ uint16_t ip_type;
+ uint32_t ipv4_addr, ipv4_addr_le;
+ uint8_t i, tun_type = 0;
+ /* internal variable to convert ipv6 byte order */
+ uint32_t convert_ipv6[4];
+ int val, ret = 0;
+ struct i40e_pf_vf *vf = NULL;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi;
+ struct i40e_aqc_cloud_filters_element_bb *cld_filter;
+ struct i40e_aqc_cloud_filters_element_bb *pfilter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel, *node;
+ struct i40e_tunnel_filter check_filter; /* Check if filter exists */
+ uint32_t teid_le;
+ bool big_buffer = 0;
+
+ cld_filter = rte_zmalloc("tunnel_filter",
+ sizeof(struct i40e_aqc_cloud_filters_element_bb),
+ 0);
+
+ if (cld_filter == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+ return -ENOMEM;
+ }
+ pfilter = cld_filter;
+
+ rte_ether_addr_copy(&tunnel_filter->outer_mac,
+ (struct rte_ether_addr *)&pfilter->element.outer_mac);
+ rte_ether_addr_copy(&tunnel_filter->inner_mac,
+ (struct rte_ether_addr *)&pfilter->element.inner_mac);
+
+ pfilter->element.inner_vlan =
+ rte_cpu_to_le_16(tunnel_filter->inner_vlan);
+ if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
+ ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
+ ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
+ ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
+ rte_memcpy(&pfilter->element.ipaddr.v4.data,
+ &ipv4_addr_le,
+ sizeof(pfilter->element.ipaddr.v4.data));
+ } else {
+ ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
+ for (i = 0; i < 4; i++) {
+ convert_ipv6[i] =
+ rte_cpu_to_le_32(rte_be_to_cpu_32(
+ tunnel_filter->ip_addr.ipv6_addr[i]));
+ }
+ rte_memcpy(&pfilter->element.ipaddr.v6.data,
+ &convert_ipv6,
+ sizeof(pfilter->element.ipaddr.v6.data));
+ }
+
+ /* check tunneled type */
+ switch (tunnel_filter->tunnel_type) {
+ case I40E_TUNNEL_TYPE_VXLAN:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
+ break;
+ case I40E_TUNNEL_TYPE_NVGRE:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
+ break;
+ case I40E_TUNNEL_TYPE_IP_IN_GRE:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
+ break;
+ case I40E_TUNNEL_TYPE_MPLSoUDP:
+ if (!pf->mpls_replace_flag) {
+ i40e_replace_mpls_l1_filter(pf);
+ i40e_replace_mpls_cloud_filter(pf);
+ pf->mpls_replace_flag = 1;
+ }
+ teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
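+ /* Split the 20-bit MPLS label across two 16-bit field-vector
+ * words: bits 19:4 into WORD0, bits 3:0 into the top nibble
+ * of WORD1.
+ */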
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
+ teid_le >> 4;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
+ (teid_le & 0xF) << 12;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
0x40;
big_buffer = 1;
- tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP;
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
break;
case I40E_TUNNEL_TYPE_MPLSoGRE:
if (!pf->mpls_replace_flag) {
pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
0x0;
big_buffer = 1;
- tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE;
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
+ break;
+ case I40E_TUNNEL_TYPE_GTPC:
+ if (!pf->gtp_replace_flag) {
+ i40e_replace_gtp_l1_filter(pf);
+ i40e_replace_gtp_cloud_filter(pf);
+ pf->gtp_replace_flag = 1;
+ }
+ teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
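+ /* 32-bit GTP TEID: upper halfword into WORD0, lower
+ * halfword into WORD1.
+ */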
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
+ (teid_le >> 16) & 0xFFFF;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
+ teid_le & 0xFFFF;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
+ 0x0;
+ big_buffer = 1;
+ break;
+ case I40E_TUNNEL_TYPE_GTPU:
+ if (!pf->gtp_replace_flag) {
+ i40e_replace_gtp_l1_filter(pf);
+ i40e_replace_gtp_cloud_filter(pf);
+ pf->gtp_replace_flag = 1;
+ }
+ teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
+ (teid_le >> 16) & 0xFFFF;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
+ teid_le & 0xFFFF;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
+ 0x0;
+ big_buffer = 1;
break;
case I40E_TUNNEL_TYPE_QINQ:
if (!pf->qinq_replace_flag) {
pfilter->general_fields[0] = tunnel_filter->inner_vlan;
pfilter->general_fields[1] = tunnel_filter->outer_vlan;
big_buffer = 1;
+ break;
+ case I40E_CLOUD_TYPE_UDP:
+ case I40E_CLOUD_TYPE_TCP:
+ case I40E_CLOUD_TYPE_SCTP:
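+ /* L4 port rules reuse the filters programmed by
+ * i40e_replace_port_l1_filter()/_cloud_filter(): WORD0 takes
+ * the ingress direction key, WORD1 the L4 protocol type and
+ * WORD2 the port, carried in the upper half of tenant_id.
+ */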
+ if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC) {
+ if (!pf->sport_replace_flag) {
+ i40e_replace_port_l1_filter(pf,
+ tunnel_filter->l4_port_type);
+ i40e_replace_port_cloud_filter(pf,
+ tunnel_filter->l4_port_type);
+ pf->sport_replace_flag = 1;
+ }
+ teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
+ I40E_DIRECTION_INGRESS_KEY;
+
+ if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
+ I40E_TR_L4_TYPE_UDP;
+ else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
+ I40E_TR_L4_TYPE_TCP;
+ else
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
+ I40E_TR_L4_TYPE_SCTP;
+
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
+ (teid_le >> 16) & 0xFFFF;
+ big_buffer = 1;
+ } else {
+ if (!pf->dport_replace_flag) {
+ i40e_replace_port_l1_filter(pf,
+ tunnel_filter->l4_port_type);
+ i40e_replace_port_cloud_filter(pf,
+ tunnel_filter->l4_port_type);
+ pf->dport_replace_flag = 1;
+ }
+ teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0] =
+ I40E_DIRECTION_INGRESS_KEY;
+
+ if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
+ I40E_TR_L4_TYPE_UDP;
+ else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
+ I40E_TR_L4_TYPE_TCP;
+ else
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
+ I40E_TR_L4_TYPE_SCTP;
+
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2] =
+ (teid_le >> 16) & 0xFFFF;
+ big_buffer = 1;
+ }
+
break;
default:
- /* Other tunnel types is not supported. */
+ /* Other tunnel types are not supported. */
if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
pfilter->element.flags =
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+ I40E_AQC_ADD_CLOUD_FILTER_0X11;
else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
pfilter->element.flags =
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+ I40E_AQC_ADD_CLOUD_FILTER_0X12;
+ else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
+ pfilter->element.flags =
+ I40E_AQC_ADD_CLOUD_FILTER_0X11;
+ else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
+ pfilter->element.flags =
+ I40E_AQC_ADD_CLOUD_FILTER_0X12;
else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
pfilter->element.flags |=
- I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
- else {
+ I40E_AQC_ADD_CLOUD_FILTER_0X10;
+ else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP ||
+ tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP ||
+ tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_SCTP) {
+ if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC)
+ pfilter->element.flags |=
+ I40E_AQC_ADD_CLOUD_FILTER_0X11;
+ else
+ pfilter->element.flags |=
+ I40E_AQC_ADD_CLOUD_FILTER_0X10;
+ } else {
val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
&pfilter->element.flags);
if (val < 0) {
else {
if (tunnel_filter->vf_id >= pf->vf_num) {
PMD_DRV_LOG(ERR, "Invalid argument.");
+ rte_free(cld_filter);
return -EINVAL;
}
vf = &pf->vfs[tunnel_filter->vf_id];
node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
if (add && node) {
PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+ rte_free(cld_filter);
return -EINVAL;
}
if (!add && !node) {
PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+ rte_free(cld_filter);
return -EINVAL;
}
if (add) {
if (big_buffer)
- ret = i40e_aq_add_cloud_filters_big_buffer(hw,
+ ret = i40e_aq_add_cloud_filters_bb(hw,
vsi->seid, cld_filter, 1);
else
ret = i40e_aq_add_cloud_filters(hw,
vsi->seid, &cld_filter->element, 1);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+ rte_free(cld_filter);
return -ENOTSUP;
}
tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+ if (tunnel == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+ rte_free(cld_filter);
+ return -ENOMEM;
+ }
+
rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+ if (ret < 0)
+ rte_free(tunnel);
} else {
if (big_buffer)
- ret = i40e_aq_remove_cloud_filters_big_buffer(
+ ret = i40e_aq_rem_cloud_filters_bb(
hw, vsi->seid, cld_filter, 1);
else
- ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
- &cld_filter->element, 1);
+ ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
+ &cld_filter->element, 1);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+ rte_free(cld_filter);
return -ENOTSUP;
}
ret = i40e_sw_tunnel_filter_del(pf, &node->input);
}
static int
-i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
+i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port, int udp_type)
{
int idx, ret;
- uint8_t filter_idx;
+ uint8_t filter_idx = 0;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
idx = i40e_get_vxlan_port_idx(pf, port);
return -ENOSPC;
}
- ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
+ ret = i40e_aq_add_udp_tunnel(hw, port, udp_type,
&filter_idx, NULL);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
switch (udp_tunnel->prot_type) {
case RTE_TUNNEL_TYPE_VXLAN:
- ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
+ ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
+ I40E_AQC_TUNNEL_TYPE_VXLAN);
+ break;
+ case RTE_TUNNEL_TYPE_VXLAN_GPE:
+ ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
+ I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
break;
-
case RTE_TUNNEL_TYPE_GENEVE:
case RTE_TUNNEL_TYPE_TEREDO:
PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
switch (udp_tunnel->prot_type) {
case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_TUNNEL_TYPE_VXLAN_GPE:
ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
break;
case RTE_TUNNEL_TYPE_GENEVE:
static int
i40e_pf_config_rss(struct i40e_pf *pf)
{
+ enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
struct rte_eth_rss_conf rss_conf;
uint32_t i, lut = 0;
num);
if (num == 0) {
- PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
+ PMD_INIT_LOG(ERR,
+ "No PF queues are configured to enable RSS for port %u",
+ pf->dev_data->port_id);
return -ENOTSUP;
}
- for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
- if (j == num)
- j = 0;
- lut = (lut << 8) | (j & ((0x1 <<
- hw->func_caps.rss_table_entry_width) - 1));
- if ((i & 3) == 3)
- I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
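+ /* Program the default round-robin LUT only if the
+ * application has not installed its own RETA.
+ */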
+ if (pf->adapter->rss_reta_updated == 0) {
+ for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
+ if (j == num)
+ j = 0;
+ lut = (lut << 8) | (j & ((0x1 <<
+ hw->func_caps.rss_table_entry_width) - 1));
+ if ((i & 3) == 3)
+ I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2),
+ rte_bswap32(lut));
+ }
}
rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
- if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
+ if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0 ||
+ !(mq_mode & ETH_MQ_RX_RSS_FLAG)) {
i40e_pf_disable_rss(pf);
return 0;
}
return -EINVAL;
}
- if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
+ if (filter->inner_vlan > RTE_ETHER_MAX_VLAN_ID) {
PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
return -EINVAL;
}
if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
- (is_zero_ether_addr(&filter->outer_mac))) {
+ (rte_is_zero_ether_addr(&filter->outer_mac))) {
PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
return -EINVAL;
}
if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
- (is_zero_ether_addr(&filter->inner_mac))) {
+ (rte_is_zero_ether_addr(&filter->inner_mac))) {
PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
return -EINVAL;
}
#define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4))
-static int
+int
i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
{
+ struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
uint32_t val, reg;
int ret = -EINVAL;
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
+ return -ENOTSUP;
+ }
+
val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
}
if (reg != val) {
- ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
+ ret = i40e_aq_debug_write_global_register(hw,
+ I40E_GL_PRS_FVBM(2),
reg, NULL);
if (ret != 0)
return ret;
+ PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
+ "with value 0x%08x",
+ I40E_GL_PRS_FVBM(2), reg);
} else {
ret = 0;
}
return ret;
}
-static int
-i40e_pf_config_mq_rx(struct i40e_pf *pf)
-{
- int ret = 0;
- enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
-
- /* RSS setup */
- if (mq_mode & ETH_MQ_RX_RSS_FLAG)
- ret = i40e_pf_config_rss(pf);
- else
- i40e_pf_disable_rss(pf);
-
- return ret;
-}
-
/* Get the symmetric hash enable configurations per port */
static void
i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
(reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
/*
- * We work only with lowest 32 bits which is not correct, but to work
- * properly the valid_bit_mask size should be increased up to 64 bits
- * and this will brake ABI. This modification will be done in next
- * release
+ * As i40e supports fewer than 64 flow types, only the first 64 bits
+ * need to be checked.
*/
- g_cfg->valid_bit_mask[0] = (uint32_t)adapter->flow_types_mask;
+ for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
+ g_cfg->valid_bit_mask[i] = 0ULL;
+ g_cfg->sym_hash_enable_mask[i] = 0ULL;
+ }
+
+ g_cfg->valid_bit_mask[0] = adapter->flow_types_mask;
- for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT32_BIT; i++) {
+ for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
if (!adapter->pctypes_tbl[i])
continue;
for (j = I40E_FILTER_PCTYPE_INVALID + 1;
reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
g_cfg->sym_hash_enable_mask[0] |=
- (1UL << i);
+ (1ULL << i);
}
}
}
const struct rte_eth_hash_global_conf *g_cfg)
{
uint32_t i;
- uint32_t mask0, i40e_mask = adapter->flow_types_mask;
+ uint64_t mask0, i40e_mask = adapter->flow_types_mask;
if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
}
/*
- * As i40e supports less than 32 flow types, only first 32 bits need to
+ * As i40e supports fewer than 64 flow types, only the first 64 bits need to
* be checked.
*/
mask0 = g_cfg->valid_bit_mask[0];
struct rte_eth_hash_global_conf *g_cfg)
{
struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
+ struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
int ret;
uint16_t i, j;
uint32_t reg;
- /*
- * We work only with lowest 32 bits which is not correct, but to work
- * properly the valid_bit_mask size should be increased up to 64 bits
- * and this will brake ABI. This modification will be done in next
- * release
- */
- uint32_t mask0 = g_cfg->valid_bit_mask[0] &
- (uint32_t)adapter->flow_types_mask;
+ uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask;
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Hash global configuration is not supported.");
+ return -ENOTSUP;
+ }
/* Check the input parameters */
ret = i40e_hash_global_config_check(adapter, g_cfg);
if (ret < 0)
return ret;
- for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT32_BIT; i++) {
+ /*
+ * As i40e supports fewer than 64 flow types, only the first 64 bits
+ * need to be configured.
+ */
+ for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) {
- if (mask0 & (1UL << i)) {
+ if (mask0 & (1ULL << i)) {
- reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
+ reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ?
I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
for (j = I40E_FILTER_PCTYPE_INVALID + 1;
j < I40E_FILTER_PCTYPE_MAX; j++) {
if (adapter->pctypes_tbl[i] & (1ULL << j))
- i40e_write_rx_ctl(hw,
+ i40e_write_global_rx_ctl(hw,
I40E_GLQF_HSYM(j),
reg);
}
/* Use the default, and keep it as it is */
goto out;
- i40e_write_rx_ctl(hw, I40E_GLQF_CTL, reg);
+ i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
out:
I40E_WRITE_FLUSH(hw);
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
I40E_INSET_IPV4_TTL,
[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
I40E_INSET_SCTP_VT,
[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
(uint32_t)i40e_read_rx_ctl(hw, addr));
}
-static void
-i40e_filter_input_set_init(struct i40e_pf *pf)
+void
+i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
{
- struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t reg = i40e_read_rx_ctl(hw, addr);
+ struct rte_eth_dev *dev;
+
+ dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+ if (reg != val) {
+ i40e_write_rx_ctl(hw, addr, val);
+ PMD_DRV_LOG(WARNING,
+ "i40e device %s changed global register [0x%08x]."
+ " original: 0x%08x, new: 0x%08x",
+ dev->device->name, addr, reg,
+ (uint32_t)i40e_read_rx_ctl(hw, addr));
+ }
+}
+
+static void
+i40e_filter_input_set_init(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
enum i40e_filter_pctype pctype;
uint64_t input_set, inset_reg;
uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
I40E_INSET_MASK_NUM_REG);
if (num < 0)
return;
+ if (pf->support_multi_driver && num > 0) {
+ PMD_DRV_LOG(ERR, "Input set setting is not supported.");
+ return;
+ }
inset_reg = i40e_translate_input_set_reg(hw->mac.type,
input_set);
i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
(uint32_t)((inset_reg >>
I40E_32_BIT_WIDTH) & UINT32_MAX));
- i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
- (uint32_t)(inset_reg & UINT32_MAX));
- i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
- (uint32_t)((inset_reg >>
- I40E_32_BIT_WIDTH) & UINT32_MAX));
-
- for (i = 0; i < num; i++) {
- i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
- mask_reg[i]);
- i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
- mask_reg[i]);
- }
- /*clear unused mask registers of the pctype */
- for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
- i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
- 0);
- i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
- 0);
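+ /* The GLQF hash inset/mask registers are device-global, so
+ * they must not be touched when sharing the device with
+ * other drivers.
+ */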
+ if (!pf->support_multi_driver) {
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_HASH_INSET(0, pctype),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_HASH_INSET(1, pctype),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
+
+ for (i = 0; i < num; i++) {
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_FD_MSK(i, pctype),
+ mask_reg[i]);
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_HASH_MSK(i, pctype),
+ mask_reg[i]);
+ }
+ /* clear unused mask registers of the pctype */
+ for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_FD_MSK(i, pctype),
+ 0);
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_HASH_MSK(i, pctype),
+ 0);
+ }
+ } else {
+ PMD_DRV_LOG(ERR, "Input set setting is not supported.");
}
I40E_WRITE_FLUSH(hw);
/* store the default input set */
- pf->hash_input_set[pctype] = input_set;
+ if (!pf->support_multi_driver)
+ pf->hash_input_set[pctype] = input_set;
pf->fdir.input_set[pctype] = input_set;
}
}
return -EINVAL;
}
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
+ return -ENOTSUP;
+ }
+
pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
if (pctype == I40E_FILTER_PCTYPE_INVALID) {
PMD_DRV_LOG(ERR, "invalid flow_type input.");
inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
- i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
- (uint32_t)(inset_reg & UINT32_MAX));
- i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
- (uint32_t)((inset_reg >>
- I40E_32_BIT_WIDTH) & UINT32_MAX));
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
for (i = 0; i < num; i++)
- i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
- mask_reg[i]);
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
+ mask_reg[i]);
/*clear unused mask registers of the pctype */
for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
- i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
- 0);
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
+ 0);
I40E_WRITE_FLUSH(hw);
pf->hash_input_set[pctype] = input_set;
I40E_INSET_MASK_NUM_REG);
if (num < 0)
return -EINVAL;
+ if (pf->support_multi_driver && num > 0) {
+ PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
+ return -ENOTSUP;
+ }
inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
(uint32_t)((inset_reg >>
I40E_32_BIT_WIDTH) & UINT32_MAX));
- for (i = 0; i < num; i++)
- i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
- mask_reg[i]);
- /*clear unused mask registers of the pctype */
- for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
- i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
- 0);
+ if (!pf->support_multi_driver) {
+ for (i = 0; i < num; i++)
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_FD_MSK(i, pctype),
+ mask_reg[i]);
+ /* clear unused mask registers of the pctype */
+ for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_FD_MSK(i, pctype),
+ 0);
+ } else {
+ PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
+ }
I40E_WRITE_FLUSH(hw);
pf->fdir.input_set[pctype] = input_set;
i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
struct i40e_ethertype_filter *filter)
{
- rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
+ rte_memcpy(&filter->input.mac_addr, &input->mac_addr,
+ RTE_ETHER_ADDR_LEN);
filter->input.ether_type = input->ether_type;
filter->flags = input->flags;
filter->queue = input->queue;
PMD_DRV_LOG(ERR, "Invalid queue ID");
return -EINVAL;
}
- if (filter->ether_type == ETHER_TYPE_IPv4 ||
- filter->ether_type == ETHER_TYPE_IPv6) {
+ if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+ filter->ether_type == RTE_ETHER_TYPE_IPV6) {
PMD_DRV_LOG(ERR,
"unsupported ether_type(0x%04x) in control packet filter.",
filter->ether_type);
return -EINVAL;
}
- if (filter->ether_type == ETHER_TYPE_VLAN)
+ if (filter->ether_type == RTE_ETHER_TYPE_VLAN)
PMD_DRV_LOG(WARNING,
"filter vlan ether_type in first tag is not supported.");
if (add) {
ethertype_filter = rte_zmalloc("ethertype_filter",
sizeof(*ethertype_filter), 0);
+ if (ethertype_filter == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+ return -ENOMEM;
+ }
+
rte_memcpy(ethertype_filter, &check_filter,
sizeof(check_filter));
ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
+ if (ret < 0)
+ rte_free(ethertype_filter);
} else {
ret = i40e_sw_ethertype_filter_del(pf, &node->input);
}
/* For both X710 and XL710 */
#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1 0x10000200
-#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2 0x20000200
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2 0x203F0200
#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
#define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
#define I40E_GL_SWR_PM_UP_THR_SF_VALUE 0x06060606
#define I40E_GL_SWR_PM_UP_THR 0x269FBC
+/*
+ * GL_SWR_PM_UP_THR:
+ * The value does not depend on the link speed; it is chosen according
+ * to the total number of ports for a better pipe-monitor configuration.
+ */
+static bool
+i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
+{
+#define I40E_GL_SWR_PM_EF_DEVICE(dev) \
+ .device_id = (dev), \
+ .val = I40E_GL_SWR_PM_UP_THR_EF_VALUE
+
+#define I40E_GL_SWR_PM_SF_DEVICE(dev) \
+ .device_id = (dev), \
+ .val = I40E_GL_SWR_PM_UP_THR_SF_VALUE
+
+ static const struct {
+ uint16_t device_id;
+ uint32_t val;
+ } swr_pm_table[] = {
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_X722) },
+
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
+ };
+ uint32_t i;
+
+ if (value == NULL) {
+ PMD_DRV_LOG(ERR, "value is NULL");
+ return false;
+ }
+
+ for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
+ if (hw->device_id == swr_pm_table[i].device_id) {
+ *value = swr_pm_table[i].val;
+
+ PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
+ "value - 0x%08x",
+ hw->device_id, *value);
+ return true;
+ }
+ }
+
+ return false;
+}
+
static int
i40e_dev_sync_phy_type(struct i40e_hw *hw)
{
}
if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
- if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
- I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
- reg_table[i].val =
- I40E_GL_SWR_PM_UP_THR_SF_VALUE;
- else /* For X710 */
- reg_table[i].val =
- I40E_GL_SWR_PM_UP_THR_EF_VALUE;
+ uint32_t cfg_val;
+
+ if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
+ PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
+ "GL_SWR_PM_UP_THR value fixup",
+ hw->device_id);
+ continue;
+ }
+
+ reg_table[i].val = cfg_val;
}
ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
}
}
-#define I40E_VSI_TSR(_i) (0x00050800 + ((_i) * 4))
#define I40E_VSI_TSR_QINQ_CONFIG 0xc030
#define I40E_VSI_L2TAGSTXVALID(_i) (0x00042800 + ((_i) * 4))
#define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
i40e_start_timecounters(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct i40e_adapter *adapter =
- (struct i40e_adapter *)dev->data->dev_private;
+ struct i40e_adapter *adapter = dev->data->dev_private;
struct rte_eth_link link;
uint32_t tsync_inc_l;
uint32_t tsync_inc_h;
/* Get current link speed. */
- memset(&link, 0, sizeof(link));
i40e_dev_link_update(dev, 1);
- rte_i40e_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
switch (link.link_speed) {
case ETH_SPEED_NUM_40G:
+ case ETH_SPEED_NUM_25G:
tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
break;
static int
i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
{
- struct i40e_adapter *adapter =
- (struct i40e_adapter *)dev->data->dev_private;
+ struct i40e_adapter *adapter = dev->data->dev_private;
adapter->systime_tc.nsec += delta;
adapter->rx_tstamp_tc.nsec += delta;
i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
{
uint64_t ns;
- struct i40e_adapter *adapter =
- (struct i40e_adapter *)dev->data->dev_private;
+ struct i40e_adapter *adapter = dev->data->dev_private;
ns = rte_timespec_to_ns(ts);
i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
{
uint64_t ns, systime_cycles;
- struct i40e_adapter *adapter =
- (struct i40e_adapter *)dev->data->dev_private;
+ struct i40e_adapter *adapter = dev->data->dev_private;
systime_cycles = i40e_read_systime_cyclecounter(dev);
ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
struct timespec *timestamp, uint32_t flags)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct i40e_adapter *adapter =
- (struct i40e_adapter *)dev->data->dev_private;
-
+ struct i40e_adapter *adapter = dev->data->dev_private;
uint32_t sync_status;
uint32_t index = flags & 0x03;
uint64_t rx_tstamp_cycles;
struct timespec *timestamp)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct i40e_adapter *adapter =
- (struct i40e_adapter *)dev->data->dev_private;
-
+ struct i40e_adapter *adapter = dev->data->dev_private;
uint32_t sync_status;
uint64_t tx_tstamp_cycles;
uint64_t ns;
*
* Returns 0 on success, negative value on failure
*/
-static int
+int
i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
* LLDP MIB change event.
*/
if (sw_dcb == TRUE) {
- ret = i40e_init_dcb(hw);
+ /* Stopping LLDP is necessary for DPDK, but stopping it also
+ * makes DCB init fail: i40e_init_dcb() requires LLDP to be
+ * enabled for DCB initialization to succeed. So start LLDP
+ * before DCB init and stop it again once initialization is
+ * done.
+ */
+ ret = i40e_aq_start_lldp(hw, true, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_INIT_LOG(DEBUG, "Failed to start lldp");
+
+ ret = i40e_init_dcb(hw, true);
/* If lldp agent is stopped, the return value from
* i40e_init_dcb we expect is failure with I40E_AQ_RC_EPERM
* adminq status. Otherwise, it should return success.
ret, hw->aq.asq_last_status);
return -ENOTSUP;
}
+
+ if (i40e_need_stop_lldp(dev)) {
+ ret = i40e_aq_stop_lldp(hw, true, true, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
+ }
} else {
- ret = i40e_aq_start_lldp(hw, NULL);
+ ret = i40e_aq_start_lldp(hw, true, NULL);
if (ret != I40E_SUCCESS)
PMD_INIT_LOG(DEBUG, "Failed to start lldp");
- ret = i40e_init_dcb(hw);
+ ret = i40e_init_dcb(hw, true);
if (!ret) {
if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
PMD_INIT_LOG(ERR,
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint16_t interval =
- i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
uint16_t msix_intr;
msix_intr = intr_handle->intr_vec[queue_id];
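+ /* Re-enable the interrupt with an all-ones ITR index
+ * ("no ITR"), which leaves the configured throttling
+ * interval untouched.
+ */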
if (msix_intr == I40E_MISC_VEC_ID)
I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
- I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
- (interval <<
- I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+ I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
else
I40E_WRITE_REG(hw,
I40E_PFINT_DYN_CTLN(msix_intr -
I40E_RX_VEC_START),
I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
- (interval <<
- I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
I40E_WRITE_FLUSH(hw);
- rte_intr_enable(&pci_dev->intr_handle);
+ rte_intr_ack(&pci_dev->intr_handle);
return 0;
}
msix_intr = intr_handle->intr_vec[queue_id];
if (msix_intr == I40E_MISC_VEC_ID)
- I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
else
I40E_WRITE_REG(hw,
I40E_PFINT_DYN_CTLN(msix_intr -
I40E_RX_VEC_START),
- 0);
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
I40E_WRITE_FLUSH(hw);
return 0;
}
+/**
+ * Check whether a register offset is valid for the given MAC type.
+ * The register ranges below are valid on X722 only:
+ * 0x2b800--0x2bb00
+ * 0x38700--0x38a00
+ * 0x3d800--0x3db00
+ * 0x208e00--0x209000
+ * 0x20be00--0x20c000
+ * 0x263c00--0x264000
+ * 0x265c00--0x266000
+ */
+static inline int i40e_valid_regs(enum i40e_mac_type type, uint32_t reg_offset)
+{
+ if ((type != I40E_MAC_X722) &&
+ ((reg_offset >= 0x2b800 && reg_offset <= 0x2bb00) ||
+ (reg_offset >= 0x38700 && reg_offset <= 0x38a00) ||
+ (reg_offset >= 0x3d800 && reg_offset <= 0x3db00) ||
+ (reg_offset >= 0x208e00 && reg_offset <= 0x209000) ||
+ (reg_offset >= 0x20be00 && reg_offset <= 0x20c000) ||
+ (reg_offset >= 0x263c00 && reg_offset <= 0x264000) ||
+ (reg_offset >= 0x265c00 && reg_offset <= 0x266000)))
+ return 0;
+ else
+ return 1;
+}
+
static int i40e_get_regs(struct rte_eth_dev *dev,
struct rte_dev_reg_info *regs)
{
reg_offset = arr_idx * reg_info->stride1 +
arr_idx2 * reg_info->stride2;
reg_offset += reg_info->base_addr;
- ptr_data[reg_offset >> 2] =
- I40E_READ_REG(hw, reg_offset);
+ if (!i40e_valid_regs(hw->mac.type, reg_offset))
+ ptr_data[reg_offset >> 2] = 0;
+ else
+ ptr_data[reg_offset >> 2] =
+ I40E_READ_REG(hw, reg_offset);
}
}
return 0;
}
-static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr)
+static int i40e_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t sff8472_comp = 0;
+ uint32_t sff8472_swap = 0;
+ uint32_t sff8636_rev = 0;
+ i40e_status status;
+ uint32_t type = 0;
+
+ /* Check if firmware supports reading module EEPROM. */
+ if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
+ PMD_DRV_LOG(ERR,
+ "Module EEPROM memory read not supported. "
+ "Please update the NVM image.\n");
+ return -EINVAL;
+ }
+
+ status = i40e_update_link_info(hw);
+ if (status)
+ return -EIO;
+
+ if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
+ PMD_DRV_LOG(ERR,
+ "Cannot read module EEPROM memory. "
+ "No module connected.\n");
+ return -EINVAL;
+ }
+
+ type = hw->phy.link_info.module_type[0];
+
+ switch (type) {
+ case I40E_MODULE_TYPE_SFP:
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ I40E_I2C_EEPROM_DEV_ADDR, 1,
+ I40E_MODULE_SFF_8472_COMP,
+ &sff8472_comp, NULL);
+ if (status)
+ return -EIO;
+
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ I40E_I2C_EEPROM_DEV_ADDR, 1,
+ I40E_MODULE_SFF_8472_SWAP,
+ &sff8472_swap, NULL);
+ if (status)
+ return -EIO;
+
+ /* Check if the module requires address swap to access
+ * the other EEPROM memory page.
+ */
+ if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
+ PMD_DRV_LOG(WARNING,
+ "Module address swap to access "
+ "page 0xA2 is not supported.\n");
+ modinfo->type = RTE_ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+ } else if (sff8472_comp == 0x00) {
+ /* Module is not SFF-8472 compliant */
+ modinfo->type = RTE_ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+ } else {
+ modinfo->type = RTE_ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
+ }
+ break;
+ case I40E_MODULE_TYPE_QSFP_PLUS:
+ /* Read from memory page 0. */
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ 0, 1,
+ I40E_MODULE_REVISION_ADDR,
+ &sff8636_rev, NULL);
+ if (status)
+ return -EIO;
+ /* Determine revision compliance byte */
+ if (sff8636_rev > 0x02) {
+ /* Module is SFF-8636 compliant */
+ modinfo->type = RTE_ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+ } else {
+ modinfo->type = RTE_ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+ }
+ break;
+ case I40E_MODULE_TYPE_QSFP28:
+ modinfo->type = RTE_ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Module type unrecognized\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ bool is_sfp = false;
+ i40e_status status;
+ uint8_t *data;
+ uint32_t value = 0;
+ uint32_t i;
+
+ if (!info || !info->length || !info->data)
+ return -EINVAL;
+
+ if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
+ is_sfp = true;
+
+ data = info->data;
+ for (i = 0; i < info->length; i++) {
+ u32 offset = i + info->offset;
+ u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
+
+ /* Check if we need to access the other memory page */
+ if (is_sfp) {
+ if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
+ offset -= RTE_ETH_MODULE_SFF_8079_LEN;
+ addr = I40E_I2C_EEPROM_DEV_ADDR2;
+ }
+ } else {
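+ /* QSFP upper pages are 128 bytes each, hence the
+ * half-page stride when mapping the flat offset to a
+ * page address.
+ */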
+ while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
+ /* Compute memory page number and offset. */
+ offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
+ addr++;
+ }
+ }
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ addr, 1, offset, &value, NULL);
+ if (status)
+ return -EIO;
+ data[i] = (uint8_t)value;
+ }
+ return 0;
+}
+
+static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_mac_filter_info mac_filter;
+ struct i40e_mac_filter *f;
+ int ret;
- if (!is_valid_assigned_ether_addr(mac_addr)) {
+ if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
- return;
+ return -EINVAL;
+ }
+
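+ /* Swap the software MAC filter first: remove the filter for
+ * the old default address and add one for the new address,
+ * then commit the new LAA to the port below.
+ */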
+ TAILQ_FOREACH(f, &vsi->mac_list, next) {
+ if (rte_is_same_ether_addr(&pf->dev_addr,
+ &f->mac_info.mac_addr))
+ break;
+ }
+
+ if (f == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
+ return -EIO;
+ }
+
+ mac_filter = f->mac_info;
+ ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to delete mac filter");
+ return -EIO;
+ }
+ memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
+ ret = i40e_vsi_add_mac(vsi, &mac_filter);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to add mac filter");
+ return -EIO;
+ }
+ memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
+
+ ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
+ mac_addr->addr_bytes, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to change mac");
+ return -EIO;
}
- /* Flags: 0x3 updates port address */
- i40e_aq_mac_address_write(hw, 0x3, mac_addr->addr_bytes, NULL);
+ return 0;
}
static int
int ret = 0;
/* check if mtu is within the allowed range */
- if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
return -EINVAL;
/* mtu setting is forbidden if port is start */
return -EBUSY;
}
- if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ if (frame_size > RTE_ETHER_MAX_LEN)
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
+ dev_data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
struct i40e_tunnel_filter_list
*tunnel_list = &pf->tunnel.tunnel_list;
struct i40e_tunnel_filter *f;
- struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
+ struct i40e_aqc_cloud_filters_element_bb cld_filter;
bool big_buffer = 0;
TAILQ_FOREACH(f, tunnel_list, rules) {
vsi = vf->vsi;
}
memset(&cld_filter, 0, sizeof(cld_filter));
- ether_addr_copy((struct ether_addr *)&f->input.outer_mac,
- (struct ether_addr *)&cld_filter.element.outer_mac);
- ether_addr_copy((struct ether_addr *)&f->input.inner_mac,
- (struct ether_addr *)&cld_filter.element.inner_mac);
+ rte_ether_addr_copy((struct rte_ether_addr *)
+ &f->input.outer_mac,
+ (struct rte_ether_addr *)&cld_filter.element.outer_mac);
+ rte_ether_addr_copy((struct rte_ether_addr *)
+ &f->input.inner_mac,
+ (struct rte_ether_addr *)&cld_filter.element.inner_mac);
cld_filter.element.inner_vlan = f->input.inner_vlan;
cld_filter.element.flags = f->input.flags;
cld_filter.element.tenant_id = f->input.tenant_id;
sizeof(f->input.general_fields));
if (((f->input.flags &
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
+ I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
+ I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
((f->input.flags &
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
+ I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
+ I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
((f->input.flags &
- I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
- I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+ I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
+ I40E_AQC_ADD_CLOUD_FILTER_0X10))
big_buffer = 1;
if (big_buffer)
- i40e_aq_add_cloud_filters_big_buffer(hw,
- vsi->seid, &cld_filter, 1);
+ i40e_aq_add_cloud_filters_bb(hw,
+ vsi->seid, &cld_filter, 1);
else
i40e_aq_add_cloud_filters(hw, vsi->seid,
&cld_filter.element, 1);
}
}
+/* Restore RSS filter */
+static inline void
+i40e_rss_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_rss_conf_list *list = &pf->rss_config_list;
+ struct i40e_rss_filter *filter;
+
+ TAILQ_FOREACH(filter, list, next) {
+ i40e_config_rss_filter(pf, &filter->rss_filter_info, TRUE);
+ }
+}
+
static void
i40e_filter_restore(struct i40e_pf *pf)
{
i40e_ethertype_filter_restore(pf);
i40e_tunnel_filter_restore(pf);
i40e_fdir_filter_restore(pf);
+ i40e_rss_filter_restore(pf);
}
-static bool
+bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
if (strcmp(dev->device->driver->name, drv->driver.name))
return is_device_supported(dev, &rte_i40e_pmd);
}
-/* Create a QinQ cloud filter
- *
- * The Fortville NIC has limited resources for tunnel filters,
- * so we can only reuse existing filters.
- *
- * In step 1 we define which Field Vector fields can be used for
- * filter types.
- * As we do not have the inner tag defined as a field,
- * we have to define it first, by reusing one of L1 entries.
- *
- * In step 2 we are replacing one of existing filter types with
- * a new one for QinQ.
- * As we reusing L1 and replacing L2, some of the default filter
- * types will disappear,which depends on L1 and L2 entries we reuse.
- *
- * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
- *
- * 1. Create L1 filter of outer vlan (12b) which will be in use
- * later when we define the cloud filter.
- * a. Valid_flags.replace_cloud = 0
- * b. Old_filter = 10 (Stag_Inner_Vlan)
- * c. New_filter = 0x10
- * d. TR bit = 0xff (optional, not used here)
- * e. Buffer – 2 entries:
- * i. Byte 0 = 8 (outer vlan FV index).
- * Byte 1 = 0 (rsv)
- * Byte 2-3 = 0x0fff
- * ii. Byte 0 = 37 (inner vlan FV index).
- * Byte 1 =0 (rsv)
- * Byte 2-3 = 0x0fff
- *
- * Step 2:
- * 2. Create cloud filter using two L1 filters entries: stag and
- * new filter(outer vlan+ inner vlan)
- * a. Valid_flags.replace_cloud = 1
- * b. Old_filter = 1 (instead of outer IP)
- * c. New_filter = 0x10
- * d. Buffer – 2 entries:
- * i. Byte 0 = 0x80 | 7 (valid | Stag).
- * Byte 1-3 = 0 (rsv)
- * ii. Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
- * Byte 9-11 = 0 (rsv)
- */
+struct i40e_customized_pctype*
+i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
+{
+ int i;
+
+ for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
+ if (pf->customized_pctype[i].index == index)
+ return &pf->customized_pctype[i];
+ }
+ return NULL;
+}
+
static int
-i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
+i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
+ uint32_t pkg_size, uint32_t proto_num,
+ struct rte_pmd_i40e_proto_info *proto,
+ enum rte_pmd_i40e_package_op op)
{
- int ret = -ENOTSUP;
- struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
- struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
- struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ uint32_t pctype_num;
+ struct rte_pmd_i40e_ptype_info *pctype;
+ uint32_t buff_size;
+ struct i40e_customized_pctype *new_pctype = NULL;
+ uint8_t proto_id;
+ uint8_t pctype_value;
+ char name[64];
+ uint32_t i, j, n;
+ int ret;
- /* Init */
- memset(&filter_replace, 0,
- sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
- memset(&filter_replace_buf, 0,
- sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+ if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
+ op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ PMD_DRV_LOG(ERR, "Unsupported operation.");
+ return -1;
+ }
- /* create L1 filter */
- filter_replace.old_filter_type =
- I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
- filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
- filter_replace.tr_bit = 0;
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)&pctype_num, sizeof(pctype_num),
+ RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get pctype number");
+ return -1;
+ }
+ if (!pctype_num) {
+ PMD_DRV_LOG(INFO, "No new pctype added");
+ return -1;
+ }
- /* Prepare the buffer, 2 entries */
- filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
- filter_replace_buf.data[0] |=
- I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
- /* Field Vector 12b mask */
- filter_replace_buf.data[2] = 0xff;
- filter_replace_buf.data[3] = 0x0f;
- filter_replace_buf.data[4] =
- I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
- filter_replace_buf.data[4] |=
- I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
- /* Field Vector 12b mask */
+ buff_size = pctype_num * sizeof(struct rte_pmd_i40e_ptype_info);
+ pctype = rte_zmalloc("new_pctype", buff_size, 0);
+ if (!pctype) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory");
+ return -1;
+ }
+ /* get information about new pctype list */
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)pctype, buff_size,
+ RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get pctype list");
+ rte_free(pctype);
+ return -1;
+ }
+
+ /* Update customized pctype. */
+ for (i = 0; i < pctype_num; i++) {
+ pctype_value = pctype[i].ptype_id;
+ memset(name, 0, sizeof(name));
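+ /* Assemble a "_"-joined protocol name (e.g. GTPU_IPV4) and
+ * match it against the known customized pctypes.
+ */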
+ for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
+ proto_id = pctype[i].protocols[j];
+ if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
+ continue;
+ for (n = 0; n < proto_num; n++) {
+ if (proto[n].proto_id != proto_id)
+ continue;
+ strlcat(name, proto[n].name, sizeof(name));
+ strlcat(name, "_", sizeof(name));
+ break;
+ }
+ }
+ /* Strip the trailing '_'; name may be empty if no protocol matched */
+ if (name[0] != '\0')
+ name[strlen(name) - 1] = '\0';
+ PMD_DRV_LOG(INFO, "name = %s\n", name);
+ if (!strcmp(name, "GTPC"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_GTPC);
+ else if (!strcmp(name, "GTPU_IPV4"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_GTPU_IPV4);
+ else if (!strcmp(name, "GTPU_IPV6"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_GTPU_IPV6);
+ else if (!strcmp(name, "GTPU"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_GTPU);
+ else if (!strcmp(name, "IPV4_L2TPV3"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_IPV4_L2TPV3);
+ else if (!strcmp(name, "IPV6_L2TPV3"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_IPV6_L2TPV3);
+ else if (!strcmp(name, "IPV4_ESP"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_ESP_IPV4);
+ else if (!strcmp(name, "IPV6_ESP"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_ESP_IPV6);
+ else if (!strcmp(name, "IPV4_UDP_ESP"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_ESP_IPV4_UDP);
+ else if (!strcmp(name, "IPV6_UDP_ESP"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_ESP_IPV6_UDP);
+ else if (!strcmp(name, "IPV4_AH"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_AH_IPV4);
+ else if (!strcmp(name, "IPV6_AH"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_AH_IPV6);
+ if (new_pctype) {
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
+ new_pctype->pctype = pctype_value;
+ new_pctype->valid = true;
+ } else {
+ new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
+ new_pctype->valid = false;
+ }
+ }
+ }
+
+ rte_free(pctype);
+ return 0;
+}
+
+static int
+i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
+ uint32_t pkg_size, uint32_t proto_num,
+ struct rte_pmd_i40e_proto_info *proto,
+ enum rte_pmd_i40e_package_op op)
+{
+ struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
+ uint16_t port_id = dev->data->port_id;
+ uint32_t ptype_num;
+ struct rte_pmd_i40e_ptype_info *ptype;
+ uint32_t buff_size;
+ uint8_t proto_id;
+ char name[RTE_PMD_I40E_DDP_NAME_SIZE];
+ uint32_t i, j, n;
+ bool in_tunnel;
+ int ret;
+
+ if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
+ op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ PMD_DRV_LOG(ERR, "Unsupported operation.");
+ return -1;
+ }
+
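+ /* On delete there is nothing to parse from the package: restoring
+ * the default ptype mapping is sufficient.
+ */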
+ if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ rte_pmd_i40e_ptype_mapping_reset(port_id);
+ return 0;
+ }
+
+ /* get information about new ptype num */
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)&ptype_num, sizeof(ptype_num),
+ RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get ptype number");
+ return ret;
+ }
+ if (!ptype_num) {
+ PMD_DRV_LOG(INFO, "No new ptype added");
+ return -1;
+ }
+
+ buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
+ ptype = rte_zmalloc("new_ptype", buff_size, 0);
+ if (!ptype) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory");
+ return -1;
+ }
+
+ /* get information about new ptype list */
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)ptype, buff_size,
+ RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get ptype list");
+ rte_free(ptype);
+ return ret;
+ }
+
+ buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
+ ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
+ if (!ptype_mapping) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory");
+ rte_free(ptype);
+ return -1;
+ }
+
+ /* Update ptype mapping table. */
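+ /* Walk each hardware ptype's protocol list and accumulate the
+ * matching software RTE_PTYPE_* flags. Outer headers ("OIPV4",
+ * "OIPV6") and tunnel protocols set in_tunnel, so protocols seen
+ * after them map to their INNER_* counterparts.
+ */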
+ for (i = 0; i < ptype_num; i++) {
+ ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
+ ptype_mapping[i].sw_ptype = 0;
+ in_tunnel = false;
+ for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
+ proto_id = ptype[i].protocols[j];
+ if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
+ continue;
+ for (n = 0; n < proto_num; n++) {
+ if (proto[n].proto_id != proto_id)
+ continue;
+ strlcpy(name, proto[n].name, sizeof(name));
+ PMD_DRV_LOG(INFO, "name = %s", name);
+ if (!strncasecmp(name, "PPPOE", 5))
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L2_ETHER_PPPOE;
+ else if (!strncasecmp(name, "IPV4FRAG", 8) &&
+ !in_tunnel) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L4_FRAG;
+ } else if (!strncasecmp(name, "IPV4FRAG", 8) &&
+ in_tunnel) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L4_FRAG;
+ } else if (!strncasecmp(name, "OIPV4", 5)) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ in_tunnel = true;
+ } else if (!strncasecmp(name, "IPV4", 4) &&
+ !in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ else if (!strncasecmp(name, "IPV4", 4) &&
+ in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+ else if (!strncasecmp(name, "IPV6FRAG", 8) &&
+ !in_tunnel) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L4_FRAG;
+ } else if (!strncasecmp(name, "IPV6FRAG", 8) &&
+ in_tunnel) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L4_FRAG;
+ } else if (!strncasecmp(name, "OIPV6", 5)) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ in_tunnel = true;
+ } else if (!strncasecmp(name, "IPV6", 4) &&
+ !in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ else if (!strncasecmp(name, "IPV6", 4) &&
+ in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+ else if (!strncasecmp(name, "UDP", 3) &&
+ !in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L4_UDP;
+ else if (!strncasecmp(name, "UDP", 3) &&
+ in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L4_UDP;
+ else if (!strncasecmp(name, "TCP", 3) &&
+ !in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L4_TCP;
+ else if (!strncasecmp(name, "TCP", 3) &&
+ in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L4_TCP;
+ else if (!strncasecmp(name, "SCTP", 4) &&
+ !in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L4_SCTP;
+ else if (!strncasecmp(name, "SCTP", 4) &&
+ in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L4_SCTP;
+ else if ((!strncasecmp(name, "ICMP", 4) ||
+ !strncasecmp(name, "ICMPV6", 6)) &&
+ !in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L4_ICMP;
+ else if ((!strncasecmp(name, "ICMP", 4) ||
+ !strncasecmp(name, "ICMPV6", 6)) &&
+ in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L4_ICMP;
+ else if (!strncasecmp(name, "GTPC", 4)) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_TUNNEL_GTPC;
+ in_tunnel = true;
+ } else if (!strncasecmp(name, "GTPU", 4)) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_TUNNEL_GTPU;
+ in_tunnel = true;
+ } else if (!strncasecmp(name, "ESP", 3)) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_TUNNEL_ESP;
+ in_tunnel = true;
+ } else if (!strncasecmp(name, "GRENAT", 6)) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_TUNNEL_GRENAT;
+ in_tunnel = true;
+ } else if (!strncasecmp(name, "L2TPV2CTL", 9) ||
+ !strncasecmp(name, "L2TPV2", 6) ||
+ !strncasecmp(name, "L2TPV3", 6)) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_TUNNEL_L2TP;
+ in_tunnel = true;
+ }
+
+ break;
+ }
+ }
+ }
+
+ ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
+ ptype_num, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to update ptype mapping table.");
+
+ rte_free(ptype_mapping);
+ rte_free(ptype);
+ return ret;
+}
+
+void
+i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
+ uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ uint32_t proto_num;
+ struct rte_pmd_i40e_proto_info *proto;
+ uint32_t buff_size;
+ uint32_t i;
+ int ret;
+
+ if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
+ op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ PMD_DRV_LOG(ERR, "Unsupported operation.");
+ return;
+ }
+
+ /* get information about protocol number */
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)&proto_num, sizeof(proto_num),
+ RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get protocol number");
+ return;
+ }
+ if (!proto_num) {
+ PMD_DRV_LOG(INFO, "No new protocol added");
+ return;
+ }
+
+ buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
+ proto = rte_zmalloc("new_proto", buff_size, 0);
+ if (!proto) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory");
+ return;
+ }
+
+ /* get information about protocol list */
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)proto, buff_size,
+ RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get protocol list");
+ rte_free(proto);
+ return;
+ }
+
+ /* Check if GTP is supported. */
+ for (i = 0; i < proto_num; i++) {
+ if (!strncmp(proto[i].name, "GTP", 3)) {
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
+ pf->gtp_support = true;
+ else
+ pf->gtp_support = false;
+ break;
+ }
+ }
+
+ /* Check if ESP is supported. */
+ for (i = 0; i < proto_num; i++) {
+ if (!strncmp(proto[i].name, "ESP", 3)) {
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
+ pf->esp_support = true;
+ else
+ pf->esp_support = false;
+ break;
+ }
+ }
+
+ /* Update customized pctype info */
+ ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
+ proto_num, proto, op);
+ if (ret)
+ PMD_DRV_LOG(INFO, "No pctype is updated.");
+
+ /* Update customized ptype info */
+ ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
+ proto_num, proto, op);
+ if (ret)
+ PMD_DRV_LOG(INFO, "No ptype is updated.");
+
+ rte_free(proto);
+}
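+
+/* Sketch of the expected call flow (hypothetical, for illustration): the
+ * DDP package write path hands the raw package buffer and the op to the
+ * function above once the firmware has accepted the package, e.g.
+ *
+ *	if (ddp_package_op_succeeded)
+ *		i40e_update_customized_info(dev, pkg, size, op);
+ *
+ * so the pctype/ptype tables stay in sync with what the package defines.
+ */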
+
+/* Create a QinQ cloud filter
+ *
+ * The Fortville NIC has limited resources for tunnel filters,
+ * so we can only reuse existing filters.
+ *
+ * In step 1 we define which Field Vector fields can be used for
+ * filter types.
+ * As we do not have the inner tag defined as a field,
+ * we have to define it first, by reusing one of the L1 entries.
+ *
+ * In step 2 we are replacing one of existing filter types with
+ * a new one for QinQ.
+ * As we are reusing L1 and replacing L2, some of the default filter
+ * types will disappear, depending on which L1 and L2 entries we reuse.
+ *
+ * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
+ *
+ * 1. Create L1 filter of outer vlan (12b) which will be in use
+ * later when we define the cloud filter.
+ * a. Valid_flags.replace_cloud = 0
+ * b. Old_filter = 10 (Stag_Inner_Vlan)
+ * c. New_filter = 0x10
+ * d. TR bit = 0xff (optional, not used here)
+ * e. Buffer – 2 entries:
+ * i. Byte 0 = 8 (outer vlan FV index).
+ * Byte 1 = 0 (rsv)
+ * Byte 2-3 = 0x0fff
+ * ii. Byte 0 = 37 (inner vlan FV index).
+ * Byte 1 = 0 (rsv)
+ * Byte 2-3 = 0x0fff
+ *
+ * Step 2:
+ * 2. Create cloud filter using two L1 filters entries: stag and
+ * new filter(outer vlan+ inner vlan)
+ * a. Valid_flags.replace_cloud = 1
+ * b. Old_filter = 1 (instead of outer IP)
+ * c. New_filter = 0x10
+ * d. Buffer – 2 entries:
+ * i. Byte 0 = 0x80 | 7 (valid | Stag).
+ * Byte 1-3 = 0 (rsv)
+ * ii. Byte 8 = 0x80 | 0x10 (valid | new L1 filter from step 1)
+ * Byte 9-11 = 0 (rsv)
+ */
+static int
+i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
+{
+ int ret = -ENOTSUP;
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
+ return ret;
+ }
+
+ /* Init */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+ /* create L1 filter */
+ filter_replace.old_filter_type =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
+ filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
+ filter_replace.tr_bit = 0;
+
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ /* Field Vector 12b mask */
+ filter_replace_buf.data[2] = 0xff;
+ filter_replace_buf.data[3] = 0x0f;
+ filter_replace_buf.data[4] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ /* Field Vector 12b mask */
filter_replace_buf.data[6] = 0xff;
filter_replace_buf.data[7] = 0x0f;
ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
if (ret != I40E_SUCCESS)
return ret;
+ if (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+
/* Apply the second L2 cloud filter */
memset(&filter_replace, 0,
sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
/* create L2 filter, input for L2 filter will be L1 filter */
filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
- filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+ filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
/* Prepare the buffer, 2 entries */
filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
filter_replace_buf.data[0] |=
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
- filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
filter_replace_buf.data[4] |=
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
+ if (!ret && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+
return ret;
}
-RTE_INIT(i40e_init_log);
+int
+i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in)
+{
+ if (in->key_len > RTE_DIM(out->key) ||
+ in->queue_num > RTE_DIM(out->queue))
+ return -EINVAL;
+ if (!in->key && in->key_len)
+ return -EINVAL;
+ out->conf = (struct rte_flow_action_rss){
+ .func = in->func,
+ .level = in->level,
+ .types = in->types,
+ .key_len = in->key_len,
+ .queue_num = in->queue_num,
+ .queue = memcpy(out->queue, in->queue,
+ sizeof(*in->queue) * in->queue_num),
+ };
+ if (in->key)
+ out->conf.key = memcpy(out->key, in->key, in->key_len);
+ return 0;
+}
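+
+/* For illustration only -- a hypothetical flow-layer caller (the real call
+ * site lives in i40e_flow.c). The RSS action from a flow rule is first
+ * snapshotted into driver-owned storage, then applied:
+ *
+ *	struct i40e_rte_flow_rss_conf conf;
+ *
+ *	memset(&conf, 0, sizeof(conf));
+ *	if (!i40e_rss_conf_init(&conf, rss_act))
+ *		ret = i40e_config_rss_filter(pf, &conf, true);
+ *
+ * where rss_act is the rule's struct rte_flow_action_rss.
+ */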
+
+/* Write HENA register to enable hash */
+static int
+i40e_rss_hash_set(struct i40e_pf *pf, struct i40e_rte_flow_rss_conf *rss_conf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint8_t *key = (void *)(uintptr_t)rss_conf->conf.key;
+ uint64_t hena;
+ int ret;
+
+ ret = i40e_set_rss_key(pf->main_vsi, key,
+ rss_conf->conf.key_len);
+ if (ret)
+ return ret;
+
+ hena = i40e_config_hena(pf->adapter, rss_conf->conf.types);
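+ /* HENA is a 64-bit pctype-enable bitmap: the low 32 bits go to
+ * PFQF_HENA(0), the high 32 bits to PFQF_HENA(1).
+ */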
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
+ I40E_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/* Configure hash input set */
+static int
+i40e_rss_conf_hash_inset(struct i40e_pf *pf, uint64_t types)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_input_set_conf conf;
+ uint64_t mask0;
+ int ret = 0;
+ uint32_t j;
+ int i;
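+
+ /* Each entry pairs an RSS type plus an L3/L4 *_ONLY selector with
+ * the hardware input-set field it maps to; RTE_ETH_INPUT_SET_UNKNOWN
+ * marks combinations the hardware cannot hash on (e.g. L4 ports of
+ * fragmented packets).
+ */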
+ static const struct {
+ uint64_t type;
+ enum rte_eth_input_set_field field;
+ } inset_match_table[] = {
+ {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L3_SRC_IP4},
+ {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY,
+ RTE_ETH_INPUT_SET_L3_DST_IP4},
+ {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_SRC_ONLY,
+ RTE_ETH_INPUT_SET_UNKNOWN},
+ {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_DST_ONLY,
+ RTE_ETH_INPUT_SET_UNKNOWN},
+
+ {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L3_SRC_IP4},
+ {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY,
+ RTE_ETH_INPUT_SET_L3_DST_IP4},
+ {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
+ {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY,
+ RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
+
+ {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L3_SRC_IP4},
+ {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY,
+ RTE_ETH_INPUT_SET_L3_DST_IP4},
+ {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
+ {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY,
+ RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
+
+ {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L3_SRC_IP4},
+ {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY,
+ RTE_ETH_INPUT_SET_L3_DST_IP4},
+ {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
+ {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY,
+ RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
+
+ {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L3_SRC_IP4},
+ {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY,
+ RTE_ETH_INPUT_SET_L3_DST_IP4},
+ {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_SRC_ONLY,
+ RTE_ETH_INPUT_SET_UNKNOWN},
+ {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_DST_ONLY,
+ RTE_ETH_INPUT_SET_UNKNOWN},
+
+ {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L3_SRC_IP6},
+ {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY,
+ RTE_ETH_INPUT_SET_L3_DST_IP6},
+ {ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_SRC_ONLY,
+ RTE_ETH_INPUT_SET_UNKNOWN},
+ {ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_DST_ONLY,
+ RTE_ETH_INPUT_SET_UNKNOWN},
+
+ {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L3_SRC_IP6},
+ {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY,
+ RTE_ETH_INPUT_SET_L3_DST_IP6},
+ {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
+ {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY,
+ RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
+
+ {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L3_SRC_IP6},
+ {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY,
+ RTE_ETH_INPUT_SET_L3_DST_IP6},
+ {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
+ {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY,
+ RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
+
+ {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L3_SRC_IP6},
+ {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY,
+ RTE_ETH_INPUT_SET_L3_DST_IP6},
+ {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
+ {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY,
+ RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
+
+ {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY,
+ RTE_ETH_INPUT_SET_L3_SRC_IP6},
+ {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY,
+ RTE_ETH_INPUT_SET_L3_DST_IP6},
+ {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_SRC_ONLY,
+ RTE_ETH_INPUT_SET_UNKNOWN},
+ {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_DST_ONLY,
+ RTE_ETH_INPUT_SET_UNKNOWN},
+ };
+
+ mask0 = types & pf->adapter->flow_types_mask;
+ conf.op = RTE_ETH_INPUT_SET_SELECT;
+ conf.inset_size = 0;
+ for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX; i++) {
+ if (mask0 & (1ULL << i)) {
+ conf.flow_type = i;
+ break;
+ }
+ }
+
+ for (j = 0; j < RTE_DIM(inset_match_table); j++) {
+ if ((types & inset_match_table[j].type) ==
+ inset_match_table[j].type) {
+ if (inset_match_table[j].field ==
+ RTE_ETH_INPUT_SET_UNKNOWN)
+ return -EINVAL;
+
+ conf.field[conf.inset_size] =
+ inset_match_table[j].field;
+ conf.inset_size++;
+ }
+ }
+
+ if (conf.inset_size) {
+ ret = i40e_hash_filter_inset_select(hw, &conf);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+/* Look up the conflicted rule then mark it as invalid */
static void
-i40e_init_log(void)
+i40e_rss_mark_invalid_rule(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_rss_filter *rss_item;
+ uint64_t rss_inset;
+
+ /* Mask off the input-set selection bits before comparing hash types */
+ rss_inset = ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
+ ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
+
+ /* Look up the conflicted rule then mark it as invalid */
+ TAILQ_FOREACH(rss_item, &pf->rss_config_list, next) {
+ if (!rss_item->rss_filter_info.valid)
+ continue;
+
+ if (conf->conf.queue_num &&
+ rss_item->rss_filter_info.conf.queue_num)
+ rss_item->rss_filter_info.valid = false;
+
+ if (conf->conf.types &&
+ (rss_item->rss_filter_info.conf.types &
+ rss_inset) ==
+ (conf->conf.types & rss_inset))
+ rss_item->rss_filter_info.valid = false;
+
+ if (conf->conf.func ==
+ RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
+ rss_item->rss_filter_info.conf.func ==
+ RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
+ rss_item->rss_filter_info.valid = false;
+ }
+}
+
+/* Configure RSS hash function */
+static int
+i40e_rss_config_hash_function(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t reg, i;
+ uint64_t mask0;
+ uint16_t j;
+
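+ /* GLQF_CTL.HTOEP selects the hash function: set for Toeplitz,
+ * cleared for simple XOR.
+ */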
+ if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
+ reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
+ if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
+ PMD_DRV_LOG(DEBUG, "Hash function already set to Simple XOR");
+ I40E_WRITE_FLUSH(hw);
+ i40e_rss_mark_invalid_rule(pf, conf);
+
+ return 0;
+ }
+ reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
+
+ i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
+ I40E_WRITE_FLUSH(hw);
+ i40e_rss_mark_invalid_rule(pf, conf);
+ } else if (conf->conf.func ==
+ RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
+ mask0 = conf->conf.types & pf->adapter->flow_types_mask;
+
+ i40e_set_symmetric_hash_enable_per_port(hw, 1);
+ for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
+ if (mask0 & (1ULL << i))
+ break;
+ }
+
+ if (i == UINT64_BIT)
+ return -EINVAL;
+
+ for (j = I40E_FILTER_PCTYPE_INVALID + 1;
+ j < I40E_FILTER_PCTYPE_MAX; j++) {
+ if (pf->adapter->pctypes_tbl[i] & (1ULL << j))
+ i40e_write_global_rx_ctl(hw,
+ I40E_GLQF_HSYM(j),
+ I40E_GLQF_HSYM_SYMH_ENA_MASK);
+ }
+ }
+
+ return 0;
+}
+
+/* Enable RSS according to the configuration */
+static int
+i40e_rss_enable_hash(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+ struct i40e_rte_flow_rss_conf rss_conf;
+
+ if (!(conf->conf.types & pf->adapter->flow_types_mask))
+ return -ENOTSUP;
+
+ memset(&rss_conf, 0, sizeof(rss_conf));
+ rte_memcpy(&rss_conf, conf, sizeof(rss_conf));
+
+ /* Configure hash input set */
+ if (i40e_rss_conf_hash_inset(pf, conf->conf.types))
+ return -EINVAL;
+
+ if (rss_conf.conf.key == NULL || rss_conf.conf.key_len <
+ (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
+ /* Random default keys */
+ static uint32_t rss_key_default[] = {0x6b793944,
+ 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
+ 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
+ 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
+
+ rss_conf.conf.key = (uint8_t *)rss_key_default;
+ rss_conf.conf.key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ PMD_DRV_LOG(INFO,
+ "No valid RSS key config for i40e, using default\n");
+ }
+
+ rss_conf.conf.types |= rss_info->conf.types;
+ i40e_rss_hash_set(pf, &rss_conf);
+
+ if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ)
+ i40e_rss_config_hash_function(pf, conf);
+
+ i40e_rss_mark_invalid_rule(pf, conf);
+
+ return 0;
+}
+
+/* Configure RSS queue region */
+static int
+i40e_rss_config_queue_region(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t lut = 0;
+ uint16_t j, num;
+ uint32_t i;
+
+ /* If both VMDQ and RSS are enabled, not all PF queues are configured.
+ * It's necessary to calculate the actual number of configured PF queues.
+ */
+ if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+ num = i40e_pf_calc_configured_queues_num(pf);
+ else
+ num = pf->dev_data->nb_rx_queues;
+
+ num = RTE_MIN(num, conf->conf.queue_num);
+ PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
+ num);
+
+ if (num == 0) {
+ PMD_DRV_LOG(ERR,
+ "No PF queues are configured to enable RSS for port %u",
+ pf->dev_data->port_id);
+ return -ENOTSUP;
+ }
+
+ /* Fill in redirection table */
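+ /* Four 8-bit queue indexes are packed per 32-bit HLUT register;
+ * 'lut' is shifted up and flushed every fourth entry.
+ */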
+ for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
+ if (j == num)
+ j = 0;
+ lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 <<
+ hw->func_caps.rss_table_entry_width) - 1));
+ if ((i & 3) == 3)
+ I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
+ }
+
+ i40e_rss_mark_invalid_rule(pf, conf);
+
+ return 0;
+}
+
+/* Configure RSS hash function to default */
+static int
+i40e_rss_clear_hash_function(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t i, reg;
+ uint64_t mask0;
+ uint16_t j;
+
+ if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
+ reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
+ if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
+ PMD_DRV_LOG(DEBUG,
+ "Hash function already set to Toeplitz");
+ I40E_WRITE_FLUSH(hw);
+
+ return 0;
+ }
+ reg |= I40E_GLQF_CTL_HTOEP_MASK;
+
+ i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
+ I40E_WRITE_FLUSH(hw);
+ } else if (conf->conf.func ==
+ RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
+ mask0 = conf->conf.types & pf->adapter->flow_types_mask;
+
+ for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
+ if (mask0 & (1ULL << i))
+ break;
+ }
+
+ if (i == UINT64_BIT)
+ return -EINVAL;
+
+ for (j = I40E_FILTER_PCTYPE_INVALID + 1;
+ j < I40E_FILTER_PCTYPE_MAX; j++) {
+ if (pf->adapter->pctypes_tbl[i] & (1ULL << j))
+ i40e_write_global_rx_ctl(hw,
+ I40E_GLQF_HSYM(j),
+ 0);
+ }
+ }
+
+ return 0;
+}
+
+/* Disable RSS hash and configure default input set */
+static int
+i40e_rss_disable_hash(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_rte_flow_rss_conf rss_conf;
+ uint32_t i;
+
+ memset(&rss_conf, 0, sizeof(rss_conf));
+ rte_memcpy(&rss_conf, conf, sizeof(rss_conf));
+
+ /* Disable RSS hash */
+ rss_conf.conf.types = rss_info->conf.types & ~(conf->conf.types);
+ i40e_rss_hash_set(pf, &rss_conf);
+
+ for (i = RTE_ETH_FLOW_IPV4; i <= RTE_ETH_FLOW_L2_PAYLOAD; i++) {
+ if (!(pf->adapter->flow_types_mask & (1ULL << i)) ||
+ !(conf->conf.types & (1ULL << i)))
+ continue;
+
+ /* Configure default input set */
+ struct rte_eth_input_set_conf input_conf = {
+ .op = RTE_ETH_INPUT_SET_SELECT,
+ .flow_type = i,
+ .inset_size = 1,
+ };
+ input_conf.field[0] = RTE_ETH_INPUT_SET_DEFAULT;
+ i40e_hash_filter_inset_select(hw, &input_conf);
+ }
+
+ rss_info->conf.types = rss_conf.conf.types;
+
+ i40e_rss_clear_hash_function(pf, conf);
+
+ return 0;
+}
+
+/* Configure RSS queue region to default */
+static int
+i40e_rss_clear_queue_region(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+ uint16_t queue[I40E_MAX_Q_PER_TC];
+ uint32_t num_rxq, i;
+ uint32_t lut = 0;
+ uint16_t j, num;
+
+ num_rxq = RTE_MIN(pf->dev_data->nb_rx_queues, I40E_MAX_Q_PER_TC);
+
+ for (j = 0; j < num_rxq; j++)
+ queue[j] = j;
+
+ /* If both VMDQ and RSS are enabled, not all PF queues are configured.
+ * It's necessary to calculate the actual number of configured PF queues.
+ */
+ if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+ num = i40e_pf_calc_configured_queues_num(pf);
+ else
+ num = pf->dev_data->nb_rx_queues;
+
+ num = RTE_MIN(num, num_rxq);
+ PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
+ num);
+
+ if (num == 0) {
+ PMD_DRV_LOG(ERR,
+ "No PF queues are configured to enable RSS for port %u",
+ pf->dev_data->port_id);
+ return -ENOTSUP;
+ }
+
+ /* Fill in redirection table */
+ for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
+ if (j == num)
+ j = 0;
+ lut = (lut << 8) | (queue[j] & ((0x1 <<
+ hw->func_caps.rss_table_entry_width) - 1));
+ if ((i & 3) == 3)
+ I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
+ }
+
+ rss_info->conf.queue_num = 0;
+ memset(&rss_info->conf.queue, 0, sizeof(uint16_t));
+
+ return 0;
+}
+
+int
+i40e_config_rss_filter(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf, bool add)
{
- i40e_logtype_init = rte_log_register("pmd.i40e.init");
- if (i40e_logtype_init >= 0)
- rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
- i40e_logtype_driver = rte_log_register("pmd.i40e.driver");
- if (i40e_logtype_driver >= 0)
- rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+ struct rte_flow_action_rss update_conf = rss_info->conf;
+ int ret = 0;
+
+ if (add) {
+ if (conf->conf.queue_num) {
+ /* Configure RSS queue region */
+ ret = i40e_rss_config_queue_region(pf, conf);
+ if (ret)
+ return ret;
+
+ update_conf.queue_num = conf->conf.queue_num;
+ update_conf.queue = conf->conf.queue;
+ } else if (conf->conf.func ==
+ RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
+ /* Configure hash function */
+ ret = i40e_rss_config_hash_function(pf, conf);
+ if (ret)
+ return ret;
+
+ update_conf.func = conf->conf.func;
+ } else {
+ /* Configure hash enable and input set */
+ ret = i40e_rss_enable_hash(pf, conf);
+ if (ret)
+ return ret;
+
+ update_conf.types |= conf->conf.types;
+ update_conf.key = conf->conf.key;
+ update_conf.key_len = conf->conf.key_len;
+ }
+
+ /* Update RSS info in pf */
+ if (i40e_rss_conf_init(rss_info, &update_conf))
+ return -EINVAL;
+ } else {
+ if (!conf->valid)
+ return 0;
+
+ if (conf->conf.queue_num)
+ i40e_rss_clear_queue_region(pf);
+ else if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
+ i40e_rss_clear_hash_function(pf, conf);
+ else
+ i40e_rss_disable_hash(pf, conf);
+ }
+
+ return 0;
}
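+
+/* For illustration only -- a hypothetical teardown path: a rule is removed
+ * through the same entry point with add == false, e.g.
+ *
+ *	i40e_config_rss_filter(pf, &rss_item->rss_filter_info, false);
+ *
+ * which restores the default queue region, hash function or input set,
+ * depending on what the rule had configured.
+ */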
+
+RTE_LOG_REGISTER(i40e_logtype_init, pmd.net.i40e.init, NOTICE);
+RTE_LOG_REGISTER(i40e_logtype_driver, pmd.net.i40e.driver, NOTICE);
+#ifdef RTE_LIBRTE_I40E_DEBUG_RX
+RTE_LOG_REGISTER(i40e_logtype_rx, pmd.net.i40e.rx, DEBUG);
+#endif
+#ifdef RTE_LIBRTE_I40E_DEBUG_TX
+RTE_LOG_REGISTER(i40e_logtype_tx, pmd.net.i40e.tx, DEBUG);
+#endif
+#ifdef RTE_LIBRTE_I40E_DEBUG_TX_FREE
+RTE_LOG_REGISTER(i40e_logtype_tx_free, pmd.net.i40e.tx_free, DEBUG);
+#endif
+
+RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
+ ETH_I40E_FLOATING_VEB_ARG "=1"
+ ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
+ ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
+ ETH_I40E_SUPPORT_MULTI_DRIVER "=1"
+ ETH_I40E_USE_LATEST_VEC "=0|1");