#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_dev.h>
#include <rte_tailq.h>
#include <rte_hash_crc.h>
+#include <rte_bitmap.h>
+#include <rte_os_shim.h>
#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "i40e_pf.h"
#include "i40e_regs.h"
#include "rte_pmd_i40e.h"
+#include "i40e_hash.h"
#define ETH_I40E_FLOATING_VEB_ARG "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list"
#define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver"
#define ETH_I40E_QUEUE_NUM_PER_VF_ARG "queue-num-per-vf"
-#define ETH_I40E_USE_LATEST_VEC "use-latest-supported-vec"
#define ETH_I40E_VF_MSG_CFG "vf_msg_cfg"
#define I40E_CLEAR_PXE_WAIT_MS 200
+#define I40E_VSI_TSR_QINQ_STRIP 0x4010
+#define I40E_VSI_TSR(_i) (0x00050800 + ((_i) * 4))
/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM 128
#define I40E_TRANSLATE_INSET 0
#define I40E_TRANSLATE_REG 1
-#define I40E_INSET_IPV4_TOS_MASK 0x0009FF00UL
-#define I40E_INSET_IPv4_TTL_MASK 0x000D00FFUL
-#define I40E_INSET_IPV4_PROTO_MASK 0x000DFF00UL
-#define I40E_INSET_IPV6_TC_MASK 0x0009F00FUL
-#define I40E_INSET_IPV6_HOP_LIMIT_MASK 0x000CFF00UL
-#define I40E_INSET_IPV6_NEXT_HDR_MASK 0x000C00FFUL
+#define I40E_INSET_IPV4_TOS_MASK 0x0000FF00UL
+#define I40E_INSET_IPV4_TTL_MASK 0x000000FFUL
+#define I40E_INSET_IPV4_PROTO_MASK 0x0000FF00UL
+#define I40E_INSET_IPV6_TC_MASK 0x0000F00FUL
+#define I40E_INSET_IPV6_HOP_LIMIT_MASK 0x0000FF00UL
+#define I40E_INSET_IPV6_NEXT_HDR_MASK 0x000000FFUL
/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG 0xA4
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
+#define I40E_GLQF_PIT_IPV4_START 2
+#define I40E_GLQF_PIT_IPV4_COUNT 2
+#define I40E_GLQF_PIT_IPV6_START 4
+#define I40E_GLQF_PIT_IPV6_COUNT 2
+
+#define I40E_GLQF_PIT_SOURCE_OFF_GET(a) \
+ (((a) & I40E_GLQF_PIT_SOURCE_OFF_MASK) >> \
+ I40E_GLQF_PIT_SOURCE_OFF_SHIFT)
+
+#define I40E_GLQF_PIT_DEST_OFF_GET(a) \
+ (((a) & I40E_GLQF_PIT_DEST_OFF_MASK) >> \
+ I40E_GLQF_PIT_DEST_OFF_SHIFT)
+
+#define I40E_GLQF_PIT_FSIZE_GET(a) (((a) & I40E_GLQF_PIT_FSIZE_MASK) >> \
+ I40E_GLQF_PIT_FSIZE_SHIFT)
+
+#define I40E_GLQF_PIT_BUILD(off, mask) (((off) << 16) | (mask))
+#define I40E_FDIR_FIELD_OFFSET(a) ((a) >> 1)
+
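(Note: a minimal sketch of how the accessors above pair with
I40E_GLQF_PIT_BUILD. The SOURCE_OFF/DEST_OFF/FSIZE shifts and masks live in
the register header; the concrete values below are illustrative only.)

    /* BUILD packs the offset into the upper 16 bits, so DEST_OFF_GET
     * recovers it, assuming I40E_GLQF_PIT_DEST_OFF_SHIFT addresses that
     * upper half. */
    uint32_t pit = I40E_GLQF_PIT_BUILD(4, 0x003F);
    uint32_t dst = I40E_GLQF_PIT_DEST_OFF_GET(pit);
    uint32_t fsz = I40E_GLQF_PIT_FSIZE_GET(pit);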
static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
-static void i40e_dev_stop(struct rte_eth_dev *dev);
-static void i40e_dev_close(struct rte_eth_dev *dev);
+static int i40e_dev_stop(struct rte_eth_dev *dev);
+static int i40e_dev_close(struct rte_eth_dev *dev);
static int i40e_dev_reset(struct rte_eth_dev *dev);
static int i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
struct i40e_vsi *vsi);
-static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
struct i40e_macvlan_filter *mv_f,
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
-static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
- enum rte_filter_op filter_op,
- void *arg);
-static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg);
+static int i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
struct rte_eth_dcb_info *dcb_info);
static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
-static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
- uint16_t seid,
- uint16_t rule_type,
- uint16_t *entries,
- uint16_t count,
- uint16_t rule_id);
-static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
- struct rte_eth_mirror_conf *mirror_conf,
- uint8_t sw_id, uint8_t on);
-static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);
static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static void i40e_filter_restore(struct i40e_pf *pf);
static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
-int i40e_logtype_init;
-int i40e_logtype_driver;
-#ifdef RTE_LIBRTE_I40E_DEBUG_RX
-int i40e_logtype_rx;
-#endif
-#ifdef RTE_LIBRTE_I40E_DEBUG_TX
-int i40e_logtype_tx;
-#endif
-#ifdef RTE_LIBRTE_I40E_DEBUG_TX_FREE
-int i40e_logtype_tx_free;
-#endif
-
static const char *const valid_keys[] = {
ETH_I40E_FLOATING_VEB_ARG,
ETH_I40E_FLOATING_VEB_LIST_ARG,
ETH_I40E_SUPPORT_MULTI_DRIVER,
ETH_I40E_QUEUE_NUM_PER_VF_ARG,
- ETH_I40E_USE_LATEST_VEC,
ETH_I40E_VF_MSG_CFG,
NULL};
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X710_N3000) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_XXV710_N3000) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B) },
+ { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP) },
{ .vendor_id = 0, /* sentinel */ },
};
.rx_queue_intr_enable = i40e_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = i40e_dev_rx_queue_intr_disable,
.rx_queue_release = i40e_dev_rx_queue_release,
- .rx_queue_count = i40e_dev_rx_queue_count,
- .rx_descriptor_done = i40e_dev_rx_descriptor_done,
- .rx_descriptor_status = i40e_dev_rx_descriptor_status,
- .tx_descriptor_status = i40e_dev_tx_descriptor_status,
.tx_queue_setup = i40e_dev_tx_queue_setup,
.tx_queue_release = i40e_dev_tx_queue_release,
.dev_led_on = i40e_dev_led_on,
.rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
.udp_tunnel_port_add = i40e_dev_udp_tunnel_port_add,
.udp_tunnel_port_del = i40e_dev_udp_tunnel_port_del,
- .filter_ctrl = i40e_dev_filter_ctrl,
+ .flow_ops_get = i40e_dev_flow_ops_get,
.rxq_info_get = i40e_rxq_info_get,
.txq_info_get = i40e_txq_info_get,
.rx_burst_mode_get = i40e_rx_burst_mode_get,
.tx_burst_mode_get = i40e_tx_burst_mode_get,
- .mirror_rule_set = i40e_mirror_rule_set,
- .mirror_rule_reset = i40e_mirror_rule_reset,
.timesync_enable = i40e_timesync_enable,
.timesync_disable = i40e_timesync_disable,
.timesync_read_rx_timestamp = i40e_timesync_read_rx_timestamp,
.mac_addr_set = i40e_set_default_mac_addr,
.mtu_set = i40e_dev_mtu_set,
.tm_ops_get = i40e_tm_ops_get,
+ .tx_done_cleanup = i40e_tx_done_cleanup,
+ .get_monitor_addr = i40e_get_monitor_addr,
};
/* store statistics names and its offset in stats structure */
struct rte_i40e_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
- unsigned offset;
+ int offset;
};
static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
{"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
rx_unknown_protocol)},
+ {"rx_size_error_packets", offsetof(struct i40e_pf, rx_err1) -
+ offsetof(struct i40e_pf, stats)},
{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
return retval;
}
+ if (eth_da.nb_representor_ports > 0 &&
+ eth_da.type != RTE_ETH_REPRESENTOR_VF) {
+ PMD_DRV_LOG(ERR, "unsupported representor type: %s\n",
+ pci_dev->device.devargs->args);
+ return -ENOTSUP;
+ }
+
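(Note: only VF representors pass this check; a devargs string along these
lines would be accepted, with the BDF purely illustrative:)

    -a 0000:02:00.0,representor=vf[0-3]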
retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
sizeof(struct i40e_adapter),
eth_dev_pci_specific_init, pci_dev,
uint32_t reg_val)
{
uint32_t ori_reg_val;
- struct rte_eth_dev *dev;
+ struct rte_eth_dev_data *dev_data =
+ ((struct i40e_adapter *)hw->back)->pf.dev_data;
+ struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id];
ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
- dev = ((struct i40e_adapter *)hw->back)->eth_dev;
i40e_write_rx_ctl(hw, reg_addr, reg_val);
if (ori_reg_val != reg_val)
PMD_DRV_LOG(WARNING,
I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
}
+static inline void i40e_clear_automask(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t val;
+
+ val = I40E_READ_REG(hw, I40E_GLINT_CTL);
+ val &= ~(I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
+ I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK);
+
+ if (!pf->support_multi_driver)
+ val &= ~I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;
+
+ I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
+}
+
#define I40E_FLOW_CONTROL_ETHERTYPE 0x8808
/*
idx = strtoul(floating_veb_value, &end, 10);
if (errno || end == NULL)
return -1;
+ if (idx < 0)
+ return -1;
while (isblank(*end))
end++;
if (*end == '-') {
i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
struct i40e_fdir_info *fdir_info = &pf->fdir;
char fdir_hash_name[RTE_HASH_NAMESIZE];
+ uint32_t alloc = hw->func_caps.fd_filters_guaranteed;
+ uint32_t best = hw->func_caps.fd_filters_best_effort;
+ enum i40e_filter_pctype pctype;
+ struct rte_bitmap *bmp = NULL;
+ uint32_t bmp_size;
+ void *mem = NULL;
+ uint32_t i = 0;
int ret;
struct rte_hash_parameters fdir_hash_params = {
PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
return -EINVAL;
}
+
fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
sizeof(struct i40e_fdir_filter *) *
I40E_MAX_FDIR_FILTER_NUM,
ret = -ENOMEM;
goto err_fdir_hash_map_alloc;
}
+
+ fdir_info->fdir_filter_array = rte_zmalloc("fdir_filter",
+ sizeof(struct i40e_fdir_filter) *
+ I40E_MAX_FDIR_FILTER_NUM,
+ 0);
+
+ if (!fdir_info->fdir_filter_array) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir filter array!");
+ ret = -ENOMEM;
+ goto err_fdir_filter_array_alloc;
+ }
+
+ for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
+ pf->fdir.flow_count[pctype] = 0;
+
+ fdir_info->fdir_space_size = alloc + best;
+ fdir_info->fdir_actual_cnt = 0;
+ fdir_info->fdir_guarantee_total_space = alloc;
+ fdir_info->fdir_guarantee_free_space =
+ fdir_info->fdir_guarantee_total_space;
+
+ PMD_DRV_LOG(INFO, "FDIR guarantee space: %u, best_effort space %u.", alloc, best);
+
+ fdir_info->fdir_flow_pool.pool =
+ rte_zmalloc("i40e_fdir_entry",
+ sizeof(struct i40e_fdir_entry) *
+ fdir_info->fdir_space_size,
+ 0);
+
+ if (!fdir_info->fdir_flow_pool.pool) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for bitmap flow!");
+ ret = -ENOMEM;
+ goto err_fdir_bitmap_flow_alloc;
+ }
+
+ for (i = 0; i < fdir_info->fdir_space_size; i++)
+ fdir_info->fdir_flow_pool.pool[i].idx = i;
+
+ bmp_size =
+ rte_bitmap_get_memory_footprint(fdir_info->fdir_space_size);
+ mem = rte_zmalloc("fdir_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
+ if (mem == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir bitmap!");
+ ret = -ENOMEM;
+ goto err_fdir_mem_alloc;
+ }
+ bmp = rte_bitmap_init(fdir_info->fdir_space_size, mem, bmp_size);
+ if (bmp == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to initialization fdir bitmap!");
+ ret = -ENOMEM;
+ goto err_fdir_bmp_alloc;
+ }
+ for (i = 0; i < fdir_info->fdir_space_size; i++)
+ rte_bitmap_set(bmp, i);
+
+ fdir_info->fdir_flow_pool.bitmap = bmp;
+
return 0;
+err_fdir_bmp_alloc:
+ rte_free(mem);
+err_fdir_mem_alloc:
+ rte_free(fdir_info->fdir_flow_pool.pool);
+err_fdir_bitmap_flow_alloc:
+ rte_free(fdir_info->fdir_filter_array);
+err_fdir_filter_array_alloc:
+ rte_free(fdir_info->hash_map);
err_fdir_hash_map_alloc:
rte_hash_free(fdir_info->hash_table);
}
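(Note: the bitmap acts as the pool's free map, with a set bit marking a free
entry. A minimal allocation sketch on top of it; the helper name is
illustrative, not the driver's:)

    static struct i40e_fdir_entry *
    fdir_entry_alloc(struct i40e_fdir_info *fdir_info)
    {
            struct rte_bitmap *bmp = fdir_info->fdir_flow_pool.bitmap;
            uint64_t slab = 0;
            uint32_t pos = 0;

            if (!rte_bitmap_scan(bmp, &pos, &slab))
                    return NULL;            /* no set bit: pool exhausted */
            pos += __builtin_ctzll(slab);   /* first free index in the slab */
            rte_bitmap_clear(bmp, pos);     /* mark the entry as in use */
            return &fdir_info->fdir_flow_pool.pool[pos];
    }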
pf->gtp_support = false;
+ pf->esp_support = false;
+}
+
+static void
+i40e_init_filter_invalidation(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ uint32_t glqf_ctl_reg = 0;
+
+ glqf_ctl_reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
+ if (!pf->support_multi_driver) {
+ fdir_info->fdir_invalprio = 1;
+ glqf_ctl_reg |= I40E_GLQF_CTL_INVALPRIO_MASK;
+ PMD_DRV_LOG(INFO, "FDIR INVALPRIO set to guaranteed first");
+ i40e_write_rx_ctl(hw, I40E_GLQF_CTL, glqf_ctl_reg);
+ } else {
+ if (glqf_ctl_reg & I40E_GLQF_CTL_INVALPRIO_MASK) {
+ fdir_info->fdir_invalprio = 1;
+ PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: guaranteed first");
+ } else {
+ fdir_info->fdir_invalprio = 0;
+ PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: shared first");
+ }
+ }
}
void
struct i40e_asq_cmd_details *cmd_details)
{
uint64_t ori_reg_val;
- struct rte_eth_dev *dev;
+ struct rte_eth_dev_data *dev_data =
+ ((struct i40e_adapter *)hw->back)->pf.dev_data;
+ struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id];
int ret;
ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
reg_addr);
return -EIO;
}
- dev = ((struct i40e_adapter *)hw->back)->eth_dev;
if (ori_reg_val != reg_val)
PMD_DRV_LOG(WARNING,
return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
}
-static int
-i40e_parse_latest_vec_handler(__rte_unused const char *key,
- const char *value,
- void *opaque)
-{
- struct i40e_adapter *ad = opaque;
- int use_latest_vec;
-
- use_latest_vec = atoi(value);
-
- if (use_latest_vec != 0 && use_latest_vec != 1)
- PMD_DRV_LOG(WARNING, "Value should be 0 or 1, set it as 1!");
-
- ad->use_latest_vec = (uint8_t)use_latest_vec;
-
- return 0;
-}
-
-static int
-i40e_use_latest_vec(struct rte_eth_dev *dev)
-{
- struct i40e_adapter *ad =
- I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
- struct rte_kvargs *kvlist;
- int kvargs_count;
-
- ad->use_latest_vec = false;
-
- if (!dev->device->devargs)
- return 0;
-
- kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
- if (!kvlist)
- return -EINVAL;
-
- kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_USE_LATEST_VEC);
- if (!kvargs_count) {
- rte_kvargs_free(kvlist);
- return 0;
- }
-
- if (kvargs_count > 1)
- PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
- "the first invalid or last valid one is used !",
- ETH_I40E_USE_LATEST_VEC);
-
- if (rte_kvargs_process(kvlist, ETH_I40E_USE_LATEST_VEC,
- i40e_parse_latest_vec_handler, ad) < 0) {
- rte_kvargs_free(kvlist);
- return -EINVAL;
- }
-
- rte_kvargs_free(kvlist);
- return 0;
-}
-
static int
read_vf_msg_config(__rte_unused const char *key,
const char *value,
PMD_INIT_FUNC_TRACE();
dev->dev_ops = &i40e_eth_dev_ops;
+ dev->rx_queue_count = i40e_dev_rx_queue_count;
+ dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
+ dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
dev->tx_pkt_prepare = i40e_prep_pkts;
}
i40e_set_default_ptype_table(dev);
pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- intr_handle = &pci_dev->intr_handle;
+ intr_handle = pci_dev->intr_handle;
rte_eth_copy_pci_info(dev, pci_dev);
pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
- pf->adapter->eth_dev = dev;
pf->dev_data = dev->data;
hw->back = I40E_PF_TO_ADAPTER(pf);
i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg);
/* Check if need to support multi-driver */
i40e_support_multi_driver(dev);
- /* Check if users want the latest supported vec path */
- i40e_use_latest_vec(dev);
/* Make sure all is clean before doing PF reset */
i40e_clear_hw(hw);
PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
return -EIO;
}
- /* Firmware of SFP x722 does not support adminq option */
- if (hw->device_id == I40E_DEV_ID_SFP_X722)
+ /* Firmware of SFP x722 does not support 802.1ad frames ability */
+ if (hw->device_id == I40E_DEV_ID_SFP_X722 ||
+ hw->device_id == I40E_DEV_ID_SFP_I_X722)
hw->flags &= ~I40E_HW_FLAG_802_1AD_CAPABLE;
PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
/* Set the global registers with default ether type value */
if (!pf->support_multi_driver) {
- ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+ ret = i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
RTE_ETHER_TYPE_VLAN);
if (ret != I40E_SUCCESS) {
PMD_INIT_LOG(ERR,
rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
&dev->data->mac_addrs[0]);
- /* Pass the information to the rte_eth_dev_close() that it should also
- * release the private port resources.
- */
- dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
-
/* Init dcb to sw mode by default */
ret = i40e_dcb_init_configure(dev, TRUE);
if (ret != I40E_SUCCESS) {
/* Set the max frame size to 0x2600 by default,
* in case other drivers changed the default value.
*/
- i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);
+ i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL);
- /* initialize mirror rule list */
- TAILQ_INIT(&pf->mirror_list);
+ /* initialize RSS rule list */
+ TAILQ_INIT(&pf->rss_config_list);
/* initialize Traffic Manager configuration */
i40e_tm_conf_init(dev);
/* Initialize customized information */
i40e_init_customized_info(pf);
+ /* Initialize the filter invalidation configuration */
+ i40e_init_filter_invalidation(pf);
+
ret = i40e_init_ethtype_filter_list(dev);
if (ret < 0)
goto err_init_ethtype_filter_list;
/* initialize queue region configuration */
i40e_init_queue_region_conf(dev);
- /* initialize rss configuration from rte_flow */
- memset(&pf->rss_info, 0,
- sizeof(struct i40e_rte_flow_rss_conf));
-
/* reset all stats of the device, including pf and main vsi */
i40e_dev_stats_reset(dev);
return 0;
err_init_fdir_filter_list:
- rte_free(pf->tunnel.hash_table);
+ rte_hash_free(pf->tunnel.hash_table);
rte_free(pf->tunnel.hash_map);
err_init_tunnel_filter_list:
- rte_free(pf->ethertype.hash_table);
+ rte_hash_free(pf->ethertype.hash_table);
rte_free(pf->ethertype.hash_map);
err_init_ethtype_filter_list:
+ rte_intr_callback_unregister(intr_handle,
+ i40e_dev_interrupt_handler, dev);
rte_free(dev->data->mac_addrs);
dev->data->mac_addrs = NULL;
err_mac_alloc:
struct i40e_fdir_info *fdir_info;
fdir_info = &pf->fdir;
- /* Remove all flow director rules and hash */
+
+ /* Remove all flow director rules */
+ while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list)))
+ TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
+}
+
+static void
+i40e_fdir_memory_cleanup(struct i40e_pf *pf)
+{
+ struct i40e_fdir_info *fdir_info;
+
+ fdir_info = &pf->fdir;
+
+ /* flow director memory cleanup */
if (fdir_info->hash_map)
rte_free(fdir_info->hash_map);
if (fdir_info->hash_table)
rte_hash_free(fdir_info->hash_table);
-
- while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
- TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
- rte_free(p_fdir);
- }
+ if (fdir_info->fdir_flow_pool.bitmap)
+ rte_free(fdir_info->fdir_flow_pool.bitmap);
+ if (fdir_info->fdir_flow_pool.pool)
+ rte_free(fdir_info->fdir_flow_pool.pool);
+ if (fdir_info->fdir_filter_array)
+ rte_free(fdir_info->fdir_filter_array);
}
void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
ad->tx_simple_allowed = true;
ad->tx_vec_allowed = true;
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* Only legacy filter API needs the following fdir config. So when the
* legacy filter API is deprecated, the following codes should also be
goto err;
/* VMDQ setup.
- * Needs to move VMDQ setting out of i40e_pf_config_mq_rx() as VMDQ and
- * RSS setting have different requirements.
- * General PMD driver call sequence are NIC init, configure,
+ * The general PMD call sequence is NIC init, configure,
* rx/tx_queue_setup and dev_start. In rx/tx_queue_setup() function, it
* will try to look up the VSI that a specific queue belongs to if VMDQ is
* applicable. So, VMDQ setting has to be done before
* number, which will be available after rx_queue_setup(). dev_start() is
* thus a good place for RSS setup.
*/
- if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+ if (mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) {
ret = i40e_vmdq_setup(dev);
if (ret)
goto err;
}
- if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
+ if (mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
ret = i40e_dcb_setup(dev);
if (ret) {
PMD_DRV_LOG(ERR, "failed to configure DCB.");
void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
- struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
uint16_t i;
I40E_WRITE_FLUSH(hw);
}
-void
+int
i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
{
- struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
- uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
+ uint16_t nb_msix = RTE_MIN(vsi->nb_msix,
+ rte_intr_nb_efd_get(intr_handle));
uint16_t queue_idx = 0;
int record = 0;
int i;
/* VF bind interrupt */
if (vsi->type == I40E_VSI_SRIOV) {
+ if (vsi->nb_msix == 0) {
+ PMD_DRV_LOG(ERR, "No msix resource");
+ return -EINVAL;
+ }
__vsi_queues_bind_intr(vsi, msix_vect,
vsi->base_queue, vsi->nb_qps,
itr_idx);
- return;
+ return 0;
}
/* PF & VMDq bind interrupt */
}
for (i = 0; i < vsi->nb_used_qps; i++) {
- if (nb_msix <= 1) {
+ if (vsi->nb_msix == 0) {
+ PMD_DRV_LOG(ERR, "No msix resource");
+ return -EINVAL;
+ } else if (nb_msix <= 1) {
if (!rte_intr_allow_others(intr_handle))
/* allow to share MISC_VEC_ID */
msix_vect = I40E_MISC_VEC_ID;
vsi->nb_used_qps - i,
itr_idx);
for (; !!record && i < vsi->nb_used_qps; i++)
- intr_handle->intr_vec[queue_idx + i] =
- msix_vect;
+ rte_intr_vec_list_index_set(intr_handle,
+ queue_idx + i, msix_vect);
break;
}
/* 1:1 queue/msix_vect mapping */
vsi->base_queue + i, 1,
itr_idx);
if (!!record)
- intr_handle->intr_vec[queue_idx + i] = msix_vect;
+ if (rte_intr_vec_list_index_set(intr_handle,
+ queue_idx + i, msix_vect))
+ return -rte_errno;
msix_vect++;
nb_msix--;
}
+
+ return 0;
}
-static void
+void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
- struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
uint16_t msix_intr, i;
I40E_WRITE_FLUSH(hw);
}
-static void
+void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
- struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
uint16_t msix_intr, i;
{
uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
- if (link_speeds & ETH_LINK_SPEED_40G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_40G)
link_speed |= I40E_LINK_SPEED_40GB;
- if (link_speeds & ETH_LINK_SPEED_25G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_25G)
link_speed |= I40E_LINK_SPEED_25GB;
- if (link_speeds & ETH_LINK_SPEED_20G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_20G)
link_speed |= I40E_LINK_SPEED_20GB;
- if (link_speeds & ETH_LINK_SPEED_10G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_10G)
link_speed |= I40E_LINK_SPEED_10GB;
- if (link_speeds & ETH_LINK_SPEED_1G)
+ if (link_speeds & RTE_ETH_LINK_SPEED_1G)
link_speed |= I40E_LINK_SPEED_1GB;
- if (link_speeds & ETH_LINK_SPEED_100M)
+ if (link_speeds & RTE_ETH_LINK_SPEED_100M)
link_speed |= I40E_LINK_SPEED_100MB;
return link_speed;
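(Note: a concrete pass through the translation above, i.e. through
i40e_parse_link_speeds(), for an application constraining autonegotiation
to 10G/1G:)

    /* Yields I40E_LINK_SPEED_10GB | I40E_LINK_SPEED_1GB. */
    uint8_t speed = i40e_parse_link_speeds(RTE_ETH_LINK_SPEED_10G |
                                           RTE_ETH_LINK_SPEED_1G);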
phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
- I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
+ I40E_AQ_PHY_TYPE_EXT_25G_LR | I40E_AQ_PHY_TYPE_EXT_25G_AOC |
+ I40E_AQ_PHY_TYPE_EXT_25G_ACC) : 0;
phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
phy_conf.eee_capability = phy_ab.eee_capability;
phy_conf.eeer = phy_ab.eeer_val;
abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
I40E_AQ_PHY_LINK_ENABLED;
- if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
- conf->link_speeds = ETH_LINK_SPEED_40G |
- ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_20G |
- ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_100M;
+ if (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
+ conf->link_speeds = RTE_ETH_LINK_SPEED_40G |
+ RTE_ETH_LINK_SPEED_25G |
+ RTE_ETH_LINK_SPEED_20G |
+ RTE_ETH_LINK_SPEED_10G |
+ RTE_ETH_LINK_SPEED_1G |
+ RTE_ETH_LINK_SPEED_100M;
abilities |= I40E_AQ_PHY_AN_ENABLED;
} else {
struct i40e_vsi *main_vsi = pf->main_vsi;
int ret, i;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
uint32_t intr_vector = 0;
struct i40e_vsi *vsi;
+ uint16_t nb_rxq, nb_txq;
hw->adapter_stopped = 0;
return ret;
}
- if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
- intr_handle->intr_vec =
- rte_zmalloc("intr_vec",
- dev->data->nb_rx_queues * sizeof(int),
- 0);
- if (!intr_handle->intr_vec) {
+ if (rte_intr_dp_is_en(intr_handle)) {
+ if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
+ dev->data->nb_rx_queues)) {
PMD_INIT_LOG(ERR,
"Failed to allocate %d rx_queues intr_vec",
dev->data->nb_rx_queues);
ret = i40e_dev_rxtx_init(pf);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
- goto err_up;
+ return ret;
}
/* Map queues with MSIX interrupt */
main_vsi->nb_used_qps = dev->data->nb_rx_queues -
pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
- i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
+ ret = i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
+ if (ret < 0)
+ return ret;
i40e_vsi_enable_queues_intr(main_vsi);
/* Map VMDQ VSI queues with MSIX interrupt */
for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
- i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
- I40E_ITR_INDEX_DEFAULT);
+ ret = i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
+ I40E_ITR_INDEX_DEFAULT);
+ if (ret < 0)
+ return ret;
i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
}
- /* enable FDIR MSIX interrupt */
- if (pf->fdir.fdir_vsi) {
- i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi,
- I40E_ITR_INDEX_NONE);
- i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
+ /* Enable all queues which have been configured */
+ for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
+ ret = i40e_dev_rx_queue_start(dev, nb_rxq);
+ if (ret)
+ goto rx_err;
}
- /* Enable all queues which have been configured */
- ret = i40e_dev_switch_queues(pf, TRUE);
- if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to enable VSI");
- goto err_up;
+ for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
+ ret = i40e_dev_tx_queue_start(dev, nb_txq);
+ if (ret)
+ goto tx_err;
}
/* Enable receiving broadcast packets */
ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "fail to set loopback link");
- goto err_up;
+ goto tx_err;
}
}
ret = i40e_apply_link_speed(dev);
if (I40E_SUCCESS != ret) {
PMD_DRV_LOG(ERR, "Fail to apply link setting");
- goto err_up;
+ goto tx_err;
}
if (!rte_intr_allow_others(intr_handle)) {
if (ret != I40E_SUCCESS)
PMD_DRV_LOG(WARNING, "Fail to set phy mask");
- /* Call get_link_info aq commond to enable/disable LSE */
+ /* Call get_link_info aq command to enable/disable LSE */
i40e_dev_link_update(dev, 0);
}
return I40E_SUCCESS;
-err_up:
- i40e_dev_switch_queues(pf, FALSE);
- i40e_dev_clear_queues(dev);
+tx_err:
+ for (i = 0; i < nb_txq; i++)
+ i40e_dev_tx_queue_stop(dev, i);
+rx_err:
+ for (i = 0; i < nb_rxq; i++)
+ i40e_dev_rx_queue_stop(dev, i);
return ret;
}
-static void
+static int
i40e_dev_stop(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
int i;
if (hw->adapter_stopped == 1)
- return;
+ return 0;
if (dev->data->dev_conf.intr_conf.rxq == 0) {
rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev);
}
/* Disable all queues */
- i40e_dev_switch_queues(pf, FALSE);
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ i40e_dev_tx_queue_stop(dev, i);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ i40e_dev_rx_queue_stop(dev, i);
/* un-map queues with interrupt registers */
i40e_vsi_disable_queues_intr(main_vsi);
i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
}
- if (pf->fdir.fdir_vsi) {
- i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
- i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
- }
/* Clear all queues and release memory */
i40e_dev_clear_queues(dev);
/* Clean datapath event and queue/vec mapping */
rte_intr_efd_disable(intr_handle);
- if (intr_handle->intr_vec) {
- rte_free(intr_handle->intr_vec);
- intr_handle->intr_vec = NULL;
- }
+
+ /* Cleanup vector list */
+ rte_intr_vec_list_free(intr_handle);
/* reset hierarchy commit */
pf->tm_conf.committed = false;
hw->adapter_stopped = 1;
+ dev->data->dev_started = 0;
pf->adapter->rss_reta_updated = 0;
+
+ return 0;
}
-static void
+static int
i40e_dev_close(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
- struct i40e_mirror_rule *p_mirror;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
struct i40e_filter_control_settings settings;
struct rte_flow *p_flow;
uint32_t reg;
int retries = 0;
PMD_INIT_FUNC_TRACE();
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
ret = rte_eth_switch_domain_free(pf->switch_domain_id);
if (ret)
PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
- i40e_dev_stop(dev);
-
- /* Remove all mirror rules */
- while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
- ret = i40e_aq_del_mirror_rule(hw,
- pf->main_vsi->veb->seid,
- p_mirror->rule_type,
- p_mirror->entries,
- p_mirror->num_entries,
- p_mirror->id);
- if (ret < 0)
- PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
- "status = %d, aq_err = %d.", ret,
- hw->aq.asq_last_status);
-
- /* remove mirror software resource anyway */
- TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
- rte_free(p_mirror);
- pf->nb_mirror_rule--;
- }
+ ret = i40e_dev_stop(dev);
i40e_dev_free_queues(dev);
(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
I40E_WRITE_FLUSH(hw);
- dev->dev_ops = NULL;
- dev->rx_pkt_burst = NULL;
- dev->tx_pkt_burst = NULL;
-
/* Clear PXE mode */
i40e_clear_pxe_mode(hw);
/* Remove all flows */
while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
TAILQ_REMOVE(&pf->flow_list, p_flow, node);
- rte_free(p_flow);
+ /* Do not free FDIR flows since they are static allocated */
+ if (p_flow->filter_type != RTE_ETH_FILTER_FDIR)
+ rte_free(p_flow);
}
+ /* release the fdir static allocated memory */
+ i40e_fdir_memory_cleanup(pf);
+
/* Remove all Traffic Manager configuration */
i40e_tm_conf_uninit(dev);
+ i40e_clear_automask(pf);
+
hw->adapter_closed = 1;
+ return ret;
}
/*
update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
{
/* Link status registers and values */
-#define I40E_PRTMAC_LINKSTA 0x001E2420
#define I40E_REG_LINK_UP 0x40000080
#define I40E_PRTMAC_MACC 0x001E24E0
#define I40E_REG_MACC_25GB 0x00020000
uint32_t link_speed;
uint32_t reg_val;
- reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
+ reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA(0));
link_speed = reg_val & I40E_REG_SPEED_MASK;
reg_val &= I40E_REG_LINK_UP;
link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
/* Parse the link status */
switch (link_speed) {
case I40E_REG_SPEED_0:
- link->link_speed = ETH_SPEED_NUM_100M;
+ link->link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case I40E_REG_SPEED_1:
- link->link_speed = ETH_SPEED_NUM_1G;
+ link->link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case I40E_REG_SPEED_2:
if (hw->mac.type == I40E_MAC_X722)
- link->link_speed = ETH_SPEED_NUM_2_5G;
+ link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
else
- link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case I40E_REG_SPEED_3:
if (hw->mac.type == I40E_MAC_X722) {
- link->link_speed = ETH_SPEED_NUM_5G;
+ link->link_speed = RTE_ETH_SPEED_NUM_5G;
} else {
reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
if (reg_val & I40E_REG_MACC_25GB)
- link->link_speed = ETH_SPEED_NUM_25G;
+ link->link_speed = RTE_ETH_SPEED_NUM_25G;
else
- link->link_speed = ETH_SPEED_NUM_40G;
+ link->link_speed = RTE_ETH_SPEED_NUM_40G;
}
break;
case I40E_REG_SPEED_4:
if (hw->mac.type == I40E_MAC_X722)
- link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_speed = RTE_ETH_SPEED_NUM_10G;
else
- link->link_speed = ETH_SPEED_NUM_20G;
+ link->link_speed = RTE_ETH_SPEED_NUM_20G;
break;
default:
PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
status = i40e_aq_get_link_info(hw, enable_lse,
&link_status, NULL);
if (unlikely(status != I40E_SUCCESS)) {
- link->link_speed = ETH_SPEED_NUM_NONE;
- link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link->link_speed = RTE_ETH_SPEED_NUM_NONE;
+ link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
PMD_DRV_LOG(ERR, "Failed to get link info");
return;
}
/* Parse the link status */
switch (link_status.link_speed) {
case I40E_LINK_SPEED_100MB:
- link->link_speed = ETH_SPEED_NUM_100M;
+ link->link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case I40E_LINK_SPEED_1GB:
- link->link_speed = ETH_SPEED_NUM_1G;
+ link->link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case I40E_LINK_SPEED_10GB:
- link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case I40E_LINK_SPEED_20GB:
- link->link_speed = ETH_SPEED_NUM_20G;
+ link->link_speed = RTE_ETH_SPEED_NUM_20G;
break;
case I40E_LINK_SPEED_25GB:
- link->link_speed = ETH_SPEED_NUM_25G;
+ link->link_speed = RTE_ETH_SPEED_NUM_25G;
break;
case I40E_LINK_SPEED_40GB:
- link->link_speed = ETH_SPEED_NUM_40G;
+ link->link_speed = RTE_ETH_SPEED_NUM_40G;
break;
default:
- link->link_speed = ETH_SPEED_NUM_NONE;
+ if (link->link_status)
+ link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
+ else
+ link->link_speed = RTE_ETH_SPEED_NUM_NONE;
break;
}
}
memset(&link, 0, sizeof(link));
/* i40e uses full duplex only */
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
+ RTE_ETH_LINK_SPEED_FIXED);
if (!wait_to_complete && !enable_lse)
update_link_reg(hw, &link);
return ret;
}
+static void
+i40e_stat_update_48_in_64(struct i40e_hw *hw, uint32_t hireg,
+ uint32_t loreg, bool offset_loaded, uint64_t *offset,
+ uint64_t *stat, uint64_t *prev_stat)
+{
+ i40e_stat_update_48(hw, hireg, loreg, offset_loaded, offset, stat);
+ /* widen the 48-bit hardware counter to 64 bits when it overflows */
+ if (offset_loaded) {
+ if (I40E_RXTX_BYTES_L_48_BIT(*prev_stat) > *stat)
+ *stat += (uint64_t)1 << I40E_48_BIT_WIDTH;
+ *stat += I40E_RXTX_BYTES_H_16_BIT(*prev_stat);
+ }
+ *prev_stat = *stat;
+}
+
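(Note: a worked pass through the carry logic, values illustrative:

    prev_stat         = 0x0000FFFFFFFFFF00   (high 16 bits still zero)
    fresh 48-bit read = 0x0000000000000100   (hardware counter wrapped)
    L48(prev) > *stat  ->  *stat += 1ULL << 48
    *stat += H16(prev) ->  *stat  = 0x0001000000000100

so the 64-bit software counter stays monotonic across hardware wrap-arounds.)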
/* Get all the statistics of a VSI */
void
i40e_update_vsi_stats(struct i40e_vsi *vsi)
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
- i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
- vsi->offset_loaded, &oes->rx_bytes,
- &nes->rx_bytes);
+ i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
+ vsi->offset_loaded, &oes->rx_bytes,
+ &nes->rx_bytes, &vsi->prev_rx_bytes);
i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
vsi->offset_loaded, &oes->rx_unicast,
&nes->rx_unicast);
i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
&oes->rx_unknown_protocol,
&nes->rx_unknown_protocol);
- i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
- vsi->offset_loaded, &oes->tx_bytes,
- &nes->tx_bytes);
+ i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
+ vsi->offset_loaded, &oes->tx_bytes,
+ &nes->tx_bytes, &vsi->prev_tx_bytes);
i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
vsi->offset_loaded, &oes->tx_unicast,
&nes->tx_unicast);
struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
/* Get rx/tx bytes of internal transfer packets */
- i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
- I40E_GLV_GORCL(hw->port),
- pf->offset_loaded,
- &pf->internal_stats_offset.rx_bytes,
- &pf->internal_stats.rx_bytes);
-
- i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
- I40E_GLV_GOTCL(hw->port),
- pf->offset_loaded,
- &pf->internal_stats_offset.tx_bytes,
- &pf->internal_stats.tx_bytes);
+ i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(hw->port),
+ I40E_GLV_GORCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.rx_bytes,
+ &pf->internal_stats.rx_bytes,
+ &pf->internal_prev_rx_bytes);
+ i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(hw->port),
+ I40E_GLV_GOTCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.tx_bytes,
+ &pf->internal_stats.tx_bytes,
+ &pf->internal_prev_tx_bytes);
/* Get total internal rx packet count */
i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
I40E_GLV_UPRCL(hw->port),
pf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN;
/* Get statistics of struct i40e_eth_stats */
- i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
- I40E_GLPRT_GORCL(hw->port),
- pf->offset_loaded, &os->eth.rx_bytes,
- &ns->eth.rx_bytes);
+ i40e_stat_update_48_in_64(hw, I40E_GLPRT_GORCH(hw->port),
+ I40E_GLPRT_GORCL(hw->port),
+ pf->offset_loaded, &os->eth.rx_bytes,
+ &ns->eth.rx_bytes, &pf->prev_rx_bytes);
i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
I40E_GLPRT_UPRCL(hw->port),
pf->offset_loaded, &os->eth.rx_unicast,
pf->offset_loaded,
&os->eth.rx_unknown_protocol,
&ns->eth.rx_unknown_protocol);
- i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
- I40E_GLPRT_GOTCL(hw->port),
- pf->offset_loaded, &os->eth.tx_bytes,
- &ns->eth.tx_bytes);
+ i40e_stat_update_48(hw, I40E_GL_RXERR1_H(hw->pf_id + I40E_MAX_VF),
+ I40E_GL_RXERR1_L(hw->pf_id + I40E_MAX_VF),
+ pf->offset_loaded, &pf->rx_err1_offset,
+ &pf->rx_err1);
+ i40e_stat_update_48_in_64(hw, I40E_GLPRT_GOTCH(hw->port),
+ I40E_GLPRT_GOTCL(hw->port),
+ pf->offset_loaded, &os->eth.tx_bytes,
+ &ns->eth.tx_bytes, &pf->prev_tx_bytes);
i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
I40E_GLPRT_UPTCL(hw->port),
pf->offset_loaded, &os->eth.tx_unicast,
stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
pf->main_vsi->eth_stats.rx_multicast +
pf->main_vsi->eth_stats.rx_broadcast -
- pf->main_vsi->eth_stats.rx_discards;
+ pf->main_vsi->eth_stats.rx_discards -
+ pf->rx_err1;
stats->opackets = ns->eth.tx_unicast +
ns->eth.tx_multicast +
ns->eth.tx_broadcast;
pf->main_vsi->eth_stats.rx_discards;
stats->ierrors = ns->crc_errors +
ns->rx_length_errors + ns->rx_undersize +
- ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
+ ns->rx_oversize + ns->rx_fragments + ns->rx_jabber +
+ pf->rx_err1;
if (pf->vfs) {
for (i = 0; i < pf->vf_num; i++) {
count++;
}
- /* Get individiual stats from i40e_hw_port struct */
+ /* Get individual stats from i40e_hw_port struct */
for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
strlcpy(xstats_names[count].name,
rte_i40e_hw_port_strings[i].name,
count++;
}
- /* Get individiual stats from i40e_hw_port struct */
+ /* Get individual stats from i40e_hw_port struct */
for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
rte_i40e_hw_port_strings[i].offset);
((hw->nvm.version >> 4) & 0xff),
(hw->nvm.version & 0xf), hw->nvm.eetrack,
ver, build, patch);
+ if (ret < 0)
+ return -EINVAL;
ret += 1; /* add the size of '\0' */
- if (fw_size < (u32)ret)
+ if (fw_size < (size_t)ret)
return ret;
else
return 0;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_QINQ_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_RSS_HASH;
-
- dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+ dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_QINQ_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
- DEV_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
dev_info->tx_queue_offload_capa;
dev_info->dev_capa =
RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+ dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
sizeof(uint32_t);
if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
/* For XL710 */
- dev_info->speed_capa = ETH_LINK_SPEED_40G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_40G;
dev_info->default_rxportconf.nb_queues = 2;
dev_info->default_txportconf.nb_queues = 2;
if (dev->data->nb_rx_queues == 1)
} else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
/* For XXV710 */
- dev_info->speed_capa = ETH_LINK_SPEED_25G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_25G;
dev_info->default_rxportconf.nb_queues = 1;
dev_info->default_txportconf.nb_queues = 1;
dev_info->default_rxportconf.ring_size = 256;
dev_info->default_txportconf.ring_size = 256;
} else {
/* For X710 */
- dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
dev_info->default_rxportconf.nb_queues = 1;
dev_info->default_txportconf.nb_queues = 1;
- if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
+ if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_10G) {
dev_info->default_rxportconf.ring_size = 512;
dev_info->default_txportconf.ring_size = 256;
} else {
int ret;
if (qinq) {
- if (vlan_type == ETH_VLAN_TYPE_OUTER)
+ if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
reg_id = 2;
}
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
int qinq = dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_EXTEND;
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
int ret = 0;
- if ((vlan_type != ETH_VLAN_TYPE_INNER &&
- vlan_type != ETH_VLAN_TYPE_OUTER) ||
- (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
+ if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
+ vlan_type != RTE_ETH_VLAN_TYPE_OUTER) ||
+ (!qinq && vlan_type == RTE_ETH_VLAN_TYPE_INNER)) {
PMD_DRV_LOG(ERR,
"Unsupported vlan type.");
return -EINVAL;
/* 802.1ad frames ability is added in NVM API 1.7*/
if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
if (qinq) {
- if (vlan_type == ETH_VLAN_TYPE_OUTER)
+ if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
hw->first_tag = rte_cpu_to_le_16(tpid);
- else if (vlan_type == ETH_VLAN_TYPE_INNER)
+ else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER)
hw->second_tag = rte_cpu_to_le_16(tpid);
} else {
- if (vlan_type == ETH_VLAN_TYPE_OUTER)
+ if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
hw->second_tag = rte_cpu_to_le_16(tpid);
}
ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
return ret;
}
+/* Configure outer vlan stripping on or off in QinQ mode */
+static int
+i40e_vsi_config_outer_vlan_stripping(struct i40e_vsi *vsi, bool on)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int ret = I40E_SUCCESS;
+ uint32_t reg;
+
+ if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
+ PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
+ return -EINVAL;
+ }
+
+ /* Configure for outer VLAN RX stripping */
+ reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
+
+ if (on)
+ reg |= I40E_VSI_TSR_QINQ_STRIP;
+ else
+ reg &= ~I40E_VSI_TSR_QINQ_STRIP;
+
+ ret = i40e_aq_debug_write_register(hw,
+ I40E_VSI_TSR(vsi->vsi_id),
+ reg, NULL);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
+ vsi->vsi_id);
+ return I40E_ERR_CONFIG;
+ }
+
+ return ret;
+}
+
static int
i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct i40e_vsi *vsi = pf->main_vsi;
struct rte_eth_rxmode *rxmode;
- if (mask & ETH_QINQ_STRIP_MASK) {
- PMD_DRV_LOG(ERR, "Strip qinq is not supported.");
- return -ENOTSUP;
- }
-
rxmode = &dev->data->dev_conf.rxmode;
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
i40e_vsi_config_vlan_filter(vsi, TRUE);
else
i40e_vsi_config_vlan_filter(vsi, FALSE);
}
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
i40e_vsi_config_vlan_stripping(vsi, TRUE);
else
i40e_vsi_config_vlan_stripping(vsi, FALSE);
}
- if (mask & ETH_VLAN_EXTEND_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
i40e_vsi_config_double_vlan(vsi, TRUE);
/* Set global registers with default ethertype. */
- i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+ i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
RTE_ETHER_TYPE_VLAN);
- i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
+ i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
RTE_ETHER_TYPE_VLAN);
}
else
i40e_vsi_config_double_vlan(vsi, FALSE);
}
+ if (mask & RTE_ETH_QINQ_STRIP_MASK) {
+ /* Enable or disable outer VLAN stripping */
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+ i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
+ else
+ i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
+ }
+
return 0;
}
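(Note: from the application side the new branch is reached through the
generic VLAN offload API; sketch, with port_id assumed to be an initialized
i40e port:)

    int mask = rte_eth_dev_get_vlan_offload(port_id);
    mask |= RTE_ETH_QINQ_STRIP_OFFLOAD;     /* request outer VLAN strip */
    ret = rte_eth_dev_set_vlan_offload(port_id, mask);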
/* Return the current mode according to the actual setting */
switch (hw->fc.current_mode) {
case I40E_FC_FULL:
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
break;
case I40E_FC_TX_PAUSE:
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
break;
case I40E_FC_RX_PAUSE:
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
break;
case I40E_FC_NONE:
default:
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
};
return 0;
struct i40e_hw *hw;
struct i40e_pf *pf;
enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
- [RTE_FC_NONE] = I40E_FC_NONE,
- [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
- [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
- [RTE_FC_FULL] = I40E_FC_FULL
+ [RTE_ETH_FC_NONE] = I40E_FC_NONE,
+ [RTE_ETH_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
+ [RTE_ETH_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
+ [RTE_ETH_FC_FULL] = I40E_FC_FULL
};
/* high_water field in the rte_eth_fc_conf using the kilobytes unit */
}
rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
- mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+ mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
else
- mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
+ mac_filter.filter_type = I40E_MAC_PERFECT_MATCH;
if (pool == 0)
vsi = pf->main_vsi;
}
}
-/* Set perfect match or hash match of MAC and VLAN for a VF */
static int
-i40e_vf_mac_filter_set(struct i40e_pf *pf,
- struct rte_eth_mac_filter *filter,
- bool add)
+i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
- struct i40e_hw *hw;
- struct i40e_mac_filter_info mac_filter;
- struct rte_ether_addr old_mac;
- struct rte_ether_addr *new_mac;
- struct i40e_pf_vf *vf = NULL;
- uint16_t vf_id;
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint32_t reg;
int ret;
- if (pf == NULL) {
- PMD_DRV_LOG(ERR, "Invalid PF argument.");
- return -EINVAL;
- }
- hw = I40E_PF_TO_HW(pf);
-
- if (filter == NULL) {
- PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
+ if (!lut)
return -EINVAL;
- }
- new_mac = &filter->mac_addr;
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id,
+ vsi->type != I40E_VSI_SRIOV,
+ lut, lut_size);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
+ return ret;
+ }
+ } else {
+ uint32_t *lut_dw = (uint32_t *)lut;
+ uint16_t i, lut_size_dw = lut_size / 4;
- if (rte_is_zero_ether_addr(new_mac)) {
- PMD_DRV_LOG(ERR, "Invalid ethernet address.");
- return -EINVAL;
+ if (vsi->type == I40E_VSI_SRIOV) {
+ for (i = 0; i <= lut_size_dw; i++) {
+ reg = I40E_VFQF_HLUT1(i, vsi->user_param);
+ lut_dw[i] = i40e_read_rx_ctl(hw, reg);
+ }
+ } else {
+ for (i = 0; i < lut_size_dw; i++)
+ lut_dw[i] = I40E_READ_REG(hw,
+ I40E_PFQF_HLUT(i));
+ }
}
- vf_id = filter->dst_id;
+ return 0;
+}
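(Note: each 32-bit HLUT register packs four one-byte queue indexes, which
is why the register fallback walks lut_size / 4 dwords; on little-endian
targets the cast aliases the byte LUT directly. Equivalent unpacking:)

    uint32_t dw = lut_dw[i];
    uint8_t q0 = dw & 0xff;          /* LUT entry 4*i     */
    uint8_t q1 = (dw >> 8) & 0xff;   /* LUT entry 4*i + 1 */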
- if (vf_id > pf->vf_num - 1 || !pf->vfs) {
- PMD_DRV_LOG(ERR, "Invalid argument.");
- return -EINVAL;
- }
- vf = &pf->vfs[vf_id];
-
- if (add && rte_is_same_ether_addr(new_mac, &pf->dev_addr)) {
- PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
- return -EINVAL;
- }
-
- if (add) {
- rte_memcpy(&old_mac, hw->mac.addr, RTE_ETHER_ADDR_LEN);
- rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
- RTE_ETHER_ADDR_LEN);
- rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
- RTE_ETHER_ADDR_LEN);
-
- mac_filter.filter_type = filter->filter_type;
- ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
- if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
- return -1;
- }
- rte_ether_addr_copy(new_mac, &pf->dev_addr);
- } else {
- rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
- RTE_ETHER_ADDR_LEN);
- ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
- if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
- return -1;
- }
-
- /* Clear device address as it has been removed */
- if (rte_is_same_ether_addr(&pf->dev_addr, new_mac))
- memset(&pf->dev_addr, 0, sizeof(struct rte_ether_addr));
- }
-
- return 0;
-}
-
-/* MAC filter handle */
-static int
-i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
- void *arg)
-{
- struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct rte_eth_mac_filter *filter;
- struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- int ret = I40E_NOT_SUPPORTED;
-
- filter = (struct rte_eth_mac_filter *)(arg);
-
- switch (filter_op) {
- case RTE_ETH_FILTER_NOP:
- ret = I40E_SUCCESS;
- break;
- case RTE_ETH_FILTER_ADD:
- i40e_pf_disable_irq0(hw);
- if (filter->is_vf)
- ret = i40e_vf_mac_filter_set(pf, filter, 1);
- i40e_pf_enable_irq0(hw);
- break;
- case RTE_ETH_FILTER_DELETE:
- i40e_pf_disable_irq0(hw);
- if (filter->is_vf)
- ret = i40e_vf_mac_filter_set(pf, filter, 0);
- i40e_pf_enable_irq0(hw);
- break;
- default:
- PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
- ret = I40E_ERR_PARAM;
- break;
- }
-
- return ret;
-}
-
-static int
-i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
-{
- struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
- struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
- uint32_t reg;
- int ret;
-
- if (!lut)
- return -EINVAL;
-
- if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
- ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id,
- vsi->type != I40E_VSI_SRIOV,
- lut, lut_size);
- if (ret) {
- PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
- return ret;
- }
- } else {
- uint32_t *lut_dw = (uint32_t *)lut;
- uint16_t i, lut_size_dw = lut_size / 4;
-
- if (vsi->type == I40E_VSI_SRIOV) {
- for (i = 0; i <= lut_size_dw; i++) {
- reg = I40E_VFQF_HLUT1(i, vsi->user_param);
- lut_dw[i] = i40e_read_rx_ctl(hw, reg);
- }
- } else {
- for (i = 0; i < lut_size_dw; i++)
- lut_dw[i] = I40E_READ_REG(hw,
- I40E_PFQF_HLUT(i));
- }
- }
-
- return 0;
-}
-
-int
-i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
-{
- struct i40e_pf *pf;
- struct i40e_hw *hw;
- int ret;
+int
+i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
+{
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
if (!vsi || !lut)
return -EINVAL;
hw = I40E_VSI_TO_HW(vsi);
if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
- ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
- vsi->type != I40E_VSI_SRIOV,
- lut, lut_size);
- if (ret) {
- PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
- return ret;
+ enum i40e_status_code status;
+
+ status = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
+ vsi->type != I40E_VSI_SRIOV,
+ lut, lut_size);
+ if (status) {
+ PMD_DRV_LOG(ERR,
+ "Failed to update RSS lookup table, error status: %d",
+ status);
+ return -EIO;
}
} else {
uint32_t *lut_dw = (uint32_t *)lut;
int ret;
if (reta_size != lut_size ||
- reta_size > ETH_RSS_RETA_SIZE_512) {
+ reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
PMD_DRV_LOG(ERR,
"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
reta_size, lut_size);
if (ret)
goto out;
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
lut[i] = reta_conf[idx].reta[shift];
}
int ret;
if (reta_size != lut_size ||
- reta_size > ETH_RSS_RETA_SIZE_512) {
+ reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
PMD_DRV_LOG(ERR,
"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
reta_size, lut_size);
if (ret)
goto out;
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift))
reta_conf[idx].reta[shift] = lut[i];
}
* @alignment: what to align the allocation to
**/
enum i40e_status_code
-i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+i40e_allocate_dma_mem_d(__rte_unused struct i40e_hw *hw,
struct i40e_dma_mem *mem,
u64 size,
u32 alignment)
{
+ static uint64_t i40e_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
if (!mem)
return I40E_ERR_PARAM;
- snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
+ snprintf(z_name, sizeof(z_name), "i40e_dma_%" PRIu64,
+ __atomic_fetch_add(&i40e_dma_memzone_id, 1, __ATOMIC_RELAXED));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
if (!mz)
* @mem: ptr to mem struct to free
**/
enum i40e_status_code
-i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+i40e_free_dma_mem_d(__rte_unused struct i40e_hw *hw,
struct i40e_dma_mem *mem)
{
if (!mem)
* @size: size of memory requested
**/
enum i40e_status_code
-i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+i40e_allocate_virt_mem_d(__rte_unused struct i40e_hw *hw,
struct i40e_virt_mem *mem,
u32 size)
{
* @mem: pointer to mem struct to free
**/
enum i40e_status_code
-i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+i40e_free_virt_mem_d(__rte_unused struct i40e_hw *hw,
struct i40e_virt_mem *mem)
{
if (!mem)
}
void
-i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
+i40e_destroy_spinlock_d(__rte_unused struct i40e_spinlock *sp)
{
return;
}
pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
hw->func_caps.num_vsis - vsi_count);
pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
- ETH_64_POOLS);
+ RTE_ETH_64_POOLS);
if (pf->max_nb_vmdq_vsi) {
pf->flags |= I40E_FLAG_VMDQ;
pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
{
struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
uint32_t pool_offset;
+ uint16_t len;
int insert;
if (pool == NULL) {
}
insert = 0;
+	/* Cache the length: a merge below may rte_free() valid_entry, so
+	 * it must not be dereferenced when the pool counters are updated.
+	 */
+	len = valid_entry->len;
/* Try to merge with next one*/
if (next != NULL) {
/* Merge with next one */
- if (valid_entry->base + valid_entry->len == next->base) {
+ if (valid_entry->base + len == next->base) {
next->base = valid_entry->base;
- next->len += valid_entry->len;
+ next->len += len;
rte_free(valid_entry);
valid_entry = next;
insert = 1;
if (prev != NULL) {
/* Merge with previous one */
if (prev->base + prev->len == valid_entry->base) {
- prev->len += valid_entry->len;
+ prev->len += len;
/* If it merge with next one, remove next node */
if (insert == 1) {
LIST_REMOVE(valid_entry, next);
rte_free(valid_entry);
+ valid_entry = NULL;
} else {
rte_free(valid_entry);
+ valid_entry = NULL;
insert = 1;
}
}
LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
}
- pool->num_free += valid_entry->len;
- pool->num_alloc -= valid_entry->len;
+ pool->num_free += len;
+ pool->num_alloc -= len;
return 0;
}
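
/*
 * The coalescing above, sketched: when the returned range touches its
 * neighbours, [prev][returned][next] collapses into one free entry, and
 * the cached `len` keeps the accounting correct even after the merges:
 *
 *	prev: base=0 len=16   returned: base=16 len=8   next: base=24 len=8
 *	=> single entry: base=0 len=32
 */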
/* VSI has child to attach, release child first */
if (vsi->veb) {
- TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
+ RTE_TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
return -1;
}
}
if (vsi->floating_veb) {
- TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
+ RTE_TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head,
+ list, temp) {
if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
return -1;
}
/* Remove all macvlan filters of the VSI */
i40e_vsi_remove_all_macvlan_filter(vsi);
- TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
+ RTE_TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
rte_free(f);
if (vsi->type != I40E_VSI_MAIN &&
mac = &f->mac_info.mac_addr;
rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
ETH_ADDR_LEN);
- f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ f->mac_info.filter_type = I40E_MACVLAN_PERFECT_MATCH;
TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
vsi->mac_num++;
}
rte_memcpy(&filter.mac_addr,
(struct rte_ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
- filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
return i40e_vsi_add_mac(vsi, &filter);
}
&ets_sla_config, NULL);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR,
- "VSI failed to get TC bandwdith configuration %u",
+ "VSI failed to get TC bandwidth configuration %u",
hw->aq.asq_last_status);
return ret;
}
ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
if (ret < 0) {
PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
- goto fail_queue_alloc;
+		if (type != I40E_VSI_FDIR)
+			goto fail_queue_alloc;
+
+		/* The FDIR VSI can still work without an MSI-X vector */
+		vsi->msix_intr = 0;
+		vsi->nb_msix = 0;
+ } else {
+ vsi->msix_intr = ret;
+ vsi->nb_msix = 1;
}
- vsi->msix_intr = ret;
- vsi->nb_msix = 1;
} else {
vsi->msix_intr = 0;
vsi->nb_msix = 0;
/* MAC/VLAN configuration */
rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
- filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
ret = i40e_vsi_add_mac(vsi, &filter);
if (ret != I40E_SUCCESS) {
struct i40e_mac_filter *f;
void *temp;
struct i40e_mac_filter_info *mac_filter;
- enum rte_mac_filter_type desired_filter;
+ enum i40e_mac_filter_type desired_filter;
int ret = I40E_SUCCESS;
if (on) {
/* Filter to match MAC and VLAN */
- desired_filter = RTE_MACVLAN_PERFECT_MATCH;
+ desired_filter = I40E_MACVLAN_PERFECT_MATCH;
} else {
/* Filter to match only MAC */
- desired_filter = RTE_MAC_PERFECT_MATCH;
+ desired_filter = I40E_MAC_PERFECT_MATCH;
}
num = vsi->mac_num;
i = 0;
/* Remove all existing mac */
- TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
+ RTE_TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
mac_filter[i] = f->mac_info;
ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
if (ret) {
int mask = 0;
/* Apply vlan offload setting */
- mask = ETH_VLAN_STRIP_MASK |
- ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK;
+ mask = RTE_ETH_VLAN_STRIP_MASK |
+ RTE_ETH_QINQ_STRIP_MASK |
+ RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK;
ret = i40e_vlan_offload_set(dev, mask);
if (ret) {
PMD_DRV_LOG(INFO, "Failed to update vlan offload");
memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
+ pf->rx_err1 = 0;
+ pf->rx_err1_offset = 0;
ret = i40e_pf_get_switch_config(pf);
if (ret != I40E_SUCCESS) {
/* Configure filter control */
memset(&settings, 0, sizeof(settings));
- if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
+ if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_128)
settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
- else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
+ else if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_512)
settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
else {
PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
return I40E_SUCCESS;
}
-/* Swith on or off the tx queues */
-static int
-i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
-{
- struct rte_eth_dev_data *dev_data = pf->dev_data;
- struct i40e_tx_queue *txq;
- struct rte_eth_dev *dev = pf->adapter->eth_dev;
- uint16_t i;
- int ret;
-
- for (i = 0; i < dev_data->nb_tx_queues; i++) {
- txq = dev_data->tx_queues[i];
- /* Don't operate the queue if not configured or
- * if starting only per queue */
- if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
- continue;
- if (on)
- ret = i40e_dev_tx_queue_start(dev, i);
- else
- ret = i40e_dev_tx_queue_stop(dev, i);
- if ( ret != I40E_SUCCESS)
- return ret;
- }
-
- return I40E_SUCCESS;
-}
-
int
i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
{
return I40E_SUCCESS;
}
-/* Switch on or off the rx queues */
-static int
-i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
-{
- struct rte_eth_dev_data *dev_data = pf->dev_data;
- struct i40e_rx_queue *rxq;
- struct rte_eth_dev *dev = pf->adapter->eth_dev;
- uint16_t i;
- int ret;
-
- for (i = 0; i < dev_data->nb_rx_queues; i++) {
- rxq = dev_data->rx_queues[i];
- /* Don't operate the queue if not configured or
- * if starting only per queue */
- if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
- continue;
- if (on)
- ret = i40e_dev_rx_queue_start(dev, i);
- else
- ret = i40e_dev_rx_queue_stop(dev, i);
- if (ret != I40E_SUCCESS)
- return ret;
- }
-
- return I40E_SUCCESS;
-}
-
-/* Switch on or off all the rx/tx queues */
-int
-i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
-{
- int ret;
-
- if (on) {
- /* enable rx queues before enabling tx queues */
- ret = i40e_dev_switch_rx_queues(pf, on);
- if (ret) {
- PMD_DRV_LOG(ERR, "Failed to switch rx queues");
- return ret;
- }
- ret = i40e_dev_switch_tx_queues(pf, on);
- } else {
- /* Stop tx queues before stopping rx queues */
- ret = i40e_dev_switch_tx_queues(pf, on);
- if (ret) {
- PMD_DRV_LOG(ERR, "Failed to switch tx queues");
- return ret;
- }
- ret = i40e_dev_switch_rx_queues(pf, on);
- }
-
- return ret;
-}
/* Initialize VSI for TX */
static int
break;
}
if (ret == I40E_SUCCESS)
- i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
- ->eth_dev);
+ i40e_set_tx_function(&rte_eth_devices[pf->dev_data->port_id]);
return ret;
}
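
/*
 * A note on the lookup above, assuming the adapter's cached eth_dev
 * pointer was dropped because it is only valid in the process that set
 * it: the port is recovered from the global table instead, e.g.
 *
 *	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
 */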
uint16_t i;
struct i40e_rx_queue *rxq;
- i40e_pf_config_mq_rx(pf);
+ i40e_pf_config_rss(pf);
for (i = 0; i < data->nb_rx_queues; i++) {
rxq = data->rx_queues[i];
if (!rxq || !rxq->q_set)
}
}
if (ret == I40E_SUCCESS)
- i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
- ->eth_dev);
+ i40e_set_rx_function(&rte_eth_devices[pf->dev_data->port_id]);
return ret;
}
{
uint64_t new_data;
- new_data = (uint64_t)I40E_READ_REG(hw, loreg);
- new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
- I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
+	if (hw->device_id == I40E_DEV_ID_QEMU) {
+		/* The QEMU device model does not handle 8-byte register
+		 * reads; read the counter as two 32-bit halves.
+		 */
+ new_data = (uint64_t)I40E_READ_REG(hw, loreg);
+ new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
+ I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
+ } else {
+ new_data = I40E_READ_REG64(hw, loreg);
+ }
if (!offset_loaded)
*offset = new_data;
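
/*
 * A sketch of the wrap-safe delta the stat helpers derive from new_data,
 * assuming the hardware counter is 48 bits wide:
 *
 *	if (new_data >= *offset)
 *		*stat = new_data - *offset;
 *	else
 *		*stat = (new_data + ((uint64_t)1 << 48)) - *offset;
 *	*stat &= ((uint64_t)1 << 48) - 1;
 */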
case i40e_aqc_opc_get_link_status:
ret = i40e_dev_link_update(dev, 0);
if (!ret)
- _rte_eth_dev_callback_process(dev,
+ rte_eth_dev_callback_process(dev,
RTE_ETH_EVENT_INTR_LSC, NULL);
break;
default:
rte_free(info.msg_buf);
}
+static void
+i40e_handle_mdd_event(struct rte_eth_dev *dev)
+{
+#define I40E_MDD_CLEAR32 0xFFFFFFFF
+#define I40E_MDD_CLEAR16 0xFFFF
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ bool mdd_detected = false;
+ struct i40e_pf_vf *vf;
+ uint32_t reg;
+ int i;
+
+ /* find what triggered the MDD event */
+ reg = I40E_READ_REG(hw, I40E_GL_MDET_TX);
+ if (reg & I40E_GL_MDET_TX_VALID_MASK) {
+ uint8_t pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
+ I40E_GL_MDET_TX_PF_NUM_SHIFT;
+ uint16_t vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
+ I40E_GL_MDET_TX_VF_NUM_SHIFT;
+ uint8_t event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
+ I40E_GL_MDET_TX_EVENT_SHIFT;
+ uint16_t queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
+ I40E_GL_MDET_TX_QUEUE_SHIFT) -
+ hw->func_caps.base_queue;
+		PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on TX "
+				"queue %d PF number 0x%02x VF number 0x%02x device %s",
+				event, queue, pf_num, vf_num, dev->data->name);
+ I40E_WRITE_REG(hw, I40E_GL_MDET_TX, I40E_MDD_CLEAR32);
+ mdd_detected = true;
+ }
+ reg = I40E_READ_REG(hw, I40E_GL_MDET_RX);
+ if (reg & I40E_GL_MDET_RX_VALID_MASK) {
+ uint8_t func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
+ I40E_GL_MDET_RX_FUNCTION_SHIFT;
+ uint8_t event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
+ I40E_GL_MDET_RX_EVENT_SHIFT;
+ uint16_t queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
+ I40E_GL_MDET_RX_QUEUE_SHIFT) -
+ hw->func_caps.base_queue;
+
+		PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on RX "
+				"queue %d of function 0x%02x device %s",
+				event, queue, func, dev->data->name);
+ I40E_WRITE_REG(hw, I40E_GL_MDET_RX, I40E_MDD_CLEAR32);
+ mdd_detected = true;
+ }
+
+ if (mdd_detected) {
+ reg = I40E_READ_REG(hw, I40E_PF_MDET_TX);
+ if (reg & I40E_PF_MDET_TX_VALID_MASK) {
+ I40E_WRITE_REG(hw, I40E_PF_MDET_TX, I40E_MDD_CLEAR16);
+			PMD_DRV_LOG(WARNING, "TX driver issue detected on PF");
+ }
+ reg = I40E_READ_REG(hw, I40E_PF_MDET_RX);
+ if (reg & I40E_PF_MDET_RX_VALID_MASK) {
+ I40E_WRITE_REG(hw, I40E_PF_MDET_RX,
+ I40E_MDD_CLEAR16);
+			PMD_DRV_LOG(WARNING, "RX driver issue detected on PF");
+ }
+ }
+
+ /* see if one of the VFs needs its hand slapped */
+ for (i = 0; i < pf->vf_num && mdd_detected; i++) {
+ vf = &pf->vfs[i];
+ reg = I40E_READ_REG(hw, I40E_VP_MDET_TX(i));
+ if (reg & I40E_VP_MDET_TX_VALID_MASK) {
+ I40E_WRITE_REG(hw, I40E_VP_MDET_TX(i),
+ I40E_MDD_CLEAR16);
+ vf->num_mdd_events++;
+			PMD_DRV_LOG(WARNING, "TX driver issue detected on VF %d %"
+					PRIu64 " times",
+					i, vf->num_mdd_events);
+ }
+
+ reg = I40E_READ_REG(hw, I40E_VP_MDET_RX(i));
+ if (reg & I40E_VP_MDET_RX_VALID_MASK) {
+ I40E_WRITE_REG(hw, I40E_VP_MDET_RX(i),
+ I40E_MDD_CLEAR16);
+ vf->num_mdd_events++;
+			PMD_DRV_LOG(WARNING, "RX driver issue detected on VF %d %"
+					PRIu64 " times",
+					i, vf->num_mdd_events);
+ }
+ }
+}
+
/**
* Interrupt handler triggered by NIC for handling
* specific interrupt.
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
}
if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
- if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
+ if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
+ i40e_handle_mdd_event(dev);
+ }
if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
PMD_DRV_LOG(INFO, "ICR0: global reset requested");
if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
goto done;
if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
- if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
+ if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
+ i40e_handle_mdd_event(dev);
+ }
if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
PMD_DRV_LOG(INFO, "ICR0: global reset requested");
if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
rte_cpu_to_le_16(filter[num + i].vlan_id);
switch (filter[num + i].filter_type) {
- case RTE_MAC_PERFECT_MATCH:
+ case I40E_MAC_PERFECT_MATCH:
flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
break;
- case RTE_MACVLAN_PERFECT_MATCH:
+ case I40E_MACVLAN_PERFECT_MATCH:
flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
break;
- case RTE_MAC_HASH_MATCH:
+ case I40E_MAC_HASH_MATCH:
flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
break;
- case RTE_MACVLAN_HASH_MATCH:
+ case I40E_MACVLAN_HASH_MATCH:
flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
break;
default:
int ret = I40E_SUCCESS;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
struct i40e_aqc_remove_macvlan_element_data *req_list;
+ enum i40e_admin_queue_err aq_status;
if (filter == NULL || total == 0)
return I40E_ERR_PARAM;
rte_cpu_to_le_16(filter[num + i].vlan_id);
switch (filter[num + i].filter_type) {
- case RTE_MAC_PERFECT_MATCH:
+ case I40E_MAC_PERFECT_MATCH:
flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
break;
- case RTE_MACVLAN_PERFECT_MATCH:
+ case I40E_MACVLAN_PERFECT_MATCH:
flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
break;
- case RTE_MAC_HASH_MATCH:
+ case I40E_MAC_HASH_MATCH:
flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
break;
- case RTE_MACVLAN_HASH_MATCH:
+ case I40E_MACVLAN_HASH_MATCH:
flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
break;
default:
req_list[i].flags = rte_cpu_to_le_16(flags);
}
- ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
- actual_num, NULL);
+ ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, req_list,
+ actual_num, NULL, &aq_status);
+
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
- goto DONE;
+ /* Do not report as an error when firmware returns ENOENT */
+ if (aq_status == I40E_AQ_RC_ENOENT) {
+ ret = I40E_SUCCESS;
+ } else {
+ PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
+ goto DONE;
+ }
}
num += actual_num;
} while (num < total);
{
uint32_t vid_idx, vid_bit;
- if (vlan_id > ETH_VLAN_ID_MAX)
+ if (vlan_id > RTE_ETH_VLAN_ID_MAX)
return 0;
vid_idx = I40E_VFTA_IDX(vlan_id);
struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
int ret;
- if (vlan_id > ETH_VLAN_ID_MAX)
+ if (vlan_id > RTE_ETH_VLAN_ID_MAX)
return;
i40e_store_vlan_filter(vsi, vlan_id, on);
f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
if (f != NULL)
return I40E_SUCCESS;
- if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
- (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
+ if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH ||
+ mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) {
/**
* If vlan_num is 0, that's the first time to add mac,
vsi->vlan_num = 1;
}
vlan_num = vsi->vlan_num;
- } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
- (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
+ } else if (mac_filter->filter_type == I40E_MAC_PERFECT_MATCH ||
+ mac_filter->filter_type == I40E_MAC_HASH_MATCH)
vlan_num = 1;
mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
ETH_ADDR_LEN);
}
- if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
- mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
+ if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH ||
+ mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) {
ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
&mac_filter->mac_addr);
if (ret != I40E_SUCCESS)
struct i40e_mac_filter *f;
struct i40e_macvlan_filter *mv_f;
int i, vlan_num;
- enum rte_mac_filter_type filter_type;
+ enum i40e_mac_filter_type filter_type;
int ret = I40E_SUCCESS;
/* Can't find it, return an error */
vlan_num = vsi->vlan_num;
filter_type = f->mac_info.filter_type;
- if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
- filter_type == RTE_MACVLAN_HASH_MATCH) {
+ if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
+ filter_type == I40E_MACVLAN_HASH_MATCH) {
if (vlan_num == 0) {
PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
return I40E_ERR_PARAM;
}
- } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
- filter_type == RTE_MAC_HASH_MATCH)
+ } else if (filter_type == I40E_MAC_PERFECT_MATCH ||
+ filter_type == I40E_MAC_HASH_MATCH)
vlan_num = 1;
mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
ETH_ADDR_LEN);
}
- if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
- filter_type == RTE_MACVLAN_HASH_MATCH) {
+ if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
+ filter_type == I40E_MACVLAN_HASH_MATCH) {
ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
if (ret != I40E_SUCCESS)
goto DONE;
}
/* Disable RSS */
-static void
+void
i40e_pf_disable_rss(struct i40e_pf *pf)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
I40E_VFQF_HKEY_MAX_INDEX :
I40E_PFQF_HKEY_MAX_INDEX;
- int ret = 0;
if (!key || key_len == 0) {
PMD_DRV_LOG(DEBUG, "No key to be configured");
if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
struct i40e_aqc_get_set_rss_key_data *key_dw =
- (struct i40e_aqc_get_set_rss_key_data *)key;
+ (struct i40e_aqc_get_set_rss_key_data *)key;
+ enum i40e_status_code status =
+ i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
- ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
- if (ret)
- PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
+ if (status) {
+ PMD_DRV_LOG(ERR,
+ "Failed to configure RSS key via AQ, error status: %d",
+ status);
+ return -EIO;
+ }
} else {
uint32_t *hash_key = (uint32_t *)key;
uint16_t i;
I40E_WRITE_FLUSH(hw);
}
- return ret;
+ return 0;
}
static int
i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
{
switch (filter_type) {
- case RTE_TUNNEL_FILTER_IMAC_IVLAN:
+ case RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN:
*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
break;
- case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
+ case RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID:
*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
break;
- case RTE_TUNNEL_FILTER_IMAC_TENID:
+ case RTE_ETH_TUNNEL_FILTER_IMAC_TENID:
*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
break;
- case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
+ case RTE_ETH_TUNNEL_FILTER_OMAC_TENID_IMAC:
*flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
break;
- case ETH_TUNNEL_FILTER_IMAC:
+ case RTE_ETH_TUNNEL_FILTER_IMAC:
*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
break;
- case ETH_TUNNEL_FILTER_OIP:
+ case RTE_ETH_TUNNEL_FILTER_OIP:
*flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
break;
- case ETH_TUNNEL_FILTER_IIP:
+ case RTE_ETH_TUNNEL_FILTER_IIP:
*flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
break;
default:
return 0;
}
-int
-i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
- struct rte_eth_tunnel_filter_conf *tunnel_filter,
- uint8_t add)
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
+#define I40E_TR_VXLAN_GRE_KEY_MASK 0x4
+#define I40E_TR_GENEVE_KEY_MASK 0x8
+#define I40E_TR_GENERIC_UDP_TUNNEL_MASK 0x40
+#define I40E_TR_GRE_KEY_MASK 0x400
+#define I40E_TR_GRE_KEY_WITH_XSUM_MASK 0x800
+#define I40E_TR_GRE_NO_KEY_MASK 0x8000
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0 0x49
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0 0x41
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0 0x80
+#define I40E_DIRECTION_INGRESS_KEY 0x8000
+#define I40E_TR_L4_TYPE_TCP 0x2
+#define I40E_TR_L4_TYPE_UDP 0x4
+#define I40E_TR_L4_TYPE_SCTP 0x8
+
+static enum i40e_status_code
+i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
{
- uint16_t ip_type;
- uint32_t ipv4_addr, ipv4_addr_le;
- uint8_t i, tun_type = 0;
- /* internal varialbe to convert ipv6 byte order */
- uint32_t convert_ipv6[4];
- int val, ret = 0;
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- struct i40e_vsi *vsi = pf->main_vsi;
- struct i40e_aqc_cloud_filters_element_bb *cld_filter;
- struct i40e_aqc_cloud_filters_element_bb *pfilter;
- struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
- struct i40e_tunnel_filter *tunnel, *node;
- struct i40e_tunnel_filter check_filter; /* Check if filter exists */
-
- cld_filter = rte_zmalloc("tunnel_filter",
- sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
- 0);
-
- if (NULL == cld_filter) {
- PMD_DRV_LOG(ERR, "Failed to alloc memory.");
- return -ENOMEM;
- }
- pfilter = cld_filter;
-
- rte_ether_addr_copy(&tunnel_filter->outer_mac,
- (struct rte_ether_addr *)&pfilter->element.outer_mac);
- rte_ether_addr_copy(&tunnel_filter->inner_mac,
- (struct rte_ether_addr *)&pfilter->element.inner_mac);
+ struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
+ enum i40e_status_code status = I40E_SUCCESS;
- pfilter->element.inner_vlan =
- rte_cpu_to_le_16(tunnel_filter->inner_vlan);
- if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
- ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
- ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
- ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
- rte_memcpy(&pfilter->element.ipaddr.v4.data,
- &ipv4_addr_le,
- sizeof(pfilter->element.ipaddr.v4.data));
- } else {
- ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
- for (i = 0; i < 4; i++) {
- convert_ipv6[i] =
- rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
- }
- rte_memcpy(&pfilter->element.ipaddr.v6.data,
- &convert_ipv6,
- sizeof(pfilter->element.ipaddr.v6.data));
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
+ return I40E_NOT_SUPPORTED;
}
- /* check tunneled type */
- switch (tunnel_filter->tunnel_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
- tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
- break;
- case RTE_TUNNEL_TYPE_NVGRE:
- tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
- break;
- case RTE_TUNNEL_TYPE_IP_IN_GRE:
- tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
- break;
- case RTE_TUNNEL_TYPE_VXLAN_GPE:
- tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE;
- break;
- default:
- /* Other tunnel types is not supported. */
- PMD_DRV_LOG(ERR, "tunnel type is not supported.");
- rte_free(cld_filter);
- return -EINVAL;
- }
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
- val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
- &pfilter->element.flags);
- if (val < 0) {
- rte_free(cld_filter);
- return -EINVAL;
- }
-
- pfilter->element.flags |= rte_cpu_to_le_16(
- I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
- ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
- pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
- pfilter->element.queue_number =
- rte_cpu_to_le_16(tunnel_filter->queue_id);
-
- /* Check if there is the filter in SW list */
- memset(&check_filter, 0, sizeof(check_filter));
- i40e_tunnel_filter_convert(cld_filter, &check_filter);
- node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
- if (add && node) {
- PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
- rte_free(cld_filter);
- return -EINVAL;
- }
-
- if (!add && !node) {
- PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
- rte_free(cld_filter);
- return -EINVAL;
- }
-
- if (add) {
- ret = i40e_aq_add_cloud_filters(hw,
- vsi->seid, &cld_filter->element, 1);
- if (ret < 0) {
- PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
- rte_free(cld_filter);
- return -ENOTSUP;
- }
- tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
- if (tunnel == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc memory.");
- rte_free(cld_filter);
- return -ENOMEM;
- }
-
- rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
- ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
- if (ret < 0)
- rte_free(tunnel);
- } else {
- ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
- &cld_filter->element, 1);
- if (ret < 0) {
- PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
- rte_free(cld_filter);
- return -ENOTSUP;
- }
- ret = i40e_sw_tunnel_filter_del(pf, &node->input);
- }
-
- rte_free(cld_filter);
- return ret;
-}
-
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
-#define I40E_TR_VXLAN_GRE_KEY_MASK 0x4
-#define I40E_TR_GENEVE_KEY_MASK 0x8
-#define I40E_TR_GENERIC_UDP_TUNNEL_MASK 0x40
-#define I40E_TR_GRE_KEY_MASK 0x400
-#define I40E_TR_GRE_KEY_WITH_XSUM_MASK 0x800
-#define I40E_TR_GRE_NO_KEY_MASK 0x8000
-
-static enum
-i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
-{
- struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
- struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
- struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
- enum i40e_status_code status = I40E_SUCCESS;
-
- if (pf->support_multi_driver) {
- PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
- return I40E_NOT_SUPPORTED;
- }
-
- memset(&filter_replace, 0,
- sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
- memset(&filter_replace_buf, 0,
- sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
-
- /* create L1 filter */
- filter_replace.old_filter_type =
- I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
- filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
- filter_replace.tr_bit = 0;
+ /* create L1 filter */
+ filter_replace.old_filter_type =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
+ filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
+ filter_replace.tr_bit = 0;
/* Prepare the buffer, 3 entries */
filter_replace_buf.data[0] =
struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+ struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
enum i40e_status_code status = I40E_SUCCESS;
if (pf->support_multi_driver) {
struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+ struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
enum i40e_status_code status = I40E_SUCCESS;
if (pf->support_multi_driver) {
struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+ struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
enum i40e_status_code status = I40E_SUCCESS;
if (pf->support_multi_driver) {
return status;
}
+static enum i40e_status_code
+i40e_replace_port_l1_filter(struct i40e_pf *pf,
+ enum i40e_l4_port_type l4_port_type)
+{
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+ /* create L1 filter */
+ if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
+ filter_replace.old_filter_type =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
+ filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
+ filter_replace_buf.data[8] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_SRC_PORT;
+ } else {
+ filter_replace.old_filter_type =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
+ filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X10;
+ filter_replace_buf.data[8] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_DST_PORT;
+ }
+
+ filter_replace.tr_bit = 0;
+ /* Prepare the buffer, 3 entries */
+ filter_replace_buf.data[0] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[2] = 0x00;
+ filter_replace_buf.data[3] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0;
+ filter_replace_buf.data[4] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[5] = 0x00;
+ filter_replace_buf.data[6] = I40E_TR_L4_TYPE_UDP |
+ I40E_TR_L4_TYPE_TCP |
+ I40E_TR_L4_TYPE_SCTP;
+ filter_replace_buf.data[7] = 0x00;
+ filter_replace_buf.data[8] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[9] = 0x00;
+ filter_replace_buf.data[10] = 0xFF;
+ filter_replace_buf.data[11] = 0xFF;
+
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (!status && filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+
+ return status;
+}
+
+static enum i40e_status_code
+i40e_replace_port_cloud_filter(struct i40e_pf *pf,
+ enum i40e_l4_port_type l4_port_type)
+{
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+ if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
+ filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
+ filter_replace.new_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_0X11;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X11;
+ } else {
+ filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
+ filter_replace.new_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_0X10;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
+ }
+
+ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+ filter_replace.tr_bit = 0;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+
+ if (!status && filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+
+ return status;
+}
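+
+/*
+ * Both replacements (L1 and cloud) must be issued before the first
+ * src/dst-port rule is programmed; the sport/dport_replace_flag checks
+ * below run the pair exactly once per port type.
+ */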
+
int
i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
struct i40e_tunnel_filter_conf *tunnel_filter,
pfilter->general_fields[0] = tunnel_filter->inner_vlan;
pfilter->general_fields[1] = tunnel_filter->outer_vlan;
big_buffer = 1;
+ break;
+ case I40E_CLOUD_TYPE_UDP:
+ case I40E_CLOUD_TYPE_TCP:
+ case I40E_CLOUD_TYPE_SCTP:
+ if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC) {
+ if (!pf->sport_replace_flag) {
+ i40e_replace_port_l1_filter(pf,
+ tunnel_filter->l4_port_type);
+ i40e_replace_port_cloud_filter(pf,
+ tunnel_filter->l4_port_type);
+ pf->sport_replace_flag = 1;
+ }
+ teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
+ I40E_DIRECTION_INGRESS_KEY;
+
+ if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
+ I40E_TR_L4_TYPE_UDP;
+ else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
+ I40E_TR_L4_TYPE_TCP;
+ else
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
+ I40E_TR_L4_TYPE_SCTP;
+
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
+ (teid_le >> 16) & 0xFFFF;
+ big_buffer = 1;
+ } else {
+ if (!pf->dport_replace_flag) {
+ i40e_replace_port_l1_filter(pf,
+ tunnel_filter->l4_port_type);
+ i40e_replace_port_cloud_filter(pf,
+ tunnel_filter->l4_port_type);
+ pf->dport_replace_flag = 1;
+ }
+ teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0] =
+ I40E_DIRECTION_INGRESS_KEY;
+
+ if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
+ I40E_TR_L4_TYPE_UDP;
+ else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
+ I40E_TR_L4_TYPE_TCP;
+ else
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
+ I40E_TR_L4_TYPE_SCTP;
+
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2] =
+ (teid_le >> 16) & 0xFFFF;
+ big_buffer = 1;
+ }
+
break;
default:
		/* Other tunnel types are not supported. */
else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
pfilter->element.flags |=
I40E_AQC_ADD_CLOUD_FILTER_0X10;
- else {
+ else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP ||
+ tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP ||
+ tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_SCTP) {
+ if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC)
+ pfilter->element.flags |=
+ I40E_AQC_ADD_CLOUD_FILTER_0X11;
+ else
+ pfilter->element.flags |=
+ I40E_AQC_ADD_CLOUD_FILTER_0X10;
+ } else {
val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
&pfilter->element.flags);
if (val < 0) {
return -EINVAL;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
I40E_AQC_TUNNEL_TYPE_VXLAN);
break;
- case RTE_TUNNEL_TYPE_VXLAN_GPE:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
break;
- case RTE_TUNNEL_TYPE_GENEVE:
- case RTE_TUNNEL_TYPE_TEREDO:
+ case RTE_ETH_TUNNEL_TYPE_GENEVE:
+ case RTE_ETH_TUNNEL_TYPE_TEREDO:
PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
ret = -1;
break;
return -EINVAL;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
- case RTE_TUNNEL_TYPE_VXLAN_GPE:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
break;
- case RTE_TUNNEL_TYPE_GENEVE:
- case RTE_TUNNEL_TYPE_TEREDO:
+ case RTE_ETH_TUNNEL_TYPE_GENEVE:
+ case RTE_ETH_TUNNEL_TYPE_TEREDO:
PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
ret = -1;
break;
}
/* Calculate the maximum number of contiguous PF queues that are configured */
-static int
+int
i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
{
struct rte_eth_dev_data *data = pf->dev_data;
return num;
}
-/* Configure RSS */
-static int
-i40e_pf_config_rss(struct i40e_pf *pf)
+/* Reset the global configure of hash function and input sets */
+static void
+i40e_pf_global_rss_reset(struct i40e_pf *pf)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- struct rte_eth_rss_conf rss_conf;
- uint32_t i, lut = 0;
- uint16_t j, num;
-
- /*
- * If both VMDQ and RSS enabled, not all of PF queues are configured.
- * It's necessary to calculate the actual PF queues that are configured.
- */
- if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
- num = i40e_pf_calc_configured_queues_num(pf);
- else
- num = pf->dev_data->nb_rx_queues;
-
- num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
- PMD_INIT_LOG(INFO, "Max of contiguous %u PF queues are configured",
- num);
+ uint32_t reg, reg_val;
+ int i;
- if (num == 0) {
- PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
- return -ENOTSUP;
+ /* Reset global RSS function sets */
+ reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
+ if (!(reg_val & I40E_GLQF_CTL_HTOEP_MASK)) {
+ reg_val |= I40E_GLQF_CTL_HTOEP_MASK;
+ i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg_val);
}
- if (pf->adapter->rss_reta_updated == 0) {
- for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
- if (j == num)
- j = 0;
- lut = (lut << 8) | (j & ((0x1 <<
- hw->func_caps.rss_table_entry_width) - 1));
- if ((i & 3) == 3)
- I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2),
- rte_bswap32(lut));
- }
- }
+ for (i = 0; i <= I40E_FILTER_PCTYPE_L2_PAYLOAD; i++) {
+ uint64_t inset;
+ int j, pctype;
- rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
- if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
- i40e_pf_disable_rss(pf);
- return 0;
- }
- if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
- (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
- /* Random default keys */
- static uint32_t rss_key_default[] = {0x6b793944,
- 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
- 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
- 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
+ if (hw->mac.type == I40E_MAC_X722)
+ pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(i));
+ else
+ pctype = i;
+
+ /* Reset pctype insets */
+ inset = i40e_get_default_input_set(i);
+ if (inset) {
+ pf->hash_input_set[pctype] = inset;
+ inset = i40e_translate_input_set_reg(hw->mac.type,
+ inset);
+
+ reg = I40E_GLQF_HASH_INSET(0, pctype);
+ i40e_check_write_global_reg(hw, reg, (uint32_t)inset);
+ reg = I40E_GLQF_HASH_INSET(1, pctype);
+ i40e_check_write_global_reg(hw, reg,
+ (uint32_t)(inset >> 32));
+
+ /* Clear unused mask registers of the pctype */
+ for (j = 0; j < I40E_INSET_MASK_NUM_REG; j++) {
+ reg = I40E_GLQF_HASH_MSK(j, pctype);
+ i40e_check_write_global_reg(hw, reg, 0);
+ }
+ }
- rss_conf.rss_key = (uint8_t *)rss_key_default;
- rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
- sizeof(uint32_t);
+ /* Reset pctype symmetric sets */
+ reg = I40E_GLQF_HSYM(pctype);
+ reg_val = i40e_read_rx_ctl(hw, reg);
+ if (reg_val & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
+ reg_val &= ~I40E_GLQF_HSYM_SYMH_ENA_MASK;
+ i40e_write_global_rx_ctl(hw, reg, reg_val);
+ }
}
-
- return i40e_hw_rss_hash_set(pf, &rss_conf);
+ I40E_WRITE_FLUSH(hw);
}
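
/*
 * The registers touched here (GLQF_CTL, GLQF_HASH_INSET, GLQF_HSYM) are
 * global, i.e. shared by every port on the NIC, which is why the caller
 * skips this reset when support_multi_driver is set.
 */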
-static int
-i40e_tunnel_filter_param_check(struct i40e_pf *pf,
- struct rte_eth_tunnel_filter_conf *filter)
+int
+i40e_pf_reset_rss_reta(struct i40e_pf *pf)
{
- if (pf == NULL || filter == NULL) {
- PMD_DRV_LOG(ERR, "Invalid parameter");
- return -EINVAL;
- }
-
- if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
- PMD_DRV_LOG(ERR, "Invalid queue ID");
- return -EINVAL;
- }
+ struct i40e_hw *hw = &pf->adapter->hw;
+ uint8_t lut[RTE_ETH_RSS_RETA_SIZE_512];
+ uint32_t i;
+ int num;
- if (filter->inner_vlan > RTE_ETHER_MAX_VLAN_ID) {
- PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
- return -EINVAL;
- }
+ /* If both VMDQ and RSS enabled, not all of PF queues are
+ * configured. It's necessary to calculate the actual PF
+ * queues that are configured.
+ */
+ if (pf->dev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
+ num = i40e_pf_calc_configured_queues_num(pf);
+ else
+ num = pf->dev_data->nb_rx_queues;
- if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
- (rte_is_zero_ether_addr(&filter->outer_mac))) {
- PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
- return -EINVAL;
- }
+ num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
+ if (num <= 0)
+ return 0;
- if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
- (rte_is_zero_ether_addr(&filter->inner_mac))) {
- PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
- return -EINVAL;
- }
+ for (i = 0; i < hw->func_caps.rss_table_size; i++)
+ lut[i] = (uint8_t)(i % (uint32_t)num);
- return 0;
+ return i40e_set_rss_lut(pf->main_vsi, lut, (uint16_t)i);
}
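
/*
 * The default table round-robins the configured queues: with num == 4
 * the LUT becomes 0 1 2 3 0 1 2 3 ... across all rss_table_size entries.
 */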
-#define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
-#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4))
-static int
-i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
+int
+i40e_pf_reset_rss_key(struct i40e_pf *pf)
{
- struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
- uint32_t val, reg;
- int ret = -EINVAL;
-
- if (pf->support_multi_driver) {
- PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
- return -ENOTSUP;
- }
-
- val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
- PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
+ const uint8_t key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ uint8_t *rss_key;
- if (len == 3) {
- reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
- } else if (len == 4) {
- reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
- } else {
- PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
- return ret;
- }
+ /* Reset key */
+ rss_key = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key;
+ if (!rss_key ||
+ pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key_len < key_len) {
+ static uint32_t rss_key_default[] = {0x6b793944,
+ 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
+ 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
+ 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
- if (reg != val) {
- ret = i40e_aq_debug_write_global_register(hw,
- I40E_GL_PRS_FVBM(2),
- reg, NULL);
- if (ret != 0)
- return ret;
- PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
- "with value 0x%08x",
- I40E_GL_PRS_FVBM(2), reg);
- } else {
- ret = 0;
+ rss_key = (uint8_t *)rss_key_default;
}
- PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
- I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
- return ret;
+ return i40e_set_rss_key(pf->main_vsi, rss_key, key_len);
}
static int
-i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
+i40e_pf_rss_reset(struct i40e_pf *pf)
{
- int ret = -EINVAL;
-
- if (!hw || !cfg)
- return -EINVAL;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- switch (cfg->cfg_type) {
- case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
- ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
- break;
- default:
- PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
- break;
- }
+ int ret;
- return ret;
-}
+ pf->hash_filter_enabled = 0;
+ i40e_pf_disable_rss(pf);
+ i40e_set_symmetric_hash_enable_per_port(hw, 0);
-static int
-i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
- enum rte_filter_op filter_op,
- void *arg)
-{
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int ret = I40E_ERR_PARAM;
+ if (!pf->support_multi_driver)
+ i40e_pf_global_rss_reset(pf);
- switch (filter_op) {
- case RTE_ETH_FILTER_SET:
- ret = i40e_dev_global_config_set(hw,
- (struct rte_eth_global_cfg *)arg);
- break;
- default:
- PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
- break;
+ /* Reset RETA table */
+ if (pf->adapter->rss_reta_updated == 0) {
+ ret = i40e_pf_reset_rss_reta(pf);
+ if (ret)
+ return ret;
}
- return ret;
+ return i40e_pf_reset_rss_key(pf);
}
-static int
-i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
- enum rte_filter_op filter_op,
- void *arg)
+/* Configure RSS */
+int
+i40e_pf_config_rss(struct i40e_pf *pf)
{
- struct rte_eth_tunnel_filter_conf *filter;
- struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- int ret = I40E_SUCCESS;
-
- filter = (struct rte_eth_tunnel_filter_conf *)(arg);
-
- if (i40e_tunnel_filter_param_check(pf, filter) < 0)
- return I40E_ERR_PARAM;
+ struct i40e_hw *hw;
+ enum rte_eth_rx_mq_mode mq_mode;
+ uint64_t rss_hf, hena;
+ int ret;
- switch (filter_op) {
- case RTE_ETH_FILTER_NOP:
- if (!(pf->flags & I40E_FLAG_VXLAN))
- ret = I40E_NOT_SUPPORTED;
- break;
- case RTE_ETH_FILTER_ADD:
- ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
- break;
- case RTE_ETH_FILTER_DELETE:
- ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
- break;
- default:
- PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
- ret = I40E_ERR_PARAM;
- break;
+ ret = i40e_pf_rss_reset(pf);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Reset RSS failed, RSS has been disabled");
+ return ret;
}
- return ret;
-}
-
-static int
-i40e_pf_config_mq_rx(struct i40e_pf *pf)
-{
- int ret = 0;
- enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
-
- /* RSS setup */
- if (mq_mode & ETH_MQ_RX_RSS_FLAG)
- ret = i40e_pf_config_rss(pf);
- else
- i40e_pf_disable_rss(pf);
-
- return ret;
-}
-
-/* Get the symmetric hash enable configurations per port */
-static void
-i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
-{
- uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
-
- *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
-}
-
-/* Set the symmetric hash enable configurations per port */
-static void
-i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
-{
- uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
+ rss_hf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
+ mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
+ if (!(rss_hf & pf->adapter->flow_types_mask) ||
+ !(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
+ return 0;
- if (enable > 0) {
- if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
- PMD_DRV_LOG(INFO,
- "Symmetric hash has already been enabled");
- return;
- }
- reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
- } else {
- if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
- PMD_DRV_LOG(INFO,
- "Symmetric hash has already been disabled");
- return;
- }
- reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
- }
- i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
+ hw = I40E_PF_TO_HW(pf);
+ hena = i40e_config_hena(pf->adapter, rss_hf);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
I40E_WRITE_FLUSH(hw);
-}
-
-/*
- * Get global configurations of hash function type and symmetric hash enable
- * per flow type (pctype). Note that global configuration means it affects all
- * the ports on the same NIC.
- */
-static int
-i40e_get_hash_filter_global_config(struct i40e_hw *hw,
- struct rte_eth_hash_global_conf *g_cfg)
-{
- struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
- uint32_t reg;
- uint16_t i, j;
-
- memset(g_cfg, 0, sizeof(*g_cfg));
- reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
- if (reg & I40E_GLQF_CTL_HTOEP_MASK)
- g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
- else
- g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
- PMD_DRV_LOG(DEBUG, "Hash function is %s",
- (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
-
- /*
- * As i40e supports less than 64 flow types, only first 64 bits need to
- * be checked.
- */
- for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
- g_cfg->valid_bit_mask[i] = 0ULL;
- g_cfg->sym_hash_enable_mask[i] = 0ULL;
- }
-
- g_cfg->valid_bit_mask[0] = adapter->flow_types_mask;
-
- for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
- if (!adapter->pctypes_tbl[i])
- continue;
- for (j = I40E_FILTER_PCTYPE_INVALID + 1;
- j < I40E_FILTER_PCTYPE_MAX; j++) {
- if (adapter->pctypes_tbl[i] & (1ULL << j)) {
- reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
- if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
- g_cfg->sym_hash_enable_mask[0] |=
- (1ULL << i);
- }
- }
- }
- }
return 0;
}
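
/*
 * A sketch of the HENA computation above, assuming i40e_config_hena()
 * ORs together the pctype bits registered for each requested flow type:
 *
 *	hena = 0;
 *	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++)
 *		if (rss_hf & (1ULL << i))
 *			hena |= adapter->pctypes_tbl[i];
 */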
-static int
-i40e_hash_global_config_check(const struct i40e_adapter *adapter,
- const struct rte_eth_hash_global_conf *g_cfg)
-{
- uint32_t i;
- uint64_t mask0, i40e_mask = adapter->flow_types_mask;
-
- if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
- g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
- g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
- PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
- g_cfg->hash_func);
- return -EINVAL;
- }
-
- /*
- * As i40e supports less than 64 flow types, only first 64 bits need to
- * be checked.
- */
- mask0 = g_cfg->valid_bit_mask[0];
- for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
- if (i == 0) {
- /* Check if any unsupported flow type configured */
- if ((mask0 | i40e_mask) ^ i40e_mask)
- goto mask_err;
- } else {
- if (g_cfg->valid_bit_mask[i])
- goto mask_err;
- }
- }
-
- return 0;
-
-mask_err:
- PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
-
- return -EINVAL;
-}
-
-/*
- * Set global configurations of hash function type and symmetric hash enable
- * per flow type (pctype). Note any modifying global configuration will affect
- * all the ports on the same NIC.
- */
-static int
-i40e_set_hash_filter_global_config(struct i40e_hw *hw,
- struct rte_eth_hash_global_conf *g_cfg)
+#define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
+#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4))
+int
+i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
{
- struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
- int ret;
- uint16_t i, j;
- uint32_t reg;
- uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask;
+ uint32_t val, reg;
+ int ret = -EINVAL;
if (pf->support_multi_driver) {
- PMD_DRV_LOG(ERR, "Hash global configuration is not supported.");
+ PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
return -ENOTSUP;
}
- /* Check the input parameters */
- ret = i40e_hash_global_config_check(adapter, g_cfg);
- if (ret < 0)
+ val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
+ PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
+
+ if (len == 3) {
+ reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
+ } else if (len == 4) {
+ reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
+ } else {
+ PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
return ret;
+ }
- /*
- * As i40e supports less than 64 flow types, only first 64 bits need to
- * be configured.
- */
- for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) {
- if (mask0 & (1UL << i)) {
- reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ?
- I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
-
- for (j = I40E_FILTER_PCTYPE_INVALID + 1;
- j < I40E_FILTER_PCTYPE_MAX; j++) {
- if (adapter->pctypes_tbl[i] & (1ULL << j))
- i40e_write_global_rx_ctl(hw,
- I40E_GLQF_HSYM(j),
- reg);
- }
- }
+ if (reg != val) {
+ ret = i40e_aq_debug_write_global_register(hw,
+ I40E_GL_PRS_FVBM(2),
+ reg, NULL);
+ if (ret != 0)
+ return ret;
+ PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
+ "with value 0x%08x",
+ I40E_GL_PRS_FVBM(2), reg);
+ } else {
+ ret = 0;
}
+ PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
+ I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
- reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
- if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
- /* Toeplitz */
- if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
- PMD_DRV_LOG(DEBUG,
- "Hash function already set to Toeplitz");
- goto out;
- }
- reg |= I40E_GLQF_CTL_HTOEP_MASK;
- } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
- /* Simple XOR */
- if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
- PMD_DRV_LOG(DEBUG,
- "Hash function already set to Simple XOR");
- goto out;
- }
- reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
- } else
- /* Use the default, and keep it as it is */
- goto out;
+ return ret;
+}
- i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
+/* Set the symmetric hash enable configurations per port */
+void
+i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
+{
+ uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
-out:
- I40E_WRITE_FLUSH(hw);
+ if (enable > 0) {
+ if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)
+ return;
- return 0;
+ reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
+ } else {
+ if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK))
+ return;
+
+ reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
+ }
+ i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
+ I40E_WRITE_FLUSH(hw);
}
/**
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
I40E_INSET_IPV4_TTL,
[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
I40E_INSET_SCTP_VT,
[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
return default_inset_table[pctype];
}
-/**
- * Parse the input set from index to logical bit masks
- */
-static int
-i40e_parse_input_set(uint64_t *inset,
- enum i40e_filter_pctype pctype,
- enum rte_eth_input_set_field *field,
- uint16_t size)
-{
- uint16_t i, j;
- int ret = -EINVAL;
-
- static const struct {
- enum rte_eth_input_set_field field;
- uint64_t inset;
- } inset_convert_table[] = {
- {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
- {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
- {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
- {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
- {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
- {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
- {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
- {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
- {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
- {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
- {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
- {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
- {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
- {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
- {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
- I40E_INSET_IPV6_NEXT_HDR},
- {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
- I40E_INSET_IPV6_HOP_LIMIT},
- {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
- {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
- {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
- {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
- {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
- {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
- {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
- I40E_INSET_SCTP_VT},
- {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
- I40E_INSET_TUNNEL_DMAC},
- {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
- I40E_INSET_VLAN_TUNNEL},
- {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
- I40E_INSET_TUNNEL_ID},
- {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
- {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
- I40E_INSET_FLEX_PAYLOAD_W1},
- {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
- I40E_INSET_FLEX_PAYLOAD_W2},
- {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
- I40E_INSET_FLEX_PAYLOAD_W3},
- {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
- I40E_INSET_FLEX_PAYLOAD_W4},
- {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
- I40E_INSET_FLEX_PAYLOAD_W5},
- {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
- I40E_INSET_FLEX_PAYLOAD_W6},
- {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
- I40E_INSET_FLEX_PAYLOAD_W7},
- {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
- I40E_INSET_FLEX_PAYLOAD_W8},
- };
-
- if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
- return ret;
-
- /* Only one item allowed for default or all */
- if (size == 1) {
- if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
- *inset = i40e_get_default_input_set(pctype);
- return 0;
- } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
- *inset = I40E_INSET_NONE;
- return 0;
- }
- }
-
- for (i = 0, *inset = 0; i < size; i++) {
- for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
- if (field[i] == inset_convert_table[j].field) {
- *inset |= inset_convert_table[j].inset;
- break;
- }
- }
-
- /* It contains unsupported input set, return immediately */
- if (j == RTE_DIM(inset_convert_table))
- return ret;
- }
-
- return 0;
-}
-
/**
* Translate the input set from bit masks to register aware bit masks
* and vice versa
return val;
}
+static int
+i40e_get_inset_field_offset(struct i40e_hw *hw, uint32_t pit_reg_start,
+ uint32_t pit_reg_count, uint32_t hdr_off)
+{
+ const uint32_t pit_reg_end = pit_reg_start + pit_reg_count;
+ uint32_t field_off = I40E_FDIR_FIELD_OFFSET(hdr_off);
+ uint32_t i, reg_val, src_off, count;
+
+ for (i = pit_reg_start; i < pit_reg_end; i++) {
+ reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_PIT(i));
+
+ src_off = I40E_GLQF_PIT_SOURCE_OFF_GET(reg_val);
+ count = I40E_GLQF_PIT_FSIZE_GET(reg_val);
+
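+ /* Check whether this PIT entry's source window covers the field */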
+ if (src_off <= field_off && (src_off + count) > field_off)
+ break;
+ }
+
+ if (i >= pit_reg_end) {
+ PMD_DRV_LOG(ERR,
+ "Hardware GLQF_PIT configuration does not support this field mask");
+ return -1;
+ }
+
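+ /* Map the source-window offset into the destination space */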
+ return I40E_GLQF_PIT_DEST_OFF_GET(reg_val) + field_off - src_off;
+}
+
int
-i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
+i40e_generate_inset_mask_reg(struct i40e_hw *hw, uint64_t inset,
+ uint32_t *mask, uint8_t nb_elem)
{
- uint8_t i, idx = 0;
- uint64_t inset_need_mask = inset;
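+ /* Field pairs that need no mask register when both are requested */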
+ static const uint64_t mask_inset[] = {
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL,
+ I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT };
static const struct {
uint64_t inset;
uint32_t mask;
- } inset_mask_map[] = {
- {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
- {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
- {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
- {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
- {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
- {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
- {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
- {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
+ uint32_t offset;
+ } inset_mask_offset_map[] = {
+ { I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK,
+ offsetof(struct rte_ipv4_hdr, type_of_service) },
+
+ { I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK,
+ offsetof(struct rte_ipv4_hdr, next_proto_id) },
+
+ { I40E_INSET_IPV4_TTL, I40E_INSET_IPV4_TTL_MASK,
+ offsetof(struct rte_ipv4_hdr, time_to_live) },
+
+ { I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK,
+ offsetof(struct rte_ipv6_hdr, vtc_flow) },
+
+ { I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK,
+ offsetof(struct rte_ipv6_hdr, proto) },
+
+ { I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK,
+ offsetof(struct rte_ipv6_hdr, hop_limits) },
};
- if (!inset || !mask || !nb_elem)
+ uint32_t i;
+ int idx = 0;
+
+ assert(mask);
+ if (!inset)
return 0;
- for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
+ for (i = 0; i < RTE_DIM(mask_inset); i++) {
/* Clear the inset bit, if no MASK is required,
* for example proto + ttl
*/
- if ((inset & inset_mask_map[i].inset) ==
- inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
- inset_need_mask &= ~inset_mask_map[i].inset;
- if (!inset_need_mask)
- return 0;
- }
- for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
- if ((inset_need_mask & inset_mask_map[i].inset) ==
- inset_mask_map[i].inset) {
- if (idx >= nb_elem) {
- PMD_DRV_LOG(ERR, "exceed maximal number of bitmasks");
- return -EINVAL;
- }
- mask[idx] = inset_mask_map[i].mask;
- idx++;
+ if ((mask_inset[i] & inset) == mask_inset[i]) {
+ inset &= ~mask_inset[i];
+ if (!inset)
+ return 0;
+ }
+ }
+
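+ /* For each remaining field, locate its flexible-payload offset via
+ * GLQF_PIT and build the corresponding mask register value.
+ */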
+ for (i = 0; i < RTE_DIM(inset_mask_offset_map); i++) {
+ uint32_t pit_start, pit_count;
+ int offset;
+
+ if (!(inset_mask_offset_map[i].inset & inset))
+ continue;
+
+ if (inset_mask_offset_map[i].inset &
+ (I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
+ I40E_INSET_IPV4_TTL)) {
+ pit_start = I40E_GLQF_PIT_IPV4_START;
+ pit_count = I40E_GLQF_PIT_IPV4_COUNT;
+ } else {
+ pit_start = I40E_GLQF_PIT_IPV6_START;
+ pit_count = I40E_GLQF_PIT_IPV6_COUNT;
+ }
+
+ offset = i40e_get_inset_field_offset(hw, pit_start, pit_count,
+ inset_mask_offset_map[i].offset);
+
+ if (offset < 0)
+ return -EINVAL;
+
+ if (idx >= nb_elem) {
+ PMD_DRV_LOG(ERR,
+ "Inset mask configuration exceeds the %u available mask registers",
+ nb_elem);
+ return -ERANGE;
}
+
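+ /* Pack the destination offset into the upper 16 bits and the
+ * field mask into the lower 16 bits.
+ */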
+ mask[idx] = I40E_GLQF_PIT_BUILD((uint32_t)offset,
+ inset_mask_offset_map[i].mask);
+ idx++;
}
return idx;
i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
{
uint32_t reg = i40e_read_rx_ctl(hw, addr);
- struct rte_eth_dev *dev;
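+ /* hw->back points to the i40e adapter; look up the ethdev via the
+ * port id kept in the PF's device data.
+ */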
+ struct rte_eth_dev_data *dev_data =
+ ((struct i40e_adapter *)hw->back)->pf.dev_data;
+ struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id];
- dev = ((struct i40e_adapter *)hw->back)->eth_dev;
if (reg != val) {
i40e_write_rx_ctl(hw, addr, val);
PMD_DRV_LOG(WARNING,
input_set = i40e_get_default_input_set(pctype);
- num = i40e_generate_inset_mask_reg(input_set, mask_reg,
+ num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
I40E_INSET_MASK_NUM_REG);
if (num < 0)
return;
}
int
-i40e_hash_filter_inset_select(struct i40e_hw *hw,
- struct rte_eth_input_set_conf *conf)
+i40e_set_hash_inset(struct i40e_hw *hw, uint64_t input_set,
+ uint32_t pctype, bool add)
{
struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
- enum i40e_filter_pctype pctype;
- uint64_t input_set, inset_reg = 0;
uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
- int ret, i, num;
-
- if (!conf) {
- PMD_DRV_LOG(ERR, "Invalid pointer");
- return -EFAULT;
- }
- if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
- conf->op != RTE_ETH_INPUT_SET_ADD) {
- PMD_DRV_LOG(ERR, "Unsupported input set operation");
- return -EINVAL;
- }
+ uint64_t inset_reg = 0;
+ int num, i;
if (pf->support_multi_driver) {
- PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
- return -ENOTSUP;
- }
-
- pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
- if (pctype == I40E_FILTER_PCTYPE_INVALID) {
- PMD_DRV_LOG(ERR, "invalid flow_type input.");
- return -EINVAL;
- }
-
- if (hw->mac.type == I40E_MAC_X722) {
- /* get translated pctype value in fd pctype register */
- pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
- I40E_GLQF_FD_PCTYPES((int)pctype));
+ PMD_DRV_LOG(ERR,
+ "Modifying the input set is not permitted when multi-driver is enabled.");
+ return -EPERM;
}
- ret = i40e_parse_input_set(&input_set, pctype, conf->field,
- conf->inset_size);
- if (ret) {
- PMD_DRV_LOG(ERR, "Failed to parse input set");
- return -EINVAL;
- }
+ /* For X722, get translated pctype in fd pctype register */
+ if (hw->mac.type == I40E_MAC_X722)
+ pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(pctype));
- if (conf->op == RTE_ETH_INPUT_SET_ADD) {
+ if (add) {
/* get inset value in register */
inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
inset_reg <<= I40E_32_BIT_WIDTH;
inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
input_set |= pf->hash_input_set[pctype];
}
- num = i40e_generate_inset_mask_reg(input_set, mask_reg,
+ num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
I40E_INSET_MASK_NUM_REG);
if (num < 0)
return -EINVAL;
return 0;
}
-int
-i40e_fdir_filter_inset_select(struct i40e_pf *pf,
- struct rte_eth_input_set_conf *conf)
-{
- struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- enum i40e_filter_pctype pctype;
- uint64_t input_set, inset_reg = 0;
- uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
- int ret, i, num;
-
- if (!hw || !conf) {
- PMD_DRV_LOG(ERR, "Invalid pointer");
- return -EFAULT;
- }
- if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
- conf->op != RTE_ETH_INPUT_SET_ADD) {
- PMD_DRV_LOG(ERR, "Unsupported input set operation");
- return -EINVAL;
- }
-
- pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
-
- if (pctype == I40E_FILTER_PCTYPE_INVALID) {
- PMD_DRV_LOG(ERR, "invalid flow_type input.");
- return -EINVAL;
- }
-
- ret = i40e_parse_input_set(&input_set, pctype, conf->field,
- conf->inset_size);
- if (ret) {
- PMD_DRV_LOG(ERR, "Failed to parse input set");
- return -EINVAL;
- }
-
- /* get inset value in register */
- inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
- inset_reg <<= I40E_32_BIT_WIDTH;
- inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
-
- /* Can not change the inset reg for flex payload for fdir,
- * it is done by writing I40E_PRTQF_FD_FLXINSET
- * in i40e_set_flex_mask_on_pctype.
- */
- if (conf->op == RTE_ETH_INPUT_SET_SELECT)
- inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
- else
- input_set |= pf->fdir.input_set[pctype];
- num = i40e_generate_inset_mask_reg(input_set, mask_reg,
- I40E_INSET_MASK_NUM_REG);
- if (num < 0)
- return -EINVAL;
- if (pf->support_multi_driver && num > 0) {
- PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
- return -ENOTSUP;
- }
-
- inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
-
- i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
- (uint32_t)(inset_reg & UINT32_MAX));
- i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
- (uint32_t)((inset_reg >>
- I40E_32_BIT_WIDTH) & UINT32_MAX));
-
- if (!pf->support_multi_driver) {
- for (i = 0; i < num; i++)
- i40e_check_write_global_reg(hw,
- I40E_GLQF_FD_MSK(i, pctype),
- mask_reg[i]);
- /*clear unused mask registers of the pctype */
- for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
- i40e_check_write_global_reg(hw,
- I40E_GLQF_FD_MSK(i, pctype),
- 0);
- } else {
- PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
- }
- I40E_WRITE_FLUSH(hw);
-
- pf->fdir.input_set[pctype] = input_set;
- return 0;
-}
-
-static int
-i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
-{
- int ret = 0;
-
- if (!hw || !info) {
- PMD_DRV_LOG(ERR, "Invalid pointer");
- return -EFAULT;
- }
-
- switch (info->info_type) {
- case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
- i40e_get_symmetric_hash_enable_per_port(hw,
- &(info->info.enable));
- break;
- case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
- ret = i40e_get_hash_filter_global_config(hw,
- &(info->info.global_conf));
- break;
- default:
- PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
- info->info_type);
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int
-i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
-{
- int ret = 0;
-
- if (!hw || !info) {
- PMD_DRV_LOG(ERR, "Invalid pointer");
- return -EFAULT;
- }
-
- switch (info->info_type) {
- case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
- i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
- break;
- case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
- ret = i40e_set_hash_filter_global_config(hw,
- &(info->info.global_conf));
- break;
- case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
- ret = i40e_hash_filter_inset_select(hw,
- &(info->info.input_set_conf));
- break;
-
- default:
- PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
- info->info_type);
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-/* Operations for hash function */
-static int
-i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_op filter_op,
- void *arg)
-{
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int ret = 0;
-
- switch (filter_op) {
- case RTE_ETH_FILTER_NOP:
- break;
- case RTE_ETH_FILTER_GET:
- ret = i40e_hash_filter_get(hw,
- (struct rte_eth_hash_filter_info *)arg);
- break;
- case RTE_ETH_FILTER_SET:
- ret = i40e_hash_filter_set(hw,
- (struct rte_eth_hash_filter_info *)arg);
- break;
- default:
- PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
- filter_op);
- ret = -ENOTSUP;
- break;
- }
-
- return ret;
-}
-
/* Convert ethertype filter structure */
static int
i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
return 0;
}
-/* Check if there exists the ehtertype filter */
+/* Check if the ethertype filter exists */
struct i40e_ethertype_filter *
i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
const struct i40e_ethertype_filter_input *input)
if (ethertype_filter == NULL) {
PMD_DRV_LOG(ERR, "Failed to alloc memory.");
return -ENOMEM;
- }
-
- rte_memcpy(ethertype_filter, &check_filter,
- sizeof(check_filter));
- ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
- if (ret < 0)
- rte_free(ethertype_filter);
- } else {
- ret = i40e_sw_ethertype_filter_del(pf, &node->input);
- }
-
- return ret;
-}
-
-/*
- * Handle operations for ethertype filter.
- */
-static int
-i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
- enum rte_filter_op filter_op,
- void *arg)
-{
- struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- int ret = 0;
-
- if (filter_op == RTE_ETH_FILTER_NOP)
- return ret;
+ }
- if (arg == NULL) {
- PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
- filter_op);
- return -EINVAL;
+ rte_memcpy(ethertype_filter, &check_filter,
+ sizeof(check_filter));
+ ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
+ if (ret < 0)
+ rte_free(ethertype_filter);
+ } else {
+ ret = i40e_sw_ethertype_filter_del(pf, &node->input);
}
- switch (filter_op) {
- case RTE_ETH_FILTER_ADD:
- ret = i40e_ethertype_filter_set(pf,
- (struct rte_eth_ethertype_filter *)arg,
- TRUE);
- break;
- case RTE_ETH_FILTER_DELETE:
- ret = i40e_ethertype_filter_set(pf,
- (struct rte_eth_ethertype_filter *)arg,
- FALSE);
- break;
- default:
- PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
- ret = -ENOSYS;
- break;
- }
return ret;
}
static int
-i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
+i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops)
{
- int ret = 0;
-
if (dev == NULL)
return -EINVAL;
- switch (filter_type) {
- case RTE_ETH_FILTER_NONE:
- /* For global configuration */
- ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
- break;
- case RTE_ETH_FILTER_HASH:
- ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
- break;
- case RTE_ETH_FILTER_MACVLAN:
- ret = i40e_mac_filter_handle(dev, filter_op, arg);
- break;
- case RTE_ETH_FILTER_ETHERTYPE:
- ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
- break;
- case RTE_ETH_FILTER_TUNNEL:
- ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
- break;
- case RTE_ETH_FILTER_FDIR:
- ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
- break;
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
- *(const void **)arg = &i40e_flow_ops;
- break;
- default:
- PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
- filter_type);
- ret = -EINVAL;
- break;
- }
-
- return ret;
+ *ops = &i40e_flow_ops;
+ return 0;
}
/*
{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_X722) },
{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
}
}
-#define I40E_VSI_TSR(_i) (0x00050800 + ((_i) * 4))
#define I40E_VSI_TSR_QINQ_CONFIG 0xc030
#define I40E_VSI_L2TAGSTXVALID(_i) (0x00042800 + ((_i) * 4))
#define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
return 0;
}
-/**
- * i40e_aq_add_mirror_rule
- * @hw: pointer to the hardware structure
- * @seid: VEB seid to add mirror rule to
- * @dst_id: destination vsi seid
- * @entries: Buffer which contains the entities to be mirrored
- * @count: number of entities contained in the buffer
- * @rule_id:the rule_id of the rule to be added
- *
- * Add a mirror rule for a given veb.
- *
- **/
-static enum i40e_status_code
-i40e_aq_add_mirror_rule(struct i40e_hw *hw,
- uint16_t seid, uint16_t dst_id,
- uint16_t rule_type, uint16_t *entries,
- uint16_t count, uint16_t *rule_id)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_delete_mirror_rule cmd;
- struct i40e_aqc_add_delete_mirror_rule_completion *resp =
- (struct i40e_aqc_add_delete_mirror_rule_completion *)
- &desc.params.raw;
- uint16_t buff_len;
- enum i40e_status_code status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_add_mirror_rule);
- memset(&cmd, 0, sizeof(cmd));
-
- buff_len = sizeof(uint16_t) * count;
- desc.datalen = rte_cpu_to_le_16(buff_len);
- if (buff_len > 0)
- desc.flags |= rte_cpu_to_le_16(
- (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- cmd.rule_type = rte_cpu_to_le_16(rule_type <<
- I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
- cmd.num_entries = rte_cpu_to_le_16(count);
- cmd.seid = rte_cpu_to_le_16(seid);
- cmd.destination = rte_cpu_to_le_16(dst_id);
-
- rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
- status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
- PMD_DRV_LOG(INFO,
- "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
- hw->aq.asq_last_status, resp->rule_id,
- resp->mirror_rules_used, resp->mirror_rules_free);
- *rule_id = rte_le_to_cpu_16(resp->rule_id);
-
- return status;
-}
-
-/**
- * i40e_aq_del_mirror_rule
- * @hw: pointer to the hardware structure
- * @seid: VEB seid to add mirror rule to
- * @entries: Buffer which contains the entities to be mirrored
- * @count: number of entities contained in the buffer
- * @rule_id:the rule_id of the rule to be delete
- *
- * Delete a mirror rule for a given veb.
- *
- **/
-static enum i40e_status_code
-i40e_aq_del_mirror_rule(struct i40e_hw *hw,
- uint16_t seid, uint16_t rule_type, uint16_t *entries,
- uint16_t count, uint16_t rule_id)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_delete_mirror_rule cmd;
- uint16_t buff_len = 0;
- enum i40e_status_code status;
- void *buff = NULL;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_delete_mirror_rule);
- memset(&cmd, 0, sizeof(cmd));
- if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
- desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
- I40E_AQ_FLAG_RD));
- cmd.num_entries = count;
- buff_len = sizeof(uint16_t) * count;
- desc.datalen = rte_cpu_to_le_16(buff_len);
- buff = (void *)entries;
- } else
- /* rule id is filled in destination field for deleting mirror rule */
- cmd.destination = rte_cpu_to_le_16(rule_id);
-
- cmd.rule_type = rte_cpu_to_le_16(rule_type <<
- I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
- cmd.seid = rte_cpu_to_le_16(seid);
-
- rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
- status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
-
- return status;
-}
-
-/**
- * i40e_mirror_rule_set
- * @dev: pointer to the hardware structure
- * @mirror_conf: mirror rule info
- * @sw_id: mirror rule's sw_id
- * @on: enable/disable
- *
- * set a mirror rule.
- *
- **/
-static int
-i40e_mirror_rule_set(struct rte_eth_dev *dev,
- struct rte_eth_mirror_conf *mirror_conf,
- uint8_t sw_id, uint8_t on)
-{
- struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct i40e_mirror_rule *it, *mirr_rule = NULL;
- struct i40e_mirror_rule *parent = NULL;
- uint16_t seid, dst_seid, rule_id;
- uint16_t i, j = 0;
- int ret;
-
- PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
-
- if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
- PMD_DRV_LOG(ERR,
- "mirror rule can not be configured without veb or vfs.");
- return -ENOSYS;
- }
- if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
- PMD_DRV_LOG(ERR, "mirror table is full.");
- return -ENOSPC;
- }
- if (mirror_conf->dst_pool > pf->vf_num) {
- PMD_DRV_LOG(ERR, "invalid destination pool %u.",
- mirror_conf->dst_pool);
- return -EINVAL;
- }
-
- seid = pf->main_vsi->veb->seid;
-
- TAILQ_FOREACH(it, &pf->mirror_list, rules) {
- if (sw_id <= it->index) {
- mirr_rule = it;
- break;
- }
- parent = it;
- }
- if (mirr_rule && sw_id == mirr_rule->index) {
- if (on) {
- PMD_DRV_LOG(ERR, "mirror rule exists.");
- return -EEXIST;
- } else {
- ret = i40e_aq_del_mirror_rule(hw, seid,
- mirr_rule->rule_type,
- mirr_rule->entries,
- mirr_rule->num_entries, mirr_rule->id);
- if (ret < 0) {
- PMD_DRV_LOG(ERR,
- "failed to remove mirror rule: ret = %d, aq_err = %d.",
- ret, hw->aq.asq_last_status);
- return -ENOSYS;
- }
- TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
- rte_free(mirr_rule);
- pf->nb_mirror_rule--;
- return 0;
- }
- } else if (!on) {
- PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
- return -ENOENT;
- }
-
- mirr_rule = rte_zmalloc("i40e_mirror_rule",
- sizeof(struct i40e_mirror_rule) , 0);
- if (!mirr_rule) {
- PMD_DRV_LOG(ERR, "failed to allocate memory");
- return I40E_ERR_NO_MEMORY;
- }
- switch (mirror_conf->rule_type) {
- case ETH_MIRROR_VLAN:
- for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
- if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
- mirr_rule->entries[j] =
- mirror_conf->vlan.vlan_id[i];
- j++;
- }
- }
- if (j == 0) {
- PMD_DRV_LOG(ERR, "vlan is not specified.");
- rte_free(mirr_rule);
- return -EINVAL;
- }
- mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
- break;
- case ETH_MIRROR_VIRTUAL_POOL_UP:
- case ETH_MIRROR_VIRTUAL_POOL_DOWN:
- /* check if the specified pool bit is out of range */
- if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
- PMD_DRV_LOG(ERR, "pool mask is out of range.");
- rte_free(mirr_rule);
- return -EINVAL;
- }
- for (i = 0, j = 0; i < pf->vf_num; i++) {
- if (mirror_conf->pool_mask & (1ULL << i)) {
- mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
- j++;
- }
- }
- if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
- /* add pf vsi to entries */
- mirr_rule->entries[j] = pf->main_vsi_seid;
- j++;
- }
- if (j == 0) {
- PMD_DRV_LOG(ERR, "pool is not specified.");
- rte_free(mirr_rule);
- return -EINVAL;
- }
- /* egress and ingress in aq commands means from switch but not port */
- mirr_rule->rule_type =
- (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
- I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
- I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
- break;
- case ETH_MIRROR_UPLINK_PORT:
- /* egress and ingress in aq commands means from switch but not port*/
- mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
- break;
- case ETH_MIRROR_DOWNLINK_PORT:
- mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
- break;
- default:
- PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
- mirror_conf->rule_type);
- rte_free(mirr_rule);
- return -EINVAL;
- }
-
- /* If the dst_pool is equal to vf_num, consider it as PF */
- if (mirror_conf->dst_pool == pf->vf_num)
- dst_seid = pf->main_vsi_seid;
- else
- dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
-
- ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
- mirr_rule->rule_type, mirr_rule->entries,
- j, &rule_id);
- if (ret < 0) {
- PMD_DRV_LOG(ERR,
- "failed to add mirror rule: ret = %d, aq_err = %d.",
- ret, hw->aq.asq_last_status);
- rte_free(mirr_rule);
- return -ENOSYS;
- }
-
- mirr_rule->index = sw_id;
- mirr_rule->num_entries = j;
- mirr_rule->id = rule_id;
- mirr_rule->dst_vsi_seid = dst_seid;
-
- if (parent)
- TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
- else
- TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
-
- pf->nb_mirror_rule++;
- return 0;
-}
-
-/**
- * i40e_mirror_rule_reset
- * @dev: pointer to the device
- * @sw_id: mirror rule's sw_id
- *
- * reset a mirror rule.
- *
- **/
-static int
-i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
-{
- struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct i40e_mirror_rule *it, *mirr_rule = NULL;
- uint16_t seid;
- int ret;
-
- PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
-
- seid = pf->main_vsi->veb->seid;
-
- TAILQ_FOREACH(it, &pf->mirror_list, rules) {
- if (sw_id == it->index) {
- mirr_rule = it;
- break;
- }
- }
- if (mirr_rule) {
- ret = i40e_aq_del_mirror_rule(hw, seid,
- mirr_rule->rule_type,
- mirr_rule->entries,
- mirr_rule->num_entries, mirr_rule->id);
- if (ret < 0) {
- PMD_DRV_LOG(ERR,
- "failed to remove mirror rule: status = %d, aq_err = %d.",
- ret, hw->aq.asq_last_status);
- return -ENOSYS;
- }
- TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
- rte_free(mirr_rule);
- pf->nb_mirror_rule--;
- } else {
- PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
- return -ENOENT;
- }
- return 0;
-}
-
static uint64_t
i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
{
rte_eth_linkstatus_get(dev, &link);
switch (link.link_speed) {
- case ETH_SPEED_NUM_40G:
- case ETH_SPEED_NUM_25G:
+ case RTE_ETH_SPEED_NUM_40G:
+ case RTE_ETH_SPEED_NUM_25G:
tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
break;
- case ETH_SPEED_NUM_10G:
+ case RTE_ETH_SPEED_NUM_10G:
tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
break;
- case ETH_SPEED_NUM_1G:
+ case RTE_ETH_SPEED_NUM_1G:
tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
break;
else
*tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
- if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+ if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
dcb_cfg->pfc.willing = 0;
dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
dcb_cfg->pfc.pfcenable = *tc_map;
* LLDP MIB change event.
*/
if (sw_dcb == TRUE) {
- if (i40e_need_stop_lldp(dev)) {
- ret = i40e_aq_stop_lldp(hw, TRUE, NULL);
- if (ret != I40E_SUCCESS)
- PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
- }
+ /* Stopping LLDP is necessary for DPDK, but stopping it makes
+ * DCB initialization fail: i40e_init_dcb() can only succeed
+ * while the LLDP agent is running. So start LLDP before DCB
+ * init and stop it again once initialization is done.
+ */
+ ret = i40e_aq_start_lldp(hw, true, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_INIT_LOG(DEBUG, "Failed to start lldp");
- ret = i40e_init_dcb(hw);
+ ret = i40e_init_dcb(hw, true);
/* If lldp agent is stopped, the return value from
* i40e_init_dcb we expect is failure with I40E_AQ_RC_EPERM
* adminq status. Otherwise, it should return success.
ret, hw->aq.asq_last_status);
return -ENOTSUP;
}
+
+ if (i40e_need_stop_lldp(dev)) {
+ ret = i40e_aq_stop_lldp(hw, true, true, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
+ }
} else {
- ret = i40e_aq_start_lldp(hw, NULL);
+ ret = i40e_aq_start_lldp(hw, true, NULL);
if (ret != I40E_SUCCESS)
PMD_INIT_LOG(DEBUG, "Failed to start lldp");
- ret = i40e_init_dcb(hw);
+ ret = i40e_init_dcb(hw, true);
if (!ret) {
if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
PMD_INIT_LOG(ERR,
uint16_t bsf, tc_mapping;
int i, j = 0;
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
else
dcb_info->nb_tcs = 1;
dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
}
j++;
- } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
+ } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, RTE_ETH_MAX_VMDQ_POOL));
return 0;
}
i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t msix_intr;
- msix_intr = intr_handle->intr_vec[queue_id];
+ msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);
if (msix_intr == I40E_MISC_VEC_ID)
I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
I40E_PFINT_DYN_CTL0_INTENA_MASK |
I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
I40E_WRITE_FLUSH(hw);
- rte_intr_ack(&pci_dev->intr_handle);
+ rte_intr_ack(pci_dev->intr_handle);
return 0;
}
i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t msix_intr;
- msix_intr = intr_handle->intr_vec[queue_id];
+ msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);
if (msix_intr == I40E_MISC_VEC_ID)
I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
uint32_t value = 0;
uint32_t i;
- if (!info || !info->length || !info->data)
- return -EINVAL;
-
if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
is_sfp = true;
}
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
- addr, offset, 1, &value, NULL);
+ addr, 1, offset, &value, NULL);
if (status)
return -EIO;
data[i] = (uint8_t)value;
}
static int
-i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
{
- struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct rte_eth_dev_data *dev_data = pf->dev_data;
- uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
- int ret = 0;
-
- /* check if mtu is within the allowed range */
- if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
- return -EINVAL;
-
/* mtu setting is forbidden if port is started */
- if (dev_data->dev_started) {
+ if (dev->data->dev_started != 0) {
PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
- dev_data->port_id);
+ dev->data->port_id);
return -EBUSY;
}
- if (frame_size > RTE_ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
- else
- dev_data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
- dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
- return ret;
+ return 0;
}
/* Restore ethertype filter */
}
}
-/* Restore rss filter */
-static inline void
-i40e_rss_filter_restore(struct i40e_pf *pf)
-{
- struct i40e_rte_flow_rss_conf *conf =
- &pf->rss_info;
- if (conf->conf.queue_num)
- i40e_config_rss_filter(pf, conf, TRUE);
-}
-
static void
i40e_filter_restore(struct i40e_pf *pf)
{
i40e_ethertype_filter_restore(pf);
i40e_tunnel_filter_restore(pf);
i40e_fdir_filter_restore(pf);
- i40e_rss_filter_restore(pf);
+ (void)i40e_hash_filter_restore(pf);
}
bool
}
}
name[strlen(name) - 1] = '\0';
+ PMD_DRV_LOG(INFO, "name = %s", name);
if (!strcmp(name, "GTPC"))
new_pctype =
i40e_find_customized_pctype(pf,
new_pctype =
i40e_find_customized_pctype(pf,
I40E_CUSTOMIZED_GTPU);
+ else if (!strcmp(name, "IPV4_L2TPV3"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_IPV4_L2TPV3);
+ else if (!strcmp(name, "IPV6_L2TPV3"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_IPV6_L2TPV3);
+ else if (!strcmp(name, "IPV4_ESP"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_ESP_IPV4);
+ else if (!strcmp(name, "IPV6_ESP"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_ESP_IPV6);
+ else if (!strcmp(name, "IPV4_UDP_ESP"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_ESP_IPV4_UDP);
+ else if (!strcmp(name, "IPV6_UDP_ESP"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_ESP_IPV6_UDP);
+ else if (!strcmp(name, "IPV4_AH"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_AH_IPV4);
+ else if (!strcmp(name, "IPV6_AH"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_AH_IPV6);
if (new_pctype) {
if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
new_pctype->pctype = pctype_value;
continue;
memset(name, 0, sizeof(name));
strcpy(name, proto[n].name);
+ PMD_DRV_LOG(INFO, "name = %s", name);
if (!strncasecmp(name, "PPPOE", 5))
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_L2_ETHER_PPPOE;
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_TUNNEL_GTPU;
in_tunnel = true;
+ } else if (!strncasecmp(name, "ESP", 3)) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_TUNNEL_ESP;
+ in_tunnel = true;
} else if (!strncasecmp(name, "GRENAT", 6)) {
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_TUNNEL_GRENAT;
in_tunnel = true;
} else if (!strncasecmp(name, "L2TPV2CTL", 9) ||
- !strncasecmp(name, "L2TPV2", 6)) {
+ !strncasecmp(name, "L2TPV2", 6) ||
+ !strncasecmp(name, "L2TPV3", 6)) {
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_TUNNEL_L2TP;
in_tunnel = true;
ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
ptype_num, 0);
if (ret)
- PMD_DRV_LOG(ERR, "Failed to update mapping table.");
+ PMD_DRV_LOG(ERR, "Failed to update ptype mapping table.");
rte_free(ptype_mapping);
rte_free(ptype);
}
}
+ /* Check if ESP is supported. */
+ for (i = 0; i < proto_num; i++) {
+ if (!strncmp(proto[i].name, "ESP", 3)) {
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
+ pf->esp_support = true;
+ else
+ pf->esp_support = false;
+ break;
+ }
+ }
+
/* Update customized pctype info */
ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
proto_num, proto, op);
struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+ struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
if (pf->support_multi_driver) {
PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
return ret;
}
-int
-i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
- const struct rte_flow_action_rss *in)
-{
- if (in->key_len > RTE_DIM(out->key) ||
- in->queue_num > RTE_DIM(out->queue))
- return -EINVAL;
- if (!in->key && in->key_len)
- return -EINVAL;
- out->conf = (struct rte_flow_action_rss){
- .func = in->func,
- .level = in->level,
- .types = in->types,
- .key_len = in->key_len,
- .queue_num = in->queue_num,
- .queue = memcpy(out->queue, in->queue,
- sizeof(*in->queue) * in->queue_num),
- };
- if (in->key)
- out->conf.key = memcpy(out->key, in->key, in->key_len);
- return 0;
-}
-
-int
-i40e_action_rss_same(const struct rte_flow_action_rss *comp,
- const struct rte_flow_action_rss *with)
-{
- return (comp->func == with->func &&
- comp->level == with->level &&
- comp->types == with->types &&
- comp->key_len == with->key_len &&
- comp->queue_num == with->queue_num &&
- !memcmp(comp->key, with->key, with->key_len) &&
- !memcmp(comp->queue, with->queue,
- sizeof(*with->queue) * with->queue_num));
-}
-
-int
-i40e_config_rss_filter(struct i40e_pf *pf,
- struct i40e_rte_flow_rss_conf *conf, bool add)
-{
- struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- uint32_t i, lut = 0;
- uint16_t j, num;
- struct rte_eth_rss_conf rss_conf = {
- .rss_key = conf->conf.key_len ?
- (void *)(uintptr_t)conf->conf.key : NULL,
- .rss_key_len = conf->conf.key_len,
- .rss_hf = conf->conf.types,
- };
- struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
-
- if (!add) {
- if (i40e_action_rss_same(&rss_info->conf, &conf->conf)) {
- i40e_pf_disable_rss(pf);
- memset(rss_info, 0,
- sizeof(struct i40e_rte_flow_rss_conf));
- return 0;
- }
- return -EINVAL;
- }
-
- /* If both VMDQ and RSS enabled, not all of PF queues are configured.
- * It's necessary to calculate the actual PF queues that are configured.
- */
- if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
- num = i40e_pf_calc_configured_queues_num(pf);
- else
- num = pf->dev_data->nb_rx_queues;
-
- num = RTE_MIN(num, conf->conf.queue_num);
- PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
- num);
-
- if (num == 0) {
- PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
- return -ENOTSUP;
- }
-
- /* Fill in redirection table */
- for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
- if (j == num)
- j = 0;
- lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 <<
- hw->func_caps.rss_table_entry_width) - 1));
- if ((i & 3) == 3)
- I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
- }
-
- if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
- i40e_pf_disable_rss(pf);
- return 0;
- }
- if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
- (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
- /* Random default keys */
- static uint32_t rss_key_default[] = {0x6b793944,
- 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
- 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
- 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
-
- rss_conf.rss_key = (uint8_t *)rss_key_default;
- rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
- sizeof(uint32_t);
- PMD_DRV_LOG(INFO,
- "No valid RSS key config for i40e, using default\n");
- }
-
- i40e_hw_rss_hash_set(pf, &rss_conf);
-
- if (i40e_rss_conf_init(rss_info, &conf->conf))
- return -EINVAL;
-
- return 0;
-}
-
-RTE_INIT(i40e_init_log)
-{
- i40e_logtype_init = rte_log_register("pmd.net.i40e.init");
- if (i40e_logtype_init >= 0)
- rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
- i40e_logtype_driver = rte_log_register("pmd.net.i40e.driver");
- if (i40e_logtype_driver >= 0)
- rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
-
-#ifdef RTE_LIBRTE_I40E_DEBUG_RX
- i40e_logtype_rx = rte_log_register("pmd.net.i40e.rx");
- if (i40e_logtype_rx >= 0)
- rte_log_set_level(i40e_logtype_rx, RTE_LOG_DEBUG);
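+/* Register driver log types; Rx/Tx datapath logs are compiled only into debug builds. */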
+RTE_LOG_REGISTER_SUFFIX(i40e_logtype_init, init, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(i40e_logtype_driver, driver, NOTICE);
+#ifdef RTE_ETHDEV_DEBUG_RX
+RTE_LOG_REGISTER_SUFFIX(i40e_logtype_rx, rx, DEBUG);
#endif
-
-#ifdef RTE_LIBRTE_I40E_DEBUG_TX
- i40e_logtype_tx = rte_log_register("pmd.net.i40e.tx");
- if (i40e_logtype_tx >= 0)
- rte_log_set_level(i40e_logtype_tx, RTE_LOG_DEBUG);
-#endif
-
-#ifdef RTE_LIBRTE_I40E_DEBUG_TX_FREE
- i40e_logtype_tx_free = rte_log_register("pmd.net.i40e.tx_free");
- if (i40e_logtype_tx_free >= 0)
- rte_log_set_level(i40e_logtype_tx_free, RTE_LOG_DEBUG);
+#ifdef RTE_ETHDEV_DEBUG_TX
+RTE_LOG_REGISTER_SUFFIX(i40e_logtype_tx, tx, DEBUG);
#endif
-}
RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
ETH_I40E_FLOATING_VEB_ARG "=1"
ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
- ETH_I40E_SUPPORT_MULTI_DRIVER "=1"
- ETH_I40E_USE_LATEST_VEC "=0|1");
+ ETH_I40E_SUPPORT_MULTI_DRIVER "=1");