/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
+#include <rte_hash_crc.h>
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
+static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
+static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_dev_configure(struct rte_eth_dev *dev);
static int ixgbe_dev_start(struct rte_eth_dev *dev);
static void ixgbe_dev_stop(struct rte_eth_dev *dev);
static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
+static int is_ixgbe_pmd(const char *driver_name);
/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
ether_addr * mac_addr, uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
-static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
- uint16_t rx_mask, uint8_t on);
-static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
-static int ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
-static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
- uint64_t pool_mask, uint8_t vlan_on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on);
static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
uint16_t queue_idx, uint16_t tx_rate);
-static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
- uint16_t tx_rate, uint64_t q_msk);
static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
 struct ether_addr *mac_addr,
 uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
-static int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
- struct rte_eth_syn_filter *filter,
- bool add);
static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
struct rte_eth_syn_filter *filter);
static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
 enum rte_filter_op filter_op,
 void *arg);
static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
struct ixgbe_5tuple_filter *filter);
-static int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
- struct rte_eth_ntuple_filter *filter,
- bool add);
static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *filter);
-static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
- struct rte_eth_ethertype_filter *filter,
- bool add);
static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
+static int ixgbe_filter_restore(struct rte_eth_dev *dev);
+static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
/*
* Define VF Stats MACRO for Non "cleared on read" register
.uc_all_hash_table_set = ixgbe_uc_all_hash_table_set,
.mirror_rule_set = ixgbe_mirror_rule_set,
.mirror_rule_reset = ixgbe_mirror_rule_reset,
- .set_vf_rx_mode = ixgbe_set_pool_rx_mode,
- .set_vf_rx = ixgbe_set_pool_rx,
- .set_vf_tx = ixgbe_set_pool_tx,
- .set_vf_vlan_filter = ixgbe_set_pool_vlan_filter,
.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
- .set_vf_rate_limit = ixgbe_set_vf_rate_limit,
.reta_update = ixgbe_dev_rss_reta_update,
.reta_query = ixgbe_dev_rss_reta_query,
#ifdef RTE_NIC_BYPASS
/* enable support intr */
ixgbe_enable_intr(eth_dev);
+ /* initialize filter info */
+ memset(filter_info, 0,
+ sizeof(struct ixgbe_filter_info));
+
/* initialize 5tuple filter list */
TAILQ_INIT(&filter_info->fivetuple_list);
- memset(filter_info->fivetuple_mask, 0,
- sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+
+ /* initialize flow director filter list & hash */
+ ixgbe_fdir_filter_init(eth_dev);
+
+ /* initialize l2 tunnel filter list & hash */
+ ixgbe_l2_tn_filter_init(eth_dev);
+
+ TAILQ_INIT(&filter_ntuple_list);
+ TAILQ_INIT(&filter_ethertype_list);
+ TAILQ_INIT(&filter_syn_list);
+ TAILQ_INIT(&filter_fdir_list);
+ TAILQ_INIT(&filter_l2_tunnel_list);
+ TAILQ_INIT(&ixgbe_flow_list);
return 0;
}
rte_free(eth_dev->data->hash_mac_addrs);
eth_dev->data->hash_mac_addrs = NULL;
+ /* remove all the fdir filters & hash */
+ ixgbe_fdir_filter_uninit(eth_dev);
+
+ /* remove all the L2 tunnel filters & hash */
+ ixgbe_l2_tn_filter_uninit(eth_dev);
+
+ /* Remove all ntuple filters of the device */
+ ixgbe_ntuple_filter_uninit(eth_dev);
+
+ /* clear all the filters list */
+ ixgbe_filterlist_flush();
+
+ return 0;
+}
+
+static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+ struct ixgbe_5tuple_filter *p_5tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
+ TAILQ_REMOVE(&filter_info->fivetuple_list,
+ p_5tuple,
+ entries);
+ rte_free(p_5tuple);
+ }
+ memset(filter_info->fivetuple_mask, 0,
+ sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+
+ return 0;
+}
+
+static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
+ struct ixgbe_fdir_filter *fdir_filter;
+
+ if (fdir_info->hash_map)
+ rte_free(fdir_info->hash_map);
+ if (fdir_info->hash_handle)
+ rte_hash_free(fdir_info->hash_handle);
+
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list,
+ fdir_filter,
+ entries);
+ rte_free(fdir_filter);
+ }
+
+ return 0;
+}
+
+static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
+ struct ixgbe_l2_tn_filter *l2_tn_filter;
+
+ if (l2_tn_info->hash_map)
+ rte_free(l2_tn_info->hash_map);
+ if (l2_tn_info->hash_handle)
+ rte_hash_free(l2_tn_info->hash_handle);
+
+ while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+ TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
+ l2_tn_filter,
+ entries);
+ rte_free(l2_tn_filter);
+ }
+
+ return 0;
+}
+
+static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
+ char fdir_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters fdir_hash_params = {
+ .name = fdir_hash_name,
+ .entries = IXGBE_MAX_FDIR_FILTER_NUM,
+ .key_len = sizeof(union ixgbe_atr_input),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ TAILQ_INIT(&fdir_info->fdir_list);
+ snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+ "fdir_%s", eth_dev->data->name);
+ fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
+ if (!fdir_info->hash_handle) {
+ PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+ return -EINVAL;
+ }
+ fdir_info->hash_map = rte_zmalloc("ixgbe",
+ sizeof(struct ixgbe_fdir_filter *) *
+ IXGBE_MAX_FDIR_FILTER_NUM,
+ 0);
+ if (!fdir_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir hash map!");
+ return -ENOMEM;
+ }
+ fdir_info->mask_added = FALSE;
+
return 0;
}
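
/*
 * Editor's illustrative sketch (not part of the patch): the fdir table
 * pairs an rte_hash with the parallel hash_map[] pointer array allocated
 * above.  rte_hash_add_key() and rte_hash_lookup() return the same slot
 * index for a given key, so hash_map[index] gives O(1) key-to-filter
 * retrieval.  The hypothetical helper below mirrors the l2_tn lookup
 * helper later in this patch; the real fdir lookup lives in ixgbe_fdir.c.
 */
static inline struct ixgbe_fdir_filter *
example_fdir_filter_lookup(struct ixgbe_hw_fdir_info *fdir_info,
			   union ixgbe_atr_input *key)
{
	int ret;

	/* a negative return means the key was never added */
	ret = rte_hash_lookup(fdir_info->hash_handle, (const void *)key);
	if (ret < 0)
		return NULL;

	return fdir_info->hash_map[ret];
}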
+static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
+ char l2_tn_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters l2_tn_hash_params = {
+ .name = l2_tn_hash_name,
+ .entries = IXGBE_MAX_L2_TN_FILTER_NUM,
+ .key_len = sizeof(struct ixgbe_l2_tn_key),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ TAILQ_INIT(&l2_tn_info->l2_tn_list);
+ snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
+ "l2_tn_%s", eth_dev->data->name);
+ l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
+ if (!l2_tn_info->hash_handle) {
+ PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
+ return -EINVAL;
+ }
+ l2_tn_info->hash_map = rte_zmalloc("ixgbe",
+ sizeof(struct ixgbe_l2_tn_filter *) *
+ IXGBE_MAX_L2_TN_FILTER_NUM,
+ 0);
+ if (!l2_tn_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for L2 TN hash map!");
+ return -ENOMEM;
+ }
+ l2_tn_info->e_tag_en = FALSE;
+ l2_tn_info->e_tag_fwd_en = FALSE;
+ l2_tn_info->e_tag_ether_type = DEFAULT_ETAG_ETYPE;
+
+ return 0;
+}
/*
* Negotiate mailbox API version with the PF.
* After reset API version is always set to the basic one (ixgbe_mbox_api_10).
for (vf = 0; vf < pci_dev->max_vfs; vf++)
for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
if (vfinfo[vf].tx_rate[idx] != 0)
- ixgbe_set_vf_rate_limit(dev, vf,
+ rte_pmd_ixgbe_set_vf_rate_limit(
+ dev->data->port_id, vf,
vfinfo[vf].tx_rate[idx],
1 << idx);
}
/* resume enabled intr since hw reset */
ixgbe_enable_intr(dev);
+ ixgbe_l2_tunnel_conf(dev);
+ ixgbe_filter_restore(dev);
return 0;
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
- struct ixgbe_filter_info *filter_info =
- IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next;
struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int vf;
memset(&link, 0, sizeof(link));
rte_ixgbe_dev_atomic_write_link_status(dev, &link);
- /* Remove all ntuple filters of the device */
- for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
- p_5tuple != NULL; p_5tuple = p_5tuple_next) {
- p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
- TAILQ_REMOVE(&filter_info->fivetuple_list,
- p_5tuple, entries);
- rte_free(p_5tuple);
- }
- memset(filter_info->fivetuple_mask, 0,
- sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
-
if (!rte_intr_allow_others(intr_handle))
/* resume to the default handler */
rte_intr_callback_register(intr_handle,
uint32_t delta_gprc = 0;
unsigned i;
/* Workaround for RX byte count not including CRC bytes when CRC
-+ * strip is enabled. CRC bytes are removed from counters when crc_strip
+ * strip is enabled. CRC bytes are removed from counters when crc_strip
* is disabled.
-+ */
+ */
int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
IXGBE_HLREG0_RXCRCSTRP);
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
int64_t timeout;
struct rte_eth_link link;
- int intr_enable_delay = false;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
ixgbe_dev_link_status_print(dev);
-
- intr_enable_delay = true;
- }
-
- if (intr_enable_delay) {
+ intr->mask_original = intr->mask;
+ /* only disable lsc interrupt */
+ intr->mask &= ~IXGBE_EIMS_LSC;
if (rte_eal_alarm_set(timeout * 1000,
ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
PMD_DRV_LOG(ERR, "Error setting alarm");
- } else {
- PMD_DRV_LOG(DEBUG, "enable intr immediately");
- ixgbe_enable_intr(dev);
- rte_intr_enable(intr_handle);
+ else
+ intr->mask = intr->mask_original;
}
+ PMD_DRV_LOG(DEBUG, "enable intr immediately");
+ ixgbe_enable_intr(dev);
+ rte_intr_enable(intr_handle);
return 0;
}
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t eicr;
+ ixgbe_disable_intr(hw);
+
eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
if (eicr & IXGBE_EICR_MAILBOX)
ixgbe_pf_mbx_process(dev);
intr->flags &= ~IXGBE_FLAG_MACSEC;
}
+ /* restore original mask */
+ intr->mask = intr->mask_original;
+ intr->mask_original = 0;
+
PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
ixgbe_enable_intr(dev);
rte_intr_enable(intr_handle);
ixgbe_add_rar(dev, addr, 0, 0);
}
+static int
+is_ixgbe_pmd(const char *driver_name)
+{
+ if (!strstr(driver_name, "ixgbe"))
+ return -ENOTSUP;
+
+ if (strstr(driver_name, "ixgbe_vf"))
+ return -ENOTSUP;
+
+ return 0;
+}
+
int
rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
struct ether_addr *mac_addr)
dev = &rte_eth_devices[port];
rte_eth_dev_info_get(port, &dev_info);
+ if (is_ixgbe_pmd(dev_info.driver_name) != 0)
+ return -ENOTSUP;
+
if (vf >= dev_info.max_vfs)
return -EINVAL;
}
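
/*
 * Editor's illustrative sketch (not part of the patch): with the
 * is_ixgbe_pmd() guard above, the rte_pmd_ixgbe_* entry points validate
 * the port themselves, so an application only checks return codes.
 * Assumes port 0 is bound to net_ixgbe with at least one VF; the MAC
 * value is arbitrary.
 */
static int
example_assign_vf0_mac(void)
{
	struct ether_addr mac = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};

	/* -ENOTSUP if port 0 is not an ixgbe PF, -EINVAL for a bad VF id */
	return rte_pmd_ixgbe_set_vf_mac_addr(0, 0, &mac);
}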
static int
-ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
+ixgbe_vt_check(struct ixgbe_hw *hw)
{
uint32_t reg_val;
- /* we only need to do this if VMDq is enabled */
+ /* if Virtualization Technology is enabled */
reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
- PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
+ PMD_INIT_LOG(ERR, "VT must be enabled for this setting");
return -1;
}
return new_val;
}
-static int
-ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
- uint16_t rx_mask, uint8_t on)
-{
- int val = 0;
-
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
-
- if (hw->mac.type == ixgbe_mac_82598EB) {
- PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
- " on 82599 hardware and newer");
- return -ENOTSUP;
- }
- if (ixgbe_vmdq_mode_check(hw) < 0)
- return -ENOTSUP;
-
- val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
-
- if (on)
- vmolr |= val;
- else
- vmolr &= ~val;
-
- IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
-
- return 0;
-}
-
-static int
-ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
-{
- uint32_t reg, addr;
- uint32_t val;
- const uint8_t bit1 = 0x1;
-
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- if (ixgbe_vmdq_mode_check(hw) < 0)
- return -ENOTSUP;
-
- if (pool >= ETH_64_POOLS)
- return -EINVAL;
-
- /* for pool >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
- if (pool >= 32) {
- addr = IXGBE_VFRE(1);
- val = bit1 << (pool - 32);
- } else {
- addr = IXGBE_VFRE(0);
- val = bit1 << pool;
- }
-
- reg = IXGBE_READ_REG(hw, addr);
-
- if (on)
- reg |= val;
- else
- reg &= ~val;
-
- IXGBE_WRITE_REG(hw, addr, reg);
-
- return 0;
-}
-
-static int
-ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
-{
- uint32_t reg, addr;
- uint32_t val;
- const uint8_t bit1 = 0x1;
-
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- if (ixgbe_vmdq_mode_check(hw) < 0)
- return -ENOTSUP;
-
- if (pool >= ETH_64_POOLS)
- return -EINVAL;
-
- /* for pool >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
- if (pool >= 32) {
- addr = IXGBE_VFTE(1);
- val = bit1 << (pool - 32);
- } else {
- addr = IXGBE_VFTE(0);
- val = bit1 << pool;
- }
-
- reg = IXGBE_READ_REG(hw, addr);
-
- if (on)
- reg |= val;
- else
- reg &= ~val;
-
- IXGBE_WRITE_REG(hw, addr, reg);
-
- return 0;
-}
-
-static int
-ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
- uint64_t pool_mask, uint8_t vlan_on)
-{
- int ret = 0;
- uint16_t pool_idx;
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- if (ixgbe_vmdq_mode_check(hw) < 0)
- return -ENOTSUP;
- for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
- if (pool_mask & ((uint64_t)(1ULL << pool_idx))) {
- ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx,
- vlan_on, false);
- if (ret < 0)
- return ret;
- }
- }
-
- return ret;
-}
int
rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
dev = &rte_eth_devices[port];
rte_eth_dev_info_get(port, &dev_info);
+ if (is_ixgbe_pmd(dev_info.driver_name) != 0)
+ return -ENOTSUP;
+
if (vf >= dev_info.max_vfs)
return -EINVAL;
dev = &rte_eth_devices[port];
rte_eth_dev_info_get(port, &dev_info);
+ if (is_ixgbe_pmd(dev_info.driver_name) != 0)
+ return -ENOTSUP;
+
if (vf >= dev_info.max_vfs)
return -EINVAL;
dev = &rte_eth_devices[port];
rte_eth_dev_info_get(port, &dev_info);
+ if (is_ixgbe_pmd(dev_info.driver_name) != 0)
+ return -ENOTSUP;
+
if (vf >= dev_info.max_vfs)
return -EINVAL;
- if (vlan_id > 4095)
+ if (vlan_id > ETHER_MAX_VLAN_ID)
return -EINVAL;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw *hw;
uint32_t ctrl;
struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
+ rte_eth_dev_info_get(port, &dev_info);
+
+ if (is_ixgbe_pmd(dev_info.driver_name) != 0)
+ return -ENOTSUP;
if (on > 1)
return -EINVAL;
int i;
int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
+ rte_eth_dev_info_get(port, &dev_info);
+
+ if (is_ixgbe_pmd(dev_info.driver_name) != 0)
+ return -ENOTSUP;
if (on > 1)
return -EINVAL;
dev = &rte_eth_devices[port];
rte_eth_dev_info_get(port, &dev_info);
+ if (is_ixgbe_pmd(dev_info.driver_name) != 0)
+ return -ENOTSUP;
+
/* only support VF's 0 to 63 */
if ((vf >= dev_info.max_vfs) || (vf > 63))
return -EINVAL;
dev = &rte_eth_devices[port];
rte_eth_dev_info_get(port, &dev_info);
+ if (is_ixgbe_pmd(dev_info.driver_name) != 0)
+ return -ENOTSUP;
+
if (vf >= dev_info.max_vfs)
return -EINVAL;
return 0;
}
-#define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */
-#define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */
-#define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */
-#define IXGBE_MRCTL_VLME 0x08 /* VLAN Mirroring. */
-#define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
- ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
- ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
-
-static int
-ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
- struct rte_eth_mirror_conf *mirror_conf,
- uint8_t rule_id, uint8_t on)
+int
+rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf,
+			    uint16_t rx_mask, uint8_t on)
{
- uint32_t mr_ctl, vlvf;
- uint32_t mp_lsb = 0;
- uint32_t mv_msb = 0;
- uint32_t mv_lsb = 0;
- uint32_t mp_msb = 0;
- uint8_t i = 0;
- int reg_index = 0;
- uint64_t vlan_mask = 0;
+ int val = 0;
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct ixgbe_hw *hw;
+ uint32_t vmolr;
- const uint8_t pool_mask_offset = 32;
- const uint8_t vlan_mask_offset = 32;
- const uint8_t dst_pool_offset = 8;
- const uint8_t rule_mr_offset = 4;
- const uint8_t mirror_rule_mask = 0x0F;
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
- struct ixgbe_mirror_info *mr_info =
- (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint8_t mirror_type = 0;
+ dev = &rte_eth_devices[port];
+ rte_eth_dev_info_get(port, &dev_info);
- if (ixgbe_vmdq_mode_check(hw) < 0)
+ if (is_ixgbe_pmd(dev_info.driver_name) != 0)
return -ENOTSUP;
- if (rule_id >= IXGBE_MAX_MIRROR_RULES)
+ if (vf >= dev_info.max_vfs)
return -EINVAL;
- if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
- PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
+ " on 82599 hardware and newer");
+ return -ENOTSUP;
+ }
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
+
+ if (on)
+ vmolr |= val;
+ else
+ vmolr &= ~val;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ uint32_t reg, addr;
+ uint32_t val;
+ const uint8_t bit1 = 0x1;
+ struct ixgbe_hw *hw;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ rte_eth_dev_info_get(port, &dev_info);
+
+ if (is_ixgbe_pmd(dev_info.driver_name) != 0)
+ return -ENOTSUP;
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ /* for vf >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
+ if (vf >= 32) {
+ addr = IXGBE_VFRE(1);
+ val = bit1 << (vf - 32);
+ } else {
+ addr = IXGBE_VFRE(0);
+ val = bit1 << vf;
+ }
+
+ reg = IXGBE_READ_REG(hw, addr);
+
+ if (on)
+ reg |= val;
+ else
+ reg &= ~val;
+
+ IXGBE_WRITE_REG(hw, addr, reg);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ uint32_t reg, addr;
+ uint32_t val;
+ const uint8_t bit1 = 0x1;
+
+ struct ixgbe_hw *hw;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ rte_eth_dev_info_get(port, &dev_info);
+
+ if (is_ixgbe_pmd(dev_info.driver_name) != 0)
+ return -ENOTSUP;
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ /* for vf >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
+ if (vf >= 32) {
+ addr = IXGBE_VFTE(1);
+ val = bit1 << (vf - 32);
+ } else {
+ addr = IXGBE_VFTE(0);
+ val = bit1 << vf;
+ }
+
+ reg = IXGBE_READ_REG(hw, addr);
+
+ if (on)
+ reg |= val;
+ else
+ reg &= ~val;
+
+ IXGBE_WRITE_REG(hw, addr, reg);
+
+ return 0;
+}
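
/*
 * Editor's illustrative sketch (not part of the patch): enabling both
 * traffic directions for one VF through the two new entry points above.
 * PFVFRE/PFVFTE are 32-bit registers, so VFs 0-31 land in register
 * index 0 and VFs 32-63 in index 1, exactly as the bit math above handles.
 */
static int
example_enable_vf_traffic(uint8_t port, uint16_t vf)
{
	int ret;

	ret = rte_pmd_ixgbe_set_vf_rx(port, vf, 1);
	if (ret != 0)
		return ret;

	return rte_pmd_ixgbe_set_vf_tx(port, vf, 1);
}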
+
+int
+rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
+ uint64_t vf_mask, uint8_t vlan_on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ int ret = 0;
+ uint16_t vf_idx;
+ struct ixgbe_hw *hw;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ rte_eth_dev_info_get(port, &dev_info);
+
+ if (is_ixgbe_pmd(dev_info.driver_name) != 0)
+ return -ENOTSUP;
+
+ if ((vlan > ETHER_MAX_VLAN_ID) || (vf_mask == 0))
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ for (vf_idx = 0; vf_idx < 64; vf_idx++) {
+ if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
+ ret = hw->mac.ops.set_vfta(hw, vlan, vf_idx,
+ vlan_on, false);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return ret;
+}
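
/*
 * Editor's illustrative sketch (not part of the patch): vf_mask is a
 * 64-bit bitmap with one bit per VF.  This hypothetical call adds
 * VLAN 100 to the VLAN filter table for VFs 0 and 3 only.
 */
static int
example_vlan_filter_two_vfs(uint8_t port)
{
	uint64_t vf_mask = (1ULL << 0) | (1ULL << 3);

	return rte_pmd_ixgbe_set_vf_vlan_filter(port, 100, vf_mask, 1);
}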
+
+int rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct ixgbe_hw *hw;
+ struct ixgbe_vf_info *vfinfo;
+ struct rte_eth_link link;
+ uint8_t nb_q_per_pool;
+ uint32_t queue_stride;
+ uint32_t queue_idx, idx = 0, vf_idx;
+ uint32_t queue_end;
+ uint16_t total_rate = 0;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ rte_eth_dev_info_get(port, &dev_info);
+ rte_eth_link_get_nowait(port, &link);
+
+ if (is_ixgbe_pmd(dev_info.driver_name) != 0)
+ return -ENOTSUP;
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (tx_rate > link.link_speed)
+ return -EINVAL;
+
+ if (q_msk == 0)
+ return 0;
+
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+ queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+ queue_idx = vf * queue_stride;
+ queue_end = queue_idx + nb_q_per_pool - 1;
+ if (queue_end >= hw->mac.max_tx_queues)
+ return -EINVAL;
+
+ if (vfinfo) {
+ for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
+ if (vf_idx == vf)
+ continue;
+ for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
+ idx++)
+ total_rate += vfinfo[vf_idx].tx_rate[idx];
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ /* Store tx_rate for this vf. */
+ for (idx = 0; idx < nb_q_per_pool; idx++) {
+ if (((uint64_t)0x1 << idx) & q_msk) {
+ if (vfinfo[vf].tx_rate[idx] != tx_rate)
+ vfinfo[vf].tx_rate[idx] = tx_rate;
+ total_rate += tx_rate;
+ }
+ }
+
+ if (total_rate > dev->data->dev_link.link_speed) {
+ /* Reset the stored TX rate of the VF if it would exceed
+ * link speed.
+ */
+ memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
+ return -EINVAL;
+ }
+
+ /* Set RTTBCNRC of each queue/pool for vf X */
+ for (; queue_idx <= queue_end; queue_idx++) {
+ if (0x1 & q_msk)
+ ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
+ q_msk = q_msk >> 1;
+ }
+
+ return 0;
+}
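
/*
 * Editor's illustrative sketch (not part of the patch): q_msk selects
 * which of the VF's queues get the rate applied; bit n is the VF's n-th
 * queue.  tx_rate is in Mbps, matching link_speed above.  This
 * hypothetical call caps the first two queues of VF 1 at 100 Mbps each.
 */
static int
example_cap_vf1_queues(uint8_t port)
{
	uint64_t q_msk = 0x3;	/* the VF's queues 0 and 1 */

	return rte_pmd_ixgbe_set_vf_rate_limit(port, 1, 100, q_msk);
}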
+
+#define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */
+#define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */
+#define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */
+#define IXGBE_MRCTL_VLME 0x08 /* VLAN Mirroring. */
+#define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
+ ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
+ ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
+
+static int
+ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
+ struct rte_eth_mirror_conf *mirror_conf,
+ uint8_t rule_id, uint8_t on)
+{
+ uint32_t mr_ctl, vlvf;
+ uint32_t mp_lsb = 0;
+ uint32_t mv_msb = 0;
+ uint32_t mv_lsb = 0;
+ uint32_t mp_msb = 0;
+ uint8_t i = 0;
+ int reg_index = 0;
+ uint64_t vlan_mask = 0;
+
+ const uint8_t pool_mask_offset = 32;
+ const uint8_t vlan_mask_offset = 32;
+ const uint8_t dst_pool_offset = 8;
+ const uint8_t rule_mr_offset = 4;
+ const uint8_t mirror_rule_mask = 0x0F;
+
+ struct ixgbe_mirror_info *mr_info =
+ (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint8_t mirror_type = 0;
+
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ if (rule_id >= IXGBE_MAX_MIRROR_RULES)
+ return -EINVAL;
+
+ if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
+ PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
mirror_conf->rule_type);
return -EINVAL;
}
struct ixgbe_mirror_info *mr_info =
(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
- if (ixgbe_vmdq_mode_check(hw) < 0)
+ if (ixgbe_vt_check(hw) < 0)
return -ENOTSUP;
memset(&mr_info->mr_conf[rule_id], 0,
return 0;
}
-static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
- uint16_t tx_rate, uint64_t q_msk)
-{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vf_info *vfinfo =
- *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
- uint8_t nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
- uint32_t queue_stride =
- IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
- uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
- uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
- uint16_t total_rate = 0;
-
- if (queue_end >= hw->mac.max_tx_queues)
- return -EINVAL;
-
- if (vfinfo != NULL) {
- for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
- if (vf_idx == vf)
- continue;
- for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
- idx++)
- total_rate += vfinfo[vf_idx].tx_rate[idx];
- }
- } else
- return -EINVAL;
-
- /* Store tx_rate for this vf. */
- for (idx = 0; idx < nb_q_per_pool; idx++) {
- if (((uint64_t)0x1 << idx) & q_msk) {
- if (vfinfo[vf].tx_rate[idx] != tx_rate)
- vfinfo[vf].tx_rate[idx] = tx_rate;
- total_rate += tx_rate;
- }
- }
-
- if (total_rate > dev->data->dev_link.link_speed) {
- /*
- * Reset stored TX rate of the VF if it causes exceed
- * link speed.
- */
- memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
- return -EINVAL;
- }
-
- /* Set RTTBCNRC of each queue/pool for vf X */
- for (; queue_idx <= queue_end; queue_idx++) {
- if (0x1 & q_msk)
- ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
- q_msk = q_msk >> 1;
- }
-
- return 0;
-}
-
static void
ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
__attribute__((unused)) uint32_t index,
return -ENOTSUP;\
} while (0)
-static int
+int
ixgbe_syn_filter_set(struct rte_eth_dev *dev,
struct rte_eth_syn_filter *filter,
bool add)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t syn_info;
uint32_t synqf;
if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
return -EINVAL;
- synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+ syn_info = filter_info->syn_info;
if (add) {
- if (synqf & IXGBE_SYN_FILTER_ENABLE)
+ if (syn_info & IXGBE_SYN_FILTER_ENABLE)
return -EINVAL;
synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
 if (filter->hig_pri)
 synqf |= IXGBE_SYN_FILTER_SYNQFP;
 else
synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
} else {
- if (!(synqf & IXGBE_SYN_FILTER_ENABLE))
+ synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+ if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
return -ENOENT;
synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
}
+
+ filter_info->syn_info = synqf;
IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
IXGBE_WRITE_FLUSH(hw);
return 0;
}
-static inline enum ixgbe_5tuple_protocol
-convert_protocol_type(uint8_t protocol_value)
-{
- if (protocol_value == IPPROTO_TCP)
- return IXGBE_FILTER_PROTOCOL_TCP;
- else if (protocol_value == IPPROTO_UDP)
- return IXGBE_FILTER_PROTOCOL_UDP;
- else if (protocol_value == IPPROTO_SCTP)
- return IXGBE_FILTER_PROTOCOL_SCTP;
- else
- return IXGBE_FILTER_PROTOCOL_NONE;
+static inline enum ixgbe_5tuple_protocol
+convert_protocol_type(uint8_t protocol_value)
+{
+ if (protocol_value == IPPROTO_TCP)
+ return IXGBE_FILTER_PROTOCOL_TCP;
+ else if (protocol_value == IPPROTO_UDP)
+ return IXGBE_FILTER_PROTOCOL_UDP;
+ else if (protocol_value == IPPROTO_SCTP)
+ return IXGBE_FILTER_PROTOCOL_SCTP;
+ else
+ return IXGBE_FILTER_PROTOCOL_NONE;
+}
+
+/* inject a 5-tuple filter to HW */
+static inline void
+ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
+ struct ixgbe_5tuple_filter *filter)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i;
+ uint32_t ftqf, sdpqf;
+ uint32_t l34timir = 0;
+ uint8_t mask = 0xff;
+
+ i = filter->index;
+
+ sdpqf = (uint32_t)(filter->filter_info.dst_port <<
+ IXGBE_SDPQF_DSTPORT_SHIFT);
+ sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
+
+ ftqf = (uint32_t)(filter->filter_info.proto &
+ IXGBE_FTQF_PROTOCOL_MASK);
+ ftqf |= (uint32_t)((filter->filter_info.priority &
+ IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
+ if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
+ mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
+ if (filter->filter_info.dst_ip_mask == 0)
+ mask &= IXGBE_FTQF_DEST_ADDR_MASK;
+ if (filter->filter_info.src_port_mask == 0)
+ mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
+ if (filter->filter_info.dst_port_mask == 0)
+ mask &= IXGBE_FTQF_DEST_PORT_MASK;
+ if (filter->filter_info.proto_mask == 0)
+ mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
+ ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
+ ftqf |= IXGBE_FTQF_POOL_MASK_EN;
+ ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
+ IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
+ IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
+ IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
+
+ l34timir |= IXGBE_L34T_IMIR_RESERVE;
+ l34timir |= (uint32_t)(filter->queue <<
+ IXGBE_L34T_IMIR_QUEUE_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
}
/*
ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
struct ixgbe_5tuple_filter *filter)
{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
int i, idx, shift;
- uint32_t ftqf, sdpqf;
- uint32_t l34timir = 0;
- uint8_t mask = 0xff;
/*
* look for an unused 5tuple filter index,
return -ENOSYS;
}
- sdpqf = (uint32_t)(filter->filter_info.dst_port <<
- IXGBE_SDPQF_DSTPORT_SHIFT);
- sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
-
- ftqf = (uint32_t)(filter->filter_info.proto &
- IXGBE_FTQF_PROTOCOL_MASK);
- ftqf |= (uint32_t)((filter->filter_info.priority &
- IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
- if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
- mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
- if (filter->filter_info.dst_ip_mask == 0)
- mask &= IXGBE_FTQF_DEST_ADDR_MASK;
- if (filter->filter_info.src_port_mask == 0)
- mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
- if (filter->filter_info.dst_port_mask == 0)
- mask &= IXGBE_FTQF_DEST_PORT_MASK;
- if (filter->filter_info.proto_mask == 0)
- mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
- ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
- ftqf |= IXGBE_FTQF_POOL_MASK_EN;
- ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
-
- IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
- IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
- IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
- IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
+ ixgbe_inject_5tuple_filter(dev, filter);
- l34timir |= IXGBE_L34T_IMIR_RESERVE;
- l34timir |= (uint32_t)(filter->queue <<
- IXGBE_L34T_IMIR_QUEUE_SHIFT);
- IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
return 0;
}
* - On success, zero.
* - On failure, a negative value.
*/
-static int
+int
ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter,
bool add)
return ret;
}
-static inline int
-ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
- uint16_t ethertype)
-{
- int i;
-
- for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
- if (filter_info->ethertype_filters[i] == ethertype &&
- (filter_info->ethertype_mask & (1 << i)))
- return i;
- }
- return -1;
-}
-
-static inline int
-ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
- uint16_t ethertype)
-{
- int i;
-
- for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
- if (!(filter_info->ethertype_mask & (1 << i))) {
- filter_info->ethertype_mask |= 1 << i;
- filter_info->ethertype_filters[i] = ethertype;
- return i;
- }
- }
- return -1;
-}
-
-static inline int
-ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
- uint8_t idx)
-{
- if (idx >= IXGBE_MAX_ETQF_FILTERS)
- return -1;
- filter_info->ethertype_mask &= ~(1 << idx);
- filter_info->ethertype_filters[idx] = 0;
- return idx;
-}
-
-static int
+int
ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
struct rte_eth_ethertype_filter *filter,
bool add)
uint32_t etqf = 0;
uint32_t etqs = 0;
int ret;
+ struct ixgbe_ethertype_filter ethertype_filter;
if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
return -EINVAL;
}
if (add) {
- ret = ixgbe_ethertype_filter_insert(filter_info,
- filter->ether_type);
- if (ret < 0) {
- PMD_DRV_LOG(ERR, "ethertype filters are full.");
- return -ENOSYS;
- }
etqf = IXGBE_ETQF_FILTER_EN;
etqf |= (uint32_t)filter->ether_type;
etqs |= (uint32_t)((filter->queue <<
IXGBE_ETQS_RX_QUEUE_SHIFT) &
IXGBE_ETQS_RX_QUEUE);
etqs |= IXGBE_ETQS_QUEUE_EN;
+
+ ethertype_filter.ethertype = filter->ether_type;
+ ethertype_filter.etqf = etqf;
+ ethertype_filter.etqs = etqs;
+ ethertype_filter.conf = FALSE;
+ ret = ixgbe_ethertype_filter_insert(filter_info,
+ &ethertype_filter);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "ethertype filters are full.");
+ return -ENOSPC;
+ }
} else {
ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
if (ret < 0)
enum rte_filter_op filter_op,
void *arg)
{
- int ret = -EINVAL;
+ int ret = 0;
switch (filter_type) {
case RTE_ETH_FILTER_NTUPLE:
case RTE_ETH_FILTER_L2_TUNNEL:
ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &ixgbe_flow_ops;
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
+ ret = -EINVAL;
break;
}
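
/*
 * Editor's illustrative sketch (not part of the patch): the new
 * RTE_ETH_FILTER_GENERIC case above is how the rte_flow layer discovers
 * a driver's flow ops.  Roughly what the ethdev library does when an
 * application calls rte_flow_validate()/rte_flow_create():
 */
static const struct rte_flow_ops *
example_get_flow_ops(struct rte_eth_dev *dev)
{
	const struct rte_flow_ops *ops = NULL;

	if (dev->dev_ops->filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
				      RTE_ETH_FILTER_GET, &ops) != 0)
		return NULL;

	return ops;	/* &ixgbe_flow_ops for this driver */
}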
{
int ret = 0;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
if (l2_tunnel == NULL)
return -EINVAL;
switch (l2_tunnel->l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type;
ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
break;
default:
{
int ret = 0;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
switch (l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_en = TRUE;
ret = ixgbe_e_tag_enable(hw);
break;
default:
{
int ret = 0;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
switch (l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_en = FALSE;
ret = ixgbe_e_tag_disable(hw);
break;
default:
return -EINVAL;
}
+static inline struct ixgbe_l2_tn_filter *
+ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info,
+ struct ixgbe_l2_tn_key *key)
+{
+ int ret;
+
+ ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
+ if (ret < 0)
+ return NULL;
+
+ return l2_tn_info->hash_map[ret];
+}
+
+static inline int
+ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
+ struct ixgbe_l2_tn_filter *l2_tn_filter)
+{
+ int ret;
+
+ ret = rte_hash_add_key(l2_tn_info->hash_handle,
+ &l2_tn_filter->key);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert L2 tunnel filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+
+ l2_tn_info->hash_map[ret] = l2_tn_filter;
+
+ TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+
+ return 0;
+}
+
+static inline int
+ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
+ struct ixgbe_l2_tn_key *key)
+{
+ int ret;
+ struct ixgbe_l2_tn_filter *l2_tn_filter;
+
+ ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "No such L2 tunnel filter to delete %d!",
+ ret);
+ return ret;
+ }
+
+ l2_tn_filter = l2_tn_info->hash_map[ret];
+ l2_tn_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+ rte_free(l2_tn_filter);
+
+ return 0;
+}
+
/* Add l2 tunnel filter */
-static int
+int
ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
- struct rte_eth_l2_tunnel_conf *l2_tunnel)
+ struct rte_eth_l2_tunnel_conf *l2_tunnel,
+ bool restore)
{
- int ret = 0;
+ int ret;
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_l2_tn_key key;
+ struct ixgbe_l2_tn_filter *node;
+
+ if (!restore) {
+ key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+ key.tn_id = l2_tunnel->tunnel_id;
+
+ node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key);
+
+ if (node) {
+ PMD_DRV_LOG(ERR,
+ "The L2 tunnel filter already exists!");
+ return -EINVAL;
+ }
+
+ node = rte_zmalloc("ixgbe_l2_tn",
+ sizeof(struct ixgbe_l2_tn_filter),
+ 0);
+ if (!node)
+ return -ENOMEM;
+
+ (void)rte_memcpy(&node->key,
+ &key,
+ sizeof(struct ixgbe_l2_tn_key));
+ node->pool = l2_tunnel->pool;
+ ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node);
+ if (ret < 0) {
+ rte_free(node);
+ return ret;
+ }
+ }
switch (l2_tunnel->l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
break;
}
+ if ((!restore) && (ret < 0))
+ (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
+
return ret;
}
/* Delete l2 tunnel filter */
-static int
+int
ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
- int ret = 0;
+ int ret;
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_l2_tn_key key;
+
+ key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+ key.tn_id = l2_tunnel->tunnel_id;
+ ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
+ if (ret < 0)
+ return ret;
switch (l2_tunnel->l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
enum rte_filter_op filter_op,
void *arg)
{
- int ret = 0;
+ int ret;
if (filter_op == RTE_ETH_FILTER_NOP)
return 0;
case RTE_ETH_FILTER_ADD:
ret = ixgbe_dev_l2_tunnel_filter_add
(dev,
- (struct rte_eth_l2_tunnel_conf *)arg);
+ (struct rte_eth_l2_tunnel_conf *)arg,
+ FALSE);
break;
case RTE_ETH_FILTER_DELETE:
ret = ixgbe_dev_l2_tunnel_filter_del
(struct rte_eth_dev *dev,
enum rte_eth_tunnel_type l2_tunnel_type)
{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
int ret = 0;
switch (l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_fwd_en = TRUE;
ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
break;
default:
(struct rte_eth_dev *dev,
enum rte_eth_tunnel_type l2_tunnel_type)
{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
int ret = 0;
switch (l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_fwd_en = FALSE;
ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
break;
default:
return 0;
}
+/* restore n-tuple filter */
+static inline void
+ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct ixgbe_5tuple_filter *node;
+
+ TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
+ ixgbe_inject_5tuple_filter(dev, node);
+ }
+}
+
+/* restore ethernet type filter */
+static inline void
+ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_mask & (1 << i)) {
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
+ filter_info->ethertype_filters[i].etqf);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(i),
+ filter_info->ethertype_filters[i].etqs);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+ }
+}
+
+/* restore SYN filter */
+static inline void
+ixgbe_syn_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t synqf;
+
+ synqf = filter_info->syn_info;
+
+ if (synqf & IXGBE_SYN_FILTER_ENABLE) {
+ IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+}
+
+/* restore L2 tunnel filter */
+static inline void
+ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_l2_tn_filter *node;
+ struct rte_eth_l2_tunnel_conf l2_tn_conf;
+
+ TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
+ l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
+ l2_tn_conf.tunnel_id = node->key.tn_id;
+ l2_tn_conf.pool = node->pool;
+ (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
+ }
+}
+
+static int
+ixgbe_filter_restore(struct rte_eth_dev *dev)
+{
+ ixgbe_ntuple_filter_restore(dev);
+ ixgbe_ethertype_filter_restore(dev);
+ ixgbe_syn_filter_restore(dev);
+ ixgbe_fdir_filter_restore(dev);
+ ixgbe_l2_tn_filter_restore(dev);
+
+ return 0;
+}
+
+static void
+ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (l2_tn_info->e_tag_en)
+ (void)ixgbe_e_tag_enable(hw);
+
+ if (l2_tn_info->e_tag_fwd_en)
+ (void)ixgbe_e_tag_forwarding_en_dis(dev, 1);
+
+ (void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
+}
+
+/* remove all the n-tuple filters */
+void
+ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct ixgbe_5tuple_filter *p_5tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
+ ixgbe_remove_5tuple_filter(dev, p_5tuple);
+}
+
+/* remove all the ether type filters */
+void
+ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_mask & (1 << i) &&
+ !filter_info->ethertype_filters[i].conf) {
+ (void)ixgbe_ethertype_filter_remove(filter_info,
+ (uint8_t)i);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+ }
+}
+
+/* remove the SYN filter */
+void
+ixgbe_clear_syn_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) {
+ filter_info->syn_info = 0;
+
+ IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+}
+
+/* remove all the L2 tunnel filters */
+int
+ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_l2_tn_filter *l2_tn_filter;
+ struct rte_eth_l2_tunnel_conf l2_tn_conf;
+ int ret = 0;
+
+ while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+ l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
+ l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id;
+ l2_tn_conf.pool = l2_tn_filter->pool;
+ ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
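
/*
 * Editor's illustrative sketch (not part of the patch): the clear
 * helpers above are meant for a flow-flush path such as the one in
 * ixgbe_flow.c; a minimal caller could look like this (the fdir flush
 * helper lives in ixgbe_fdir.c and is omitted here).
 */
static int
example_flush_all_filters(struct rte_eth_dev *dev)
{
	ixgbe_clear_all_ntuple_filter(dev);
	ixgbe_clear_all_ethertype_filter(dev);
	ixgbe_clear_syn_filter(dev);

	/* only the L2 tunnel path can fail, on a hash delete error */
	return ixgbe_clear_all_l2_tn_filter(dev);
}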
+
RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio");