#include <stdint.h>
#include <string.h>
#include <rte_common.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_pci.h>
#include <rte_interrupts.h>
#include <rte_log.h>
txgbe_regs_diagnostic,
NULL};
+static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
+static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
/* Unlock any pending hardware semaphore */
txgbe_swfw_lock_reset(hw);
+#ifdef RTE_LIB_SECURITY
+ /* Initialize security_ctx only for primary process*/
+ if (txgbe_ipsec_ctx_create(eth_dev))
+ return -ENOMEM;
+#endif
+
/* Initialize DCB configuration*/
memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
txgbe_dcb_init(hw, dcb_config);
/* initialize 5tuple filter list */
TAILQ_INIT(&filter_info->fivetuple_list);
+ /* initialize flow director filter list & hash */
+ txgbe_fdir_filter_init(eth_dev);
+
/* initialize l2 tunnel filter list & hash */
txgbe_l2_tn_filter_init(eth_dev);
+ /* initialize flow filter lists */
+ txgbe_filterlist_init();
+
/* initialize bandwidth configuration info */
memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
+ /* initialize Traffic Manager configuration */
+ txgbe_tm_conf_init(eth_dev);
+
return 0;
}
return 0;
}
+/* Release the flow director hash table, hash map and cached filter list.
+ * Called from dev_close(); safe to call even if init partially failed.
+ */
+static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
+	struct txgbe_fdir_filter *fdir_filter;
+
+	/* rte_free() and rte_hash_free() are no-ops on NULL, so no guards
+	 * are needed; reset the pointers afterwards so a repeated uninit
+	 * (or a later re-init failure path) cannot double-free.
+	 */
+	rte_free(fdir_info->hash_map);
+	fdir_info->hash_map = NULL;
+	rte_hash_free(fdir_info->hash_handle);
+	fdir_info->hash_handle = NULL;
+
+	/* Drain and free every cached flow director filter entry. */
+	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+		TAILQ_REMOVE(&fdir_info->fdir_list,
+			     fdir_filter,
+			     entries);
+		rte_free(fdir_filter);
+	}
+
+	return 0;
+}
+
static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
{
struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
return 0;
}
+/* Set up the flow director filter list plus the lookup hash table and the
+ * index->filter map backing it.  Returns 0 on success, negative errno on
+ * failure; on failure no resources remain allocated.
+ */
+static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
+{
+	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
+	char fdir_hash_name[RTE_HASH_NAMESIZE];
+	struct rte_hash_parameters fdir_hash_params = {
+		.name = fdir_hash_name,
+		.entries = TXGBE_MAX_FDIR_FILTER_NUM,
+		.key_len = sizeof(struct txgbe_atr_input),
+		.hash_func = rte_hash_crc,
+		.hash_func_init_val = 0,
+		.socket_id = rte_socket_id(),
+	};
+
+	TAILQ_INIT(&fdir_info->fdir_list);
+	/* Hash names must be unique per port; key on the device name. */
+	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+		 "fdir_%s", TDEV_NAME(eth_dev));
+	fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
+	if (!fdir_info->hash_handle) {
+		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+		return -EINVAL;
+	}
+	fdir_info->hash_map = rte_zmalloc("txgbe",
+					  sizeof(struct txgbe_fdir_filter *) *
+					  TXGBE_MAX_FDIR_FILTER_NUM,
+					  0);
+	if (!fdir_info->hash_map) {
+		PMD_INIT_LOG(ERR,
+			     "Failed to allocate memory for fdir hash map!");
+		/* Do not leak the hash table created just above. */
+		rte_hash_free(fdir_info->hash_handle);
+		fdir_info->hash_handle = NULL;
+		return -ENOMEM;
+	}
+	fdir_info->mask_added = FALSE;
+
+	return 0;
+}
+
static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
{
struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
int status;
uint16_t vf, idx;
uint32_t *link_speeds;
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
PMD_INIT_FUNC_TRACE();
txgbe_configure_port(dev);
txgbe_configure_dcb(dev);
+ if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+ err = txgbe_fdir_configure(dev);
+ if (err)
+ goto error;
+ }
+
/* Restore vf rate limit */
if (vfinfo != NULL) {
for (vf = 0; vf < pci_dev->max_vfs; vf++)
txgbe_l2_tunnel_conf(dev);
txgbe_filter_restore(dev);
+ if (tm_conf->root && !tm_conf->committed)
+ PMD_DRV_LOG(WARNING,
+ "please call hierarchy_commit() "
+ "before starting the port");
+
/*
* Update link status right before return, because it may
* start link configuration process in a separate thread.
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int vf;
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
if (hw->adapter_stopped)
return 0;
intr_handle->intr_vec = NULL;
}
+ /* reset hierarchy commit */
+ tm_conf->committed = false;
+
adapter->rss_reta_updated = 0;
wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
rte_free(dev->data->hash_mac_addrs);
dev->data->hash_mac_addrs = NULL;
+ /* remove all the fdir filters & hash */
+ txgbe_fdir_filter_uninit(dev);
+
/* remove all the L2 tunnel filters & hash */
txgbe_l2_tn_filter_uninit(dev);
/* Remove all ntuple filters of the device */
txgbe_ntuple_filter_uninit(dev);
+ /* clear all the filters list */
+ txgbe_filterlist_flush();
+
+ /* Remove all Traffic Manager configuration */
+ txgbe_tm_conf_uninit(dev);
+
+#ifdef RTE_LIB_SECURITY
+ rte_free(dev->security_ctx);
+#endif
+
return ret;
}
if (!mask)
continue;
- reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
+ reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2);
for (j = 0; j < 4; j++) {
if (RS8(mask, j, 0x1)) {
reta &= ~(MS32(8 * j, 0xFF));
8 * j, 0xFF);
}
}
- wr32a(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
+ wr32at(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
}
adapter->rss_reta_updated = 1;
if (!mask)
continue;
- reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
+ reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2);
for (j = 0; j < 4; j++) {
if (RS8(mask, j, 0x1))
reta_conf[idx].reta[shift + j] =
hw = TXGBE_DEV_HW(dev);
mc_addr_list = (u8 *)mc_addr_set;
- return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
+ return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
txgbe_dev_addr_list_itr, TRUE);
}
{
switch (mac_type) {
case txgbe_mac_raptor:
+ case txgbe_mac_raptor_vf:
return 1;
default:
return 0;
return ret;
}
+/* Add UDP tunneling port */
+static int
+txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+			      struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	int ret = 0;
+
+	if (udp_tunnel == NULL)
+		return -EINVAL;
+
+	/* Each tunnel type has a dedicated port register; port 0 is
+	 * rejected for all of them since it would disable recognition.
+	 */
+	switch (udp_tunnel->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		if (udp_tunnel->udp_port == 0) {
+			PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
+			ret = -EINVAL;
+			break;
+		}
+		/* Program both the plain VXLAN and the VXLAN-GPE port
+		 * registers with the same value.
+		 */
+		wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
+		wr32(hw, TXGBE_VXLANPORTGPE, udp_tunnel->udp_port);
+		break;
+	case RTE_TUNNEL_TYPE_GENEVE:
+		if (udp_tunnel->udp_port == 0) {
+			PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed.");
+			ret = -EINVAL;
+			break;
+		}
+		wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
+		break;
+	case RTE_TUNNEL_TYPE_TEREDO:
+		if (udp_tunnel->udp_port == 0) {
+			PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed.");
+			ret = -EINVAL;
+			break;
+		}
+		wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -EINVAL;
+		break;
+	}
+
+	/* Flush posted register writes; harmless no-op on error paths. */
+	txgbe_flush(hw);
+
+	return ret;
+}
+
+/* Remove UDP tunneling port */
+static int
+txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+			      struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	int ret = 0;
+	uint16_t cur_port;
+
+	if (udp_tunnel == NULL)
+		return -EINVAL;
+
+	/* Read back the currently programmed port first: only the port
+	 * that is actually configured may be removed (cleared to 0).
+	 */
+	switch (udp_tunnel->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
+		if (cur_port != udp_tunnel->udp_port) {
+			PMD_DRV_LOG(ERR, "Port %u does not exist.",
+					udp_tunnel->udp_port);
+			ret = -EINVAL;
+			break;
+		}
+		/* Clear both the VXLAN and VXLAN-GPE port registers,
+		 * mirroring how they were set together on add.
+		 */
+		wr32(hw, TXGBE_VXLANPORT, 0);
+		wr32(hw, TXGBE_VXLANPORTGPE, 0);
+		break;
+	case RTE_TUNNEL_TYPE_GENEVE:
+		cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
+		if (cur_port != udp_tunnel->udp_port) {
+			PMD_DRV_LOG(ERR, "Port %u does not exist.",
+					udp_tunnel->udp_port);
+			ret = -EINVAL;
+			break;
+		}
+		wr32(hw, TXGBE_GENEVEPORT, 0);
+		break;
+	case RTE_TUNNEL_TYPE_TEREDO:
+		cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
+		if (cur_port != udp_tunnel->udp_port) {
+			PMD_DRV_LOG(ERR, "Port %u does not exist.",
+					udp_tunnel->udp_port);
+			ret = -EINVAL;
+			break;
+		}
+		wr32(hw, TXGBE_TEREDOPORT, 0);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -EINVAL;
+		break;
+	}
+
+	/* Flush posted register writes; harmless no-op on error paths. */
+	txgbe_flush(hw);
+
+	return ret;
+}
+
/* restore n-tuple filter */
static inline void
txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
}
}
+/* restore rss filter */
+static inline void
+txgbe_rss_filter_restore(struct rte_eth_dev *dev)
+{
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+	/* Reprogram the RSS flow rule only if one was configured. */
+	if (filter_info->rss_info.conf.queue_num == 0)
+		return;
+
+	txgbe_config_rss_filter(dev, &filter_info->rss_info, TRUE);
+}
+
static int
txgbe_filter_restore(struct rte_eth_dev *dev)
{
txgbe_ntuple_filter_restore(dev);
txgbe_ethertype_filter_restore(dev);
txgbe_syn_filter_restore(dev);
+	/* Reprogram flow director entries after a device reset/start. */
+	txgbe_fdir_filter_restore(dev);
txgbe_l2_tn_filter_restore(dev);
+	/* Reapply the cached RSS flow rule, if any was configured. */
+	txgbe_rss_filter_restore(dev);
return 0;
}
(void)txgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
}
+/* remove all the n-tuple filters */
+void
+txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
+{
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+	struct txgbe_5tuple_filter *node;
+
+	/* Drain the 5-tuple list head-first; the remove helper unlinks
+	 * and releases each entry, so the list eventually empties.
+	 */
+	for (;;) {
+		node = TAILQ_FIRST(&filter_info->fivetuple_list);
+		if (node == NULL)
+			break;
+		txgbe_remove_5tuple_filter(dev, node);
+	}
+}
+
+/* remove all the ether type filters */
+void
+txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+	int i;
+
+	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
+		/* Skip slots that hold no filter. */
+		if (!(filter_info->ethertype_mask & (1 << i)))
+			continue;
+		/* Skip slots with a .conf set (presumably owned by the
+		 * flow API and cleaned up elsewhere — see filterlist flush).
+		 */
+		if (filter_info->ethertype_filters[i].conf)
+			continue;
+		(void)txgbe_ethertype_filter_remove(filter_info,
+						    (uint8_t)i);
+		wr32(hw, TXGBE_ETFLT(i), 0);
+		wr32(hw, TXGBE_ETCLS(i), 0);
+		txgbe_flush(hw);
+	}
+}
+
+/* remove the SYN filter */
+void
+txgbe_clear_syn_filter(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+	/* Nothing to do unless a SYN filter is currently enabled. */
+	if (!(filter_info->syn_info & TXGBE_SYNCLS_ENA))
+		return;
+
+	/* Drop the cached setting and clear the hardware register. */
+	filter_info->syn_info = 0;
+	wr32(hw, TXGBE_SYNCLS, 0);
+	txgbe_flush(hw);
+}
+
+/* remove all the L2 tunnel filters */
+int
+txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
+{
+	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+	struct txgbe_l2_tn_filter *node;
+	struct txgbe_l2_tunnel_conf conf;
+	int ret;
+
+	/* Pop entries one at a time; the delete helper unlinks each node
+	 * from the list, so the loop terminates when the list is empty.
+	 */
+	while ((node = TAILQ_FIRST(&l2_tn_info->l2_tn_list)) != NULL) {
+		conf.l2_tunnel_type = node->key.l2_tn_type;
+		conf.tunnel_id = node->key.tn_id;
+		conf.pool = node->pool;
+		ret = txgbe_dev_l2_tunnel_filter_del(dev, &conf);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
static const struct eth_dev_ops txgbe_eth_dev_ops = {
.dev_configure = txgbe_dev_configure,
.dev_infos_get = txgbe_dev_info_get,
.timesync_adjust_time = txgbe_timesync_adjust_time,
.timesync_read_time = txgbe_timesync_read_time,
.timesync_write_time = txgbe_timesync_write_time,
+ .udp_tunnel_port_add = txgbe_dev_udp_tunnel_port_add,
+ .udp_tunnel_port_del = txgbe_dev_udp_tunnel_port_del,
+ .tm_ops_get = txgbe_tm_ops_get,
.tx_done_cleanup = txgbe_dev_tx_done_cleanup,
};