net/txgbe: support VF interrupt
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 5f1ba8f..23f9d17 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -7,7 +7,7 @@
 #include <stdint.h>
 #include <string.h>
 #include <rte_common.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_pci.h>
 
 #include <rte_interrupts.h>
 #include <rte_log.h>
@@ -88,6 +88,8 @@ static const struct reg_info *txgbe_regs_others[] = {
                                txgbe_regs_diagnostic,
                                NULL};
 
+static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
+static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
 static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
 static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
 static int  txgbe_dev_set_link_up(struct rte_eth_dev *dev);
@@ -112,6 +114,7 @@ static void txgbe_dev_interrupt_delayed_handler(void *param);
 static void txgbe_configure_msix(struct rte_eth_dev *dev);
 
 static int txgbe_filter_restore(struct rte_eth_dev *dev);
+static void txgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
 
 #define TXGBE_SET_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
@@ -544,6 +547,12 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
        /* Unlock any pending hardware semaphore */
        txgbe_swfw_lock_reset(hw);
 
+#ifdef RTE_LIB_SECURITY
+       /* Initialize security_ctx only for primary process */
+       if (txgbe_ipsec_ctx_create(eth_dev))
+               return -ENOMEM;
+#endif
+
        /* Initialize DCB configuration*/
        memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
        txgbe_dcb_init(hw, dcb_config);
@@ -689,12 +698,21 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
        /* initialize 5tuple filter list */
        TAILQ_INIT(&filter_info->fivetuple_list);
 
+       /* initialize flow director filter list & hash */
+       txgbe_fdir_filter_init(eth_dev);
+
        /* initialize l2 tunnel filter list & hash */
        txgbe_l2_tn_filter_init(eth_dev);
 
+       /* initialize flow filter lists */
+       txgbe_filterlist_init();
+
        /* initialize bandwidth configuration info */
        memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
 
+       /* initialize Traffic Manager configuration */
+       txgbe_tm_conf_init(eth_dev);
+
        return 0;
 }
 
@@ -728,6 +746,26 @@ static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
        return 0;
 }
 
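+/* Release the fdir hash table, hash map and software filter list. */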
+static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+       struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
+       struct txgbe_fdir_filter *fdir_filter;
+
+       if (fdir_info->hash_map)
+               rte_free(fdir_info->hash_map);
+       if (fdir_info->hash_handle)
+               rte_hash_free(fdir_info->hash_handle);
+
+       while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+               TAILQ_REMOVE(&fdir_info->fdir_list,
+                            fdir_filter,
+                            entries);
+               rte_free(fdir_filter);
+       }
+
+       return 0;
+}
+
 static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
 {
        struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
@@ -748,6 +786,41 @@ static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
        return 0;
 }
 
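+/* Set up the fdir filter list and the hash table used for lookups. */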
+static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
+{
+       struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
+       char fdir_hash_name[RTE_HASH_NAMESIZE];
+       struct rte_hash_parameters fdir_hash_params = {
+               .name = fdir_hash_name,
+               .entries = TXGBE_MAX_FDIR_FILTER_NUM,
+               .key_len = sizeof(struct txgbe_atr_input),
+               .hash_func = rte_hash_crc,
+               .hash_func_init_val = 0,
+               .socket_id = rte_socket_id(),
+       };
+
+       TAILQ_INIT(&fdir_info->fdir_list);
+       snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+                "fdir_%s", TDEV_NAME(eth_dev));
+       fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
+       if (!fdir_info->hash_handle) {
+               PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+               return -EINVAL;
+       }
+       fdir_info->hash_map = rte_zmalloc("txgbe",
+                                         sizeof(struct txgbe_fdir_filter *) *
+                                         TXGBE_MAX_FDIR_FILTER_NUM,
+                                         0);
+       if (!fdir_info->hash_map) {
+               PMD_INIT_LOG(ERR,
+                            "Failed to allocate memory for fdir hash map!");
+               /* Avoid leaking the hash table created above */
+               rte_hash_free(fdir_info->hash_handle);
+               fdir_info->hash_handle = NULL;
+               return -ENOMEM;
+       }
+       fdir_info->mask_added = FALSE;
+
+       return 0;
+}
+
 static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
 {
        struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
@@ -1481,6 +1554,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
        int status;
        uint16_t vf, idx;
        uint32_t *link_speeds;
+       struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
 
        PMD_INIT_FUNC_TRACE();
 
@@ -1572,6 +1646,12 @@ txgbe_dev_start(struct rte_eth_dev *dev)
        txgbe_configure_port(dev);
        txgbe_configure_dcb(dev);
 
+       if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+               err = txgbe_fdir_configure(dev);
+               if (err)
+                       goto error;
+       }
+
        /* Restore vf rate limit */
        if (vfinfo != NULL) {
                for (vf = 0; vf < pci_dev->max_vfs; vf++)
@@ -1675,8 +1755,14 @@ skip_link_setup:
 
        /* resume enabled intr since hw reset */
        txgbe_enable_intr(dev);
+       txgbe_l2_tunnel_conf(dev);
        txgbe_filter_restore(dev);
 
+       if (tm_conf->root && !tm_conf->committed)
+               PMD_DRV_LOG(WARNING,
+                           "please call hierarchy_commit() "
+                           "before starting the port");
+
        /*
         * Update link status right before return, because it may
         * start link configuration process in a separate thread.
@@ -1709,6 +1795,7 @@ txgbe_dev_stop(struct rte_eth_dev *dev)
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        int vf;
+       struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
 
        if (hw->adapter_stopped)
                return 0;
@@ -1761,6 +1848,9 @@ txgbe_dev_stop(struct rte_eth_dev *dev)
                intr_handle->intr_vec = NULL;
        }
 
+       /* reset hierarchy commit */
+       tm_conf->committed = false;
+
        adapter->rss_reta_updated = 0;
        wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
 
@@ -1864,12 +1954,25 @@ txgbe_dev_close(struct rte_eth_dev *dev)
        rte_free(dev->data->hash_mac_addrs);
        dev->data->hash_mac_addrs = NULL;
 
+       /* remove all the fdir filters & hash */
+       txgbe_fdir_filter_uninit(dev);
+
        /* remove all the L2 tunnel filters & hash */
        txgbe_l2_tn_filter_uninit(dev);
 
        /* Remove all ntuple filters of the device */
        txgbe_ntuple_filter_uninit(dev);
 
+       /* clear all the flow filter lists */
+       txgbe_filterlist_flush();
+
+       /* Remove all Traffic Manager configuration */
+       txgbe_tm_conf_uninit(dev);
+
+#ifdef RTE_LIB_SECURITY
+       rte_free(dev->security_ctx);
+#endif
+
        return ret;
 }
 
@@ -4517,6 +4620,368 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
        return 0;
 }
 
+/* Update e-tag ether type */
+static int
+txgbe_update_e_tag_eth_type(struct txgbe_hw *hw,
+                           uint16_t ether_type)
+{
+       uint32_t etag_etype;
+
+       etag_etype = rd32(hw, TXGBE_EXTAG);
+       etag_etype &= ~TXGBE_EXTAG_ETAG_MASK;
+       etag_etype |= ether_type;
+       wr32(hw, TXGBE_EXTAG, etag_etype);
+       txgbe_flush(hw);
+
+       return 0;
+}
+
+/* Enable e-tag tunnel */
+static int
+txgbe_e_tag_enable(struct txgbe_hw *hw)
+{
+       uint32_t etag_etype;
+
+       etag_etype = rd32(hw, TXGBE_PORTCTL);
+       etag_etype |= TXGBE_PORTCTL_ETAG;
+       wr32(hw, TXGBE_PORTCTL, etag_etype);
+       txgbe_flush(hw);
+
+       return 0;
+}
+
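+/* Remove the E-tag forwarding rule that matches the given tunnel id. */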
+static int
+txgbe_e_tag_filter_del(struct rte_eth_dev *dev,
+                      struct txgbe_l2_tunnel_conf  *l2_tunnel)
+{
+       int ret = 0;
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t i, rar_entries;
+       uint32_t rar_low, rar_high;
+
+       rar_entries = hw->mac.num_rar_entries;
+
+       for (i = 1; i < rar_entries; i++) {
+               wr32(hw, TXGBE_ETHADDRIDX, i);
+               rar_high = rd32(hw, TXGBE_ETHADDRH);
+               rar_low  = rd32(hw, TXGBE_ETHADDRL);
+               if ((rar_high & TXGBE_ETHADDRH_VLD) &&
+                   (rar_high & TXGBE_ETHADDRH_ETAG) &&
+                   (TXGBE_ETHADDRL_ETAG(rar_low) ==
+                    l2_tunnel->tunnel_id)) {
+                       wr32(hw, TXGBE_ETHADDRL, 0);
+                       wr32(hw, TXGBE_ETHADDRH, 0);
+
+                       txgbe_clear_vmdq(hw, i, BIT_MASK32);
+
+                       return ret;
+               }
+       }
+
+       return ret;
+}
+
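+/* Add an E-tag forwarding rule by programming a free RAR entry. */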
+static int
+txgbe_e_tag_filter_add(struct rte_eth_dev *dev,
+                      struct txgbe_l2_tunnel_conf *l2_tunnel)
+{
+       int ret = 0;
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t i, rar_entries;
+       uint32_t rar_low, rar_high;
+
+       /* One entry for one tunnel. Try to remove potential existing entry. */
+       txgbe_e_tag_filter_del(dev, l2_tunnel);
+
+       rar_entries = hw->mac.num_rar_entries;
+
+       for (i = 1; i < rar_entries; i++) {
+               wr32(hw, TXGBE_ETHADDRIDX, i);
+               rar_high = rd32(hw, TXGBE_ETHADDRH);
+               if (rar_high & TXGBE_ETHADDRH_VLD)
+                       continue;
+
+               txgbe_set_vmdq(hw, i, l2_tunnel->pool);
+               rar_high = TXGBE_ETHADDRH_VLD | TXGBE_ETHADDRH_ETAG;
+               rar_low = l2_tunnel->tunnel_id;
+
+               wr32(hw, TXGBE_ETHADDRL, rar_low);
+               wr32(hw, TXGBE_ETHADDRH, rar_high);
+
+               return ret;
+       }
+
+       PMD_INIT_LOG(NOTICE, "The E-tag forwarding rule table is full."
+                    " Please remove a rule before adding a new one.");
+       return -EINVAL;
+}
+
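+/* Look up an L2 tunnel filter in the software hash table. */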
+static inline struct txgbe_l2_tn_filter *
+txgbe_l2_tn_filter_lookup(struct txgbe_l2_tn_info *l2_tn_info,
+                         struct txgbe_l2_tn_key *key)
+{
+       int ret;
+
+       ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
+       if (ret < 0)
+               return NULL;
+
+       return l2_tn_info->hash_map[ret];
+}
+
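+/* Add an L2 tunnel filter to the hash table and the filter list. */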
+static inline int
+txgbe_insert_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
+                         struct txgbe_l2_tn_filter *l2_tn_filter)
+{
+       int ret;
+
+       ret = rte_hash_add_key(l2_tn_info->hash_handle,
+                              &l2_tn_filter->key);
+
+       if (ret < 0) {
+               PMD_DRV_LOG(ERR,
+                           "Failed to insert L2 tunnel filter"
+                           " into hash table: %d!",
+                           ret);
+               return ret;
+       }
+
+       l2_tn_info->hash_map[ret] = l2_tn_filter;
+
+       TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+
+       return 0;
+}
+
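+/* Remove an L2 tunnel filter from the hash table and the filter list. */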
+static inline int
+txgbe_remove_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
+                         struct txgbe_l2_tn_key *key)
+{
+       int ret;
+       struct txgbe_l2_tn_filter *l2_tn_filter;
+
+       ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
+
+       if (ret < 0) {
+               PMD_DRV_LOG(ERR,
+                           "No such L2 tunnel filter to delete %d!",
+                           ret);
+               return ret;
+       }
+
+       l2_tn_filter = l2_tn_info->hash_map[ret];
+       l2_tn_info->hash_map[ret] = NULL;
+
+       TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+       rte_free(l2_tn_filter);
+
+       return 0;
+}
+
+/* Add l2 tunnel filter */
+int
+txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
+                              struct txgbe_l2_tunnel_conf *l2_tunnel,
+                              bool restore)
+{
+       int ret;
+       struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+       struct txgbe_l2_tn_key key;
+       struct txgbe_l2_tn_filter *node;
+
+       if (!restore) {
+               key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+               key.tn_id = l2_tunnel->tunnel_id;
+
+               node = txgbe_l2_tn_filter_lookup(l2_tn_info, &key);
+
+               if (node) {
+                       PMD_DRV_LOG(ERR,
+                                   "The L2 tunnel filter already exists!");
+                       return -EINVAL;
+               }
+
+               node = rte_zmalloc("txgbe_l2_tn",
+                                  sizeof(struct txgbe_l2_tn_filter),
+                                  0);
+               if (!node)
+                       return -ENOMEM;
+
+               rte_memcpy(&node->key,
+                                &key,
+                                sizeof(struct txgbe_l2_tn_key));
+               node->pool = l2_tunnel->pool;
+               ret = txgbe_insert_l2_tn_filter(l2_tn_info, node);
+               if (ret < 0) {
+                       rte_free(node);
+                       return ret;
+               }
+       }
+
+       switch (l2_tunnel->l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       if (!restore && ret < 0)
+               (void)txgbe_remove_l2_tn_filter(l2_tn_info, &key);
+
+       return ret;
+}
+
+/* Delete l2 tunnel filter */
+int
+txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
+                              struct txgbe_l2_tunnel_conf *l2_tunnel)
+{
+       int ret;
+       struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+       struct txgbe_l2_tn_key key;
+
+       key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+       key.tn_id = l2_tunnel->tunnel_id;
+       ret = txgbe_remove_l2_tn_filter(l2_tn_info, &key);
+       if (ret < 0)
+               return ret;
+
+       switch (l2_tunnel->l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
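+/* Enable or disable E-tag based pool forwarding. */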
+static int
+txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
+{
+       int ret = 0;
+       uint32_t ctrl;
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+       ctrl = rd32(hw, TXGBE_POOLCTL);
+       ctrl &= ~TXGBE_POOLCTL_MODE_MASK;
+       if (en)
+               ctrl |= TXGBE_PSRPOOL_MODE_ETAG;
+       wr32(hw, TXGBE_POOLCTL, ctrl);
+
+       return ret;
+}
+
+/* Add UDP tunneling port */
+static int
+txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+                             struct rte_eth_udp_tunnel *udp_tunnel)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       int ret = 0;
+
+       if (udp_tunnel == NULL)
+               return -EINVAL;
+
+       switch (udp_tunnel->prot_type) {
+       case RTE_TUNNEL_TYPE_VXLAN:
+               if (udp_tunnel->udp_port == 0) {
+                       PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
+               wr32(hw, TXGBE_VXLANPORTGPE, udp_tunnel->udp_port);
+               break;
+       case RTE_TUNNEL_TYPE_GENEVE:
+               if (udp_tunnel->udp_port == 0) {
+                       PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed.");
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
+               break;
+       case RTE_TUNNEL_TYPE_TEREDO:
+               if (udp_tunnel->udp_port == 0) {
+                       PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed.");
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       txgbe_flush(hw);
+
+       return ret;
+}
+
+/* Remove UDP tunneling port */
+static int
+txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+                             struct rte_eth_udp_tunnel *udp_tunnel)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       int ret = 0;
+       uint16_t cur_port;
+
+       if (udp_tunnel == NULL)
+               return -EINVAL;
+
+       switch (udp_tunnel->prot_type) {
+       case RTE_TUNNEL_TYPE_VXLAN:
+               cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
+               if (cur_port != udp_tunnel->udp_port) {
+                       PMD_DRV_LOG(ERR, "Port %u does not exist.",
+                                       udp_tunnel->udp_port);
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_VXLANPORT, 0);
+               wr32(hw, TXGBE_VXLANPORTGPE, 0);
+               break;
+       case RTE_TUNNEL_TYPE_GENEVE:
+               cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
+               if (cur_port != udp_tunnel->udp_port) {
+                       PMD_DRV_LOG(ERR, "Port %u does not exist.",
+                                       udp_tunnel->udp_port);
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_GENEVEPORT, 0);
+               break;
+       case RTE_TUNNEL_TYPE_TEREDO:
+               cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
+               if (cur_port != udp_tunnel->udp_port) {
+                       PMD_DRV_LOG(ERR, "Port %u does not exist.",
+                                       udp_tunnel->udp_port);
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_TEREDOPORT, 0);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       txgbe_flush(hw);
+
+       return ret;
+}
+
 /* restore n-tuple filter */
 static inline void
 txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
@@ -4564,12 +5029,124 @@ txgbe_syn_filter_restore(struct rte_eth_dev *dev)
        }
 }
 
+/* restore L2 tunnel filter */
+static inline void
+txgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
+{
+       struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+       struct txgbe_l2_tn_filter *node;
+       struct txgbe_l2_tunnel_conf l2_tn_conf;
+
+       TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
+               l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
+               l2_tn_conf.tunnel_id      = node->key.tn_id;
+               l2_tn_conf.pool           = node->pool;
+               (void)txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
+       }
+}
+
+/* restore rss filter */
+static inline void
+txgbe_rss_filter_restore(struct rte_eth_dev *dev)
+{
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+       if (filter_info->rss_info.conf.queue_num)
+               txgbe_config_rss_filter(dev,
+                       &filter_info->rss_info, TRUE);
+}
+
 static int
 txgbe_filter_restore(struct rte_eth_dev *dev)
 {
        txgbe_ntuple_filter_restore(dev);
        txgbe_ethertype_filter_restore(dev);
        txgbe_syn_filter_restore(dev);
+       txgbe_fdir_filter_restore(dev);
+       txgbe_l2_tn_filter_restore(dev);
+       txgbe_rss_filter_restore(dev);
+
+       return 0;
+}
+
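+/* Apply the stored E-tag configuration to the hardware at port start. */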
+static void
+txgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
+{
+       struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+       if (l2_tn_info->e_tag_en)
+               (void)txgbe_e_tag_enable(hw);
+
+       if (l2_tn_info->e_tag_fwd_en)
+               (void)txgbe_e_tag_forwarding_en_dis(dev, 1);
+
+       (void)txgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
+}
+
+/* remove all the n-tuple filters */
+void
+txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
+{
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+       struct txgbe_5tuple_filter *p_5tuple;
+
+       while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
+               txgbe_remove_5tuple_filter(dev, p_5tuple);
+}
+
+/* remove all the ether type filters */
+void
+txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+       int i;
+
+       for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
+               if (filter_info->ethertype_mask & (1 << i) &&
+                   !filter_info->ethertype_filters[i].conf) {
+                       (void)txgbe_ethertype_filter_remove(filter_info,
+                                                           (uint8_t)i);
+                       wr32(hw, TXGBE_ETFLT(i), 0);
+                       wr32(hw, TXGBE_ETCLS(i), 0);
+                       txgbe_flush(hw);
+               }
+       }
+}
+
+/* remove the SYN filter */
+void
+txgbe_clear_syn_filter(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+       if (filter_info->syn_info & TXGBE_SYNCLS_ENA) {
+               filter_info->syn_info = 0;
+
+               wr32(hw, TXGBE_SYNCLS, 0);
+               txgbe_flush(hw);
+       }
+}
+
+/* remove all the L2 tunnel filters */
+int
+txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
+{
+       struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+       struct txgbe_l2_tn_filter *l2_tn_filter;
+       struct txgbe_l2_tunnel_conf l2_tn_conf;
+       int ret = 0;
+
+       while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+               l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
+               l2_tn_conf.tunnel_id      = l2_tn_filter->key.tn_id;
+               l2_tn_conf.pool           = l2_tn_filter->pool;
+               ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
+               if (ret < 0)
+                       return ret;
+       }
 
        return 0;
 }
@@ -4646,6 +5223,9 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
        .timesync_adjust_time       = txgbe_timesync_adjust_time,
        .timesync_read_time         = txgbe_timesync_read_time,
        .timesync_write_time        = txgbe_timesync_write_time,
+       .udp_tunnel_port_add        = txgbe_dev_udp_tunnel_port_add,
+       .udp_tunnel_port_del        = txgbe_dev_udp_tunnel_port_del,
+       .tm_ops_get                 = txgbe_tm_ops_get,
        .tx_done_cleanup            = txgbe_dev_tx_done_cleanup,
 };