Add support to flush all the filters.
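
The flush operation removes all the filters that were configured
through the flow API: n-tuple, ethertype, SYN, L2 tunnel, flow
director and RSS filters. The flow director tables are re-initialized
in hardware, and the driver's filter lists and flow list are emptied.

For reference, an application reaches this path through the generic
rte_flow flush call (a minimal sketch; port_id is a placeholder):

    struct rte_flow_error error;

    /* destroys every rule on the port, ending up in txgbe_flow_flush() */
    if (rte_flow_flush(port_id, &error) != 0)
        printf("flush failed: %s\n",
               error.message ? error.message : "unknown");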
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
return status;
}
+/**
+ * txgbe_fdir_check_cmd_complete - poll to check whether an FDIRPICMD command has completed
+ * @hw: pointer to hardware structure
+ * @fdircmd: current value of the FDIRPICMD register
+ */
+static s32 txgbe_fdir_check_cmd_complete(struct txgbe_hw *hw, u32 *fdircmd)
+{
+ int i;
+
+ for (i = 0; i < TXGBE_FDIRCMD_CMD_POLL; i++) {
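+ /* the command is treated as complete once the OP field reads back as zero */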
+ *fdircmd = rd32(hw, TXGBE_FDIRPICMD);
+ if (!(*fdircmd & TXGBE_FDIRPICMD_OP_MASK))
+ return 0;
+ usec_delay(10);
+ }
+
+ return TXGBE_ERR_FDIR_CMD_INCOMPLETE;
+}
+
+/**
+ * txgbe_reinit_fdir_tables - Reinitialize Flow Director tables.
+ * @hw: pointer to hardware structure
+ */
+s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw)
+{
+ s32 err;
+ int i;
+ u32 fdirctrl = rd32(hw, TXGBE_FDIRCTL);
+ u32 fdircmd;
+
+ DEBUGFUNC("txgbe_reinit_fdir_tables");
+
+ fdirctrl &= ~TXGBE_FDIRCTL_INITDONE;
+
+ /*
+ * Before starting the reinitialization process,
+ * FDIRPICMD.OP must be zero.
+ */
+ err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err) {
+ DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
+ return err;
+ }
+
+ wr32(hw, TXGBE_FDIRFREE, 0);
+ txgbe_flush(hw);
+ /*
+ * The adapter's flow director init flow cannot be restarted.
+ * Work around the silicon errata by performing the following steps
+ * before re-writing the FDIRCTL control register with the same value:
+ * - write 1 to bit 8 of the FDIRPICMD register, then
+ * - write 0 to bit 8 of the FDIRPICMD register
+ */
+ wr32m(hw, TXGBE_FDIRPICMD, TXGBE_FDIRPICMD_CLR, TXGBE_FDIRPICMD_CLR);
+ txgbe_flush(hw);
+ wr32m(hw, TXGBE_FDIRPICMD, TXGBE_FDIRPICMD_CLR, 0);
+ txgbe_flush(hw);
+ /*
+ * Clear FDIR Hash register to clear any leftover hashes
+ * waiting to be programmed.
+ */
+ wr32(hw, TXGBE_FDIRPIHASH, 0x00);
+ txgbe_flush(hw);
+
+ wr32(hw, TXGBE_FDIRCTL, fdirctrl);
+ txgbe_flush(hw);
+
+ /* Poll init-done after we write the FDIRCTL register */
+ for (i = 0; i < TXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (rd32m(hw, TXGBE_FDIRCTL, TXGBE_FDIRCTL_INITDONE))
+ break;
+ msec_delay(1);
+ }
+ if (i >= TXGBE_FDIR_INIT_DONE_POLL) {
+ DEBUGOUT("Flow Director Signature poll time exceeded!\n");
+ return TXGBE_ERR_FDIR_REINIT_FAILED;
+ }
+
+ /* Clear FDIR statistics registers (read to clear) */
+ rd32(hw, TXGBE_FDIRUSED);
+ rd32(hw, TXGBE_FDIRFAIL);
+ rd32(hw, TXGBE_FDIRMATCH);
+ rd32(hw, TXGBE_FDIRMISS);
+ rd32(hw, TXGBE_FDIRLEN);
+
+ return 0;
+}
+
/**
* txgbe_start_hw_raptor - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
s32 txgbe_enable_rx_dma_raptor(struct txgbe_hw *hw, u32 regval);
s32 txgbe_prot_autoc_read_raptor(struct txgbe_hw *hw, bool *locked, u64 *value);
s32 txgbe_prot_autoc_write_raptor(struct txgbe_hw *hw, bool locked, u64 value);
+s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw);
bool txgbe_verify_lesm_fw_enabled_raptor(struct txgbe_hw *hw);
#endif /* _TXGBE_HW_H_ */
/* Remove all ntuple filters of the device */
txgbe_ntuple_filter_uninit(dev);
+ /* clear all the filter lists */
+ txgbe_filterlist_flush();
+
return ret;
}
(void)txgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
}
+/* remove all the n-tuple filters */
+void
+txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
+{
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ struct txgbe_5tuple_filter *p_5tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
+ txgbe_remove_5tuple_filter(dev, p_5tuple);
+}
+
+/* remove all the ether type filters */
+void
+txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ int i;
+
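+ /* clear each enabled filter, skipping entries that have the conf flag set */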
+ for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
+ if ((filter_info->ethertype_mask & (1 << i)) &&
+ !filter_info->ethertype_filters[i].conf) {
+ (void)txgbe_ethertype_filter_remove(filter_info,
+ (uint8_t)i);
+ wr32(hw, TXGBE_ETFLT(i), 0);
+ wr32(hw, TXGBE_ETCLS(i), 0);
+ txgbe_flush(hw);
+ }
+ }
+}
+
+/* remove the SYN filter */
+void
+txgbe_clear_syn_filter(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+ if (filter_info->syn_info & TXGBE_SYNCLS_ENA) {
+ filter_info->syn_info = 0;
+
+ wr32(hw, TXGBE_SYNCLS, 0);
+ txgbe_flush(hw);
+ }
+}
+
+/* remove all the L2 tunnel filters */
+int
+txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
+{
+ struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+ struct txgbe_l2_tn_filter *l2_tn_filter;
+ struct txgbe_l2_tunnel_conf l2_tn_conf;
+ int ret = 0;
+
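+ /* rebuild each filter's config from its stored key and reuse the normal delete path */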
+ while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+ l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
+ l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id;
+ l2_tn_conf.pool = l2_tn_filter->pool;
+ ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct eth_dev_ops txgbe_eth_dev_ops = {
.dev_configure = txgbe_dev_configure,
.dev_infos_get = txgbe_dev_info_get,
txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
struct txgbe_l2_tunnel_conf *l2_tunnel);
void txgbe_filterlist_init(void);
+void txgbe_filterlist_flush(void);
void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
uint8_t queue, uint8_t msix_vector);
uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
void txgbe_fdir_filter_restore(struct rte_eth_dev *dev);
+int txgbe_clear_all_fdir_filter(struct rte_eth_dev *dev);
extern const struct rte_flow_ops txgbe_flow_ops;
+void txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
+void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
+void txgbe_clear_syn_filter(struct rte_eth_dev *dev);
+int txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev);
+
int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
uint16_t tx_rate, uint64_t q_msk);
int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
return err;
}
+static int
+txgbe_fdir_flush(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
+ int ret;
+
+ ret = txgbe_reinit_fdir_tables(hw);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "Failed to re-initialize FD table.");
+ return ret;
+ }
+
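+ /* reset the driver's soft statistics for the FDIR table */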
+ info->f_add = 0;
+ info->f_remove = 0;
+ info->add = 0;
+ info->remove = 0;
+
+ return ret;
+}
+
/* restore flow director filter */
void
txgbe_fdir_filter_restore(struct rte_eth_dev *dev)
}
}
+/* remove all the flow director filters */
+int
+txgbe_clear_all_fdir_filter(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
+ struct txgbe_fdir_filter *fdir_filter;
+ struct txgbe_fdir_filter *filter_flag;
+ int ret = 0;
+
+ /* flush flow director */
+ rte_hash_reset(fdir_info->hash_handle);
+ memset(fdir_info->hash_map, 0,
+ sizeof(struct txgbe_fdir_filter *) * TXGBE_MAX_FDIR_FILTER_NUM);
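+ /* remember whether any filter was present so hardware is only flushed when needed */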
+ filter_flag = TAILQ_FIRST(&fdir_info->fdir_list);
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list,
+ fdir_filter,
+ entries);
+ rte_free(fdir_filter);
+ }
+
+ if (filter_flag != NULL)
+ ret = txgbe_fdir_flush(dev);
+
+ return ret;
+}
return 0;
}
+/* remove the RSS filter */
+static void
+txgbe_clear_rss_filter(struct rte_eth_dev *dev)
+{
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
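+ /* a non-zero queue count means an RSS rule is currently installed */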
+ if (filter_info->rss_info.conf.queue_num)
+ txgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
+}
+
void
txgbe_filterlist_init(void)
{
TAILQ_INIT(&txgbe_flow_list);
}
+void
+txgbe_filterlist_flush(void)
+{
+ struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
+ struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
+ struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
+ struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+ struct txgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct txgbe_flow_mem *txgbe_flow_mem_ptr;
+ struct txgbe_rss_conf_ele *rss_filter_ptr;
+
+ while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
+ TAILQ_REMOVE(&filter_ntuple_list,
+ ntuple_filter_ptr,
+ entries);
+ rte_free(ntuple_filter_ptr);
+ }
+
+ while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
+ TAILQ_REMOVE(&filter_ethertype_list,
+ ethertype_filter_ptr,
+ entries);
+ rte_free(ethertype_filter_ptr);
+ }
+
+ while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
+ TAILQ_REMOVE(&filter_syn_list,
+ syn_filter_ptr,
+ entries);
+ rte_free(syn_filter_ptr);
+ }
+
+ while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
+ TAILQ_REMOVE(&filter_l2_tunnel_list,
+ l2_tn_filter_ptr,
+ entries);
+ rte_free(l2_tn_filter_ptr);
+ }
+
+ while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
+ TAILQ_REMOVE(&filter_fdir_list,
+ fdir_rule_ptr,
+ entries);
+ rte_free(fdir_rule_ptr);
+ }
+
+ while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
+ TAILQ_REMOVE(&filter_rss_list,
+ rss_filter_ptr,
+ entries);
+ rte_free(rss_filter_ptr);
+ }
+
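+ /* finally release the flow handles themselves along with their list nodes */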
+ while ((txgbe_flow_mem_ptr = TAILQ_FIRST(&txgbe_flow_list))) {
+ TAILQ_REMOVE(&txgbe_flow_list,
+ txgbe_flow_mem_ptr,
+ entries);
+ rte_free(txgbe_flow_mem_ptr->flow);
+ rte_free(txgbe_flow_mem_ptr);
+ }
+}
+
/**
* Create or destroy a flow rule.
* Theoretically one rule can match more than one filter.
{
int ret = 0;
- return ret;
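+ /* clear the hardware filters first, then drop the software lists */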
+ txgbe_clear_all_ntuple_filter(dev);
+ txgbe_clear_all_ethertype_filter(dev);
+ txgbe_clear_syn_filter(dev);
+
+ ret = txgbe_clear_all_fdir_filter(dev);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to flush rule");
+ return ret;
+ }
+
+ ret = txgbe_clear_all_l2_tn_filter(dev);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to flush rule");
+ return ret;
+ }
+
+ txgbe_clear_rss_filter(dev);
+
+ txgbe_filterlist_flush();
+
+ return 0;
}
const struct rte_flow_ops txgbe_flow_ops = {