Flow API
--------
-Supports the DPDK Flow API for generic filtering.
+Supports the flow API family.
-* **[implements] eth_dev_ops**: ``filter_ctrl:RTE_ETH_FILTER_GENERIC``.
+* **[implements] eth_dev_ops**: ``flow_ops_get``.
* **[implements] rte_flow_ops**: ``All``.
queues, to virtual/physical device functions or ports, performing tunnel
offloads, adding marks and so on.
-It is slightly higher-level than the legacy filtering framework which it
-encompasses and supersedes (including all functions and filter types) in
-order to expose a single interface with an unambiguous behavior that is
-common to all poll-mode drivers (PMDs).
-
Flow rule
---------
- Configuring MAC addresses.
- Configuring multicast addresses.
- Configuring VLAN filters.
-- Configuring Rx filters through the legacy API (e.g. FDIR).
- Configuring global RSS settings.
.. code-block:: c
API/ABI versioning constraints as it is not exposed to applications and may
evolve independently.
-It is currently implemented on top of the legacy filtering framework through
-filter type *RTE_ETH_FILTER_GENERIC* that accepts the single operation
-*RTE_ETH_FILTER_GET* to return PMD-specific *rte_flow* callbacks wrapped
-inside ``struct rte_flow_ops``.
-
-This overhead is temporarily necessary in order to keep compatibility with
-the legacy filtering framework, which should eventually disappear.
+The PMD interface is based on callbacks pointed to by the ``struct rte_flow_ops``.
- PMD callbacks implement exactly the interface described in `Rules
management`_, except for the port ID argument which has already been
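For illustration, a minimal sketch of what the new interface asks of a
driver; the ``pmd_*`` names are hypothetical stand-ins for the per-driver
implementations converted in the hunks below.

.. code-block:: c

   /* Hypothetical PMD flow callbacks; handlers elided. */
   static const struct rte_flow_ops pmd_flow_ops = {
           .validate = pmd_flow_validate,
           .create = pmd_flow_create,
           .destroy = pmd_flow_destroy,
   };

   /* Replaces the old filter_ctrl(RTE_ETH_FILTER_GENERIC) handshake. */
   static int
   pmd_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
                    const struct rte_flow_ops **ops)
   {
           *ops = &pmd_flow_ops;
           return 0;
   }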
int bnxt_flow_stats_req(struct bnxt *bp);
int bnxt_flow_stats_cnt(struct bnxt *bp);
uint32_t bnxt_get_speed_capabilities(struct bnxt *bp);
+int bnxt_flow_ops_get_op(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops);
-int
-bnxt_filter_ctrl_op(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg);
#endif
}
int
-bnxt_filter_ctrl_op(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg)
+bnxt_flow_ops_get_op(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops)
{
struct bnxt *bp = dev->data->dev_private;
int ret = 0;
bp = vfr->parent_dev->data->dev_private;
/* parent is deleted while children are still valid */
if (!bp) {
- PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR Error %d:%d\n",
- dev->data->port_id,
- filter_type,
- filter_op);
+ PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR Error\n",
+ dev->data->port_id);
return -EIO;
}
}
if (ret)
return ret;
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
+ /* PMD supports thread-safe flow operations. rte_flow API
+ * functions can avoid mutex for multi-thread safety.
+ */
+ dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
- /* PMD supports thread-safe flow operations. rte_flow API
- * functions can avoid mutex for multi-thread safety.
- */
- dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
+ if (BNXT_TRUFLOW_EN(bp))
+ *ops = &bnxt_ulp_rte_flow_ops;
+ else
+ *ops = &bnxt_flow_ops;
- if (BNXT_TRUFLOW_EN(bp))
- *(const void **)arg = &bnxt_ulp_rte_flow_ops;
- else
- *(const void **)arg = &bnxt_flow_ops;
- break;
- default:
- PMD_DRV_LOG(ERR,
- "Filter type (%d) not supported", filter_type);
- ret = -EINVAL;
- break;
- }
return ret;
}
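The ``RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE`` flag set above lets the ethdev
layer skip its per-port mutex around flow operations. A simplified sketch
of the consuming side, assuming the ``fts_enter()`` helper and
``flow_ops_mutex`` field used by ``rte_flow.c`` (details may differ from
the actual implementation):

.. code-block:: c

   /* Sketch: lock only when the PMD did not declare thread safety. */
   static inline void
   fts_enter(struct rte_eth_dev *dev)
   {
           if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
                   pthread_mutex_lock(&dev->data->flow_ops_mutex);
   }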
.rx_queue_stop = bnxt_rx_queue_stop,
.tx_queue_start = bnxt_tx_queue_start,
.tx_queue_stop = bnxt_tx_queue_stop,
- .filter_ctrl = bnxt_filter_ctrl_op,
+ .flow_ops_get = bnxt_flow_ops_get_op,
.dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
.get_eeprom_length = bnxt_get_eeprom_length_op,
.get_eeprom = bnxt_get_eeprom_op,
.dev_stop = bnxt_rep_dev_stop_op,
.stats_get = bnxt_rep_stats_get_op,
.stats_reset = bnxt_rep_stats_reset_op,
- .filter_ctrl = bnxt_filter_ctrl_op
+ .flow_ops_get = bnxt_flow_ops_get_op
};
uint16_t
}
static int
-bond_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
- enum rte_filter_type type, enum rte_filter_op op, void *arg)
+bond_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_ops **ops)
{
- if (type == RTE_ETH_FILTER_GENERIC && op == RTE_ETH_FILTER_GET) {
- *(const void **)arg = &bond_flow_ops;
- return 0;
- }
- return -ENOTSUP;
+ *ops = &bond_flow_ops;
+ return 0;
}
static int
.mac_addr_set = bond_ethdev_mac_address_set,
.mac_addr_add = bond_ethdev_mac_addr_add,
.mac_addr_remove = bond_ethdev_mac_addr_remove,
- .filter_ctrl = bond_filter_ctrl
+ .flow_ops_get = bond_flow_ops_get
};
static int
.rx_queue_start = cxgbe_dev_rx_queue_start,
.rx_queue_stop = cxgbe_dev_rx_queue_stop,
.rx_queue_release = cxgbe_dev_rx_queue_release,
- .filter_ctrl = cxgbe_dev_filter_ctrl,
+ .flow_ops_get = cxgbe_dev_flow_ops_get,
.stats_get = cxgbe_dev_stats_get,
.stats_reset = cxgbe_dev_stats_reset,
.flow_ctrl_get = cxgbe_flow_ctrl_get,
};
int
-cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
+cxgbe_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_ops **ops)
{
- int ret = 0;
-
- RTE_SET_USED(dev);
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
- *(const void **)arg = &cxgbe_flow_ops;
- break;
- default:
- ret = -ENOTSUP;
- break;
- }
- return ret;
+ *ops = &cxgbe_flow_ops;
+ return 0;
}
struct rte_eth_dev *dev;
};
-int
-cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg);
+int cxgbe_dev_flow_ops_get(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops);
#endif /* _CXGBE_FLOW_H_ */
{"cgr_reject_bytes", 4, 1},
};
-static const enum rte_filter_op dpaa2_supported_filter_ops[] = {
- RTE_ETH_FILTER_GET
-};
-
static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
return ret;
}
-static inline int
-dpaa2_dev_verify_filter_ops(enum rte_filter_op filter_op)
-{
- unsigned int i;
-
- for (i = 0; i < RTE_DIM(dpaa2_supported_filter_ops); i++) {
- if (dpaa2_supported_filter_ops[i] == filter_op)
- return 0;
- }
- return -ENOTSUP;
-}
-
static int
-dpaa2_dev_flow_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
+dpaa2_dev_flow_ops_get(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops)
{
- int ret = 0;
-
if (!dev)
return -ENODEV;
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (dpaa2_dev_verify_filter_ops(filter_op) < 0) {
- ret = -ENOTSUP;
- break;
- }
- *(const void **)arg = &dpaa2_flow_ops;
- dpaa2_filter_type |= filter_type;
- break;
- default:
- RTE_LOG(ERR, PMD, "Filter type (%d) not supported",
- filter_type);
- ret = -ENOTSUP;
- break;
- }
- return ret;
+ *ops = &dpaa2_flow_ops;
+ return 0;
}
static void
.mac_addr_set = dpaa2_dev_set_mac_addr,
.rss_hash_update = dpaa2_dev_rss_hash_update,
.rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get,
- .filter_ctrl = dpaa2_dev_flow_ctrl,
+ .flow_ops_get = dpaa2_dev_flow_ops_get,
.rxq_info_get = dpaa2_rxq_info_get,
.txq_info_get = dpaa2_txq_info_get,
.tm_ops_get = dpaa2_tm_ops_get,
/* Externally defined */
extern const struct rte_flow_ops dpaa2_flow_ops;
-extern enum rte_filter_type dpaa2_filter_type;
extern const struct rte_tm_ops dpaa2_tm_ops;
/* Max of enum rte_flow_item_type + 1, for both IPv4 and IPv6*/
#define DPAA2_FLOW_ITEM_TYPE_GENERIC_IP (RTE_FLOW_ITEM_TYPE_META + 1)
-enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE;
-
#ifndef __cplusplus
static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
flow->ipaddr_rule.fs_ipdst_offset =
IP_ADDRESS_OFFSET_INVALID;
- switch (dpaa2_filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
- actions, error);
- if (ret < 0) {
- if (error->type > RTE_FLOW_ERROR_TYPE_ACTION)
- rte_flow_error_set(error, EPERM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- attr, "unknown");
- DPAA2_PMD_ERR(
- "Failure to create flow, return code (%d)", ret);
- goto creation_error;
- }
- break;
- default:
- DPAA2_PMD_ERR("Filter type (%d) not supported",
- dpaa2_filter_type);
- break;
+ ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
+ actions, error);
+ if (ret < 0) {
+ if (error->type > RTE_FLOW_ERROR_TYPE_ACTION)
+ rte_flow_error_set(error, EPERM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ attr, "unknown");
+ DPAA2_PMD_ERR("Failure to create flow, return code (%d)", ret);
+ goto creation_error;
}
return flow;
struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter);
-static int eth_igb_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg);
+static int eth_igb_flow_ops_get(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops);
static int eth_igb_get_reg_length(struct rte_eth_dev *dev);
static int eth_igb_get_regs(struct rte_eth_dev *dev,
struct rte_dev_reg_info *regs);
.reta_query = eth_igb_rss_reta_query,
.rss_hash_update = eth_igb_rss_hash_update,
.rss_hash_conf_get = eth_igb_rss_hash_conf_get,
- .filter_ctrl = eth_igb_filter_ctrl,
+ .flow_ops_get = eth_igb_flow_ops_get,
.set_mc_addr_list = eth_igb_set_mc_addr_list,
.rxq_info_get = igb_rxq_info_get,
.txq_info_get = igb_txq_info_get,
}
static int
-eth_igb_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
+eth_igb_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_ops **ops)
{
- int ret = 0;
-
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
- *(const void **)arg = &igb_flow_ops;
- break;
- default:
- PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
- filter_type);
- break;
- }
-
- return ret;
+ *ops = &igb_flow_ops;
+ return 0;
}
static int
RTE_LOG_REGISTER(enic_pmd_logtype, pmd.net.enic, INFO);
static int
-enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
+enicpmd_dev_flow_ops_get(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops)
{
struct enic *enic = pmd_priv(dev);
- int ret = 0;
ENICPMD_FUNC_TRACE();
*/
if (enic->geneve_opt_enabled)
return -ENOTSUP;
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
- if (enic->flow_filter_mode == FILTER_FLOWMAN)
- *(const void **)arg = &enic_fm_flow_ops;
- else
- *(const void **)arg = &enic_flow_ops;
- break;
- default:
- dev_warning(enic, "Filter type (%d) not supported",
- filter_type);
- ret = -EINVAL;
- break;
- }
- return ret;
+ if (enic->flow_filter_mode == FILTER_FLOWMAN)
+ *ops = &enic_fm_flow_ops;
+ else
+ *ops = &enic_flow_ops;
+ return 0;
}
static void enicpmd_dev_tx_queue_release(void *txq)
.mac_addr_remove = enicpmd_remove_mac_addr,
.mac_addr_set = enicpmd_set_mac_addr,
.set_mc_addr_list = enicpmd_set_mc_addr_list,
- .filter_ctrl = enicpmd_dev_filter_ctrl,
+ .flow_ops_get = enicpmd_dev_flow_ops_get,
.reta_query = enicpmd_dev_rss_reta_query,
.reta_update = enicpmd_dev_rss_reta_update,
.rss_hash_conf_get = enicpmd_dev_rss_hash_conf_get,
};
static int
-enic_vf_filter_ctrl(struct rte_eth_dev *eth_dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
+enic_vf_flow_ops_get(struct rte_eth_dev *eth_dev,
+ const struct rte_flow_ops **ops)
{
struct enic_vf_representor *vf;
- int ret = 0;
ENICPMD_FUNC_TRACE();
vf = eth_dev->data->dev_private;
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
- if (vf->enic.flow_filter_mode == FILTER_FLOWMAN) {
- *(const void **)arg = &enic_vf_flow_ops;
- } else {
- ENICPMD_LOG(WARNING, "VF representors require flowman support for rte_flow API");
- ret = -EINVAL;
- }
- break;
- default:
- ENICPMD_LOG(WARNING, "Filter type (%d) not supported",
- filter_type);
- ret = -EINVAL;
- break;
+ if (vf->enic.flow_filter_mode != FILTER_FLOWMAN) {
+ ENICPMD_LOG(WARNING,
+ "VF representors require flowman support for rte_flow API");
+ return -EINVAL;
}
- return ret;
+
+ *ops = &enic_vf_flow_ops;
+ return 0;
}
static int enic_vf_link_update(struct rte_eth_dev *eth_dev,
.dev_start = enic_vf_dev_start,
.dev_stop = enic_vf_dev_stop,
.dev_close = enic_vf_dev_close,
- .filter_ctrl = enic_vf_filter_ctrl,
+ .flow_ops_get = enic_vf_flow_ops_get,
.link_update = enic_vf_link_update,
.promiscuous_enable = enic_vf_promiscuous_enable,
.promiscuous_disable = enic_vf_promiscuous_disable,
}
static int
-fs_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
- enum rte_filter_type type,
- enum rte_filter_op op,
- void *arg)
+fs_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_ops **ops)
{
- if (type == RTE_ETH_FILTER_GENERIC &&
- op == RTE_ETH_FILTER_GET) {
- *(const void **)arg = &fs_flow_ops;
- return 0;
- }
- return -ENOTSUP;
+ *ops = &fs_flow_ops;
+ return 0;
}
const struct eth_dev_ops failsafe_ops = {
.mac_addr_set = fs_mac_addr_set,
.set_mc_addr_list = fs_set_mc_addr_list,
.rss_hash_update = fs_rss_hash_update,
- .filter_ctrl = fs_filter_ctrl,
+ .flow_ops_get = fs_flow_ops_get,
};
}
/**
- * DPDK callback to manage filter control operations
+ * DPDK callback to get flow operations
*
* @param dev
* Pointer to Ethernet device structure.
- * @param filter_type
- * Filter type, which just supports generic type.
- * @param filter_op
- * Filter operation to perform.
- * @param arg
+ * @param ops
* Pointer to flow operations.
*
* @return
* 0 on success, negative error value otherwise.
*/
-static int hinic_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
+static int hinic_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_ops **ops)
{
- struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
- int func_id = hinic_global_func_id(nic_dev->hwdev);
-
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
- *(const void **)arg = &hinic_flow_ops;
- break;
- default:
- PMD_DRV_LOG(INFO, "Filter type (%d) not supported",
- filter_type);
- return -EINVAL;
- }
-
- PMD_DRV_LOG(INFO, "Set filter_ctrl succeed, func_id: 0x%x, filter_type: 0x%x,"
- "filter_op: 0x%x.", func_id, filter_type, filter_op);
+ *ops = &hinic_flow_ops;
return 0;
}
.mac_addr_remove = hinic_mac_addr_remove,
.mac_addr_add = hinic_mac_addr_add,
.set_mc_addr_list = hinic_set_mc_addr_list,
- .filter_ctrl = hinic_dev_filter_ctrl,
+ .flow_ops_get = hinic_dev_flow_ops_get,
};
static const struct eth_dev_ops hinic_pmd_vf_ops = {
.mac_addr_remove = hinic_mac_addr_remove,
.mac_addr_add = hinic_mac_addr_add,
.set_mc_addr_list = hinic_set_mc_addr_list,
- .filter_ctrl = hinic_dev_filter_ctrl,
+ .flow_ops_get = hinic_dev_flow_ops_get,
};
static int hinic_func_init(struct rte_eth_dev *eth_dev)
.rss_hash_conf_get = hns3_dev_rss_hash_conf_get,
.reta_update = hns3_dev_rss_reta_update,
.reta_query = hns3_dev_rss_reta_query,
- .filter_ctrl = hns3_dev_filter_ctrl,
+ .flow_ops_get = hns3_dev_flow_ops_get,
.vlan_filter_set = hns3_vlan_filter_set,
.vlan_tpid_set = hns3_vlan_tpid_set,
.vlan_offload_set = hns3_vlan_offload_set,
}
int hns3_buffer_alloc(struct hns3_hw *hw);
-int hns3_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg);
+int hns3_dev_flow_ops_get(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops);
bool hns3_is_reset_pending(struct hns3_adapter *hns);
bool hns3vf_is_reset_pending(struct hns3_adapter *hns);
void hns3_update_link_status_and_event(struct hns3_hw *hw);
.rss_hash_conf_get = hns3_dev_rss_hash_conf_get,
.reta_update = hns3_dev_rss_reta_update,
.reta_query = hns3_dev_rss_reta_query,
- .filter_ctrl = hns3_dev_filter_ctrl,
+ .flow_ops_get = hns3_dev_flow_ops_get,
.vlan_filter_set = hns3vf_vlan_filter_set,
.vlan_offload_set = hns3vf_vlan_offload_set,
.get_reg = hns3_get_regs,
.isolate = NULL,
};
-/*
- * The entry of flow API.
- * @param dev
- * Pointer to Ethernet device.
- * @return
- * 0 on success, a negative errno value otherwise is set.
- */
int
-hns3_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg)
+hns3_dev_flow_ops_get(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops)
{
struct hns3_hw *hw;
- int ret = 0;
hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
- if (hw->adapter_state >= HNS3_NIC_CLOSED)
- return -ENODEV;
- *(const void **)arg = &hns3_flow_ops;
- break;
- default:
- hns3_err(hw, "Filter type (%d) not supported", filter_type);
- ret = -EOPNOTSUPP;
- break;
- }
+ if (hw->adapter_state >= HNS3_NIC_CLOSED)
+ return -ENODEV;
- return ret;
+ *ops = &hns3_flow_ops;
+ return 0;
}
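The error code returned here propagates through ``rte_flow_ops_get()`` in
the ethdev layer (see the final hunks of this patch), so an application
probing a closed port simply sees a failed flow call. A hedged usage
sketch, assuming ``attr``, ``pattern`` and ``actions`` are built elsewhere:

.. code-block:: c

   /* Application side: a closed port surfaces as a flow error. */
   struct rte_flow_error flow_err;
   int rc = rte_flow_validate(port_id, &attr, pattern, actions,
                              &flow_err);

   if (rc != 0)
           printf("flow API unavailable: %s\n",
                  flow_err.message ? flow_err.message : "unknown");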
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
-static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg);
+static int i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
struct rte_eth_dcb_info *dcb_info);
static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
.rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
.udp_tunnel_port_add = i40e_dev_udp_tunnel_port_add,
.udp_tunnel_port_del = i40e_dev_udp_tunnel_port_del,
- .filter_ctrl = i40e_dev_filter_ctrl,
+ .flow_ops_get = i40e_dev_flow_ops_get,
.rxq_info_get = i40e_rxq_info_get,
.txq_info_get = i40e_txq_info_get,
.rx_burst_mode_get = i40e_rx_burst_mode_get,
}
static int
-i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
+i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops)
{
- int ret = 0;
-
if (dev == NULL)
return -EINVAL;
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
- *(const void **)arg = &i40e_flow_ops;
- break;
- default:
- PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
- filter_type);
- ret = -EINVAL;
- break;
- }
-
- return ret;
+ *ops = &i40e_flow_ops;
+ return 0;
}
/*
uint16_t queue_id);
static int iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
uint16_t queue_id);
-static int iavf_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg);
+static int iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops);
static int iavf_set_mc_addr_list(struct rte_eth_dev *dev,
struct rte_ether_addr *mc_addrs,
uint32_t mc_addrs_num);
.mtu_set = iavf_dev_mtu_set,
.rx_queue_intr_enable = iavf_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = iavf_dev_rx_queue_intr_disable,
- .filter_ctrl = iavf_dev_filter_ctrl,
+ .flow_ops_get = iavf_dev_flow_ops_get,
.tx_done_cleanup = iavf_dev_tx_done_cleanup,
};
}
static int
-iavf_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
+iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops)
{
- int ret = 0;
-
if (!dev)
return -EINVAL;
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
- *(const void **)arg = &iavf_flow_ops;
- break;
- default:
- PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
- filter_type);
- ret = -EINVAL;
- break;
- }
-
- return ret;
+ *ops = &iavf_flow_ops;
+ return 0;
}
static void
}
static int
-ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
+ice_dcf_dev_flow_ops_get(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops)
{
- int ret = 0;
-
if (!dev)
return -EINVAL;
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
- *(const void **)arg = &ice_flow_ops;
- break;
-
- default:
- PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
- filter_type);
- ret = -EINVAL;
- break;
- }
-
- return ret;
+ *ops = &ice_flow_ops;
+ return 0;
}
#define ICE_DCF_32_BIT_WIDTH (CHAR_BIT * 4)
.promiscuous_disable = ice_dcf_dev_promiscuous_disable,
.allmulticast_enable = ice_dcf_dev_allmulticast_enable,
.allmulticast_disable = ice_dcf_dev_allmulticast_disable,
- .filter_ctrl = ice_dcf_dev_filter_ctrl,
+ .flow_ops_get = ice_dcf_dev_flow_ops_get,
.udp_tunnel_port_add = ice_dcf_dev_udp_tunnel_port_add,
.udp_tunnel_port_del = ice_dcf_dev_udp_tunnel_port_del,
};
static int ice_xstats_get_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
unsigned int limit);
-static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg);
+static int ice_dev_flow_ops_get(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops);
static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
.xstats_get = ice_xstats_get,
.xstats_get_names = ice_xstats_get_names,
.xstats_reset = ice_stats_reset,
- .filter_ctrl = ice_dev_filter_ctrl,
+ .flow_ops_get = ice_dev_flow_ops_get,
.udp_tunnel_port_add = ice_dev_udp_tunnel_port_add,
.udp_tunnel_port_del = ice_dev_udp_tunnel_port_del,
.tx_done_cleanup = ice_tx_done_cleanup,
}
static int
-ice_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
+ice_dev_flow_ops_get(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops)
{
- int ret = 0;
-
if (!dev)
return -EINVAL;
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
- *(const void **)arg = &ice_flow_ops;
- break;
- default:
- PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
- filter_type);
- ret = -EINVAL;
- break;
- }
-
- return ret;
+ *ops = &ice_flow_ops;
+ return 0;
}
/* Add UDP tunneling port */
.vlan_offload_set = eth_igc_vlan_offload_set,
.vlan_tpid_set = eth_igc_vlan_tpid_set,
.vlan_strip_queue_set = eth_igc_vlan_strip_queue_set,
- .filter_ctrl = eth_igc_filter_ctrl,
+ .flow_ops_get = eth_igc_flow_ops_get,
};
/*
}
int
-eth_igc_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg)
+eth_igc_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_ops **ops)
{
- int ret = 0;
-
- RTE_SET_USED(dev);
-
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
- *(const void **)arg = &igc_flow_ops;
- break;
- default:
- PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
- filter_type);
- ret = -EINVAL;
- }
-
- return ret;
+ *ops = &igc_flow_ops;
+ return 0;
}
const struct igc_syn_filter *filter);
void igc_clear_syn_filter(struct rte_eth_dev *dev);
void igc_clear_all_filter(struct rte_eth_dev *dev);
-int
-eth_igc_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg);
+int eth_igc_flow_ops_get(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops);
#ifdef __cplusplus
}
}
static int
-ipn3ke_afu_filter_ctrl(struct rte_eth_dev *ethdev,
- enum rte_filter_type filter_type, enum rte_filter_op filter_op,
- void *arg)
+ipn3ke_afu_flow_ops_get(struct rte_eth_dev *ethdev,
+ const struct rte_flow_ops **ops)
{
- int ret = 0;
struct ipn3ke_hw *hw;
struct ipn3ke_rpst *rpst;
rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
if (hw->acc_flow)
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
- *(const void **)arg = &ipn3ke_flow_ops;
- break;
- default:
- IPN3KE_AFU_PMD_WARN("Filter type (%d) not supported",
- filter_type);
- ret = -EINVAL;
- break;
- }
+ *ops = &ipn3ke_flow_ops;
else if (rpst->i40e_pf_eth)
- (*rpst->i40e_pf_eth->dev_ops->filter_ctrl)(ethdev,
- filter_type,
- filter_op,
- arg);
+ (*rpst->i40e_pf_eth->dev_ops->flow_ops_get)(ethdev, ops);
else
return -EINVAL;
- return ret;
+ return 0;
}
static const struct eth_dev_ops ipn3ke_rpst_dev_ops = {
.stats_reset = ipn3ke_rpst_stats_reset,
.xstats_reset = ipn3ke_rpst_stats_reset,
- .filter_ctrl = ipn3ke_afu_filter_ctrl,
+ .flow_ops_get = ipn3ke_afu_flow_ops_get,
.rx_queue_start = ipn3ke_rpst_rx_queue_start,
.rx_queue_stop = ipn3ke_rpst_rx_queue_stop,
struct ixgbe_5tuple_filter *filter);
static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
struct ixgbe_5tuple_filter *filter);
-static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg);
+static int ixgbe_dev_flow_ops_get(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops);
static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
.reta_query = ixgbe_dev_rss_reta_query,
.rss_hash_update = ixgbe_dev_rss_hash_update,
.rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get,
- .filter_ctrl = ixgbe_dev_filter_ctrl,
+ .flow_ops_get = ixgbe_dev_flow_ops_get,
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
.rxq_info_get = ixgbe_rxq_info_get,
.txq_info_get = ixgbe_txq_info_get,
}
static int
-ixgbe_dev_filter_ctrl(__rte_unused struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
+ixgbe_dev_flow_ops_get(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops)
{
- int ret = 0;
-
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
- *(const void **)arg = &ixgbe_flow_ops;
- break;
- default:
- PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
- filter_type);
- ret = -EINVAL;
- break;
- }
-
- return ret;
+ *ops = &ixgbe_flow_ops;
+ return 0;
}
static u8 *
.flow_ctrl_get = mlx4_flow_ctrl_get,
.flow_ctrl_set = mlx4_flow_ctrl_set,
.mtu_set = mlx4_mtu_set,
- .filter_ctrl = mlx4_filter_ctrl,
+ .flow_ops_get = mlx4_flow_ops_get,
.rx_queue_intr_enable = mlx4_rx_intr_enable,
.rx_queue_intr_disable = mlx4_rx_intr_disable,
.is_removed = mlx4_is_removed,
};
/**
- * Manage filter operations.
+ * Get rte_flow callbacks.
*
* @param dev
* Pointer to Ethernet device structure.
- * @param filter_type
- * Filter type.
- * @param filter_op
- * Operation to perform.
- * @param arg
+ * @param ops
* Pointer to flow operations.
*
- * @return
- * 0 on success, negative errno value otherwise and rte_errno is set.
+ * @return 0
*/
int
-mlx4_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
+mlx4_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_ops **ops)
{
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- break;
- *(const void **)arg = &mlx4_flow_ops;
- return 0;
- default:
- ERROR("%p: filter type (%d) not supported",
- (void *)dev, filter_type);
- break;
- }
- rte_errno = ENOTSUP;
- return -rte_errno;
+ *ops = &mlx4_flow_ops;
+ return 0;
}
int verbs_to_dpdk);
int mlx4_flow_sync(struct mlx4_priv *priv, struct rte_flow_error *error);
void mlx4_flow_clean(struct mlx4_priv *priv);
-int mlx4_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg);
+int mlx4_flow_ops_get(struct rte_eth_dev *dev, const struct rte_flow_ops **ops);
#endif /* RTE_PMD_MLX4_FLOW_H_ */
.reta_query = mlx5_dev_rss_reta_query,
.rss_hash_update = mlx5_rss_hash_update,
.rss_hash_conf_get = mlx5_rss_hash_conf_get,
- .filter_ctrl = mlx5_dev_filter_ctrl,
+ .flow_ops_get = mlx5_flow_ops_get,
.rxq_info_get = mlx5_rxq_info_get,
.txq_info_get = mlx5_txq_info_get,
.rx_burst_mode_get = mlx5_rx_burst_mode_get,
.mtu_set = mlx5_dev_set_mtu,
.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
.vlan_offload_set = mlx5_vlan_offload_set,
- .filter_ctrl = mlx5_dev_filter_ctrl,
+ .flow_ops_get = mlx5_flow_ops_get,
.rxq_info_get = mlx5_rxq_info_get,
.txq_info_get = mlx5_txq_info_get,
.rx_burst_mode_get = mlx5_rx_burst_mode_get,
struct rte_flow_error *error);
int mlx5_flow_isolate(struct rte_eth_dev *dev, int enable,
struct rte_flow_error *error);
-int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg);
+int mlx5_flow_ops_get(struct rte_eth_dev *dev, const struct rte_flow_ops **ops);
int mlx5_flow_start_default(struct rte_eth_dev *dev);
void mlx5_flow_stop_default(struct rte_eth_dev *dev);
int mlx5_flow_verify(struct rte_eth_dev *dev);
}
/**
- * Manage filter operations.
+ * Get rte_flow callbacks.
*
* @param dev
* Pointer to Ethernet device structure.
- * @param filter_type
- * Filter type.
- * @param filter_op
- * Operation to perform.
- * @param arg
+ * @param ops
* Pointer to flow operations.
*
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * @return 0
*/
int
-mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
-{
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET) {
- rte_errno = EINVAL;
- return -rte_errno;
- }
- *(const void **)arg = &mlx5_flow_ops;
- return 0;
- default:
- DRV_LOG(ERR, "port %u filter type (%d) not supported",
- dev->data->port_id, filter_type);
- rte_errno = ENOTSUP;
- return -rte_errno;
- }
+mlx5_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_ops **ops)
+{
+ *ops = &mlx5_flow_ops;
return 0;
}
*
* @param dev
* Pointer to the device structure.
- * @param filer_type
- * Flow filter type.
- * @param filter_op
- * Flow filter operation.
- * @param arg
+ * @param ops
* Pointer to pass the flow ops.
*
* @return
* 0 on success, negative error value otherwise.
*/
static int
-mrvl_eth_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg)
+mrvl_eth_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_ops **ops)
{
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
- *(const void **)arg = &mrvl_flow_ops;
- return 0;
- default:
- MRVL_LOG(WARNING, "Filter type (%d) not supported",
- filter_type);
- return -EINVAL;
- }
+ *ops = &mrvl_flow_ops;
+ return 0;
}
/**
.flow_ctrl_set = mrvl_flow_ctrl_set,
.rss_hash_update = mrvl_rss_hash_update,
.rss_hash_conf_get = mrvl_rss_hash_conf_get,
- .filter_ctrl = mrvl_eth_filter_ctrl,
+ .flow_ops_get = mrvl_eth_flow_ops_get,
.mtr_ops_get = mrvl_mtr_ops_get,
.tm_ops_get = mrvl_tm_ops_get,
};
.tx_done_cleanup = otx2_nix_tx_done_cleanup,
.set_queue_rate_limit = otx2_nix_tm_set_queue_rate_limit,
.pool_ops_supported = otx2_nix_pool_ops_supported,
- .filter_ctrl = otx2_nix_dev_filter_ctrl,
+ .flow_ops_get = otx2_nix_dev_flow_ops_get,
.get_module_info = otx2_nix_get_module_info,
.get_module_eeprom = otx2_nix_get_module_eeprom,
.fw_version_get = otx2_nix_fw_version_get,
/* Ops */
int otx2_nix_info_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *dev_info);
-int otx2_nix_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg);
+int otx2_nix_dev_flow_ops_get(struct rte_eth_dev *eth_dev,
+ const struct rte_flow_ops **ops);
int otx2_nix_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
size_t fw_size);
int otx2_nix_get_module_info(struct rte_eth_dev *eth_dev,
}
int
-otx2_nix_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg)
+otx2_nix_dev_flow_ops_get(struct rte_eth_dev *eth_dev __rte_unused,
+ const struct rte_flow_ops **ops)
{
- RTE_SET_USED(eth_dev);
-
- if (filter_type != RTE_ETH_FILTER_GENERIC) {
- otx2_err("Unsupported filter type %d", filter_type);
- return -ENOTSUP;
- }
-
- if (filter_op == RTE_ETH_FILTER_GET) {
- *(const void **)arg = &otx2_flow_ops;
- return 0;
- }
-
- otx2_err("Invalid filter_op %d", filter_op);
- return -EINVAL;
+ *ops = &otx2_flow_ops;
+ return 0;
}
static struct cgx_fw_data *
.reta_update = qede_rss_reta_update,
.reta_query = qede_rss_reta_query,
.mtu_set = qede_set_mtu,
- .filter_ctrl = qede_dev_filter_ctrl,
+ .flow_ops_get = qede_dev_flow_ops_get,
.udp_tunnel_port_add = qede_udp_dst_port_add,
.udp_tunnel_port_del = qede_udp_dst_port_del,
.fw_version_get = qede_fw_version_get,
int qede_link_update(struct rte_eth_dev *eth_dev,
__rte_unused int wait_to_complete);
-int qede_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type type,
- enum rte_filter_op op, void *arg);
-
-int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
- enum rte_filter_op filter_op, void *arg);
+int qede_dev_flow_ops_get(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops);
int qede_check_fdir_support(struct rte_eth_dev *eth_dev);
.flush = qede_flow_flush,
};
-int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
+int
+qede_dev_flow_ops_get(struct rte_eth_dev *eth_dev,
+ const struct rte_flow_ops **ops)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (ECORE_IS_CMT(edev)) {
- DP_ERR(edev, "flowdir is not supported in 100G mode\n");
- return -ENOTSUP;
- }
-
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
-
- *(const void **)arg = &qede_flow_ops;
- return 0;
- default:
- DP_ERR(edev, "Unsupported filter type %d\n",
- filter_type);
- return -EINVAL;
+ if (ECORE_IS_CMT(edev)) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ return -ENOTSUP;
}
+ *ops = &qede_flow_ops;
return 0;
}
}
static int
-sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
+sfc_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_ops **ops)
{
- struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
- int rc = ENOTSUP;
-
- sfc_log_init(sa, "entry");
-
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET) {
- rc = EINVAL;
- } else {
- *(const void **)arg = &sfc_flow_ops;
- rc = 0;
- }
- break;
- default:
- sfc_err(sa, "Unknown filter type %u", filter_type);
- break;
- }
-
- sfc_log_init(sa, "exit: %d", -rc);
- SFC_ASSERT(rc >= 0);
- return -rc;
+ *ops = &sfc_flow_ops;
+ return 0;
}
static int
.reta_query = sfc_dev_rss_reta_query,
.rss_hash_update = sfc_dev_rss_hash_update,
.rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
- .filter_ctrl = sfc_dev_filter_ctrl,
+ .flow_ops_get = sfc_dev_flow_ops_get,
.set_mc_addr_list = sfc_set_mc_addr_list,
.rxq_info_get = sfc_rx_queue_info_get,
.txq_info_get = sfc_tx_queue_info_get,
}
static int
-pmd_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
+pmd_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_ops **ops)
{
- if (filter_type == RTE_ETH_FILTER_GENERIC &&
- filter_op == RTE_ETH_FILTER_GET) {
- *(const void **)arg = &pmd_flow_ops;
- return 0;
- }
-
- return -ENOTSUP;
+ *ops = &pmd_flow_ops;
+ return 0;
}
static int
.dev_infos_get = pmd_dev_infos_get,
.rx_queue_setup = pmd_rx_queue_setup,
.tx_queue_setup = pmd_tx_queue_setup,
- .filter_ctrl = pmd_filter_ctrl,
+ .flow_ops_get = pmd_flow_ops_get,
.tm_ops_get = pmd_tm_ops_get,
.mtr_ops_get = pmd_mtr_ops_get,
};
.stats_reset = tap_stats_reset,
.dev_supported_ptypes_get = tap_dev_supported_ptypes_get,
.rss_hash_update = tap_rss_hash_update,
- .filter_ctrl = tap_dev_filter_ctrl,
+ .flow_ops_get = tap_dev_flow_ops_get,
};
static int
}
/**
- * Manage filter operations.
+ * Get rte_flow operations.
*
* @param dev
* Pointer to Ethernet device structure.
- * @param filter_type
- * Filter type.
- * @param filter_op
- * Operation to perform.
- * @param arg
+ * @param ops
* Pointer to flow operations.
*
* @return
* 0 on success, negative errno value on failure.
*/
int
-tap_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
+tap_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_ops **ops)
{
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
- *(const void **)arg = &tap_flow_ops;
- return 0;
- default:
- TAP_LOG(ERR, "%p: filter type (%d) not supported",
- dev, filter_type);
- }
- return -EINVAL;
+ *ops = &tap_flow_ops;
+ return 0;
}
SEC_MAX,
};
-int tap_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg);
+int tap_dev_flow_ops_get(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops);
int tap_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
int tap_flow_implicit_create(struct pmd_internals *pmd,
}
static int
-txgbe_dev_filter_ctrl(__rte_unused struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
+txgbe_dev_flow_ops_get(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops)
{
- int ret = 0;
-
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
- *(const void **)arg = &txgbe_flow_ops;
- break;
- default:
- PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
- filter_type);
- ret = -EINVAL;
- break;
- }
-
- return ret;
+ *ops = &txgbe_flow_ops;
+ return 0;
}
static u8 *
.reta_query = txgbe_dev_rss_reta_query,
.rss_hash_update = txgbe_dev_rss_hash_update,
.rss_hash_conf_get = txgbe_dev_rss_hash_conf_get,
- .filter_ctrl = txgbe_dev_filter_ctrl,
+ .flow_ops_get = txgbe_dev_flow_ops_get,
.set_mc_addr_list = txgbe_dev_set_mc_addr_list,
.rxq_info_get = txgbe_rxq_info_get,
.txq_info_get = txgbe_txq_info_get,
struct rte_dev_eeprom_info *info);
/**< @internal Retrieve plugin module eeprom data */
+struct rte_flow_ops;
/**
- * Feature filter types
- */
-enum rte_filter_type {
- RTE_ETH_FILTER_NONE = 0,
- RTE_ETH_FILTER_ETHERTYPE,
- RTE_ETH_FILTER_FLEXIBLE,
- RTE_ETH_FILTER_SYN,
- RTE_ETH_FILTER_NTUPLE,
- RTE_ETH_FILTER_TUNNEL,
- RTE_ETH_FILTER_FDIR,
- RTE_ETH_FILTER_HASH,
- RTE_ETH_FILTER_L2_TUNNEL,
- RTE_ETH_FILTER_GENERIC,
-};
-
-/**
- * Generic operations on filters
+ * @internal
+ * Get flow operations.
+ *
+ * If the flow API is not supported for the specified device,
+ * the driver can return NULL in *ops.
*/
-enum rte_filter_op {
- RTE_ETH_FILTER_GET, /**< get flow API ops */
-};
-
-typedef int (*eth_filter_ctrl_t)(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg);
-/**< @internal Take operations to assigned filter type on an Ethernet device */
+typedef int (*eth_flow_ops_get_t)(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops);
typedef int (*eth_tm_ops_get_t)(struct rte_eth_dev *dev, void *ops);
/**< @internal Get Traffic Management (TM) operations on an Ethernet device */
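As a sketch of the NULL contract above (driver and field names
hypothetical), a PMD supporting flow rules on only some ports can report
this quietly by returning 0 with ``*ops`` left NULL, which the ethdev
layer maps to ENOSYS:

.. code-block:: c

   /* Hypothetical driver: flow rules supported on PF ports only. */
   static int
   pmd_flow_ops_get(struct rte_eth_dev *dev,
                    const struct rte_flow_ops **ops)
   {
           struct pmd_private *priv = dev->data->dev_private;

           *ops = priv->is_pf ? &pmd_flow_ops : NULL;
           return 0;
   }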
eth_get_module_eeprom_t get_module_eeprom;
/** Get plugin module eeprom data. */
- eth_filter_ctrl_t filter_ctrl; /**< common filter control. */
+ eth_flow_ops_get_t flow_ops_get; /**< Get flow operations. */
eth_get_dcb_info get_dcb_info; /** Get DCB information. */
* Legacy ethdev API used internally by drivers.
*/
+enum rte_filter_type {
+ RTE_ETH_FILTER_NONE = 0,
+ RTE_ETH_FILTER_ETHERTYPE,
+ RTE_ETH_FILTER_FLEXIBLE,
+ RTE_ETH_FILTER_SYN,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_TUNNEL,
+ RTE_ETH_FILTER_FDIR,
+ RTE_ETH_FILTER_HASH,
+ RTE_ETH_FILTER_L2_TUNNEL,
+};
+
/**
* Define all structures for Ethertype Filter type.
*/
};
/**
- * A structure used to define the flow director filter entry by filter_ctrl API.
+ * A structure used to define the flow director filter entry.
*/
struct rte_eth_fdir_filter {
uint32_t soft_id;
if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
code = ENODEV;
- else if (unlikely(!dev->dev_ops->filter_ctrl ||
- dev->dev_ops->filter_ctrl(dev,
- RTE_ETH_FILTER_GENERIC,
- RTE_ETH_FILTER_GET,
- &ops) ||
- !ops))
+ else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
+ /* flow API not supported with this driver dev_ops */
code = ENOSYS;
else
- return ops;
- rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, rte_strerror(code));
- return NULL;
+ code = dev->dev_ops->flow_ops_get(dev, &ops);
+ if (code == 0 && ops == NULL)
+ /* flow API not supported with this device */
+ code = ENOSYS;
+
+ if (code != 0) {
+ rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(code));
+ return NULL;
+ }
+ return ops;
}
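For context, the public flow functions resolve the ops table through this
helper before dispatching. A condensed sketch of the pattern used by entry
points such as ``rte_flow_validate()`` (the real code also handles the
per-port lock):

.. code-block:: c

   /* Condensed dispatch pattern inside a public flow API function. */
   const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

   if (ops == NULL)
           return -rte_errno;
   if (ops->validate == NULL)
           return rte_flow_error_set(error, ENOSYS,
                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                           NULL, rte_strerror(ENOSYS));
   return ops->validate(dev, attr, pattern, actions, error);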
/* Check whether a flow rule can be created on a given port. */
/**
* Generic flow operations structure implemented and returned by PMDs.
*
- * To implement this API, PMDs must handle the RTE_ETH_FILTER_GENERIC filter
- * type in their .filter_ctrl callback function (struct eth_dev_ops) as well
- * as the RTE_ETH_FILTER_GET filter operation.
- *
- * If successful, this operation must result in a pointer to a PMD-specific
- * struct rte_flow_ops written to the argument address as described below:
- *
- * \code
- *
- * // PMD filter_ctrl callback
- *
- * static const struct rte_flow_ops pmd_flow_ops = { ... };
- *
- * switch (filter_type) {
- * case RTE_ETH_FILTER_GENERIC:
- * if (filter_op != RTE_ETH_FILTER_GET)
- * return -EINVAL;
- * *(const void **)arg = &pmd_flow_ops;
- * return 0;
- * }
- *
- * \endcode
- *
- * See also rte_flow_ops_get().
- *
* These callback functions are not supposed to be used by applications
* directly, which must rely on the API defined in rte_flow.h.
*
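To make that last point concrete, a hedged application-side sketch:
user code only ever calls the ``rte_flow_*`` functions, which fetch the
ops table internally, so this rework is invisible to applications
(``attr``, ``pattern`` and ``actions`` assumed built elsewhere).

.. code-block:: c

   /* Applications never touch rte_flow_ops directly. */
   struct rte_flow_error err;
   struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
                                           actions, &err);

   if (flow == NULL)
           printf("flow create failed: %s\n",
                  err.message ? err.message : rte_strerror(rte_errno));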