net: add rte prefix to UDP structure
[dpdk.git] drivers/net/mlx5/mlx5_flow.c
index 97dc3e1..9887018 100644
@@ -21,7 +21,6 @@
 
 #include <rte_common.h>
 #include <rte_ether.h>
-#include <rte_eth_ctrl.h>
 #include <rte_ethdev_driver.h>
 #include <rte_flow.h>
 #include <rte_flow_driver.h>
 
 #include "mlx5.h"
 #include "mlx5_defs.h"
-#include "mlx5_prm.h"
-#include "mlx5_glue.h"
 #include "mlx5_flow.h"
+#include "mlx5_glue.h"
+#include "mlx5_prm.h"
+#include "mlx5_rxtx.h"
 
 /* Dev ops structure defined in mlx5.c */
 extern const struct eth_dev_ops mlx5_dev_ops;
@@ -315,6 +315,7 @@ static struct mlx5_flow_tunnel_info tunnels_info[] = {
 int
 mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct {
                struct ibv_flow_attr attr;
                struct ibv_flow_spec_eth eth;
@@ -322,6 +323,7 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
        } flow_attr = {
                .attr = {
                        .num_of_specs = 2,
+                       .port = (uint8_t)priv->ibv_port,
                },
                .eth = {
                        .type = IBV_FLOW_SPEC_ETH,
@@ -350,6 +352,7 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
                claim_zero(mlx5_glue->destroy_flow(flow));
                priority = vprio[i];
        }
+       mlx5_hrxq_drop_release(dev);
        switch (priority) {
        case 8:
                priority = RTE_DIM(priority_map_3);
@@ -361,10 +364,9 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
                rte_errno = ENOTSUP;
                DRV_LOG(ERR,
                        "port %u verbs maximum priority: %d expected 8/16",
-                       dev->data->port_id, vprio[i]);
+                       dev->data->port_id, priority);
                return -rte_errno;
        }
-       mlx5_hrxq_drop_release(dev);
        DRV_LOG(INFO, "port %u flow maximum priority: %d",
                dev->data->port_id, priority);
        return priority;
@@ -387,7 +389,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
                                   uint32_t subpriority)
 {
        uint32_t res = 0;
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
 
        switch (priv->config.flow_prio) {
        case RTE_DIM(priority_map_3):
@@ -536,7 +538,7 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
 static void
 flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct rte_flow *flow = dev_flow->flow;
        const int mark = !!(flow->actions &
                            (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
@@ -599,7 +601,7 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
 static void
 flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct rte_flow *flow = dev_flow->flow;
        const int mark = !!(flow->actions &
                            (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
@@ -661,7 +663,7 @@ flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
 static void
 flow_rxq_flags_clear(struct rte_eth_dev *dev)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;
 
        for (i = 0; i != priv->rxqs_n; ++i) {
@@ -786,7 +788,7 @@ mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
  *   Pointer to error structure.
  *
  * @return
- *   0 on success, a negative errno value otherwise and rte_ernno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
 mlx5_flow_validate_action_drop(uint64_t action_flags,
@@ -829,7 +831,7 @@ mlx5_flow_validate_action_drop(uint64_t action_flags,
  *   Pointer to error structure.
  *
  * @return
- *   0 on success, a negative errno value otherwise and rte_ernno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
 mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
@@ -838,7 +840,7 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
                                const struct rte_flow_attr *attr,
                                struct rte_flow_error *error)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_action_queue *queue = action->conf;
 
        if (action_flags & MLX5_FLOW_FATE_ACTIONS)
@@ -846,6 +848,10 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have 2 fate actions in"
                                          " same flow");
+       if (!priv->rxqs_n)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         NULL, "No Rx queues configured");
        if (queue->index >= priv->rxqs_n)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
@@ -875,21 +881,25 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
  *   Pointer to the Ethernet device structure.
  * @param[in] attr
  *   Attributes of flow that includes this action.
+ * @param[in] item_flags
+ *   Items that were detected.
  * @param[out] error
  *   Pointer to error structure.
  *
  * @return
- *   0 on success, a negative errno value otherwise and rte_ernno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
 mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
                              uint64_t action_flags,
                              struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
+                             uint64_t item_flags,
                              struct rte_flow_error *error)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_action_rss *rss = action->conf;
+       int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
        unsigned int i;
 
        if (action_flags & MLX5_FLOW_FATE_ACTIONS)
@@ -939,6 +949,14 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
                                          &rss->types,
                                          "some RSS protocols are not"
                                          " supported");
+       if (!priv->rxqs_n)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         NULL, "No Rx queues configured");
+       if (!rss->queue_num)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         NULL, "No queues configured");
        for (i = 0; i != rss->queue_num; ++i) {
                if (!(*priv->rxqs)[rss->queue[i]])
                        return rte_flow_error_set
@@ -950,6 +968,11 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
                                          "rss action not supported for "
                                          "egress");
+       if (rss->level > 1 && !tunnel)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+                                         "inner RSS is not supported for "
+                                         "non-tunnel flows");
        return 0;
 }
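
The checks added to mlx5_flow_validate_action_rss() above tighten RSS validation: empty queue lists are rejected up front, and a flow may request inner (level > 1) RSS only when the pattern actually carries a tunnel layer. A minimal sketch of a configuration the new code is expected to reject, assuming a started port; the helper name is hypothetical and only the public rte_flow API is used:

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Hypothetical helper: request inner RSS (level = 2) on a plain
 * Ethernet/IPv4 pattern. With the checks added above this should fail
 * validation with an RTE_FLOW_ERROR_TYPE_ACTION_CONF error
 * ("inner RSS is not supported for non-tunnel flows"). */
static int
try_inner_rss_on_plain_ipv4(uint16_t port_id, const uint16_t *queues,
                            uint32_t queue_num)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_rss rss = {
                .level = 2,             /* inner RSS requested ... */
                .types = ETH_RSS_IP,
                .queue_num = queue_num, /* must also be non-zero */
                .queue = queues,
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        /* ... but the pattern carries no tunnel item, so this returns < 0. */
        return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}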
 
@@ -964,7 +987,7 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
  *   Pointer to error structure.
  *
  * @return
- *   0 on success, a negative errno value otherwise and rte_ernno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
 mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
@@ -998,7 +1021,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attributes,
                              struct rte_flow_error *error)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        uint32_t priority_max = priv->config.flow_prio - 1;
 
        if (attributes->group)
@@ -1141,6 +1164,9 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
  *   Item specification.
  * @param[in] item_flags
  *   Bit-fields that holds the items detected until now.
+ * @param[in] acc_mask
+ *   Acceptable mask; if NULL, the default internal mask
+ *   will be used to check whether item fields are supported.
  * @param[out] error
  *   Pointer to error structure.
  *
@@ -1150,6 +1176,7 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
 int
 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
                             uint64_t item_flags,
+                            const struct rte_flow_item_ipv4 *acc_mask,
                             struct rte_flow_error *error)
 {
        const struct rte_flow_item_ipv4 *mask = item->mask;
@@ -1185,7 +1212,8 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
                                          "partial mask is not supported"
                                          " for protocol");
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
-                                       (const uint8_t *)&nic_mask,
+                                       acc_mask ? (const uint8_t *)acc_mask
+                                                : (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_ipv4),
                                        error);
        if (ret < 0)
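
The new acc_mask parameter lets each flow engine declare which IPv4 fields it can actually match, while passing NULL keeps the generic nic_mask defined inside the validator. A hedged sketch of how a caller might use it; the mask contents and the wrapper name are assumptions, only the updated prototype above comes from the patch:

#include <rte_byteorder.h>
#include <rte_flow.h>

#include "mlx5_flow.h"

/* Hypothetical acceptable mask: this engine can match IPv4 addresses and
 * the protocol field, nothing else. Field names follow
 * struct rte_flow_item_ipv4. */
static const struct rte_flow_item_ipv4 ipv4_acc_mask = {
        .hdr = {
                .src_addr = RTE_BE32(0xffffffff),
                .dst_addr = RTE_BE32(0xffffffff),
                .next_proto_id = 0xff,
        },
};

static int
validate_ipv4_with_engine_mask(const struct rte_flow_item *item,
                               uint64_t item_flags,
                               struct rte_flow_error *error)
{
        /* Passing NULL instead of &ipv4_acc_mask falls back to the
         * validator's internal nic_mask, as documented above. */
        return mlx5_flow_validate_item_ipv4(item, item_flags,
                                            &ipv4_acc_mask, error);
}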
@@ -1200,6 +1228,9 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
  *   Item specification.
  * @param[in] item_flags
  *   Bit-fields that holds the items detected until now.
+ * @param[in] acc_mask
+ *   Acceptable mask; if NULL, the default internal mask
+ *   will be used to check whether item fields are supported.
  * @param[out] error
  *   Pointer to error structure.
  *
@@ -1209,6 +1240,7 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 int
 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
                             uint64_t item_flags,
+                            const struct rte_flow_item_ipv6 *acc_mask,
                             struct rte_flow_error *error)
 {
        const struct rte_flow_item_ipv6 *mask = item->mask;
@@ -1243,7 +1275,8 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
        if (!mask)
                mask = &rte_flow_item_ipv6_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
-                                       (const uint8_t *)&nic_mask,
+                                       acc_mask ? (const uint8_t *)acc_mask
+                                                : (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_ipv6),
                                        error);
        if (ret < 0)
@@ -1462,7 +1495,7 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
                                  struct rte_eth_dev *dev,
                                  struct rte_flow_error *error)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_item_vxlan_gpe *spec = item->spec;
        const struct rte_flow_item_vxlan_gpe *mask = item->mask;
        int ret;
@@ -1616,7 +1649,7 @@ mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
 {
 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
        const struct rte_flow_item_mpls *mask = item->mask;
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        int ret;
 
        if (!priv->config.mpls_en)
@@ -1747,10 +1780,10 @@ const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
 static enum mlx5_flow_drv_type
 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX;
 
-       if (attr->transfer)
+       if (attr->transfer && !priv->config.dv_esw_en)
                type = MLX5_FLOW_TYPE_TCF;
        else
                type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
@@ -1776,7 +1809,7 @@ flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
  *   Pointer to the error structure.
  *
  * @return
- *   0 on success, a negative errno value otherwise and rte_ernno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static inline int
 flow_drv_validate(struct rte_eth_dev *dev,
@@ -1815,7 +1848,7 @@ flow_drv_validate(struct rte_eth_dev *dev,
  *   Pointer to the error structure.
  *
  * @return
- *   Pointer to device flow on success, otherwise NULL and rte_ernno is set.
+ *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
  */
 static inline struct mlx5_flow *
 flow_drv_prepare(const struct rte_flow *flow,
@@ -1859,7 +1892,7 @@ flow_drv_prepare(const struct rte_flow *flow,
  *   Pointer to the error structure.
  *
  * @return
- *   0 on success, a negative errno value otherwise and rte_ernno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static inline int
 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
@@ -2060,6 +2093,8 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
                flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
        flow = rte_calloc(__func__, 1, flow_size, 0);
        flow->drv_type = flow_get_drv_type(dev, attr);
+       flow->ingress = attr->ingress;
+       flow->transfer = attr->transfer;
        assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
               flow->drv_type < MLX5_FLOW_TYPE_MAX);
        flow->queue = (void *)(flow + 1);
@@ -2121,8 +2156,9 @@ mlx5_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
 {
-       return flow_list_create(dev,
-                               &((struct priv *)dev->data->dev_private)->flows,
+       struct mlx5_priv *priv = (struct mlx5_priv *)dev->data->dev_private;
+
+       return flow_list_create(dev, &priv->flows,
                                attr, items, actions, error);
 }
 
@@ -2232,7 +2268,7 @@ error:
 int
 mlx5_flow_verify(struct rte_eth_dev *dev)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct rte_flow *flow;
        int ret = 0;
 
@@ -2268,7 +2304,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
                    struct rte_flow_item_vlan *vlan_spec,
                    struct rte_flow_item_vlan *vlan_mask)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_attr attr = {
                .ingress = 1,
                .priority = MLX5_FLOW_PRIO_RSVD,
@@ -2314,9 +2350,8 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
        struct rte_flow_error error;
        unsigned int i;
 
-       if (!priv->reta_idx_n) {
-               rte_errno = EINVAL;
-               return -rte_errno;
+       if (!priv->reta_idx_n || !priv->rxqs_n) {
+               return 0;
        }
        for (i = 0; i != priv->reta_idx_n; ++i)
                queue[i] = (*priv->reta_idx)[i];
@@ -2359,7 +2394,7 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error __rte_unused)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
 
        flow_list_destroy(dev, &priv->flows, flow);
        return 0;
@@ -2375,7 +2410,7 @@ int
 mlx5_flow_flush(struct rte_eth_dev *dev,
                struct rte_flow_error *error __rte_unused)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
 
        mlx5_flow_list_flush(dev, &priv->flows);
        return 0;
@@ -2392,7 +2427,7 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
                  int enable,
                  struct rte_flow_error *error)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
 
        if (dev->data->dev_started) {
                rte_flow_error_set(error, EBUSY,
@@ -2470,7 +2505,7 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev,
                         const struct rte_eth_fdir_filter *fdir_filter,
                         struct mlx5_fdir *attributes)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_eth_fdir_input *input = &fdir_filter->input;
        const struct rte_eth_fdir_masks *mask =
                &dev->data->dev_conf.fdir_conf.mask;
@@ -2513,13 +2548,13 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev,
        case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
        case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
        case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
-               attributes->l3.ipv4.hdr = (struct ipv4_hdr){
+               attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){
                        .src_addr = input->flow.ip4_flow.src_ip,
                        .dst_addr = input->flow.ip4_flow.dst_ip,
                        .time_to_live = input->flow.ip4_flow.ttl,
                        .type_of_service = input->flow.ip4_flow.tos,
                };
-               attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
+               attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){
                        .src_addr = mask->ipv4_mask.src_ip,
                        .dst_addr = mask->ipv4_mask.dst_ip,
                        .time_to_live = mask->ipv4_mask.ttl,
@@ -2535,7 +2570,7 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev,
        case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
        case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
        case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
-               attributes->l3.ipv6.hdr = (struct ipv6_hdr){
+               attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){
                        .hop_limits = input->flow.ipv6_flow.hop_limits,
                        .proto = input->flow.ipv6_flow.proto,
                };
@@ -2567,11 +2602,11 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev,
        /* Handle L4. */
        switch (fdir_filter->input.flow_type) {
        case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
-               attributes->l4.udp.hdr = (struct udp_hdr){
+               attributes->l4.udp.hdr = (struct rte_udp_hdr){
                        .src_port = input->flow.udp4_flow.src_port,
                        .dst_port = input->flow.udp4_flow.dst_port,
                };
-               attributes->l4_mask.udp.hdr = (struct udp_hdr){
+               attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
                        .src_port = mask->src_port_mask,
                        .dst_port = mask->dst_port_mask,
                };
@@ -2582,11 +2617,11 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev,
                };
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
-               attributes->l4.tcp.hdr = (struct tcp_hdr){
+               attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
                        .src_port = input->flow.tcp4_flow.src_port,
                        .dst_port = input->flow.tcp4_flow.dst_port,
                };
-               attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
+               attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
                        .src_port = mask->src_port_mask,
                        .dst_port = mask->dst_port_mask,
                };
@@ -2597,11 +2632,11 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev,
                };
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
-               attributes->l4.udp.hdr = (struct udp_hdr){
+               attributes->l4.udp.hdr = (struct rte_udp_hdr){
                        .src_port = input->flow.udp6_flow.src_port,
                        .dst_port = input->flow.udp6_flow.dst_port,
                };
-               attributes->l4_mask.udp.hdr = (struct udp_hdr){
+               attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
                        .src_port = mask->src_port_mask,
                        .dst_port = mask->dst_port_mask,
                };
@@ -2612,11 +2647,11 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev,
                };
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
-               attributes->l4.tcp.hdr = (struct tcp_hdr){
+               attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
                        .src_port = input->flow.tcp6_flow.src_port,
                        .dst_port = input->flow.tcp6_flow.dst_port,
                };
-               attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
+               attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
                        .src_port = mask->src_port_mask,
                        .dst_port = mask->dst_port_mask,
                };
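
The fdir hunks above are the mechanical part of the rename this page is titled after: the plain ipv4_hdr/ipv6_hdr/udp_hdr/tcp_hdr structures now carry the rte_ prefix. Out-of-tree code follows the same pattern; a small sketch of building a UDP header with the renamed structure (the helper name is an assumption, the field names come from rte_udp.h):

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_udp.h>

/* Hypothetical helper: fill a UDP header using the renamed
 * struct rte_udp_hdr, in host code that previously used struct udp_hdr. */
static struct rte_udp_hdr
make_udp_hdr(uint16_t src_port, uint16_t dst_port, uint16_t payload_len)
{
        struct rte_udp_hdr udp = {
                .src_port = rte_cpu_to_be_16(src_port),
                .dst_port = rte_cpu_to_be_16(dst_port),
                .dgram_len = rte_cpu_to_be_16((uint16_t)(sizeof(udp) +
                                                         payload_len)),
                .dgram_cksum = 0, /* computed later or offloaded */
        };

        return udp;
}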
@@ -2687,7 +2722,7 @@ flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2)
 static struct rte_flow *
 flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct rte_flow *flow = NULL;
 
        assert(fdir_flow);
@@ -2716,7 +2751,7 @@ static int
 flow_fdir_filter_add(struct rte_eth_dev *dev,
                     const struct rte_eth_fdir_filter *fdir_filter)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_fdir *fdir_flow;
        struct rte_flow *flow;
        int ret;
@@ -2763,7 +2798,7 @@ static int
 flow_fdir_filter_delete(struct rte_eth_dev *dev,
                        const struct rte_eth_fdir_filter *fdir_filter)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
        struct rte_flow *flow;
        struct mlx5_fdir fdir_flow = {
                .attr.group = 0,
@@ -2816,7 +2851,7 @@ flow_fdir_filter_update(struct rte_eth_dev *dev,
 static void
 flow_fdir_filter_flush(struct rte_eth_dev *dev)
 {
-       struct priv *priv = dev->data->dev_private;
+       struct mlx5_priv *priv = dev->data->dev_private;
 
        mlx5_flow_list_flush(dev, &priv->flows);
 }