net/mlx5: standardize on negative errno values
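
Callbacks that can fail now follow a single convention: they set
rte_errno to describe the failure and return its negated value, so a
caller only has to test for a negative return and, when needed, read
rte_errno for the cause. Positive errno returns, bare -1 returns and
returns that left rte_errno unset are removed.

A minimal sketch of the pattern, using a hypothetical helper (the
function name and its 40-byte key-length check are illustrative only,
not part of the driver API):

    #include <errno.h>
    #include <rte_errno.h>

    /*
     * Illustrative helper only; the name and the check are not taken
     * from the driver.  On failure, set rte_errno first, then return
     * its negated value; on success, return 0.
     */
    static int
    example_validate_rss_key_len(unsigned int rss_key_len)
    {
            if (rss_key_len != 40) {
                    rte_errno = EINVAL;
                    return -rte_errno;
            }
            return 0;
    }

Where a struct rte_flow_error is available, rte_flow_error_set()
records the error and returns -rte_errno itself, which is why several
error paths below simply return its result directly.
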
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 9ffcd03..30b8573 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -247,7 +247,8 @@ struct mlx5_flow_items {
         *   Internal structure to store the conversion.
         *
         * @return
-        *   0 on success, negative value otherwise.
+        *   0 on success, a negative errno value otherwise and rte_errno is
+        *   set.
         */
        int (*convert)(const struct rte_flow_item *item,
                       const void *default_mask,
@@ -460,45 +461,52 @@ struct ibv_spec_header {
  *   Bit-Mask size in bytes.
  *
  * @return
- *   0 on success.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
 mlx5_flow_item_validate(const struct rte_flow_item *item,
                        const uint8_t *mask, unsigned int size)
 {
-       int ret = 0;
-
-       if (!item->spec && (item->mask || item->last))
-               return -1;
+       if (!item->spec && (item->mask || item->last)) {
+               rte_errno = EINVAL;
+               return -rte_errno;
+       }
        if (item->spec && !item->mask) {
                unsigned int i;
                const uint8_t *spec = item->spec;
 
                for (i = 0; i < size; ++i)
-                       if ((spec[i] | mask[i]) != mask[i])
-                               return -1;
+                       if ((spec[i] | mask[i]) != mask[i]) {
+                               rte_errno = EINVAL;
+                               return -rte_errno;
+                       }
        }
        if (item->last && !item->mask) {
                unsigned int i;
                const uint8_t *spec = item->last;
 
                for (i = 0; i < size; ++i)
-                       if ((spec[i] | mask[i]) != mask[i])
-                               return -1;
+                       if ((spec[i] | mask[i]) != mask[i]) {
+                               rte_errno = EINVAL;
+                               return -rte_errno;
+                       }
        }
        if (item->mask) {
                unsigned int i;
                const uint8_t *spec = item->spec;
 
                for (i = 0; i < size; ++i)
-                       if ((spec[i] | mask[i]) != mask[i])
-                               return -1;
+                       if ((spec[i] | mask[i]) != mask[i]) {
+                               rte_errno = EINVAL;
+                               return -rte_errno;
+                       }
        }
        if (item->spec && item->last) {
                uint8_t spec[size];
                uint8_t last[size];
                const uint8_t *apply = mask;
                unsigned int i;
+               int ret;
 
                if (item->mask)
                        apply = item->mask;
@@ -507,8 +515,12 @@ mlx5_flow_item_validate(const struct rte_flow_item *item,
                        last[i] = ((const uint8_t *)item->last)[i] & apply[i];
                }
                ret = memcmp(spec, last, size);
+               if (ret != 0) {
+                       rte_errno = EINVAL;
+                       return -rte_errno;
+               }
        }
-       return ret;
+       return 0;
 }
 
 /**
@@ -521,7 +533,7 @@ mlx5_flow_item_validate(const struct rte_flow_item *item,
  *   User RSS configuration to save.
  *
  * @return
- *   0 on success, errno value on failure.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
 mlx5_flow_convert_rss_conf(struct mlx5_flow_parse *parser,
@@ -533,10 +545,14 @@ mlx5_flow_convert_rss_conf(struct mlx5_flow_parse *parser,
         * device default RSS configuration.
         */
        if (rss_conf) {
-               if (rss_conf->rss_hf & MLX5_RSS_HF_MASK)
-                       return EINVAL;
-               if (rss_conf->rss_key_len != 40)
-                       return EINVAL;
+               if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) {
+                       rte_errno = EINVAL;
+                       return -rte_errno;
+               }
+               if (rss_conf->rss_key_len != 40) {
+                       rte_errno = EINVAL;
+                       return -rte_errno;
+               }
                if (rss_conf->rss_key_len && rss_conf->rss_key) {
                        parser->rss_conf.rss_key_len = rss_conf->rss_key_len;
                        memcpy(parser->rss_key, rss_conf->rss_key,
@@ -616,14 +632,17 @@ mlx5_flow_convert_actions(struct rte_eth_dev *dev,
                          struct mlx5_flow_parse *parser)
 {
        struct priv *priv = dev->data->dev_private;
+       int ret;
 
        /*
         * Add default RSS configuration necessary for Verbs to create QP even
         * if no RSS is necessary.
         */
-       mlx5_flow_convert_rss_conf(parser,
-                                  (const struct rte_eth_rss_conf *)
-                                  &priv->rss_conf);
+       ret = mlx5_flow_convert_rss_conf(parser,
+                                        (const struct rte_eth_rss_conf *)
+                                        &priv->rss_conf);
+       if (ret)
+               return ret;
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
                        continue;
@@ -772,6 +791,7 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
 {
        const struct mlx5_flow_items *cur_item = mlx5_flow_items;
        unsigned int i;
+       int ret = 0;
 
        /* Initialise the offsets to start after verbs attribute. */
        for (i = 0; i != hash_rxq_init_n; ++i)
@@ -779,7 +799,6 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
                const struct mlx5_flow_items *token = NULL;
                unsigned int n;
-               int err;
 
                if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;
@@ -795,10 +814,10 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
                if (!token)
                        goto exit_item_not_supported;
                cur_item = token;
-               err = mlx5_flow_item_validate(items,
+               ret = mlx5_flow_item_validate(items,
                                              (const uint8_t *)cur_item->mask,
                                              cur_item->mask_sz);
-               if (err)
+               if (ret)
                        goto exit_item_not_supported;
                if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
                        if (parser->inner) {
@@ -835,9 +854,8 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
        }
        return 0;
 exit_item_not_supported:
-       rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
-                          items, "item not supported");
-       return -rte_errno;
+       return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ITEM,
+                                 items, "item not supported");
 }
 
 /**
@@ -851,7 +869,7 @@ exit_item_not_supported:
  *   Perform verbose error reporting if not NULL.
  *
  * @return
- *   A verbs flow attribute on success, NULL otherwise.
+ *   A verbs flow attribute on success, NULL otherwise and rte_errno is set.
  */
 static struct ibv_flow_attr *
 mlx5_flow_convert_allocate(unsigned int priority,
@@ -1055,7 +1073,7 @@ mlx5_flow_convert(struct rte_eth_dev *dev,
                parser->queue[HASH_RXQ_ETH].ibv_attr =
                        mlx5_flow_convert_allocate(priority, offset, error);
                if (!parser->queue[HASH_RXQ_ETH].ibv_attr)
-                       return ENOMEM;
+                       goto exit_enomem;
                parser->queue[HASH_RXQ_ETH].offset =
                        sizeof(struct ibv_flow_attr);
        } else {
@@ -1090,7 +1108,7 @@ mlx5_flow_convert(struct rte_eth_dev *dev,
                                         cur_item->mask),
                                        parser);
                if (ret) {
-                       rte_flow_error_set(error, ret,
+                       rte_flow_error_set(error, rte_errno,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           items, "item not supported");
                        goto exit_free;
@@ -1132,13 +1150,13 @@ exit_enomem:
                        parser->queue[i].ibv_attr = NULL;
                }
        }
-       rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+       rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                           NULL, "cannot allocate verbs spec attributes.");
-       return ret;
+       return -rte_errno;
 exit_count_error:
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                           NULL, "cannot create counter.");
-       return rte_errno;
+       return -rte_errno;
 }
 
 /**
@@ -1184,6 +1202,9 @@ mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src,
  *   Default bit-masks to use when item->mask is not provided.
  * @param data[in, out]
  *   User structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
 mlx5_flow_create_eth(const struct rte_flow_item *item,
@@ -1233,6 +1254,9 @@ mlx5_flow_create_eth(const struct rte_flow_item *item,
  *   Default bit-masks to use when item->mask is not provided.
  * @param data[in, out]
  *   User structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
 mlx5_flow_create_vlan(const struct rte_flow_item *item,
@@ -1273,6 +1297,9 @@ mlx5_flow_create_vlan(const struct rte_flow_item *item,
  *   Default bit-masks to use when item->mask is not provided.
  * @param data[in, out]
  *   User structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
 mlx5_flow_create_ipv4(const struct rte_flow_item *item,
@@ -1325,6 +1352,9 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item,
  *   Default bit-masks to use when item->mask is not provided.
  * @param data[in, out]
  *   User structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
 mlx5_flow_create_ipv6(const struct rte_flow_item *item,
@@ -1397,6 +1427,9 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item,
  *   Default bit-masks to use when item->mask is not provided.
  * @param data[in, out]
  *   User structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
 mlx5_flow_create_udp(const struct rte_flow_item *item,
@@ -1443,6 +1476,9 @@ mlx5_flow_create_udp(const struct rte_flow_item *item,
  *   Default bit-masks to use when item->mask is not provided.
  * @param data[in, out]
  *   User structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
 mlx5_flow_create_tcp(const struct rte_flow_item *item,
@@ -1489,6 +1525,9 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item,
  *   Default bit-masks to use when item->mask is not provided.
  * @param data[in, out]
  *   User structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
 mlx5_flow_create_vxlan(const struct rte_flow_item *item,
@@ -1528,8 +1567,10 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
         * before will also match this rule.
         * To avoid such situation, VNI 0 is currently refused.
         */
-       if (!vxlan.val.tunnel_id)
-               return EINVAL;
+       if (!vxlan.val.tunnel_id) {
+               rte_errno = EINVAL;
+               return -rte_errno;
+       }
        mlx5_flow_create_copy(parser, &vxlan, size);
        return 0;
 }
@@ -1541,6 +1582,9 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
  *   Internal parser structure.
  * @param mark_id
  *   Mark identifier.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
 mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id)
@@ -1566,7 +1610,7 @@ mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id)
  *   Pointer to MLX5 flow parser structure.
  *
  * @return
- *   0 on success, errno value on failure.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
 mlx5_flow_create_count(struct rte_eth_dev *dev __rte_unused,
@@ -1584,8 +1628,10 @@ mlx5_flow_create_count(struct rte_eth_dev *dev __rte_unused,
 
        init_attr.counter_set_id = 0;
        parser->cs = mlx5_glue->create_counter_set(priv->ctx, &init_attr);
-       if (!parser->cs)
-               return EINVAL;
+       if (!parser->cs) {
+               rte_errno = EINVAL;
+               return -rte_errno;
+       }
        counter.counter_set_handle = parser->cs->handle;
        mlx5_flow_create_copy(parser, &counter, size);
 #endif
@@ -1605,7 +1651,7 @@ mlx5_flow_create_count(struct rte_eth_dev *dev __rte_unused,
  *   Perform verbose error reporting if not NULL.
  *
  * @return
- *   0 on success, errno value on failure.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
 mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev,
@@ -1616,7 +1662,6 @@ mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev,
        struct priv *priv = dev->data->dev_private;
        struct ibv_flow_spec_action_drop *drop;
        unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
-       int err = 0;
 
        assert(priv->pd);
        assert(priv->ctx);
@@ -1642,7 +1687,6 @@ mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev,
        if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "flow rule creation failure");
-               err = ENOMEM;
                goto error;
        }
        return 0;
@@ -1662,7 +1706,7 @@ error:
                flow->cs = NULL;
                parser->cs = NULL;
        }
-       return err;
+       return -rte_errno;
 }
 
 /**
@@ -1678,7 +1722,7 @@ error:
  *   Perform verbose error reporting if not NULL.
  *
  * @return
- *   0 on success, a errno value otherwise and rte_errno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
 mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
@@ -1716,10 +1760,10 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
                                      parser->queues,
                                      parser->queues_n);
                if (!flow->frxq[i].hrxq) {
-                       rte_flow_error_set(error, ENOMEM,
-                                          RTE_FLOW_ERROR_TYPE_HANDLE,
-                                          NULL, "cannot create hash rxq");
-                       return ENOMEM;
+                       return rte_flow_error_set(error, ENOMEM,
+                                                 RTE_FLOW_ERROR_TYPE_HANDLE,
+                                                 NULL,
+                                                 "cannot create hash rxq");
                }
        }
        return 0;
@@ -1738,7 +1782,7 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
  *   Perform verbose error reporting if not NULL.
  *
  * @return
- *   0 on success, a errno value otherwise and rte_errno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
 mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
@@ -1747,15 +1791,15 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
                              struct rte_flow_error *error)
 {
        struct priv *priv = dev->data->dev_private;
-       int err = 0;
+       int ret;
        unsigned int i;
        unsigned int flows_n = 0;
 
        assert(priv->pd);
        assert(priv->ctx);
        assert(!parser->drop);
-       err = mlx5_flow_create_action_queue_rss(dev, parser, flow, error);
-       if (err)
+       ret = mlx5_flow_create_action_queue_rss(dev, parser, flow, error);
+       if (ret)
                goto error;
        if (parser->count)
                flow->cs = parser->cs;
@@ -1771,7 +1815,6 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
                        rte_flow_error_set(error, ENOMEM,
                                           RTE_FLOW_ERROR_TYPE_HANDLE,
                                           NULL, "flow rule creation failure");
-                       err = ENOMEM;
                        goto error;
                }
                ++flows_n;
@@ -1793,6 +1836,7 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
        }
        return 0;
 error:
+       ret = rte_errno; /* Save rte_errno before cleanup. */
        assert(flow);
        for (i = 0; i != hash_rxq_init_n; ++i) {
                if (flow->frxq[i].ibv_flow) {
@@ -1810,7 +1854,8 @@ error:
                flow->cs = NULL;
                parser->cs = NULL;
        }
-       return err;
+       rte_errno = ret; /* Restore rte_errno. */
+       return -rte_errno;
 }
 
 /**
@@ -1830,7 +1875,7 @@ error:
  *   Perform verbose error reporting if not NULL.
  *
  * @return
- *   A flow on success, NULL otherwise.
+ *   A flow on success, NULL otherwise and rte_errno is set.
  */
 static struct rte_flow *
 mlx5_flow_list_create(struct rte_eth_dev *dev,
@@ -1843,10 +1888,10 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
        struct mlx5_flow_parse parser = { .create = 1, };
        struct rte_flow *flow = NULL;
        unsigned int i;
-       int err;
+       int ret;
 
-       err = mlx5_flow_convert(dev, attr, items, actions, error, &parser);
-       if (err)
+       ret = mlx5_flow_convert(dev, attr, items, actions, error, &parser);
+       if (ret)
                goto exit;
        flow = rte_calloc(__func__, 1,
                          sizeof(*flow) + parser.queues_n * sizeof(uint16_t),
@@ -1869,11 +1914,11 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
        memcpy(flow->rss_key, parser.rss_key, parser.rss_conf.rss_key_len);
        /* finalise the flow. */
        if (parser.drop)
-               err = mlx5_flow_create_action_queue_drop(dev, &parser, flow,
+               ret = mlx5_flow_create_action_queue_drop(dev, &parser, flow,
                                                         error);
        else
-               err = mlx5_flow_create_action_queue(dev, &parser, flow, error);
-       if (err)
+               ret = mlx5_flow_create_action_queue(dev, &parser, flow, error);
+       if (ret)
                goto exit;
        TAILQ_INSERT_TAIL(list, flow, next);
        DEBUG("Flow created %p", (void *)flow);
@@ -1901,11 +1946,9 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
 {
-       int ret;
        struct mlx5_flow_parse parser = { .create = 0, };
 
-       ret = mlx5_flow_convert(dev, attr, items, actions, error, &parser);
-       return ret;
+       return mlx5_flow_convert(dev, attr, items, actions, error, &parser);
 }
 
 /**
@@ -2029,7 +2072,7 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
  *   Pointer to Ethernet device.
  *
  * @return
- *   0 on success.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
 mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
@@ -2042,11 +2085,13 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
        fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
        if (!fdq) {
                WARN("cannot allocate memory for drop queue");
-               goto error;
+               rte_errno = ENOMEM;
+               return -rte_errno;
        }
        fdq->cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
        if (!fdq->cq) {
                WARN("cannot allocate CQ for drop queue");
+               rte_errno = errno;
                goto error;
        }
        fdq->wq = mlx5_glue->create_wq
@@ -2060,6 +2105,7 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
                 });
        if (!fdq->wq) {
                WARN("cannot allocate WQ for drop queue");
+               rte_errno = errno;
                goto error;
        }
        fdq->ind_table = mlx5_glue->create_rwq_ind_table
@@ -2071,6 +2117,7 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
                 });
        if (!fdq->ind_table) {
                WARN("cannot allocate indirection table for drop queue");
+               rte_errno = errno;
                goto error;
        }
        fdq->qp = mlx5_glue->create_qp_ex
@@ -2093,6 +2140,7 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
                 });
        if (!fdq->qp) {
                WARN("cannot allocate QP for drop queue");
+               rte_errno = errno;
                goto error;
        }
        priv->flow_drop_queue = fdq;
@@ -2109,7 +2157,7 @@ error:
        if (fdq)
                rte_free(fdq);
        priv->flow_drop_queue = NULL;
-       return -1;
+       return -rte_errno;
 }
 
 /**
@@ -2208,7 +2256,7 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
  *   Pointer to a TAILQ flow list.
  *
  * @return
- *   0 on success, a errno value otherwise and rte_errno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
 mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
@@ -2228,7 +2276,7 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
                                DEBUG("Flow %p cannot be applied",
                                      (void *)flow);
                                rte_errno = EINVAL;
-                               return rte_errno;
+                               return -rte_errno;
                        }
                        DEBUG("Flow %p applied", (void *)flow);
                        /* Next flow. */
@@ -2255,7 +2303,7 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
                                DEBUG("Flow %p cannot be applied",
                                      (void *)flow);
                                rte_errno = EINVAL;
-                               return rte_errno;
+                               return -rte_errno;
                        }
 flow_create:
                        flow->frxq[i].ibv_flow =
@@ -2265,7 +2313,7 @@ flow_create:
                                DEBUG("Flow %p cannot be applied",
                                      (void *)flow);
                                rte_errno = EINVAL;
-                               return rte_errno;
+                               return -rte_errno;
                        }
                        DEBUG("Flow %p applied", (void *)flow);
                }
@@ -2315,7 +2363,7 @@ mlx5_flow_verify(struct rte_eth_dev *dev)
  *   A VLAN flow mask to apply.
  *
  * @return
- *   0 on success.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
@@ -2367,8 +2415,10 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
                } local;
        } action_rss;
 
-       if (!priv->reta_idx_n)
-               return EINVAL;
+       if (!priv->reta_idx_n) {
+               rte_errno = EINVAL;
+               return -rte_errno;
+       }
        for (i = 0; i != priv->reta_idx_n; ++i)
                action_rss.local.queue[i] = (*priv->reta_idx)[i];
        action_rss.local.rss_conf = &priv->rss_conf;
@@ -2377,7 +2427,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
        flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items,
                                     actions, &error);
        if (!flow)
-               return rte_errno;
+               return -rte_errno;
        return 0;
 }
 
@@ -2392,7 +2442,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
  *   An Ethernet flow mask to apply.
  *
  * @return
- *   0 on success.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
 mlx5_ctrl_flow(struct rte_eth_dev *dev,
@@ -2445,7 +2495,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev,
  *   returned data from the counter.
  *
  * @return
- *   0 on success, a errno value otherwise and rte_errno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
 mlx5_flow_query_count(struct ibv_counter_set *cs,
@@ -2462,15 +2512,13 @@ mlx5_flow_query_count(struct ibv_counter_set *cs,
                .out = counters,
                .outlen = 2 * sizeof(uint64_t),
        };
-       int res = mlx5_glue->query_counter_set(&query_cs_attr, &query_out);
+       int err = mlx5_glue->query_counter_set(&query_cs_attr, &query_out);
 
-       if (res) {
-               rte_flow_error_set(error, -res,
-                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                                  NULL,
-                                  "cannot read counter");
-               return -res;
-       }
+       if (err)
+               return rte_flow_error_set(error, err,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         NULL,
+                                         "cannot read counter");
        query_count->hits_set = 1;
        query_count->bytes_set = 1;
        query_count->hits = counters[0] - counter_stats->hits;
@@ -2495,20 +2543,22 @@ mlx5_flow_query(struct rte_eth_dev *dev __rte_unused,
                void *data,
                struct rte_flow_error *error)
 {
-       int res = EINVAL;
-
        if (flow->cs) {
-               res = mlx5_flow_query_count(flow->cs,
-                                       &flow->counter_stats,
-                                       (struct rte_flow_query_count *)data,
-                                       error);
+               int ret;
+
+               ret = mlx5_flow_query_count(flow->cs,
+                                           &flow->counter_stats,
+                                           (struct rte_flow_query_count *)data,
+                                           error);
+               if (ret)
+                       return ret;
        } else {
-               rte_flow_error_set(error, res,
-                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                                  NULL,
-                                  "no counter found for flow");
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         NULL,
+                                         "no counter found for flow");
        }
-       return -res;
+       return 0;
 }
 #endif
 
@@ -2551,7 +2601,7 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
  *   Generic flow parameters structure.
  *
  * @return
- *  0 on success, errno value on error.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
 mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
@@ -2564,7 +2614,8 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
        /* Validate queue number. */
        if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
                ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
-               return EINVAL;
+               rte_errno = EINVAL;
+               return -rte_errno;
        }
        attributes->attr.ingress = 1;
        attributes->items[0] = (struct rte_flow_item) {
@@ -2586,7 +2637,8 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
                break;
        default:
                ERROR("invalid behavior %d", fdir_filter->action.behavior);
-               return ENOTSUP;
+               rte_errno = ENOTSUP;
+               return -rte_errno;
        }
        attributes->queue.index = fdir_filter->action.rx_queue;
        switch (fdir_filter->input.flow_type) {
@@ -2720,9 +2772,9 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
                };
                break;
        default:
-               ERROR("invalid flow type%d",
-                     fdir_filter->input.flow_type);
-               return ENOTSUP;
+               ERROR("invalid flow type%d", fdir_filter->input.flow_type);
+               rte_errno = ENOTSUP;
+               return -rte_errno;
        }
        return 0;
 }
@@ -2736,7 +2788,7 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
  *   Flow director filter to add.
  *
  * @return
- *   0 on success, errno value on failure.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
 mlx5_fdir_filter_add(struct rte_eth_dev *dev,
@@ -2760,11 +2812,11 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev,
 
        ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
        if (ret)
-               return -ret;
+               return ret;
        ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items,
                                attributes.actions, &error, &parser);
        if (ret)
-               return -ret;
+               return ret;
        flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
                                     attributes.items, attributes.actions,
                                     &error);
@@ -2772,7 +2824,7 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev,
                DEBUG("FDIR created %p", (void *)flow);
                return 0;
        }
-       return ENOTSUP;
+       return -rte_errno;
 }
 
 /**
@@ -2784,7 +2836,7 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev,
  *   Filter to be deleted.
  *
  * @return
- *   0 on success, errno value on failure.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
 mlx5_fdir_filter_delete(struct rte_eth_dev *dev,
@@ -2805,7 +2857,7 @@ mlx5_fdir_filter_delete(struct rte_eth_dev *dev,
 
        ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
        if (ret)
-               return -ret;
+               return ret;
        ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items,
                                attributes.actions, &error, &parser);
        if (ret)
@@ -2863,6 +2915,7 @@ wrong_flow:
                /* The flow does not match. */
                continue;
        }
+       ret = rte_errno; /* Save rte_errno before cleanup. */
        if (flow)
                mlx5_flow_list_destroy(dev, &priv->flows, flow);
 exit:
@@ -2870,7 +2923,8 @@ exit:
                if (parser.queue[i].ibv_attr)
                        rte_free(parser.queue[i].ibv_attr);
        }
-       return -ret;
+       rte_errno = ret; /* Restore rte_errno. */
+       return -rte_errno;
 }
 
 /**
@@ -2882,7 +2936,7 @@ exit:
  *   Filter to be updated.
  *
  * @return
- *   0 on success, errno value on failure.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
 mlx5_fdir_filter_update(struct rte_eth_dev *dev,
@@ -2893,8 +2947,7 @@ mlx5_fdir_filter_update(struct rte_eth_dev *dev,
        ret = mlx5_fdir_filter_delete(dev, fdir_filter);
        if (ret)
                return ret;
-       ret = mlx5_fdir_filter_add(dev, fdir_filter);
-       return ret;
+       return mlx5_fdir_filter_add(dev, fdir_filter);
 }
 
 /**
@@ -2948,7 +3001,7 @@ mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
  *   Pointer to operation-specific structure.
  *
  * @return
- *   0 on success, errno value on failure.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
 mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
@@ -2957,7 +3010,6 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
        struct priv *priv = dev->data->dev_private;
        enum rte_fdir_mode fdir_mode =
                priv->dev->data->dev_conf.fdir_conf.mode;
-       int ret = 0;
 
        if (filter_op == RTE_ETH_FILTER_NOP)
                return 0;
@@ -2965,18 +3017,16 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
            fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
                ERROR("%p: flow director mode %d not supported",
                      (void *)dev, fdir_mode);
-               return EINVAL;
+               rte_errno = EINVAL;
+               return -rte_errno;
        }
        switch (filter_op) {
        case RTE_ETH_FILTER_ADD:
-               ret = mlx5_fdir_filter_add(dev, arg);
-               break;
+               return mlx5_fdir_filter_add(dev, arg);
        case RTE_ETH_FILTER_UPDATE:
-               ret = mlx5_fdir_filter_update(dev, arg);
-               break;
+               return mlx5_fdir_filter_update(dev, arg);
        case RTE_ETH_FILTER_DELETE:
-               ret = mlx5_fdir_filter_delete(dev, arg);
-               break;
+               return mlx5_fdir_filter_delete(dev, arg);
        case RTE_ETH_FILTER_FLUSH:
                mlx5_fdir_filter_flush(dev);
                break;
@@ -2984,12 +3034,11 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
                mlx5_fdir_info_get(dev, arg);
                break;
        default:
-               DEBUG("%p: unknown operation %u", (void *)dev,
-                     filter_op);
-               ret = EINVAL;
-               break;
+               DEBUG("%p: unknown operation %u", (void *)dev, filter_op);
+               rte_errno = EINVAL;
+               return -rte_errno;
        }
-       return ret;
+       return 0;
 }
 
 /**
@@ -3005,7 +3054,7 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
  *   Pointer to operation-specific structure.
  *
  * @return
- *   0 on success, negative errno value on failure.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
@@ -3013,21 +3062,21 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
                     enum rte_filter_op filter_op,
                     void *arg)
 {
-       int ret = EINVAL;
-
        switch (filter_type) {
        case RTE_ETH_FILTER_GENERIC:
-               if (filter_op != RTE_ETH_FILTER_GET)
-                       return -EINVAL;
+               if (filter_op != RTE_ETH_FILTER_GET) {
+                       rte_errno = EINVAL;
+                       return -rte_errno;
+               }
                *(const void **)arg = &mlx5_flow_ops;
                return 0;
        case RTE_ETH_FILTER_FDIR:
-               ret = mlx5_fdir_ctrl_func(dev, filter_op, arg);
-               break;
+               return mlx5_fdir_ctrl_func(dev, filter_op, arg);
        default:
                ERROR("%p: filter type (%d) not supported",
                      (void *)dev, filter_type);
-               break;
+               rte_errno = ENOTSUP;
+               return -rte_errno;
        }
-       return -ret;
+       return 0;
 }