X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow.c;h=3a0ca114eecc2131a60f7fee942cf6d8952ac5c7;hb=a7c528e5d71ff3f569898d268f9de129fdfc152b;hp=370cf7792b6c092f03cf0808e9f63845dc5b612e;hpb=2720f833d4616fe3084604cb35f028c3e61bf996;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 370cf7792b..3a0ca114ee 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -21,7 +21,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
@@ -30,9 +29,10 @@
 #include "mlx5.h"
 #include "mlx5_defs.h"
-#include "mlx5_prm.h"
-#include "mlx5_glue.h"
 #include "mlx5_flow.h"
+#include "mlx5_glue.h"
+#include "mlx5_prm.h"
+#include "mlx5_rxtx.h"
 
 /* Dev ops structure defined in mlx5.c */
 extern const struct eth_dev_ops mlx5_dev_ops;
@@ -294,7 +294,7 @@ static struct mlx5_flow_tunnel_info tunnels_info[] = {
 	},
 	{
 		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
-		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP,
+		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
 	},
 	{
 		.tunnel = MLX5_FLOW_LAYER_MPLS,
@@ -315,6 +315,7 @@ static struct mlx5_flow_tunnel_info tunnels_info[] = {
 int
 mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
 {
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct {
 		struct ibv_flow_attr attr;
 		struct ibv_flow_spec_eth eth;
@@ -322,6 +323,7 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
 	} flow_attr = {
 		.attr = {
 			.num_of_specs = 2,
+			.port = (uint8_t)priv->ibv_port,
 		},
 		.eth = {
 			.type = IBV_FLOW_SPEC_ETH,
@@ -350,6 +352,7 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
 		claim_zero(mlx5_glue->destroy_flow(flow));
 		priority = vprio[i];
 	}
+	mlx5_hrxq_drop_release(dev);
 	switch (priority) {
 	case 8:
 		priority = RTE_DIM(priority_map_3);
@@ -361,10 +364,9 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
 		rte_errno = ENOTSUP;
 		DRV_LOG(ERR,
 			"port %u verbs maximum priority: %d expected 8/16",
-			dev->data->port_id, vprio[i]);
+			dev->data->port_id, priority);
 		return -rte_errno;
 	}
-	mlx5_hrxq_drop_release(dev);
 	DRV_LOG(INFO, "port %u flow maximum priority: %d",
 		dev->data->port_id, priority);
 	return priority;
@@ -387,7 +389,7 @@ uint32_t
 mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			  uint32_t subpriority)
 {
 	uint32_t res = 0;
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 
 	switch (priv->config.flow_prio) {
 	case RTE_DIM(priority_map_3):
@@ -536,7 +538,7 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
 static void
 flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow *flow = dev_flow->flow;
 	const int mark = !!(flow->actions &
 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
@@ -599,7 +601,7 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
 static void
 flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow *flow = dev_flow->flow;
 	const int mark = !!(flow->actions &
 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
@@ -661,7 +663,7 @@ flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
 static void
 flow_rxq_flags_clear(struct rte_eth_dev *dev)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	unsigned int i;
 
 	for (i = 0; i != priv->rxqs_n; ++i) {
@@ -786,7 +788,7 @@ mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
  *   Pointer to error structure.
  *
  * @return
- *   0 on success, a negative errno value otherwise and rte_ernno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
 mlx5_flow_validate_action_drop(uint64_t action_flags,
@@ -829,7 +831,7 @@ mlx5_flow_validate_action_drop(uint64_t action_flags,
  *   Pointer to error structure.
  *
  * @return
- *   0 on success, a negative errno value otherwise and rte_ernno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
 mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
@@ -838,7 +840,7 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
 				const struct rte_flow_attr *attr,
 				struct rte_flow_error *error)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	const struct rte_flow_action_queue *queue = action->conf;
 
 	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
@@ -846,6 +848,10 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "can't have 2 fate actions in"
 					  " same flow");
+	if (!priv->rxqs_n)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  NULL, "No Rx queues configured");
 	if (queue->index >= priv->rxqs_n)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
@@ -875,21 +881,25 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
  *   Pointer to the Ethernet device structure.
  * @param[in] attr
  *   Attributes of flow that includes this action.
+ * @param[in] item_flags
+ *   Items that were detected.
  * @param[out] error
  *   Pointer to error structure.
  *
  * @return
- *   0 on success, a negative errno value otherwise and rte_ernno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
 mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
 			      uint64_t action_flags,
 			      struct rte_eth_dev *dev,
 			      const struct rte_flow_attr *attr,
+			      uint64_t item_flags,
 			      struct rte_flow_error *error)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	const struct rte_flow_action_rss *rss = action->conf;
+	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int i;
 
 	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
@@ -912,7 +922,13 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					  &rss->level,
 					  "tunnel RSS is not supported");
-	if (rss->key_len < MLX5_RSS_HASH_KEY_LEN)
+	/* allow RSS key_len 0 in case of NULL (default) RSS key. */
+	if (rss->key_len == 0 && rss->key != NULL)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  &rss->key_len,
+					  "RSS hash key length 0");
+	if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					  &rss->key_len,
@@ -933,6 +949,14 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
 					  &rss->types,
 					  "some RSS protocols are not"
 					  " supported");
+	if (!priv->rxqs_n)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  NULL, "No Rx queues configured");
+	if (!rss->queue_num)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  NULL, "No queues configured");
 	for (i = 0; i != rss->queue_num; ++i) {
 		if (!(*priv->rxqs)[rss->queue[i]])
 			return rte_flow_error_set
@@ -944,6 +968,11 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
 					  NULL, "rss action not supported for "
 					  "egress");
+	if (rss->level > 1 && !tunnel)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  "inner RSS is not supported for "
+					  "non-tunnel flows");
 	return 0;
 }
 
@@ -958,7 +987,7 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
  *   Pointer to error structure.
  *
  * @return
- *   0 on success, a negative errno value otherwise and rte_ernno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
 mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
@@ -992,7 +1021,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
 			      const struct rte_flow_attr *attributes,
 			      struct rte_flow_error *error)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	uint32_t priority_max = priv->config.flow_prio - 1;
 
 	if (attributes->group)
@@ -1135,6 +1164,9 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
  *   Item specification.
  * @param[in] item_flags
  *   Bit-fields that holds the items detected until now.
+ * @param[in] acc_mask
+ *   Acceptable mask, if NULL default internal default mask
+ *   will be used to check whether item fields are supported.
  * @param[out] error
  *   Pointer to error structure.
  *
@@ -1144,6 +1176,7 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
 int
 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 			     uint64_t item_flags,
+			     const struct rte_flow_item_ipv4 *acc_mask,
 			     struct rte_flow_error *error)
 {
 	const struct rte_flow_item_ipv4 *mask = item->mask;
@@ -1172,8 +1205,15 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 					  "L3 cannot follow an L4 layer.");
 	if (!mask)
 		mask = &rte_flow_item_ipv4_mask;
+	else if (mask->hdr.next_proto_id != 0 &&
+		 mask->hdr.next_proto_id != 0xff)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+					  "partial mask is not supported"
+					  " for protocol");
 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
-					(const uint8_t *)&nic_mask,
+					acc_mask ? (const uint8_t *)acc_mask
+						 : (const uint8_t *)&nic_mask,
 					sizeof(struct rte_flow_item_ipv4),
 					error);
 	if (ret < 0)
@@ -1188,6 +1228,9 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
  *   Item specification.
  * @param[in] item_flags
  *   Bit-fields that holds the items detected until now.
+ * @param[in] acc_mask
+ *   Acceptable mask, if NULL default internal default mask
+ *   will be used to check whether item fields are supported.
  * @param[out] error
  *   Pointer to error structure.
  *
@@ -1197,6 +1240,7 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 int
 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
 			     uint64_t item_flags,
+			     const struct rte_flow_item_ipv6 *acc_mask,
 			     struct rte_flow_error *error)
 {
 	const struct rte_flow_item_ipv6 *mask = item->mask;
@@ -1228,21 +1272,11 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "L3 cannot follow an L4 layer.");
-	/*
-	 * IPv6 is not recognised by the NIC inside a GRE tunnel.
-	 * Such support has to be disabled as the rule will be
-	 * accepted. Issue reproduced with Mellanox OFED 4.3-3.0.2.1 and
-	 * Mellanox OFED 4.4-1.0.0.0.
-	 */
-	if (tunnel && item_flags & MLX5_FLOW_LAYER_GRE)
-		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_ITEM, item,
-					  "IPv6 inside a GRE tunnel is"
-					  " not recognised.");
 	if (!mask)
 		mask = &rte_flow_item_ipv6_mask;
 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
-					(const uint8_t *)&nic_mask,
+					acc_mask ? (const uint8_t *)acc_mask
+						 : (const uint8_t *)&nic_mask,
 					sizeof(struct rte_flow_item_ipv6),
 					error);
 	if (ret < 0)
@@ -1461,7 +1495,7 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
 				  struct rte_eth_dev *dev,
 				  struct rte_flow_error *error)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
 	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
 	int ret;
@@ -1592,12 +1626,14 @@ mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
 /**
  * Validate MPLS item.
  *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
  * @param[in] item
  *   Item specification.
  * @param[in] item_flags
  *   Bit-fields that holds the items detected until now.
- * @param[in] target_protocol
- *   The next protocol in the previous item.
+ * @param[in] prev_layer
+ *   The protocol layer indicated in previous item.
  * @param[out] error
  *   Pointer to error structure.
  *
@@ -1605,21 +1641,34 @@ mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_validate_item_mpls(const struct rte_flow_item *item __rte_unused,
+mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
+			     const struct rte_flow_item *item __rte_unused,
 			     uint64_t item_flags __rte_unused,
-			     uint8_t target_protocol __rte_unused,
+			     uint64_t prev_layer __rte_unused,
 			     struct rte_flow_error *error)
 {
 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
 	const struct rte_flow_item_mpls *mask = item->mask;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	int ret;
 
-	if (target_protocol != 0xff && target_protocol != IPPROTO_MPLS)
+	if (!priv->config.mpls_en)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "MPLS not supported or"
+					  " disabled in firmware"
+					  " configuration.");
+	/* MPLS over IP, UDP, GRE is allowed */
+	if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
+			    MLX5_FLOW_LAYER_OUTER_L4_UDP |
+			    MLX5_FLOW_LAYER_GRE)))
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "protocol filtering not compatible"
 					  " with MPLS layer");
-	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
+	/* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
+	if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
+	    !(item_flags & MLX5_FLOW_LAYER_GRE))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "multiple tunnel layers not"
@@ -1655,8 +1704,6 @@ static struct mlx5_flow *
 flow_null_prepare(const struct rte_flow_attr *attr __rte_unused,
 		  const struct rte_flow_item items[] __rte_unused,
 		  const struct rte_flow_action actions[] __rte_unused,
-		  uint64_t *item_flags __rte_unused,
-		  uint64_t *action_flags __rte_unused,
 		  struct rte_flow_error *error __rte_unused)
 {
 	rte_errno = ENOTSUP;
@@ -1733,10 +1780,10 @@ const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
 static enum mlx5_flow_drv_type
 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX;
 
-	if (attr->transfer)
+	if (attr->transfer && !priv->config.dv_esw_en)
 		type = MLX5_FLOW_TYPE_TCF;
 	else
 		type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
@@ -1762,7 +1809,7 @@ flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
  *   Pointer to the error structure.
  *
  * @return
- *   0 on success, a negative errno value otherwise and rte_ernno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
 static inline int
 flow_drv_validate(struct rte_eth_dev *dev,
@@ -1784,29 +1831,30 @@ flow_drv_validate(struct rte_eth_dev *dev,
 * calculates the size of memory required for device flow, allocates the memory,
 * initializes the device flow and returns the pointer.
 *
+ * @note
+ *   This function initializes device flow structure such as dv, tcf or verbs in
+ *   struct mlx5_flow. However, it is caller's responsibility to initialize the
+ *   rest. For example, adding returning device flow to flow->dev_flow list and
+ *   setting backward reference to the flow should be done out of this function.
+ *   layers field is not filled either.
+ *
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
- * @param[out] item_flags
- *   Pointer to bit mask of all items detected.
- * @param[out] action_flags
- *   Pointer to bit mask of all actions detected.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
- *   Pointer to device flow on success, otherwise NULL and rte_ernno is set.
+ *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
 */
 static inline struct mlx5_flow *
-flow_drv_prepare(struct rte_flow *flow,
+flow_drv_prepare(const struct rte_flow *flow,
 		 const struct rte_flow_attr *attr,
 		 const struct rte_flow_item items[],
 		 const struct rte_flow_action actions[],
-		 uint64_t *item_flags,
-		 uint64_t *action_flags,
 		 struct rte_flow_error *error)
 {
 	const struct mlx5_flow_driver_ops *fops;
@@ -1814,8 +1862,7 @@ flow_drv_prepare(struct rte_flow *flow,
 
 	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
 	fops = flow_get_drv_ops(type);
-	return fops->prepare(attr, items, actions, item_flags, action_flags,
-			     error);
+	return fops->prepare(attr, items, actions, error);
 }
 
 /**
@@ -1824,6 +1871,12 @@ flow_drv_prepare(struct rte_flow *flow,
 * translates a generic flow into a driver flow. flow_drv_prepare() must
 * precede.
 *
+ * @note
+ *   dev_flow->layers could be filled as a result of parsing during translation
+ *   if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled
+ *   if necessary. As a flow can have multiple dev_flows by RSS flow expansion,
+ *   flow->actions could be overwritten even though all the expanded dev_flows
+ *   have the same actions.
 *
 * @param[in] dev
 *   Pointer to the rte dev structure.
@@ -1839,7 +1892,7 @@ flow_drv_prepare(struct rte_flow *flow,
 *   Pointer to the error structure.
 *
 * @return
- *   0 on success, a negative errno value otherwise and rte_ernno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
 static inline int
 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
@@ -1887,7 +1940,7 @@ flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 * Flow driver remove API. This abstracts calling driver specific functions.
 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
 * on device. All the resources of the flow should be freed by calling
- * flow_dv_destroy().
+ * flow_drv_destroy().
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
@@ -2018,8 +2071,6 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
 {
 	struct rte_flow *flow = NULL;
 	struct mlx5_flow *dev_flow;
-	uint64_t action_flags = 0;
-	uint64_t item_flags = 0;
 	const struct rte_flow_action_rss *rss;
 	union {
 		struct rte_flow_expand_rss buf;
@@ -2042,6 +2093,8 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
 		flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
 	flow = rte_calloc(__func__, 1, flow_size, 0);
 	flow->drv_type = flow_get_drv_type(dev, attr);
+	flow->ingress = attr->ingress;
+	flow->transfer = attr->transfer;
 	assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
 	       flow->drv_type < MLX5_FLOW_TYPE_MAX);
 	flow->queue = (void *)(flow + 1);
@@ -2062,16 +2115,10 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
 	}
 	for (i = 0; i < buf->entries; ++i) {
 		dev_flow = flow_drv_prepare(flow, attr, buf->entry[i].pattern,
-					    actions, &item_flags, &action_flags,
-					    error);
+					    actions, error);
 		if (!dev_flow)
 			goto error;
 		dev_flow->flow = flow;
-		dev_flow->layers = item_flags;
-		/* Store actions once as expanded flows have same actions. */
-		if (i == 0)
-			flow->actions = action_flags;
-		assert(flow->actions == action_flags);
 		LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
 		ret = flow_drv_translate(dev, dev_flow, attr,
 					 buf->entry[i].pattern,
@@ -2109,8 +2156,9 @@ mlx5_flow_create(struct rte_eth_dev *dev,
 		 const struct rte_flow_action actions[],
 		 struct rte_flow_error *error)
 {
-	return flow_list_create(dev,
-				&((struct priv *)dev->data->dev_private)->flows,
+	struct mlx5_priv *priv = (struct mlx5_priv *)dev->data->dev_private;
+
+	return flow_list_create(dev, &priv->flows,
 				attr, items, actions, error);
 }
 
@@ -2128,14 +2176,14 @@ static void
 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
 		  struct rte_flow *flow)
 {
-	flow_drv_destroy(dev, flow);
-	TAILQ_REMOVE(list, flow, next);
 	/*
 	 * Update RX queue flags only if port is started, otherwise it is
 	 * already clean.
 	 */
 	if (dev->data->dev_started)
 		flow_rxq_flags_trim(dev, flow);
+	flow_drv_destroy(dev, flow);
+	TAILQ_REMOVE(list, flow, next);
 	rte_free(flow->fdir);
 	rte_free(flow);
 }
@@ -2220,7 +2268,7 @@ error:
 int
 mlx5_flow_verify(struct rte_eth_dev *dev)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow *flow;
 	int ret = 0;
 
@@ -2256,7 +2304,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
 		    struct rte_flow_item_vlan *vlan_spec,
 		    struct rte_flow_item_vlan *vlan_mask)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	const struct rte_flow_attr attr = {
 		.ingress = 1,
 		.priority = MLX5_FLOW_PRIO_RSVD,
@@ -2302,9 +2350,8 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
 	struct rte_flow_error error;
 	unsigned int i;
 
-	if (!priv->reta_idx_n) {
-		rte_errno = EINVAL;
-		return -rte_errno;
+	if (!priv->reta_idx_n || !priv->rxqs_n) {
+		return 0;
 	}
 	for (i = 0; i != priv->reta_idx_n; ++i)
 		queue[i] = (*priv->reta_idx)[i];
@@ -2347,7 +2394,7 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
 		  struct rte_flow *flow,
 		  struct rte_flow_error *error __rte_unused)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 
 	flow_list_destroy(dev, &priv->flows, flow);
 	return 0;
@@ -2363,7 +2410,7 @@ int
 mlx5_flow_flush(struct rte_eth_dev *dev,
 		struct rte_flow_error *error __rte_unused)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 
 	mlx5_flow_list_flush(dev, &priv->flows);
 	return 0;
@@ -2380,7 +2427,7 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
 		  int enable,
 		  struct rte_flow_error *error)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 
 	if (dev->data->dev_started) {
 		rte_flow_error_set(error, EBUSY,
@@ -2458,7 +2505,7 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev,
 			 const struct rte_eth_fdir_filter *fdir_filter,
 			 struct mlx5_fdir *attributes)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	const struct rte_eth_fdir_input *input = &fdir_filter->input;
 	const struct rte_eth_fdir_masks *mask =
 		&dev->data->dev_conf.fdir_conf.mask;
@@ -2501,13 +2548,13 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev,
 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
-		attributes->l3.ipv4.hdr = (struct ipv4_hdr){
+		attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){
 			.src_addr = input->flow.ip4_flow.src_ip,
 			.dst_addr = input->flow.ip4_flow.dst_ip,
 			.time_to_live = input->flow.ip4_flow.ttl,
 			.type_of_service = input->flow.ip4_flow.tos,
 		};
-		attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
+		attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){
 			.src_addr = mask->ipv4_mask.src_ip,
 			.dst_addr = mask->ipv4_mask.dst_ip,
 			.time_to_live = mask->ipv4_mask.ttl,
@@ -2523,7 +2570,7 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev,
 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
-		attributes->l3.ipv6.hdr = (struct ipv6_hdr){
+		attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){
 			.hop_limits = input->flow.ipv6_flow.hop_limits,
 			.proto = input->flow.ipv6_flow.proto,
 		};
@@ -2653,7 +2700,7 @@ flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2)
 	    FLOW_FDIR_CMP(f1, f2, l3_mask) ||
 	    FLOW_FDIR_CMP(f1, f2, l4) ||
 	    FLOW_FDIR_CMP(f1, f2, l4_mask) ||
-	    FLOW_FDIR_CMP(f1, f2, actions[0]))
+	    FLOW_FDIR_CMP(f1, f2, actions[0].type))
 		return 1;
 	if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE &&
 	    FLOW_FDIR_CMP(f1, f2, queue))
@@ -2675,7 +2722,7 @@
 static struct rte_flow *
 flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow *flow = NULL;
 
 	assert(fdir_flow);
@@ -2704,7 +2751,7 @@ static int
 flow_fdir_filter_add(struct rte_eth_dev *dev,
 		     const struct rte_eth_fdir_filter *fdir_filter)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_fdir *fdir_flow;
 	struct rte_flow *flow;
 	int ret;
@@ -2751,7 +2798,7 @@ static int
 flow_fdir_filter_delete(struct rte_eth_dev *dev,
 			const struct rte_eth_fdir_filter *fdir_filter)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow *flow;
 	struct mlx5_fdir fdir_flow = {
 		.attr.group = 0,
@@ -2804,7 +2851,7 @@ flow_fdir_filter_update(struct rte_eth_dev *dev,
 static void
 flow_fdir_filter_flush(struct rte_eth_dev *dev)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 
 	mlx5_flow_list_flush(dev, &priv->flows);
 }