diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
index e1775458d2..ac66444d24 100644
--- a/drivers/net/mlx4/mlx4_flow.c
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -39,6 +39,7 @@
 #include <arpa/inet.h>
 #include <assert.h>
 #include <errno.h>
+#include <stdalign.h>
 #include <stddef.h>
 #include <stdint.h>
 #include <string.h>
@@ -53,6 +54,7 @@
 #pragma GCC diagnostic error "-Wpedantic"
 #endif

+#include <rte_byteorder.h>
 #include <rte_errno.h>
 #include <rte_eth_ctrl.h>
 #include <rte_ethdev.h>
@@ -66,16 +68,14 @@
 #include "mlx4_rxtx.h"
 #include "mlx4_utils.h"

-/** Static initializer for items. */
-#define ITEMS(...) \
+/** Static initializer for a list of subsequent item types. */
+#define NEXT_ITEM(...) \
 	(const enum rte_flow_item_type []){ \
 		__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
 	}

-/** Structure to generate a simple graph of layers supported by the NIC. */
-struct mlx4_flow_items {
-	/** List of possible actions for these items. */
-	const enum rte_flow_action_type *const actions;
+/** Processor structure associated with a flow item. */
+struct mlx4_flow_proc_item {
 	/** Bit-masks corresponding to the possibilities for the item. */
 	const void *mask;
 	/**
@@ -110,19 +110,19 @@ struct mlx4_flow_items {
 	 *   rte_flow item to convert.
 	 * @param default_mask
 	 *   Default bit-masks to use when item->mask is not provided.
-	 * @param data
-	 *   Internal structure to store the conversion.
+	 * @param flow
+	 *   Flow rule handle to update.
 	 *
 	 * @return
 	 *   0 on success, negative value otherwise.
 	 */
 	int (*convert)(const struct rte_flow_item *item,
 		       const void *default_mask,
-		       void *data);
+		       struct rte_flow *flow);
 	/** Size in bytes of the destination structure. */
 	const unsigned int dst_sz;
-	/** List of possible following items. */
-	const enum rte_flow_item_type *const items;
+	/** List of possible subsequent items. */
+	const enum rte_flow_item_type *const next_item;
 };

 struct rte_flow_drop {
@@ -130,13 +130,6 @@ struct rte_flow_drop {
 	struct ibv_cq *cq; /**< Verbs completion queue. */
 };

-/** Valid action for this PMD. */
-static const enum rte_flow_action_type valid_actions[] = {
-	RTE_FLOW_ACTION_TYPE_DROP,
-	RTE_FLOW_ACTION_TYPE_QUEUE,
-	RTE_FLOW_ACTION_TYPE_END,
-};
-
 /**
  * Convert Ethernet item to Verbs specification.
  *
@@ -144,24 +137,23 @@ static const enum rte_flow_action_type valid_actions[] = {
  *   Item specification.
  * @param default_mask[in]
  *   Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- *   User structure.
+ * @param flow[in, out]
+ *   Flow rule handle to update.
  */
 static int
 mlx4_flow_create_eth(const struct rte_flow_item *item,
 		     const void *default_mask,
-		     void *data)
+		     struct rte_flow *flow)
 {
 	const struct rte_flow_item_eth *spec = item->spec;
 	const struct rte_flow_item_eth *mask = item->mask;
-	struct mlx4_flow *flow = (struct mlx4_flow *)data;
 	struct ibv_flow_spec_eth *eth;
 	const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
 	unsigned int i;

 	++flow->ibv_attr->num_of_specs;
 	flow->ibv_attr->priority = 2;
-	eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+	eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
 	*eth = (struct ibv_flow_spec_eth) {
 		.type = IBV_FLOW_SPEC_ETH,
 		.size = eth_size,
@@ -191,21 +183,21 @@ mlx4_flow_create_eth(const struct rte_flow_item *item,
  *   Item specification.
  * @param default_mask[in]
  *   Default bit-masks to use when item->mask is not provided.
- * @param data[in, out] - * User structure. + * @param flow[in, out] + * Flow rule handle to update. */ static int mlx4_flow_create_vlan(const struct rte_flow_item *item, const void *default_mask, - void *data) + struct rte_flow *flow) { const struct rte_flow_item_vlan *spec = item->spec; const struct rte_flow_item_vlan *mask = item->mask; - struct mlx4_flow *flow = (struct mlx4_flow *)data; struct ibv_flow_spec_eth *eth; const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth); - eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size); + eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size - + eth_size); if (!spec) return 0; if (!mask) @@ -223,23 +215,22 @@ mlx4_flow_create_vlan(const struct rte_flow_item *item, * Item specification. * @param default_mask[in] * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param flow[in, out] + * Flow rule handle to update. */ static int mlx4_flow_create_ipv4(const struct rte_flow_item *item, const void *default_mask, - void *data) + struct rte_flow *flow) { const struct rte_flow_item_ipv4 *spec = item->spec; const struct rte_flow_item_ipv4 *mask = item->mask; - struct mlx4_flow *flow = (struct mlx4_flow *)data; struct ibv_flow_spec_ipv4 *ipv4; unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4); ++flow->ibv_attr->num_of_specs; flow->ibv_attr->priority = 1; - ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset); + ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size); *ipv4 = (struct ibv_flow_spec_ipv4) { .type = IBV_FLOW_SPEC_IPV4, .size = ipv4_size, @@ -269,23 +260,22 @@ mlx4_flow_create_ipv4(const struct rte_flow_item *item, * Item specification. * @param default_mask[in] * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param flow[in, out] + * Flow rule handle to update. */ static int mlx4_flow_create_udp(const struct rte_flow_item *item, const void *default_mask, - void *data) + struct rte_flow *flow) { const struct rte_flow_item_udp *spec = item->spec; const struct rte_flow_item_udp *mask = item->mask; - struct mlx4_flow *flow = (struct mlx4_flow *)data; struct ibv_flow_spec_tcp_udp *udp; unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp); ++flow->ibv_attr->num_of_specs; flow->ibv_attr->priority = 0; - udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset); + udp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size); *udp = (struct ibv_flow_spec_tcp_udp) { .type = IBV_FLOW_SPEC_UDP, .size = udp_size, @@ -311,23 +301,22 @@ mlx4_flow_create_udp(const struct rte_flow_item *item, * Item specification. * @param default_mask[in] * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param flow[in, out] + * Flow rule handle to update. 
*/ static int mlx4_flow_create_tcp(const struct rte_flow_item *item, const void *default_mask, - void *data) + struct rte_flow *flow) { const struct rte_flow_item_tcp *spec = item->spec; const struct rte_flow_item_tcp *mask = item->mask; - struct mlx4_flow *flow = (struct mlx4_flow *)data; struct ibv_flow_spec_tcp_udp *tcp; unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp); ++flow->ibv_attr->num_of_specs; flow->ibv_attr->priority = 0; - tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset); + tcp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size); *tcp = (struct ibv_flow_spec_tcp_udp) { .type = IBV_FLOW_SPEC_TCP, .size = tcp_size, @@ -485,14 +474,13 @@ mlx4_flow_validate_tcp(const struct rte_flow_item *item, } /** Graph of supported items and associated actions. */ -static const struct mlx4_flow_items mlx4_flow_items[] = { +static const struct mlx4_flow_proc_item mlx4_flow_proc_item_list[] = { [RTE_FLOW_ITEM_TYPE_END] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH), + .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH), }, [RTE_FLOW_ITEM_TYPE_ETH] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN, - RTE_FLOW_ITEM_TYPE_IPV4), - .actions = valid_actions, + .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4), .mask = &(const struct rte_flow_item_eth){ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", @@ -504,15 +492,10 @@ static const struct mlx4_flow_items mlx4_flow_items[] = { .dst_sz = sizeof(struct ibv_flow_spec_eth), }, [RTE_FLOW_ITEM_TYPE_VLAN] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4), - .actions = valid_actions, + .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_IPV4), .mask = &(const struct rte_flow_item_vlan){ - /* rte_flow_item_vlan_mask is invalid for mlx4. */ -#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN - .tci = 0x0fff, -#else - .tci = 0xff0f, -#endif + /* Only TCI VID matching is supported. */ + .tci = RTE_BE16(0x0fff), }, .mask_sz = sizeof(struct rte_flow_item_vlan), .validate = mlx4_flow_validate_vlan, @@ -520,13 +503,12 @@ static const struct mlx4_flow_items mlx4_flow_items[] = { .dst_sz = 0, }, [RTE_FLOW_ITEM_TYPE_IPV4] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_TCP), - .actions = valid_actions, + .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_TCP), .mask = &(const struct rte_flow_item_ipv4){ .hdr = { - .src_addr = -1, - .dst_addr = -1, + .src_addr = RTE_BE32(0xffffffff), + .dst_addr = RTE_BE32(0xffffffff), }, }, .default_mask = &rte_flow_item_ipv4_mask, @@ -536,11 +518,10 @@ static const struct mlx4_flow_items mlx4_flow_items[] = { .dst_sz = sizeof(struct ibv_flow_spec_ipv4), }, [RTE_FLOW_ITEM_TYPE_UDP] = { - .actions = valid_actions, .mask = &(const struct rte_flow_item_udp){ .hdr = { - .src_port = -1, - .dst_port = -1, + .src_port = RTE_BE16(0xffff), + .dst_port = RTE_BE16(0xffff), }, }, .default_mask = &rte_flow_item_udp_mask, @@ -550,11 +531,10 @@ static const struct mlx4_flow_items mlx4_flow_items[] = { .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp), }, [RTE_FLOW_ITEM_TYPE_TCP] = { - .actions = valid_actions, .mask = &(const struct rte_flow_item_tcp){ .hdr = { - .src_port = -1, - .dst_port = -1, + .src_port = RTE_BE16(0xffff), + .dst_port = RTE_BE16(0xffff), }, }, .default_mask = &rte_flow_item_tcp_mask, @@ -572,14 +552,15 @@ static const struct mlx4_flow_items mlx4_flow_items[] = { * Pointer to private structure. * @param[in] attr * Flow rule attributes. 
- * @param[in] items + * @param[in] pattern * Pattern specification (list terminated by the END pattern item). * @param[in] actions * Associated actions (list terminated by the END action). * @param[out] error * Perform verbose error reporting if not NULL. - * @param[in, out] flow - * Flow structure to update. + * @param[in, out] addr + * Buffer where the resulting flow rule handle pointer must be stored. + * If NULL, stop processing after validation stage. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. @@ -587,131 +568,161 @@ static const struct mlx4_flow_items mlx4_flow_items[] = { static int mlx4_flow_prepare(struct priv *priv, const struct rte_flow_attr *attr, - const struct rte_flow_item items[], + const struct rte_flow_item pattern[], const struct rte_flow_action actions[], struct rte_flow_error *error, - struct mlx4_flow *flow) + struct rte_flow **addr) { - const struct mlx4_flow_items *cur_item = mlx4_flow_items; - struct mlx4_flow_action action = { - .queue = 0, - .drop = 0, - }; + const struct rte_flow_item *item; + const struct rte_flow_action *action; + const struct mlx4_flow_proc_item *proc; + struct rte_flow temp = { .ibv_attr_size = sizeof(*temp.ibv_attr) }; + struct rte_flow *flow = &temp; + uint32_t priority_override = 0; - (void)priv; - if (attr->group) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_GROUP, - NULL, - "groups are not supported"); - return -rte_errno; - } - if (attr->priority) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, - NULL, - "priorities are not supported"); - return -rte_errno; - } - if (attr->egress) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, - NULL, - "egress is not supported"); - return -rte_errno; - } - if (!attr->ingress) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, - NULL, - "only ingress is supported"); - return -rte_errno; - } - /* Go over items list. */ - for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) { - const struct mlx4_flow_items *token = NULL; + if (attr->group) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + NULL, "groups are not supported"); + if (priv->isolated) + priority_override = attr->priority; + else if (attr->priority) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + NULL, + "priorities are not supported outside isolated mode"); + if (attr->priority > MLX4_FLOW_PRIORITY_LAST) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + NULL, "maximum priority level is " + MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST)); + if (attr->egress) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + NULL, "egress is not supported"); + if (!attr->ingress) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + NULL, "only ingress is supported"); +fill: + proc = mlx4_flow_proc_item_list; + /* Go over pattern. */ + for (item = pattern; item->type; ++item) { + const struct mlx4_flow_proc_item *next = NULL; unsigned int i; int err; - if (items->type == RTE_FLOW_ITEM_TYPE_VOID) + if (item->type == RTE_FLOW_ITEM_TYPE_VOID) continue; /* * The nic can support patterns with NULL eth spec only * if eth is a single item in a rule. 
*/ - if (!items->spec && - items->type == RTE_FLOW_ITEM_TYPE_ETH) { - const struct rte_flow_item *next = items + 1; - - if (next->type != RTE_FLOW_ITEM_TYPE_END) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - items, - "the rule requires" - " an Ethernet spec"); - return -rte_errno; - } + if (!item->spec && item->type == RTE_FLOW_ITEM_TYPE_ETH) { + const struct rte_flow_item *next = item + 1; + + if (next->type) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "the rule requires an Ethernet spec"); } - for (i = 0; - cur_item->items && - cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END; - ++i) { - if (cur_item->items[i] == items->type) { - token = &mlx4_flow_items[items->type]; + for (i = 0; proc->next_item && proc->next_item[i]; ++i) { + if (proc->next_item[i] == item->type) { + next = &mlx4_flow_proc_item_list[item->type]; break; } } - if (!token) + if (!next) goto exit_item_not_supported; - cur_item = token; - err = cur_item->validate(items, - (const uint8_t *)cur_item->mask, - cur_item->mask_sz); - if (err) - goto exit_item_not_supported; - if (flow->ibv_attr && cur_item->convert) { - err = cur_item->convert(items, - (cur_item->default_mask ? - cur_item->default_mask : - cur_item->mask), - flow); + proc = next; + /* Perform validation once, while handle is not allocated. */ + if (flow == &temp) { + err = proc->validate(item, proc->mask, proc->mask_sz); + if (err) + goto exit_item_not_supported; + } else if (proc->convert) { + err = proc->convert(item, + (proc->default_mask ? + proc->default_mask : + proc->mask), + flow); if (err) goto exit_item_not_supported; } - flow->offset += cur_item->dst_sz; + flow->ibv_attr_size += proc->dst_sz; } - /* Go over actions list */ - for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) { - if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) { + /* Use specified priority level when in isolated mode. */ + if (priv->isolated && flow != &temp) + flow->ibv_attr->priority = priority_override; + /* Go over actions list. */ + for (action = actions; action->type; ++action) { + switch (action->type) { + const struct rte_flow_action_queue *queue; + + case RTE_FLOW_ACTION_TYPE_VOID: continue; - } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) { - action.drop = 1; - } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) { - const struct rte_flow_action_queue *queue = - (const struct rte_flow_action_queue *) - actions->conf; - - if (!queue || (queue->index > (priv->rxqs_n - 1))) + case RTE_FLOW_ACTION_TYPE_DROP: + flow->drop = 1; + break; + case RTE_FLOW_ACTION_TYPE_QUEUE: + queue = action->conf; + if (queue->index >= priv->dev->data->nb_rx_queues) goto exit_action_not_supported; - action.queue = 1; - } else { + flow->queue = 1; + flow->queue_id = queue->index; + break; + default: goto exit_action_not_supported; } } - if (!action.queue && !action.drop) { - rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, "no valid action"); - return -rte_errno; + if (!flow->queue && !flow->drop) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "no valid action"); + /* Validation ends here. */ + if (!addr) + return 0; + if (flow == &temp) { + /* Allocate proper handle based on collected data. 
*/ + const struct mlx4_malloc_vec vec[] = { + { + .align = alignof(struct rte_flow), + .size = sizeof(*flow), + .addr = (void **)&flow, + }, + { + .align = alignof(struct ibv_flow_attr), + .size = temp.ibv_attr_size, + .addr = (void **)&temp.ibv_attr, + }, + }; + + if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec))) + return rte_flow_error_set + (error, -rte_errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "flow rule handle allocation failure"); + /* Most fields will be updated by second pass. */ + *flow = (struct rte_flow){ + .ibv_attr = temp.ibv_attr, + .ibv_attr_size = sizeof(*flow->ibv_attr), + }; + *flow->ibv_attr = (struct ibv_flow_attr){ + .type = IBV_FLOW_ATTR_NORMAL, + .size = sizeof(*flow->ibv_attr), + .port = priv->port, + }; + goto fill; } + *addr = flow; return 0; exit_item_not_supported: - rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, - items, "item not supported"); - return -rte_errno; + return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, + item, "item not supported"); exit_action_not_supported: - rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, - actions, "action not supported"); - return -rte_errno; + return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, + action, "action not supported"); } /** @@ -723,14 +734,13 @@ exit_action_not_supported: static int mlx4_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, - const struct rte_flow_item items[], + const struct rte_flow_item pattern[], const struct rte_flow_action actions[], struct rte_flow_error *error) { struct priv *priv = dev->data->dev_private; - struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr) }; - return mlx4_flow_prepare(priv, attr, items, actions, error, &flow); + return mlx4_flow_prepare(priv, attr, pattern, actions, error, NULL); } /** @@ -807,58 +817,66 @@ err: } /** - * Complete flow rule creation. + * Toggle a configured flow rule. * * @param priv * Pointer to private structure. - * @param ibv_attr - * Verbs flow attributes. - * @param action - * Target action structure. + * @param flow + * Flow rule handle to toggle. + * @param enable + * Whether associated Verbs flow must be created or removed. * @param[out] error * Perform verbose error reporting if not NULL. * * @return - * A flow if the rule could be created. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ -static struct rte_flow * -mlx4_flow_create_action_queue(struct priv *priv, - struct ibv_flow_attr *ibv_attr, - struct mlx4_flow_action *action, - struct rte_flow_error *error) +static int +mlx4_flow_toggle(struct priv *priv, + struct rte_flow *flow, + int enable, + struct rte_flow_error *error) { - struct ibv_qp *qp; - struct rte_flow *rte_flow; - - assert(priv->pd); - assert(priv->ctx); - rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0); - if (!rte_flow) { - rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, "cannot allocate flow memory"); - return NULL; + struct ibv_qp *qp = NULL; + const char *msg; + int err; + + if (!enable) { + if (!flow->ibv_flow) + return 0; + claim_zero(ibv_destroy_flow(flow->ibv_flow)); + flow->ibv_flow = NULL; + return 0; } - if (action->drop) { - qp = priv->flow_drop_queue ? 
priv->flow_drop_queue->qp : NULL; - } else { - struct rxq *rxq = (*priv->rxqs)[action->queue_id]; + if (flow->ibv_flow) + return 0; + assert(flow->queue ^ flow->drop); + if (flow->queue) { + struct rxq *rxq; + assert(flow->queue_id < priv->dev->data->nb_rx_queues); + rxq = priv->dev->data->rx_queues[flow->queue_id]; + if (!rxq) { + err = EINVAL; + msg = "target queue must be configured first"; + goto error; + } qp = rxq->qp; - rte_flow->qp = qp; } - rte_flow->ibv_attr = ibv_attr; - if (!priv->started) - return rte_flow; - rte_flow->ibv_flow = ibv_create_flow(qp, rte_flow->ibv_attr); - if (!rte_flow->ibv_flow) { - rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, "flow rule creation failure"); - goto error; + if (flow->drop) { + assert(priv->flow_drop_queue); + qp = priv->flow_drop_queue->qp; } - return rte_flow; + assert(qp); + assert(flow->ibv_attr); + flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr); + if (flow->ibv_flow) + return 0; + err = errno; + msg = "flow rule rejected by device"; error: - rte_free(rte_flow); - return NULL; + return rte_flow_error_set + (error, err, RTE_FLOW_ERROR_TYPE_HANDLE, flow, msg); } /** @@ -870,67 +888,25 @@ error: static struct rte_flow * mlx4_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, - const struct rte_flow_item items[], + const struct rte_flow_item pattern[], const struct rte_flow_action actions[], struct rte_flow_error *error) { struct priv *priv = dev->data->dev_private; - struct rte_flow *rte_flow; - struct mlx4_flow_action action; - struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr), }; + struct rte_flow *flow; int err; - err = mlx4_flow_prepare(priv, attr, items, actions, error, &flow); + err = mlx4_flow_prepare(priv, attr, pattern, actions, error, &flow); if (err) return NULL; - flow.ibv_attr = rte_malloc(__func__, flow.offset, 0); - if (!flow.ibv_attr) { - rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, "cannot allocate ibv_attr memory"); - return NULL; + err = mlx4_flow_toggle(priv, flow, priv->started, error); + if (!err) { + LIST_INSERT_HEAD(&priv->flows, flow, next); + return flow; } - flow.offset = sizeof(struct ibv_flow_attr); - *flow.ibv_attr = (struct ibv_flow_attr){ - .comp_mask = 0, - .type = IBV_FLOW_ATTR_NORMAL, - .size = sizeof(struct ibv_flow_attr), - .priority = attr->priority, - .num_of_specs = 0, - .port = priv->port, - .flags = 0, - }; - claim_zero(mlx4_flow_prepare(priv, attr, items, actions, - error, &flow)); - action = (struct mlx4_flow_action){ - .queue = 0, - .drop = 0, - }; - for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) { - if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) { - continue; - } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) { - action.queue = 1; - action.queue_id = - ((const struct rte_flow_action_queue *) - actions->conf)->index; - } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) { - action.drop = 1; - } else { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, "unsupported action"); - goto exit; - } - } - rte_flow = mlx4_flow_create_action_queue(priv, flow.ibv_attr, - &action, error); - if (rte_flow) { - LIST_INSERT_HEAD(&priv->flows, rte_flow, next); - DEBUG("Flow created %p", (void *)rte_flow); - return rte_flow; - } -exit: - rte_free(flow.ibv_attr); + rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + error->message); + rte_free(flow); return NULL; } @@ -954,15 +930,15 @@ mlx4_flow_isolate(struct rte_eth_dev *dev, mlx4_mac_addr_del(priv); } 
else if (mlx4_mac_addr_add(priv) < 0) { priv->isolated = 1; - return -rte_flow_error_set(error, rte_errno, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot leave isolated mode"); + return rte_flow_error_set(error, rte_errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "cannot leave isolated mode"); } return 0; } /** - * Destroy a flow. + * Destroy a flow rule. * * @see rte_flow_destroy() * @see rte_flow_ops @@ -972,19 +948,18 @@ mlx4_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error) { - (void)dev; - (void)error; + struct priv *priv = dev->data->dev_private; + int err = mlx4_flow_toggle(priv, flow, 0, error); + + if (err) + return err; LIST_REMOVE(flow, next); - if (flow->ibv_flow) - claim_zero(ibv_destroy_flow(flow->ibv_flow)); - rte_free(flow->ibv_attr); - DEBUG("Flow destroyed %p", (void *)flow); rte_free(flow); return 0; } /** - * Destroy all flows. + * Destroy all flow rules. * * @see rte_flow_flush() * @see rte_flow_ops @@ -1005,9 +980,7 @@ mlx4_flow_flush(struct rte_eth_dev *dev, } /** - * Remove all flows. - * - * Called by dev_stop() to remove all flows. + * Disable flow rules. * * @param priv * Pointer to private structure. @@ -1020,27 +993,24 @@ mlx4_flow_stop(struct priv *priv) for (flow = LIST_FIRST(&priv->flows); flow; flow = LIST_NEXT(flow, next)) { - claim_zero(ibv_destroy_flow(flow->ibv_flow)); - flow->ibv_flow = NULL; - DEBUG("Flow %p removed", (void *)flow); + claim_zero(mlx4_flow_toggle(priv, flow, 0, NULL)); } mlx4_flow_destroy_drop_queue(priv); } /** - * Add all flows. + * Enable flow rules. * * @param priv * Pointer to private structure. * * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx4_flow_start(struct priv *priv) { int ret; - struct ibv_qp *qp; struct rte_flow *flow; ret = mlx4_flow_create_drop_queue(priv); @@ -1049,14 +1019,11 @@ mlx4_flow_start(struct priv *priv) for (flow = LIST_FIRST(&priv->flows); flow; flow = LIST_NEXT(flow, next)) { - qp = flow->qp ? flow->qp : priv->flow_drop_queue->qp; - flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr); - if (!flow->ibv_flow) { - DEBUG("Flow %p cannot be applied", (void *)flow); - rte_errno = EINVAL; - return rte_errno; + ret = mlx4_flow_toggle(priv, flow, 1, NULL); + if (unlikely(ret)) { + mlx4_flow_stop(priv); + return ret; } - DEBUG("Flow %p applied", (void *)flow); } return 0; }
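The patch folds rule validation and conversion into a single mlx4_flow_prepare(): a first pass runs over a throwaway stack handle (temp) to validate the rule and size its Verbs attribute, and a second pass, entered through the fill label once the real handle is allocated, converts each item into the Verbs specification. Both passes are reached through the regular rte_flow entry points. The following is a minimal sketch of a rule this driver accepts after the patch: eth with a spec (required unless eth is the only item), then ipv4 and udp, directed to Rx queue 0. The MAC address and queue index are placeholders, and the uint16_t port id assumes a DPDK 17.11-era API.

#include <rte_flow.h>

static struct rte_flow *
udp_to_queue0(uint16_t port_id, struct rte_flow_error *err)
{
	static const struct rte_flow_attr attr = {
		.ingress = 1, /* the only direction mlx4 accepts */
	};
	static const struct rte_flow_item_eth eth_spec = {
		.dst.addr_bytes = "\x00\x11\x22\x33\x44\x55", /* placeholder */
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	static const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* First pass only: mlx4_flow_prepare() runs with addr == NULL,
	 * no handle is allocated. */
	if (rte_flow_validate(port_id, &attr, pattern, actions, err))
		return NULL;
	/* Both passes: validation, then allocation and conversion. */
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}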
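When a handle is requested, the allocation is sized from the first pass (temp.ibv_attr_size, the sum of every matched item's dst_sz) and obtained through a single mlx4_zmallocv() call, so the rte_flow handle and its ibv_flow_attr live in one block and mlx4_flow_destroy() releases both with one rte_free(). The sketch below restates that allocation technique with plain stdlib calls; vec_entry and zmallocv_stub are hypothetical names, not part of the driver.

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct mlx4_malloc_vec. */
struct vec_entry {
	size_t align; /* must be a power of two */
	size_t size;
	void **addr;
};

/* Carve one zeroed allocation into several aligned sub-objects so a
 * single free() on the returned pointer releases all of them. */
static void *
zmallocv_stub(struct vec_entry *vec, unsigned int cnt)
{
	size_t off = 0;
	uint8_t *buf;
	unsigned int i;

	for (i = 0; i < cnt; ++i) {
		/* Round the running offset up to this entry's alignment. */
		off = (off + vec[i].align - 1) & ~(vec[i].align - 1);
		*vec[i].addr = (void *)(uintptr_t)off; /* offset, for now */
		off += vec[i].size;
	}
	buf = calloc(1, off); /* assumes malloc alignment suffices */
	if (buf == NULL)
		return NULL;
	/* Rebase stored offsets onto the actual buffer address. */
	for (i = 0; i < cnt; ++i)
		*vec[i].addr = buf + (uintptr_t)*vec[i].addr;
	return buf;
}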
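The priority handling is also new: attr->priority used to be rejected unconditionally, while this patch honors it in isolated mode, up to MLX4_FLOW_PRIORITY_LAST. A hedged usage sketch, assuming isolated mode can be requested on the port (it is typically enabled before starting the port; the priority value and pattern are placeholders):

#include <rte_flow.h>

static struct rte_flow *
high_prio_drop(uint16_t port_id,
	       const struct rte_flow_item pattern[],
	       struct rte_flow_error *err)
{
	const struct rte_flow_attr attr = {
		.priority = 1, /* rejected unless isolated mode is on */
		.ingress = 1,
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_isolate(port_id, 1, err))
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}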