diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index a885f6e9b1..e789c4c8b1 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -52,6 +52,9 @@
 #include "mlx5.h"
 #include "mlx5_prm.h"
 
+/* Number of Work Queues needed for the drop queue. */
+#define MLX5_DROP_WQ_N 4
+
 static int
 mlx5_flow_create_eth(const struct rte_flow_item *item,
 		     const void *default_mask,
@@ -95,8 +98,11 @@ struct rte_flow {
 	struct ibv_exp_flow *ibv_flow; /**< Verbs flow. */
 	struct ibv_exp_wq *wq; /**< Verbs work queue. */
 	struct ibv_cq *cq; /**< Verbs completion queue. */
-	struct rxq *rxq; /**< Pointer to the queue, NULL if drop queue. */
+	struct rxq *(*rxqs)[]; /**< Pointer to the queues array. */
+	uint16_t rxqs_n; /**< Number of queues in this flow, 0 if drop queue. */
 	uint32_t mark:1; /**< Set if the flow is marked. */
+	uint32_t drop:1; /**< Drop queue. */
+	uint64_t hash_fields; /**< Fields that participate in the hash. */
 };
 
 /** Static initializer for items. */
@@ -146,6 +152,7 @@ static const enum rte_flow_action_type valid_actions[] = {
 	RTE_FLOW_ACTION_TYPE_DROP,
 	RTE_FLOW_ACTION_TYPE_QUEUE,
 	RTE_FLOW_ACTION_TYPE_MARK,
+	RTE_FLOW_ACTION_TYPE_FLAG,
 	RTE_FLOW_ACTION_TYPE_END,
 };
 
@@ -272,14 +279,24 @@ struct mlx5_flow {
 	struct ibv_exp_flow_attr *ibv_attr; /**< Verbs attribute. */
 	unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
 	uint32_t inner; /**< Set once VXLAN is encountered. */
+	uint64_t hash_fields; /**< Fields that participate in the hash. */
+};
+
+/** Structure for the drop queue. */
+struct rte_flow_drop {
+	struct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */
+	struct ibv_qp *qp; /**< Verbs queue pair. */
+	struct ibv_exp_wq *wqs[MLX5_DROP_WQ_N]; /**< Verbs work queues. */
+	struct ibv_cq *cq; /**< Verbs completion queue. */
 };
 
 struct mlx5_flow_action {
 	uint32_t queue:1; /**< Target is a receive queue. */
 	uint32_t drop:1; /**< Target is a drop queue. */
 	uint32_t mark:1; /**< Mark is present in the flow. */
-	uint32_t queue_id; /**< Identifier of the queue. */
 	uint32_t mark_id; /**< Mark identifier. */
+	uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queue indexes to use. */
+	uint16_t queues_n; /**< Number of entries in queues[]. */
 };
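The reworked struct rte_flow above replaces the single rxq pointer with a pointer to a trailing array of queues, allocated together with the structure itself. A minimal standalone sketch of that layout, assuming the allocation pattern used later in priv_flow_create_action_queue() (the structure followed by one struct rxq * slot per queue); names here are hypothetical stand-ins:

	#include <stdint.h>
	#include <stdlib.h>

	struct rxq; /* opaque here */

	/* Hypothetical stand-in mirroring only the fields this sketch needs. */
	struct flow_sketch {
		uint16_t rxqs_n;
		struct rxq *(*rxqs)[];
	};

	static struct flow_sketch *
	flow_sketch_alloc(uint16_t queues_n)
	{
		struct flow_sketch *f;

		/* One allocation: the structure plus queues_n pointers. */
		f = calloc(1, sizeof(*f) + sizeof(struct rxq *) * queues_n);
		if (f == NULL)
			return NULL;
		/* The array lives immediately past the structure itself. */
		f->rxqs = (struct rxq *(*)[])(f + 1);
		f->rxqs_n = queues_n;
		return f;
	}

Dereferencing then reads (*f->rxqs)[i], matching how the driver walks the array in the hunks below.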
 
 /**
@@ -452,10 +469,56 @@ priv_flow_validate(struct priv *priv,
 			const struct rte_flow_action_queue *queue =
 				(const struct rte_flow_action_queue *)
 				actions->conf;
+			uint16_t n;
+			uint16_t found = 0;
 
 			if (!queue || (queue->index > (priv->rxqs_n - 1)))
 				goto exit_action_not_supported;
+			for (n = 0; n < action.queues_n; ++n) {
+				if (action.queues[n] == queue->index) {
+					found = 1;
+					break;
+				}
+			}
+			if (action.queues_n && !found) {
+				rte_flow_error_set(error, ENOTSUP,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions,
+					   "queue action not in RSS queues");
+				return -rte_errno;
+			}
 			action.queue = 1;
+			action.queues_n = 1;
+			action.queues[0] = queue->index;
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
+			const struct rte_flow_action_rss *rss =
+				(const struct rte_flow_action_rss *)
+				actions->conf;
+			uint16_t n;
+
+			if (action.queues_n == 1) {
+				uint16_t found = 0;
+
+				assert(action.queues_n);
+				for (n = 0; n < rss->num; ++n) {
+					if (action.queues[0] == rss->queue[n]) {
+						found = 1;
+						break;
+					}
+				}
+				if (!found) {
+					rte_flow_error_set(error, ENOTSUP,
+						   RTE_FLOW_ERROR_TYPE_ACTION,
+						   actions,
+						   "queue action not in RSS"
+						   " queues");
+					return -rte_errno;
+				}
+			}
+			action.queue = 1;
+			for (n = 0; n < rss->num; ++n)
+				action.queues[n] = rss->queue[n];
+			action.queues_n = rss->num;
 		} else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
 			const struct rte_flow_action_mark *mark =
 				(const struct rte_flow_action_mark *)
@@ -476,6 +539,8 @@ priv_flow_validate(struct priv *priv,
 				return -rte_errno;
 			}
 			action.mark = 1;
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
+			action.mark = 1;
 		} else {
 			goto exit_action_not_supported;
 		}
@@ -545,6 +610,7 @@ mlx5_flow_create_eth(const struct rte_flow_item *item,
 
 	++flow->ibv_attr->num_of_specs;
 	flow->ibv_attr->priority = 2;
+	flow->hash_fields = 0;
 	eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
 	*eth = (struct ibv_exp_flow_spec_eth) {
 		.type = flow->inner | IBV_EXP_FLOW_SPEC_ETH,
@@ -624,6 +690,8 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item,
 
 	++flow->ibv_attr->num_of_specs;
 	flow->ibv_attr->priority = 1;
+	flow->hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
+			     IBV_EXP_RX_HASH_DST_IPV4);
 	ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
 	*ipv4 = (struct ibv_exp_flow_spec_ipv4_ext) {
 		.type = flow->inner | IBV_EXP_FLOW_SPEC_IPV4_EXT,
@@ -676,6 +744,8 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item,
 
 	++flow->ibv_attr->num_of_specs;
 	flow->ibv_attr->priority = 1;
+	flow->hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
+			     IBV_EXP_RX_HASH_DST_IPV6);
 	ipv6 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
 	*ipv6 = (struct ibv_exp_flow_spec_ipv6_ext) {
 		.type = flow->inner | IBV_EXP_FLOW_SPEC_IPV6_EXT,
@@ -725,6 +795,8 @@ mlx5_flow_create_udp(const struct rte_flow_item *item,
 
 	++flow->ibv_attr->num_of_specs;
 	flow->ibv_attr->priority = 0;
+	flow->hash_fields |= (IBV_EXP_RX_HASH_SRC_PORT_UDP |
+			      IBV_EXP_RX_HASH_DST_PORT_UDP);
 	udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
 	*udp = (struct ibv_exp_flow_spec_tcp_udp) {
 		.type = flow->inner | IBV_EXP_FLOW_SPEC_UDP,
@@ -767,6 +839,8 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item,
 
 	++flow->ibv_attr->num_of_specs;
 	flow->ibv_attr->priority = 0;
+	flow->hash_fields |= (IBV_EXP_RX_HASH_SRC_PORT_TCP |
+			      IBV_EXP_RX_HASH_DST_PORT_TCP);
 	tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
 	*tcp = (struct ibv_exp_flow_spec_tcp_udp) {
 		.type = flow->inner | IBV_EXP_FLOW_SPEC_TCP,
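Note how the hash fields accumulate across layers: the L3 parsers assign the base mask (=), while the L4 parsers OR in the port bits (|=). As an illustration (not part of the patch), the value that ends up in rx_hash_fields_mask for an eth/ipv4/udp pattern would be:

	#include <infiniband/verbs_exp.h>

	/* Mask accumulated by mlx5_flow_create_ipv4() followed by
	 * mlx5_flow_create_udp(), i.e. a 4-tuple hash. */
	static const uint64_t ipv4_udp_hash_fields =
		IBV_EXP_RX_HASH_SRC_IPV4 | IBV_EXP_RX_HASH_DST_IPV4 |
		IBV_EXP_RX_HASH_SRC_PORT_UDP | IBV_EXP_RX_HASH_DST_PORT_UDP;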
@@ -857,13 +931,60 @@ mlx5_flow_create_flag_mark(struct mlx5_flow *flow, uint32_t mark_id)
 	return 0;
 }
 
+/**
+ * Complete flow rule creation with a drop queue.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param flow
+ *   MLX5 flow attributes (filled by priv_flow_validate()).
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   A flow if the rule could be created, NULL otherwise.
+ */
+static struct rte_flow *
+priv_flow_create_action_queue_drop(struct priv *priv,
+				   struct mlx5_flow *flow,
+				   struct rte_flow_error *error)
+{
+	struct rte_flow *rte_flow;
+
+	assert(priv->pd);
+	assert(priv->ctx);
+	rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
+	if (!rte_flow) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "cannot allocate flow memory");
+		return NULL;
+	}
+	rte_flow->drop = 1;
+	rte_flow->ibv_attr = flow->ibv_attr;
+	rte_flow->qp = priv->flow_drop_queue->qp;
+	if (!priv->started)
+		return rte_flow;
+	rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
+						 rte_flow->ibv_attr);
+	if (!rte_flow->ibv_flow) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "flow rule creation failure");
+		goto error;
+	}
+	return rte_flow;
+error:
+	assert(rte_flow);
+	rte_free(rte_flow);
+	return NULL;
+}
+
 /**
  * Complete flow rule creation.
  *
  * @param priv
  *   Pointer to private structure.
- * @param ibv_attr
- *   Verbs flow attributes.
+ * @param flow
+ *   MLX5 flow attributes (filled by priv_flow_validate()).
  * @param action
  *   Target action structure.
  * @param[out] error
@@ -874,62 +995,55 @@ mlx5_flow_create_flag_mark(struct mlx5_flow *flow, uint32_t mark_id)
  */
 static struct rte_flow *
 priv_flow_create_action_queue(struct priv *priv,
-			      struct ibv_exp_flow_attr *ibv_attr,
+			      struct mlx5_flow *flow,
 			      struct mlx5_flow_action *action,
 			      struct rte_flow_error *error)
 {
-	struct rxq_ctrl *rxq;
 	struct rte_flow *rte_flow;
+	unsigned int i;
+	unsigned int j;
+	const unsigned int wqs_n = 1 << log2above(action->queues_n);
+	struct ibv_exp_wq *wqs[wqs_n];
 
 	assert(priv->pd);
 	assert(priv->ctx);
-	rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
+	assert(!action->drop);
+	rte_flow = rte_calloc(__func__, 1,
+			      sizeof(*rte_flow) + sizeof(struct rxq *) *
+			      action->queues_n, 0);
 	if (!rte_flow) {
 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
 				   NULL, "cannot allocate flow memory");
 		return NULL;
 	}
-	if (action->drop) {
-		rte_flow->cq =
-			ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
-					  &(struct ibv_exp_cq_init_attr){
-						  .comp_mask = 0,
-					  });
-		if (!rte_flow->cq) {
-			rte_flow_error_set(error, ENOMEM,
-					   RTE_FLOW_ERROR_TYPE_HANDLE,
-					   NULL, "cannot allocate CQ");
-			goto error;
-		}
-		rte_flow->wq = ibv_exp_create_wq(priv->ctx,
-						 &(struct ibv_exp_wq_init_attr){
-						 .wq_type = IBV_EXP_WQT_RQ,
-						 .max_recv_wr = 1,
-						 .max_recv_sge = 1,
-						 .pd = priv->pd,
-						 .cq = rte_flow->cq,
-						 });
-		if (!rte_flow->wq) {
-			rte_flow_error_set(error, ENOMEM,
-					   RTE_FLOW_ERROR_TYPE_HANDLE,
-					   NULL, "cannot allocate WQ");
-			goto error;
-		}
-	} else {
-		rxq = container_of((*priv->rxqs)[action->queue_id],
+	rte_flow->rxqs = (struct rxq *(*)[])((uintptr_t)rte_flow +
+					     sizeof(*rte_flow));
+	for (i = 0; i < action->queues_n; ++i) {
+		struct rxq_ctrl *rxq;
+
+		rxq = container_of((*priv->rxqs)[action->queues[i]],
 				   struct rxq_ctrl, rxq);
-		rte_flow->rxq = &rxq->rxq;
+		wqs[i] = rxq->wq;
+		(*rte_flow->rxqs)[i] = &rxq->rxq;
+		++rte_flow->rxqs_n;
 		rxq->rxq.mark |= action->mark;
-		rte_flow->wq = rxq->wq;
+	}
+	/* Finalise the indirection table. */
+	for (j = 0; i < wqs_n; ++i, ++j) {
+		wqs[i] = wqs[j];
+		if (j == action->queues_n)
+			j = 0;
 	}
 	rte_flow->mark = action->mark;
-	rte_flow->ibv_attr = ibv_attr;
+	rte_flow->ibv_attr = flow->ibv_attr;
+	rte_flow->hash_fields = flow->hash_fields;
 	rte_flow->ind_table = ibv_exp_create_rwq_ind_table(
 		priv->ctx,
 		&(struct ibv_exp_rwq_ind_table_init_attr){
 			.pd = priv->pd,
-			.log_ind_tbl_size = 0,
-			.ind_tbl = &rte_flow->wq,
+			.log_ind_tbl_size = log2above(action->queues_n),
+			.ind_tbl = wqs,
 			.comp_mask = 0,
 		});
 	if (!rte_flow->ind_table) {
@@ -951,7 +1065,7 @@ priv_flow_create_action_queue(struct priv *priv,
 					IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
 				.rx_hash_key_len = rss_hash_default_key_len,
 				.rx_hash_key = rss_hash_default_key,
-				.rx_hash_fields_mask = 0,
+				.rx_hash_fields_mask = rte_flow->hash_fields,
 				.rwq_ind_tbl = rte_flow->ind_table,
 			},
 			.port_num = priv->port,
@@ -977,10 +1091,6 @@ error:
 		ibv_destroy_qp(rte_flow->qp);
 	if (rte_flow->ind_table)
 		ibv_exp_destroy_rwq_ind_table(rte_flow->ind_table);
-	if (!rte_flow->rxq && rte_flow->wq)
-		ibv_exp_destroy_wq(rte_flow->wq);
-	if (!rte_flow->rxq && rte_flow->cq)
-		ibv_destroy_cq(rte_flow->cq);
 	rte_free(rte_flow);
 	return NULL;
 }
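The indirection table length must be a power of two (log_ind_tbl_size is a log2), so the loop above pads the WQ array by wrapping around the configured queues. A standalone sketch of the same padding, assuming a log2above() that returns the smallest k with (1 << k) >= n (the real helper lives elsewhere in the driver):

	#include <stdio.h>

	/* Assumed helper: ceil(log2(n)). */
	static unsigned int
	log2above(unsigned int n)
	{
		unsigned int k = 0;

		while ((1u << k) < n)
			++k;
		return k;
	}

	int
	main(void)
	{
		const unsigned int queues_n = 3; /* e.g. RSS over queues 0..2 */
		const unsigned int wqs_n = 1u << log2above(queues_n); /* -> 4 */
		unsigned int wqs[8];
		unsigned int i, j;

		for (i = 0; i < queues_n; ++i)
			wqs[i] = i; /* stand-ins for the ibv_exp_wq pointers */
		/* Pad to a power of two by cycling over the real entries. */
		for (j = 0; i < wqs_n; ++i, ++j) {
			wqs[i] = wqs[j];
			if (j == queues_n)
				j = 0;
		}
		for (i = 0; i < wqs_n; ++i)
			printf("ind_tbl[%u] = WQ%u\n", i, wqs[i]);
		return 0;
	}

For three queues this prints WQ0 WQ1 WQ2 WQ0: the first queue appears twice in the four-entry table, so it receives a proportionally larger share of the hashed traffic.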
@@ -1034,6 +1144,7 @@ priv_flow_create(struct priv *priv,
 		.reserved = 0,
 	};
 	flow.inner = 0;
+	flow.hash_fields = 0;
 	claim_zero(priv_flow_validate(priv, attr, items, actions,
 				      error, &flow));
 	action = (struct mlx5_flow_action){
@@ -1047,9 +1158,19 @@ priv_flow_create(struct priv *priv,
 			continue;
 		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
 			action.queue = 1;
-			action.queue_id =
+			action.queues[action.queues_n++] =
 				((const struct rte_flow_action_queue *)
 				 actions->conf)->index;
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
+			const struct rte_flow_action_rss *rss =
+				(const struct rte_flow_action_rss *)
+				 actions->conf;
+			uint16_t n;
+
+			action.queue = 1;
+			action.queues_n = rss->num;
+			for (n = 0; n < rss->num; ++n)
+				action.queues[n] = rss->queue[n];
 		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
 			action.drop = 1;
 			action.mark = 0;
@@ -1061,6 +1182,8 @@ priv_flow_create(struct priv *priv,
 			if (mark)
 				action.mark_id = mark->id;
 			action.mark = !action.drop;
+		} else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
+			action.mark = 1;
 		} else {
 			rte_flow_error_set(error, ENOTSUP,
 					   RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1072,8 +1195,12 @@ priv_flow_create(struct priv *priv,
 		mlx5_flow_create_flag_mark(&flow, action.mark_id);
 		flow.offset += sizeof(struct ibv_exp_flow_spec_action_tag);
 	}
-	rte_flow = priv_flow_create_action_queue(priv, flow.ibv_attr,
-						 &action, error);
+	if (action.drop)
+		rte_flow =
+			priv_flow_create_action_queue_drop(priv, &flow, error);
+	else
+		rte_flow = priv_flow_create_action_queue(priv, &flow, &action,
+							 error);
 	if (!rte_flow)
 		goto exit;
 	return rte_flow;
@@ -1124,26 +1251,49 @@ priv_flow_destroy(struct priv *priv,
 	LIST_REMOVE(flow, next);
 	if (flow->ibv_flow)
 		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
+	if (flow->drop)
+		goto free;
 	if (flow->qp)
 		claim_zero(ibv_destroy_qp(flow->qp));
 	if (flow->ind_table)
 		claim_zero(ibv_exp_destroy_rwq_ind_table(flow->ind_table));
-	if (!flow->rxq && flow->wq)
-		claim_zero(ibv_exp_destroy_wq(flow->wq));
-	if (!flow->rxq && flow->cq)
-		claim_zero(ibv_destroy_cq(flow->cq));
 	if (flow->mark) {
 		struct rte_flow *tmp;
+		struct rxq *rxq;
 		uint32_t mark_n = 0;
-
-		for (tmp = LIST_FIRST(&priv->flows);
-		     tmp;
-		     tmp = LIST_NEXT(tmp, next)) {
-			if ((flow->rxq == tmp->rxq) && tmp->mark)
-				++mark_n;
+		uint32_t queue_n;
+
+		/*
+		 * To remove the mark from a queue, the queue must not be
+		 * present in any other marked flow (RSS or not).
+		 */
+		for (queue_n = 0; queue_n < flow->rxqs_n; ++queue_n) {
+			mark_n = 0;
+			rxq = (*flow->rxqs)[queue_n];
+			for (tmp = LIST_FIRST(&priv->flows);
+			     tmp;
+			     tmp = LIST_NEXT(tmp, next)) {
+				uint32_t tqueue_n;
+
+				if (tmp->drop || !tmp->mark)
+					continue;
+				for (tqueue_n = 0;
+				     tqueue_n < tmp->rxqs_n;
+				     ++tqueue_n) {
+					struct rxq *trxq;
+
+					trxq = (*tmp->rxqs)[tqueue_n];
+					if (rxq == trxq)
+						++mark_n;
+				}
+			}
+			rxq->mark = !!mark_n;
 		}
-		flow->rxq->mark = !!mark_n;
 	}
+free:
 	rte_free(flow->ibv_attr);
 	DEBUG("Flow destroyed %p", (void *)flow);
 	rte_free(flow);
@@ -1205,6 +1355,125 @@ mlx5_flow_flush(struct rte_eth_dev *dev,
 	return 0;
 }
 
+/**
+ * Create drop queue.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   0 on success, -1 on error.
+ */
+static int
+priv_flow_create_drop_queue(struct priv *priv)
+{
+	struct rte_flow_drop *fdq = NULL;
+	unsigned int i;
+
+	assert(priv->pd);
+	assert(priv->ctx);
+	fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
+	if (!fdq) {
+		WARN("cannot allocate memory for drop queue");
+		goto error;
+	}
+	fdq->cq = ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
+			&(struct ibv_exp_cq_init_attr){
+				.comp_mask = 0,
+			});
+	if (!fdq->cq) {
+		WARN("cannot allocate CQ for drop queue");
+		goto error;
+	}
+	for (i = 0; i != MLX5_DROP_WQ_N; ++i) {
+		fdq->wqs[i] = ibv_exp_create_wq(priv->ctx,
+				&(struct ibv_exp_wq_init_attr){
+					.wq_type = IBV_EXP_WQT_RQ,
+					.max_recv_wr = 1,
+					.max_recv_sge = 1,
+					.pd = priv->pd,
+					.cq = fdq->cq,
+				});
+		if (!fdq->wqs[i]) {
+			WARN("cannot allocate WQ for drop queue");
+			goto error;
+		}
+	}
+	fdq->ind_table = ibv_exp_create_rwq_ind_table(priv->ctx,
+			&(struct ibv_exp_rwq_ind_table_init_attr){
+				.pd = priv->pd,
+				.log_ind_tbl_size = 0,
+				.ind_tbl = fdq->wqs,
+				.comp_mask = 0,
+			});
+	if (!fdq->ind_table) {
+		WARN("cannot allocate indirection table for drop queue");
+		goto error;
+	}
+	fdq->qp = ibv_exp_create_qp(priv->ctx,
+		&(struct ibv_exp_qp_init_attr){
+			.qp_type = IBV_QPT_RAW_PACKET,
+			.comp_mask =
+				IBV_EXP_QP_INIT_ATTR_PD |
+				IBV_EXP_QP_INIT_ATTR_PORT |
+				IBV_EXP_QP_INIT_ATTR_RX_HASH,
+			.pd = priv->pd,
+			.rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
+				.rx_hash_function =
+					IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
+				.rx_hash_key_len = rss_hash_default_key_len,
+				.rx_hash_key = rss_hash_default_key,
+				.rx_hash_fields_mask = 0,
+				.rwq_ind_tbl = fdq->ind_table,
+			},
+			.port_num = priv->port,
+		});
+	if (!fdq->qp) {
+		WARN("cannot allocate QP for drop queue");
+		goto error;
+	}
+	priv->flow_drop_queue = fdq;
+	return 0;
+error:
+	if (fdq) {
+		if (fdq->qp)
+			claim_zero(ibv_destroy_qp(fdq->qp));
+		if (fdq->ind_table)
+			claim_zero(ibv_exp_destroy_rwq_ind_table
+				   (fdq->ind_table));
+		for (i = 0; i != MLX5_DROP_WQ_N; ++i) {
+			if (fdq->wqs[i])
+				claim_zero(ibv_exp_destroy_wq(fdq->wqs[i]));
+		}
+		if (fdq->cq)
+			claim_zero(ibv_destroy_cq(fdq->cq));
+		rte_free(fdq);
+	}
+	priv->flow_drop_queue = NULL;
+	return -1;
+}
+
+/**
+ * Delete drop queue.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ */
+static void
+priv_flow_delete_drop_queue(struct priv *priv)
+{
+	struct rte_flow_drop *fdq = priv->flow_drop_queue;
+	unsigned int i;
+
+	claim_zero(ibv_destroy_qp(fdq->qp));
+	claim_zero(ibv_exp_destroy_rwq_ind_table(fdq->ind_table));
+	for (i = 0; i != MLX5_DROP_WQ_N; ++i) {
+		assert(fdq->wqs[i]);
+		claim_zero(ibv_exp_destroy_wq(fdq->wqs[i]));
+	}
+	claim_zero(ibv_destroy_cq(fdq->cq));
+	rte_free(fdq);
+	priv->flow_drop_queue = NULL;
+}
+
 /**
  * Remove all flows.
  *
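The mark bookkeeping above exists because MARK and the new FLAG action surface in the received mbufs. An illustrative RX-side check (not part of the patch), assuming the PKT_RX_FDIR / PKT_RX_FDIR_ID ol_flags and hash.fdir.hi field used by this DPDK generation:

	#include <stdio.h>
	#include <inttypes.h>
	#include <rte_mbuf.h>

	/* Hypothetical helper: report whether a packet matched a rule with
	 * a FLAG action (flag only) or a MARK action (flag plus an id). */
	static void
	inspect_mark(const struct rte_mbuf *m)
	{
		if (!(m->ol_flags & PKT_RX_FDIR))
			return;
		if (m->ol_flags & PKT_RX_FDIR_ID)
			printf("marked, id=%" PRIu32 "\n", m->hash.fdir.hi);
		else
			printf("flagged by a FLAG action\n");
	}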
@@ -1223,10 +1492,15 @@ priv_flow_stop(struct priv *priv)
 	     flow = LIST_NEXT(flow, next)) {
 		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
 		flow->ibv_flow = NULL;
-		if (flow->mark)
-			flow->rxq->mark = 0;
+		if (flow->mark) {
+			unsigned int n;
+
+			for (n = 0; n < flow->rxqs_n; ++n)
+				(*flow->rxqs)[n]->mark = 0;
+		}
 		DEBUG("Flow %p removed", (void *)flow);
 	}
+	priv_flow_delete_drop_queue(priv);
 }
 
 /**
@@ -1241,21 +1515,34 @@
 int
 priv_flow_start(struct priv *priv)
 {
+	int ret;
 	struct rte_flow *flow;
 
+	ret = priv_flow_create_drop_queue(priv);
+	if (ret)
+		return -1;
 	for (flow = LIST_FIRST(&priv->flows);
 	     flow;
 	     flow = LIST_NEXT(flow, next)) {
-		flow->ibv_flow = ibv_exp_create_flow(flow->qp,
-						     flow->ibv_attr);
+		struct ibv_qp *qp;
+
+		if (flow->drop)
+			qp = priv->flow_drop_queue->qp;
+		else
+			qp = flow->qp;
+		flow->ibv_flow = ibv_exp_create_flow(qp, flow->ibv_attr);
 		if (!flow->ibv_flow) {
 			DEBUG("Flow %p cannot be applied", (void *)flow);
 			rte_errno = EINVAL;
 			return rte_errno;
 		}
 		DEBUG("Flow %p applied", (void *)flow);
-		if (flow->rxq)
-			flow->rxq->mark |= flow->mark;
+		if (flow->mark) {
+			unsigned int n;
+
+			for (n = 0; n < flow->rxqs_n; ++n)
+				(*flow->rxqs)[n]->mark = 1;
+		}
 	}
 	return 0;
 }
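With this change in place, one rule can spread traffic over several receive queues. A usage sketch (illustrative, not part of the patch), assuming a started port and the rte_flow_action_rss layout (num plus a flexible queue[] array) this code programs against; setup_rss_rule() is a hypothetical name:

	#include <rte_flow.h>

	/* Match all IPv4/UDP traffic and RSS it over queues 0-2. */
	static struct rte_flow *
	setup_rss_rule(uint8_t port_id, struct rte_flow_error *err)
	{
		static const struct rte_flow_attr attr = { .ingress = 1 };
		static const struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
			{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
			{ .type = RTE_FLOW_ITEM_TYPE_UDP },
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		/* The action ends in a flexible queue[] array, so reserve
		 * room for the header plus three queue indexes. */
		static union {
			struct rte_flow_action_rss rss;
			uint8_t raw[sizeof(struct rte_flow_action_rss) +
				    3 * sizeof(uint16_t)];
		} conf;
		const struct rte_flow_action actions[] = {
			{
				.type = RTE_FLOW_ACTION_TYPE_RSS,
				.conf = &conf.rss,
			},
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};
		uint16_t i;

		conf.rss.num = 3;
		for (i = 0; i != conf.rss.num; ++i)
			conf.rss.queue[i] = i; /* queues 0, 1 and 2 */
		return rte_flow_create(port_id, &attr, pattern, actions, err);
	}

Since three is not a power of two, the driver would replicate queue 0 into the fourth indirection-table slot, as shown in the padding sketch earlier.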