+/**
+ * Destroy all flows.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
+ */
+void
+priv_flow_flush(struct priv *priv, struct mlx5_flows *list)
+{
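+	/*
+	 * priv_flow_destroy() unlinks the flow it is given from the list,
+	 * so this loop drains the list one flow at a time.
+	 */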
+ while (!TAILQ_EMPTY(list)) {
+ struct rte_flow *flow;
+
+ flow = TAILQ_FIRST(list);
+ priv_flow_destroy(priv, list, flow);
+ }
+}
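+
+/*
+ * Usage sketch (illustrative only, not part of this patch): on close,
+ * both the regular and control flow lists are expected to be flushed,
+ * after which priv_flow_verify() below should report no leftover flows.
+ *
+ *	priv_flow_flush(priv, &priv->flows);
+ *	priv_flow_flush(priv, &priv->ctrl_flows);
+ *	assert(!priv_flow_verify(priv));
+ */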
+
+/**
+ * Create drop queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ *   0 on success, -1 on failure.
+ */
+int
+priv_flow_create_drop_queue(struct priv *priv)
+{
+ struct mlx5_hrxq_drop *fdq = NULL;
+
+ assert(priv->pd);
+ assert(priv->ctx);
+ fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
+ if (!fdq) {
+ WARN("cannot allocate memory for drop queue");
+ goto error;
+ }
+ fdq->cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
+ if (!fdq->cq) {
+ WARN("cannot allocate CQ for drop queue");
+ goto error;
+ }
+ fdq->wq = ibv_create_wq(priv->ctx,
+ &(struct ibv_wq_init_attr){
+ .wq_type = IBV_WQT_RQ,
+ .max_wr = 1,
+ .max_sge = 1,
+ .pd = priv->pd,
+ .cq = fdq->cq,
+ });
+ if (!fdq->wq) {
+ WARN("cannot allocate WQ for drop queue");
+ goto error;
+ }
+ fdq->ind_table = ibv_create_rwq_ind_table(priv->ctx,
+ &(struct ibv_rwq_ind_table_init_attr){
+ .log_ind_tbl_size = 0,
+ .ind_tbl = &fdq->wq,
+ .comp_mask = 0,
+ });
+ if (!fdq->ind_table) {
+ WARN("cannot allocate indirection table for drop queue");
+ goto error;
+ }
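+	/*
+	 * The QP below hashes over no fields (rx_hash_fields_mask is 0),
+	 * so every packet reaching it lands in the single WQ above; since
+	 * no receive buffers are ever posted there, packets are dropped.
+	 */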
+ fdq->qp = ibv_create_qp_ex(priv->ctx,
+ &(struct ibv_qp_init_attr_ex){
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .comp_mask =
+ IBV_QP_INIT_ATTR_PD |
+ IBV_QP_INIT_ATTR_IND_TABLE |
+ IBV_QP_INIT_ATTR_RX_HASH,
+ .rx_hash_conf = (struct ibv_rx_hash_conf){
+ .rx_hash_function =
+ IBV_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = rss_hash_default_key_len,
+ .rx_hash_key = rss_hash_default_key,
+ .rx_hash_fields_mask = 0,
+ },
+ .rwq_ind_tbl = fdq->ind_table,
+ .pd = priv->pd
+ });
+ if (!fdq->qp) {
+ WARN("cannot allocate QP for drop queue");
+ goto error;
+ }
+ priv->flow_drop_queue = fdq;
+ return 0;
+error:
+	/* fdq is NULL when the initial allocation fails. */
+	if (fdq) {
+		if (fdq->qp)
+			claim_zero(ibv_destroy_qp(fdq->qp));
+		if (fdq->ind_table)
+			claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
+		if (fdq->wq)
+			claim_zero(ibv_destroy_wq(fdq->wq));
+		if (fdq->cq)
+			claim_zero(ibv_destroy_cq(fdq->cq));
+		rte_free(fdq);
+	}
+	priv->flow_drop_queue = NULL;
+	return -1;
+}
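+
+/*
+ * Lifecycle sketch (illustrative): the verbs objects above are created in
+ * dependency order (CQ, WQ, indirection table, QP), and the drop queue
+ * must exist before any flow with a drop action is applied:
+ *
+ *	if (priv_flow_create_drop_queue(priv))
+ *		return -1;
+ *	if (priv_flow_start(priv, &priv->flows)) {
+ *		priv_flow_delete_drop_queue(priv);
+ *		return -1;
+ *	}
+ */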
+
+/**
+ * Delete drop queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+void
+priv_flow_delete_drop_queue(struct priv *priv)
+{
+ struct mlx5_hrxq_drop *fdq = priv->flow_drop_queue;
+
+ if (!fdq)
+ return;
+ if (fdq->qp)
+ claim_zero(ibv_destroy_qp(fdq->qp));
+ if (fdq->ind_table)
+ claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
+ if (fdq->wq)
+ claim_zero(ibv_destroy_wq(fdq->wq));
+ if (fdq->cq)
+ claim_zero(ibv_destroy_cq(fdq->cq));
+ rte_free(fdq);
+ priv->flow_drop_queue = NULL;
+}
+
+/**
+ * Remove all flows from the NIC.
+ *
+ * The verbs objects are released but the flows stay in the list so that
+ * priv_flow_start() can re-apply them later.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
+ */
+void
+priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
+{
+ struct rte_flow *flow;
+
+ TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
+ unsigned int i;
+
+ if (flow->drop) {
+ if (!flow->frxq[HASH_RXQ_ETH].ibv_flow)
+ continue;
+ claim_zero(ibv_destroy_flow
+ (flow->frxq[HASH_RXQ_ETH].ibv_flow));
+ flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL;
+ /* Next flow. */
+ continue;
+ }
+		if (flow->mark) {
+			struct mlx5_ind_table_ibv *ind_tbl = NULL;
+
+			/*
+			 * Every hash Rx queue of this flow is built on the
+			 * same queue list, hence shares the same indirection
+			 * table; any of them can provide it.
+			 */
+			for (i = 0; i != hash_rxq_init_n; ++i) {
+				if (!flow->frxq[i].hrxq)
+					continue;
+				ind_tbl = flow->frxq[i].hrxq->ind_table;
+				break;
+			}
+			assert(ind_tbl);
+			for (i = 0; i != ind_tbl->queues_n; ++i)
+				(*priv->rxqs)[ind_tbl->queues[i]]->mark = 0;
+		}
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (!flow->frxq[i].ibv_flow)
+ continue;
+ claim_zero(ibv_destroy_flow(flow->frxq[i].ibv_flow));
+ flow->frxq[i].ibv_flow = NULL;
+ mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq);
+ flow->frxq[i].hrxq = NULL;
+ }
+ DEBUG("Flow %p removed", (void *)flow);
+ }
+}
+
+/**
+ * Add all flows to the NIC.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
+ *
+ * @return
+ *   0 on success, a positive errno value otherwise and rte_errno is set.
+ */
+int
+priv_flow_start(struct priv *priv, struct mlx5_flows *list)
+{
+ struct rte_flow *flow;
+
+ TAILQ_FOREACH(flow, list, next) {
+ unsigned int i;
+
+ if (flow->drop) {
+ flow->frxq[HASH_RXQ_ETH].ibv_flow =
+ ibv_create_flow
+ (priv->flow_drop_queue->qp,
+ flow->frxq[HASH_RXQ_ETH].ibv_attr);
+ if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {
+ DEBUG("Flow %p cannot be applied",
+ (void *)flow);
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+ DEBUG("Flow %p applied", (void *)flow);
+ /* Next flow. */
+ continue;
+ }
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (!flow->frxq[i].ibv_attr)
+ continue;
+ flow->frxq[i].hrxq =
+ mlx5_priv_hrxq_get(priv, flow->rss_conf.rss_key,
+ flow->rss_conf.rss_key_len,
+ hash_rxq_init[i].hash_fields,
+ (*flow->queues),
+ flow->queues_n);
+ if (flow->frxq[i].hrxq)
+ goto flow_create;
+ flow->frxq[i].hrxq =
+ mlx5_priv_hrxq_new(priv, flow->rss_conf.rss_key,
+ flow->rss_conf.rss_key_len,
+ hash_rxq_init[i].hash_fields,
+ (*flow->queues),
+ flow->queues_n);
+ if (!flow->frxq[i].hrxq) {
+ DEBUG("Flow %p cannot be applied",
+ (void *)flow);
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+flow_create:
+ flow->frxq[i].ibv_flow =
+ ibv_create_flow(flow->frxq[i].hrxq->qp,
+ flow->frxq[i].ibv_attr);
+ if (!flow->frxq[i].ibv_flow) {
+ DEBUG("Flow %p cannot be applied",
+ (void *)flow);
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+ DEBUG("Flow %p applied", (void *)flow);
+ }
+ if (!flow->mark)
+ continue;
+ for (i = 0; i != flow->queues_n; ++i)
+ (*priv->rxqs)[(*flow->queues)[i]]->mark = 1;
+ }
+ return 0;
+}
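+
+/*
+ * Restart sketch (illustrative, error policy is an assumption): since
+ * priv_flow_stop() keeps the verbs flow attributes, the same list can be
+ * re-applied once the Rx queues have been reconfigured:
+ *
+ *	priv_flow_stop(priv, &priv->flows);
+ *	(reconfigure Rx queues)
+ *	if (priv_flow_start(priv, &priv->flows))
+ *		return rte_errno;
+ */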
+
+/**
+ * Verify the flow list is empty.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   The number of flows not released.
+ */
+int
+priv_flow_verify(struct priv *priv)
+{
+ struct rte_flow *flow;
+ int ret = 0;
+
+ TAILQ_FOREACH(flow, &priv->flows, next) {
+ DEBUG("%p: flow %p still referenced", (void *)priv,
+ (void *)flow);
+ ++ret;
+ }
+ return ret;
+}
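+
+/*
+ * Usage sketch (illustrative): meant for the close path, after both flow
+ * lists have been flushed, to report leaks.
+ *
+ *	if (priv_flow_verify(priv))
+ *		WARN("%p: some flows are still referenced", (void *)priv);
+ */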
+
+/**
+ * Enable a control flow configured from the control plane.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param eth_spec
+ * An Ethernet flow spec to apply.
+ * @param eth_mask
+ * An Ethernet flow mask to apply.
+ * @param vlan_spec
+ * A VLAN flow spec to apply.
+ * @param vlan_mask
+ * A VLAN flow mask to apply.
+ *
+ * @return
+ *   0 on success, a positive errno value otherwise.
+ */
+int
+mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
+ struct rte_flow_item_eth *eth_spec,
+ struct rte_flow_item_eth *eth_mask,
+ struct rte_flow_item_vlan *vlan_spec,
+ struct rte_flow_item_vlan *vlan_mask)
+{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_flow_attr attr = {
+ .ingress = 1,
+ .priority = MLX5_CTRL_FLOW_PRIORITY,
+ };
+ struct rte_flow_item items[] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = eth_spec,
+ .last = NULL,
+ .mask = eth_mask,
+ },
+ {
+ .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
+ RTE_FLOW_ITEM_TYPE_END,
+ .spec = vlan_spec,
+ .last = NULL,
+ .mask = vlan_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ struct rte_flow_action actions[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_RSS,
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
+ struct rte_flow *flow;
+ struct rte_flow_error error;
+ unsigned int i;
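+	/*
+	 * struct rte_flow_action_rss ends in a flexible queue[] member that
+	 * cannot be given storage directly; "local" mirrors its layout and
+	 * reserves room for the queue indices.
+	 */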
+ union {
+ struct rte_flow_action_rss rss;
+ struct {
+ const struct rte_eth_rss_conf *rss_conf;
+ uint16_t num;
+ uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
+ } local;
+ } action_rss;
+
+ if (!priv->reta_idx_n)
+ return EINVAL;
+ for (i = 0; i != priv->reta_idx_n; ++i)
+ action_rss.local.queue[i] = (*priv->reta_idx)[i];
+ action_rss.local.rss_conf = &priv->rss_conf;
+ action_rss.local.num = priv->reta_idx_n;
+ actions[0].conf = (const void *)&action_rss.rss;
+ flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items, actions,
+ &error);
+ if (!flow)
+ return rte_errno;
+ return 0;
+}
+
+/**
+ * Enable a control flow configured from the control plane.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param eth_spec
+ * An Ethernet flow spec to apply.
+ * @param eth_mask
+ * An Ethernet flow mask to apply.
+ *
+ * @return
+ *   0 on success, a positive errno value otherwise.
+ */
+int
+mlx5_ctrl_flow(struct rte_eth_dev *dev,
+ struct rte_flow_item_eth *eth_spec,
+ struct rte_flow_item_eth *eth_mask)
+{
+ return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
+}
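+
+/*
+ * Example (sketch): a traffic-restore routine could enable broadcast
+ * reception with a control flow; passing the same item as spec and mask
+ * requests an exact match on the address. The variable name and context
+ * are illustrative.
+ *
+ *	struct rte_flow_item_eth bcast = {
+ *		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ *	};
+ *
+ *	claim_zero(mlx5_ctrl_flow(dev, &bcast, &bcast));
+ */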
+