From: Vasily Philipov
Date: Wed, 5 Jul 2017 08:14:10 +0000 (+0300)
Subject: net/mlx4: refactor RSS parent queue allocation
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=ff00a0dc5600dbb0a29e4aa7fa4b078f98c7a360;p=dpdk.git

net/mlx4: refactor RSS parent queue allocation

A special "parent" queue must be allocated in addition to a group of
standard Rx queues for RSS to work. Outside of isolated mode, the PMD
does this automatically when applications request several Rx queues.

Since each configured flow rule with the RSS action may target a
different set of queues, the PMD must be able to dynamically allocate
several parent queues, one per RSS group. When isolated mode is
requested, the default RSS parent queue is not created at all.

Refactor RSS parent queue allocation (currently limited to a single
parent) in preparation for flow API RSS action support.

Signed-off-by: Vasily Philipov
Acked-by: Adrien Mazarguil
---

diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index cd828cb70b..e03ce5f03b 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -546,12 +546,92 @@ txq_cleanup(struct txq *txq);
 static int
 rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
-	  unsigned int socket, int inactive, const struct rte_eth_rxconf *conf,
-	  struct rte_mempool *mp);
+	  unsigned int socket, int inactive,
+	  const struct rte_eth_rxconf *conf,
+	  struct rte_mempool *mp, int children_n,
+	  struct rxq *rxq_parent);
 static void
 rxq_cleanup(struct rxq *rxq);
 
+/**
+ * Create RSS parent queue.
+ *
+ * The new parent is inserted in front of the list in the private structure.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param queues
+ *   Queues indices array, if NULL use all Rx queues.
+ * @param children_n
+ *   The number of entries in queues[].
+ *
+ * @return
+ *   Pointer to a parent rxq structure, NULL on failure.
+ */
+static struct rxq *
+priv_parent_create(struct priv *priv,
+		   uint16_t queues[],
+		   uint16_t children_n)
+{
+	int ret;
+	uint16_t i;
+	struct rxq *parent;
+
+	parent = rte_zmalloc("parent queue",
+			     sizeof(*parent),
+			     RTE_CACHE_LINE_SIZE);
+	if (!parent) {
+		ERROR("cannot allocate memory for RSS parent queue");
+		return NULL;
+	}
+	ret = rxq_setup(priv->dev, parent, 0, 0, 0,
+			NULL, NULL, children_n, NULL);
+	if (ret) {
+		rte_free(parent);
+		return NULL;
+	}
+	parent->rss.queues_n = children_n;
+	if (queues) {
+		for (i = 0; i < children_n; ++i)
+			parent->rss.queues[i] = queues[i];
+	} else {
+		/* the default RSS ring case */
+		assert(priv->rxqs_n == children_n);
+		for (i = 0; i < priv->rxqs_n; ++i)
+			parent->rss.queues[i] = i;
+	}
+	LIST_INSERT_HEAD(&priv->parents, parent, next);
+	return parent;
+}
+
+/**
+ * Clean up RX queue parent structure.
+ *
+ * @param parent
+ *   RX queue parent structure.
+ */
+void
+rxq_parent_cleanup(struct rxq *parent)
+{
+	LIST_REMOVE(parent, next);
+	rxq_cleanup(parent);
+	rte_free(parent);
+}
+
+/**
+ * Clean up parent structures from the parent list.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ */
+static void
+priv_parent_list_cleanup(struct priv *priv)
+{
+	while (!LIST_EMPTY(&priv->parents))
+		rxq_parent_cleanup(LIST_FIRST(&priv->parents));
+}
+
 /**
  * Ethernet device configuration.
* @@ -571,7 +651,6 @@ dev_configure(struct rte_eth_dev *dev) unsigned int rxqs_n = dev->data->nb_rx_queues; unsigned int txqs_n = dev->data->nb_tx_queues; unsigned int tmp; - int ret; priv->rxqs = (void *)dev->data->rx_queues; priv->txqs = (void *)dev->data->tx_queues; @@ -601,7 +680,7 @@ dev_configure(struct rte_eth_dev *dev) for (i = 0; (i != priv->rxqs_n); ++i) if ((*priv->rxqs)[i] != NULL) return EINVAL; - rxq_cleanup(&priv->rxq_parent); + priv_parent_list_cleanup(priv); priv->rss = 0; priv->rxqs_n = 0; } @@ -626,14 +705,16 @@ dev_configure(struct rte_eth_dev *dev) priv->rss = 1; tmp = priv->rxqs_n; priv->rxqs_n = rxqs_n; - ret = rxq_setup(dev, &priv->rxq_parent, 0, 0, 0, NULL, NULL); - if (!ret) + if (priv->isolated) { + priv->rss = 0; + return 0; + } + if (priv_parent_create(priv, NULL, priv->rxqs_n)) return 0; /* Failure, rollback. */ priv->rss = 0; priv->rxqs_n = tmp; - assert(ret > 0); - return ret; + return ENOMEM; } /** @@ -2517,7 +2598,7 @@ priv_mac_addr_del(struct priv *priv, unsigned int mac_index) if (!BITFIELD_ISSET(priv->mac_configured, mac_index)) return; if (priv->rss) { - rxq_mac_addr_del(&priv->rxq_parent, mac_index); + rxq_mac_addr_del(LIST_FIRST(&priv->parents), mac_index); goto end; } for (i = 0; (i != priv->dev->data->nb_rx_queues); ++i) @@ -2584,7 +2665,7 @@ priv_mac_addr_add(struct priv *priv, unsigned int mac_index, goto end; } if (priv->rss) { - ret = rxq_mac_addr_add(&priv->rxq_parent, mac_index); + ret = rxq_mac_addr_add(LIST_FIRST(&priv->parents), mac_index); if (ret) return ret; goto end; @@ -3354,15 +3435,18 @@ rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc, * Completion queue to associate with QP. * @param desc * Number of descriptors in QP (hint only). - * @param parent - * If nonzero, create a parent QP, otherwise a child. + * @param children_n + * If nonzero, a number of children for parent QP and zero for a child. + * @param rxq_parent + * Pointer for a parent in a child case, NULL otherwise. * * @return * QP pointer or NULL in case of error. */ static struct ibv_qp * rxq_setup_qp_rss(struct priv *priv, struct ibv_cq *cq, uint16_t desc, - int parent, struct ibv_exp_res_domain *rd) + int children_n, struct ibv_exp_res_domain *rd, + struct rxq *rxq_parent) { struct ibv_exp_qp_init_attr attr = { /* CQ to be associated with the send queue. */ @@ -3392,16 +3476,16 @@ rxq_setup_qp_rss(struct priv *priv, struct ibv_cq *cq, uint16_t desc, attr.max_inl_recv = priv->inl_recv_size, attr.comp_mask |= IBV_EXP_QP_INIT_ATTR_INL_RECV; #endif - if (parent) { + if (children_n > 0) { attr.qpg.qpg_type = IBV_EXP_QPG_PARENT; /* TSS isn't necessary. */ attr.qpg.parent_attrib.tss_child_count = 0; attr.qpg.parent_attrib.rss_child_count = - rte_align32pow2(priv->rxqs_n + 1) >> 1; + rte_align32pow2(children_n + 1) >> 1; DEBUG("initializing parent RSS queue"); } else { attr.qpg.qpg_type = IBV_EXP_QPG_CHILD_RX; - attr.qpg.qpg_parent = priv->rxq_parent.qp; + attr.qpg.qpg_parent = rxq_parent->qp; DEBUG("initializing child RSS queue"); } return ibv_exp_create_qp(priv->ctx, &attr); @@ -3437,13 +3521,7 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq) struct ibv_recv_wr *bad_wr; unsigned int mb_len; int err; - int parent = (rxq == &priv->rxq_parent); - if (parent) { - ERROR("%p: cannot rehash parent queue %p", - (void *)dev, (void *)rxq); - return EINVAL; - } mb_len = rte_pktmbuf_data_room_size(rxq->mp); DEBUG("%p: rehashing queue %p", (void *)dev, (void *)rxq); /* Number of descriptors and mbufs currently allocated. 
*/ @@ -3488,6 +3566,8 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq) } /* From now on, any failure will render the queue unusable. * Reinitialize QP. */ + if (!tmpl.qp) + goto skip_init; mod = (struct ibv_exp_qp_attr){ .qp_state = IBV_QPS_RESET }; err = ibv_exp_modify_qp(tmpl.qp, &mod, IBV_EXP_QP_STATE); if (err) { @@ -3495,12 +3575,6 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq) assert(err > 0); return err; } - err = ibv_resize_cq(tmpl.cq, desc_n); - if (err) { - ERROR("%p: cannot resize CQ: %s", (void *)dev, strerror(err)); - assert(err > 0); - return err; - } mod = (struct ibv_exp_qp_attr){ /* Move the QP to this state. */ .qp_state = IBV_QPS_INIT, @@ -3509,9 +3583,6 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq) }; err = ibv_exp_modify_qp(tmpl.qp, &mod, (IBV_EXP_QP_STATE | -#ifdef RSS_SUPPORT - (parent ? IBV_EXP_QP_GROUP_RSS : 0) | -#endif /* RSS_SUPPORT */ IBV_EXP_QP_PORT)); if (err) { ERROR("%p: QP state to IBV_QPS_INIT failed: %s", @@ -3519,6 +3590,13 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq) assert(err > 0); return err; }; +skip_init: + err = ibv_resize_cq(tmpl.cq, desc_n); + if (err) { + ERROR("%p: cannot resize CQ: %s", (void *)dev, strerror(err)); + assert(err > 0); + return err; + } /* Reconfigure flows. Do not care for errors. */ if (!priv->rss && !priv->isolated) { rxq_mac_addrs_add(&tmpl); @@ -3586,6 +3664,8 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq) rxq->elts_n = 0; rte_free(rxq->elts.sp); rxq->elts.sp = NULL; + if (!tmpl.qp) + goto skip_rtr; /* Post WRs. */ err = ibv_post_recv(tmpl.qp, (tmpl.sp ? @@ -3612,6 +3692,116 @@ skip_rtr: return err; } +/** + * Create verbs QP resources associated with a rxq. + * + * @param rxq + * Pointer to RX queue structure. + * @param desc + * Number of descriptors to configure in queue. + * @param inactive + * If true, the queue is disabled because its index is higher or + * equal to the real number of queues, which must be a power of 2. + * @param children_n + * The number of children in a parent case, zero for a child. + * @param rxq_parent + * The pointer to a parent RX structure for a child in RSS case, + * NULL for parent. + * + * @return + * 0 on success, errno value on failure. + */ +int +rxq_create_qp(struct rxq *rxq, + uint16_t desc, + int inactive, + int children_n, + struct rxq *rxq_parent) +{ + int ret; + struct ibv_exp_qp_attr mod; + struct ibv_exp_query_intf_params params; + enum ibv_exp_query_intf_status status; + struct ibv_recv_wr *bad_wr; + int parent = (children_n > 0); + struct priv *priv = rxq->priv; + +#ifdef RSS_SUPPORT + if (priv->rss && !inactive && (rxq_parent || parent)) + rxq->qp = rxq_setup_qp_rss(priv, rxq->cq, desc, + children_n, rxq->rd, + rxq_parent); + else +#endif /* RSS_SUPPORT */ + rxq->qp = rxq_setup_qp(priv, rxq->cq, desc, rxq->rd); + if (rxq->qp == NULL) { + ret = (errno ? errno : EINVAL); + ERROR("QP creation failure: %s", + strerror(ret)); + return ret; + } + mod = (struct ibv_exp_qp_attr){ + /* Move the QP to this state. */ + .qp_state = IBV_QPS_INIT, + /* Primary port number. */ + .port_num = priv->port + }; + ret = ibv_exp_modify_qp(rxq->qp, &mod, + (IBV_EXP_QP_STATE | +#ifdef RSS_SUPPORT + (parent ? IBV_EXP_QP_GROUP_RSS : 0) | +#endif /* RSS_SUPPORT */ + IBV_EXP_QP_PORT)); + if (ret) { + ERROR("QP state to IBV_QPS_INIT failed: %s", + strerror(ret)); + return ret; + } + if (!priv->isolated && (parent || !priv->rss)) { + /* Configure MAC and broadcast addresses. 
*/ + ret = rxq_mac_addrs_add(rxq); + if (ret) { + ERROR("QP flow attachment failed: %s", + strerror(ret)); + return ret; + } + } + if (!parent) { + ret = ibv_post_recv(rxq->qp, + (rxq->sp ? + &(*rxq->elts.sp)[0].wr : + &(*rxq->elts.no_sp)[0].wr), + &bad_wr); + if (ret) { + ERROR("ibv_post_recv() failed for WR %p: %s", + (void *)bad_wr, + strerror(ret)); + return ret; + } + } + mod = (struct ibv_exp_qp_attr){ + .qp_state = IBV_QPS_RTR + }; + ret = ibv_exp_modify_qp(rxq->qp, &mod, IBV_EXP_QP_STATE); + if (ret) { + ERROR("QP state to IBV_QPS_RTR failed: %s", + strerror(ret)); + return ret; + } + params = (struct ibv_exp_query_intf_params){ + .intf_scope = IBV_EXP_INTF_GLOBAL, + .intf = IBV_EXP_INTF_QP_BURST, + .obj = rxq->qp, + }; + rxq->if_qp = ibv_exp_query_intf(priv->ctx, ¶ms, &status); + if (rxq->if_qp == NULL) { + ERROR("QP interface family query failed with status %d", + status); + return errno; + } + return 0; +} + /** * Configure a RX queue. * @@ -3630,14 +3820,21 @@ skip_rtr: * Thresholds parameters. * @param mp * Memory pool for buffer allocations. + * @param children_n + * The number of children in a parent case, zero for a child. + * @param rxq_parent + * The pointer to a parent RX structure (or NULL) in a child case, + * NULL for parent. * * @return * 0 on success, errno value on failure. */ static int rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc, - unsigned int socket, int inactive, const struct rte_eth_rxconf *conf, - struct rte_mempool *mp) + unsigned int socket, int inactive, + const struct rte_eth_rxconf *conf, + struct rte_mempool *mp, int children_n, + struct rxq *rxq_parent) { struct priv *priv = dev->data->dev_private; struct rxq tmpl = { @@ -3645,17 +3842,15 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc, .mp = mp, .socket = socket }; - struct ibv_exp_qp_attr mod; union { struct ibv_exp_query_intf_params params; struct ibv_exp_cq_init_attr cq; struct ibv_exp_res_domain_init_attr rd; } attr; enum ibv_exp_query_intf_status status; - struct ibv_recv_wr *bad_wr; unsigned int mb_len; int ret = 0; - int parent = (rxq == &priv->rxq_parent); + int parent = (children_n > 0); (void)conf; /* Thresholds configuration (ignored). */ /* @@ -3746,45 +3941,6 @@ skip_mr: priv->device_attr.max_qp_wr); DEBUG("priv->device_attr.max_sge is %d", priv->device_attr.max_sge); -#ifdef RSS_SUPPORT - if (priv->rss && !inactive) - tmpl.qp = rxq_setup_qp_rss(priv, tmpl.cq, desc, parent, - tmpl.rd); - else -#endif /* RSS_SUPPORT */ - tmpl.qp = rxq_setup_qp(priv, tmpl.cq, desc, tmpl.rd); - if (tmpl.qp == NULL) { - ret = (errno ? errno : EINVAL); - ERROR("%p: QP creation failure: %s", - (void *)dev, strerror(ret)); - goto error; - } - mod = (struct ibv_exp_qp_attr){ - /* Move the QP to this state. */ - .qp_state = IBV_QPS_INIT, - /* Primary port number. */ - .port_num = priv->port - }; - ret = ibv_exp_modify_qp(tmpl.qp, &mod, - (IBV_EXP_QP_STATE | -#ifdef RSS_SUPPORT - (parent ? IBV_EXP_QP_GROUP_RSS : 0) | -#endif /* RSS_SUPPORT */ - IBV_EXP_QP_PORT)); - if (ret) { - ERROR("%p: QP state to IBV_QPS_INIT failed: %s", - (void *)dev, strerror(ret)); - goto error; - } - if (!priv->isolated && (parent || !priv->rss)) { - /* Configure MAC and broadcast addresses. */ - ret = rxq_mac_addrs_add(&tmpl); - if (ret) { - ERROR("%p: QP flow attachment failed: %s", - (void *)dev, strerror(ret)); - goto error; - } - } /* Allocate descriptors for RX queues, except for the RSS parent. 
*/ if (parent) goto skip_alloc; @@ -3795,29 +3951,14 @@ skip_mr: if (ret) { ERROR("%p: RXQ allocation failed: %s", (void *)dev, strerror(ret)); - goto error; - } - ret = ibv_post_recv(tmpl.qp, - (tmpl.sp ? - &(*tmpl.elts.sp)[0].wr : - &(*tmpl.elts.no_sp)[0].wr), - &bad_wr); - if (ret) { - ERROR("%p: ibv_post_recv() failed for WR %p: %s", - (void *)dev, - (void *)bad_wr, - strerror(ret)); - goto error; + return ret; } skip_alloc: - mod = (struct ibv_exp_qp_attr){ - .qp_state = IBV_QPS_RTR - }; - ret = ibv_exp_modify_qp(tmpl.qp, &mod, IBV_EXP_QP_STATE); - if (ret) { - ERROR("%p: QP state to IBV_QPS_RTR failed: %s", - (void *)dev, strerror(ret)); - goto error; + if (parent || rxq_parent || !priv->rss) { + ret = rxq_create_qp(&tmpl, desc, inactive, + children_n, rxq_parent); + if (ret) + goto error; } /* Save port ID. */ tmpl.port_id = dev->data->port_id; @@ -3829,21 +3970,11 @@ skip_alloc: }; tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status); if (tmpl.if_cq == NULL) { + ret = EINVAL; ERROR("%p: CQ interface family query failed with status %d", (void *)dev, status); goto error; } - attr.params = (struct ibv_exp_query_intf_params){ - .intf_scope = IBV_EXP_INTF_GLOBAL, - .intf = IBV_EXP_INTF_QP_BURST, - .obj = tmpl.qp, - }; - tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status); - if (tmpl.if_qp == NULL) { - ERROR("%p: QP interface family query failed with status %d", - (void *)dev, status); - goto error; - } /* Clean up rxq in case we're reinitializing it. */ DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq); rxq_cleanup(rxq); @@ -3881,6 +4012,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, unsigned int socket, const struct rte_eth_rxconf *conf, struct rte_mempool *mp) { + struct rxq *parent; struct priv *priv = dev->data->dev_private; struct rxq *rxq = (*priv->rxqs)[idx]; int inactive = 0; @@ -3915,9 +4047,16 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, return -ENOMEM; } } - if (idx >= rte_align32pow2(priv->rxqs_n + 1) >> 1) - inactive = 1; - ret = rxq_setup(dev, rxq, desc, socket, inactive, conf, mp); + if (priv->rss && !priv->isolated) { + /* The list consists of the single default one. 
*/ + parent = LIST_FIRST(&priv->parents); + if (idx >= rte_align32pow2(priv->rxqs_n + 1) >> 1) + inactive = 1; + } else { + parent = NULL; + } + ret = rxq_setup(dev, rxq, desc, socket, + inactive, conf, mp, 0, parent); if (ret) rte_free(rxq); else { @@ -3954,7 +4093,6 @@ mlx4_rx_queue_release(void *dpdk_rxq) return; priv = rxq->priv; priv_lock(priv); - assert(rxq != &priv->rxq_parent); for (i = 0; (i != priv->rxqs_n); ++i) if ((*priv->rxqs)[i] == rxq) { DEBUG("%p: removing RX queue %p from list", @@ -4009,7 +4147,7 @@ mlx4_dev_start(struct rte_eth_dev *dev) rxq = NULL; r = 1; } else if (priv->rss) { - rxq = &priv->rxq_parent; + rxq = LIST_FIRST(&priv->parents); r = 1; } else { rxq = (*priv->rxqs)[0]; @@ -4101,7 +4239,7 @@ mlx4_dev_stop(struct rte_eth_dev *dev) rxq = NULL; r = 1; } else if (priv->rss) { - rxq = &priv->rxq_parent; + rxq = LIST_FIRST(&priv->parents); r = 1; } else { rxq = (*priv->rxqs)[0]; @@ -4235,7 +4373,7 @@ mlx4_dev_close(struct rte_eth_dev *dev) priv->txqs = NULL; } if (priv->rss) - rxq_cleanup(&priv->rxq_parent); + priv_parent_list_cleanup(priv); if (priv->pd != NULL) { assert(priv->ctx != NULL); claim_zero(ibv_dealloc_pd(priv->pd)); @@ -4631,7 +4769,7 @@ mlx4_promiscuous_enable(struct rte_eth_dev *dev) if (!priv->started) goto end; if (priv->rss) { - ret = rxq_promiscuous_enable(&priv->rxq_parent); + ret = rxq_promiscuous_enable(LIST_FIRST(&priv->parents)); if (ret) { priv_unlock(priv); return; @@ -4676,7 +4814,7 @@ mlx4_promiscuous_disable(struct rte_eth_dev *dev) return; } if (priv->rss) { - rxq_promiscuous_disable(&priv->rxq_parent); + rxq_promiscuous_disable(LIST_FIRST(&priv->parents)); goto end; } for (i = 0; (i != priv->rxqs_n); ++i) @@ -4717,7 +4855,7 @@ mlx4_allmulticast_enable(struct rte_eth_dev *dev) if (!priv->started) goto end; if (priv->rss) { - ret = rxq_allmulticast_enable(&priv->rxq_parent); + ret = rxq_allmulticast_enable(LIST_FIRST(&priv->parents)); if (ret) { priv_unlock(priv); return; @@ -4762,7 +4900,7 @@ mlx4_allmulticast_disable(struct rte_eth_dev *dev) return; } if (priv->rss) { - rxq_allmulticast_disable(&priv->rxq_parent); + rxq_allmulticast_disable(LIST_FIRST(&priv->parents)); goto end; } for (i = 0; (i != priv->rxqs_n); ++i) @@ -5071,7 +5209,7 @@ vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) * Rehashing flows in all RX queues is necessary. */ if (priv->rss) - rxq_mac_addrs_del(&priv->rxq_parent); + rxq_mac_addrs_del(LIST_FIRST(&priv->parents)); else for (i = 0; (i != priv->rxqs_n); ++i) if ((*priv->rxqs)[i] != NULL) @@ -5079,7 +5217,7 @@ vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) priv->vlan_filter[j].enabled = 1; if (priv->started) { if (priv->rss) - rxq_mac_addrs_add(&priv->rxq_parent); + rxq_mac_addrs_add(LIST_FIRST(&priv->parents)); else for (i = 0; (i != priv->rxqs_n); ++i) { if ((*priv->rxqs)[i] == NULL) @@ -5093,7 +5231,7 @@ vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) * Rehashing flows in all RX queues is necessary. 
*/ if (priv->rss) - rxq_mac_addrs_del(&priv->rxq_parent); + rxq_mac_addrs_del(LIST_FIRST(&priv->parents)); else for (i = 0; (i != priv->rxqs_n); ++i) if ((*priv->rxqs)[i] != NULL) @@ -5101,7 +5239,7 @@ vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) priv->vlan_filter[j].enabled = 0; if (priv->started) { if (priv->rss) - rxq_mac_addrs_add(&priv->rxq_parent); + rxq_mac_addrs_add(LIST_FIRST(&priv->parents)); else for (i = 0; (i != priv->rxqs_n); ++i) { if ((*priv->rxqs)[i] == NULL) diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h index daf53e18b6..4e2a0ae783 100644 --- a/drivers/net/mlx4/mlx4.h +++ b/drivers/net/mlx4/mlx4.h @@ -219,6 +219,7 @@ struct rxq_elt { /* RX queue descriptor. */ struct rxq { + LIST_ENTRY(rxq) next; /* Used by parent queue only */ struct priv *priv; /* Back pointer to private data. */ struct rte_mempool *mp; /* Memory Pool for allocations. */ struct ibv_mr *mr; /* Memory Region (for mp). */ @@ -247,6 +248,10 @@ struct rxq { struct mlx4_rxq_stats stats; /* RX queue counters. */ unsigned int socket; /* CPU socket ID for allocations. */ struct ibv_exp_res_domain *rd; /* Resource Domain. */ + struct { + uint16_t queues_n; + uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; + } rss; }; /* TX element. */ @@ -341,7 +346,6 @@ struct priv { #endif unsigned int max_rss_tbl_sz; /* Maximum number of RSS queues. */ /* RX/TX queues. */ - struct rxq rxq_parent; /* Parent queue when RSS is enabled. */ unsigned int rxqs_n; /* RX queues array size. */ unsigned int txqs_n; /* TX queues array size. */ struct rxq *(*rxqs)[]; /* RX queues. */ @@ -351,10 +355,21 @@ struct priv { struct rte_flow_drop *flow_drop_queue; /* Flow drop queue. */ LIST_HEAD(mlx4_flows, rte_flow) flows; struct rte_intr_conf intr_conf; /* Active interrupt configuration. */ + LIST_HEAD(mlx4_parents, rxq) parents; rte_spinlock_t lock; /* Lock for control functions. */ }; void priv_lock(struct priv *priv); void priv_unlock(struct priv *priv); +int +rxq_create_qp(struct rxq *rxq, + uint16_t desc, + int inactive, + int children_n, + struct rxq *rxq_parent); + +void +rxq_parent_cleanup(struct rxq *parent); + #endif /* RTE_PMD_MLX4_H_ */ diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c index 5ad50bdc0d..1344101928 100644 --- a/drivers/net/mlx4/mlx4_flow.c +++ b/drivers/net/mlx4/mlx4_flow.c @@ -831,8 +831,23 @@ priv_flow_create_action_queue(struct priv *priv, if (action->drop) { qp = priv->flow_drop_queue->qp; } else { + int ret; struct rxq *rxq = (*priv->rxqs)[action->queue_id]; + if (!rxq->qp) { + assert(priv->isolated); + ret = rxq_create_qp(rxq, rxq->elts_n, + 0, 0, NULL); + if (ret) { + rte_flow_error_set( + error, + ENOMEM, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "flow rule creation failure"); + goto error; + } + } qp = rxq->qp; rte_flow->qp = qp; }
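
For reference, the sketch below is not part of the patch; it only illustrates how the helpers introduced above are intended to be combined once the flow API RSS action lands: look up or create one parent queue per RSS group through priv_parent_create() and release it with rxq_parent_cleanup() when the last flow rule referencing it is destroyed. The function name mlx4_flow_rss_parent_get() and the reuse-by-queue-set lookup are hypothetical; only priv_parent_create(), rxq_parent_cleanup() and the priv->parents list come from this commit.

/*
 * Illustrative sketch only -- not part of this commit. Assumes it lives in
 * mlx4.c (so it can call the static priv_parent_create()) and that the
 * caller holds priv_lock() and has already validated the queue indices.
 */
static struct rxq *
mlx4_flow_rss_parent_get(struct priv *priv, uint16_t queues[],
			 uint16_t queues_n)
{
	struct rxq *parent;
	uint16_t i;

	/* Reuse an existing parent that already covers this queue set. */
	LIST_FOREACH(parent, &priv->parents, next) {
		if (parent->rss.queues_n != queues_n)
			continue;
		for (i = 0; i != queues_n; ++i)
			if (parent->rss.queues[i] != queues[i])
				break;
		if (i == queues_n)
			return parent;
	}
	/* Otherwise allocate a dedicated parent for this RSS group. */
	return priv_parent_create(priv, queues, queues_n);
}

A parent obtained this way would be released with rxq_parent_cleanup() once no flow rule references it; the default parent created by dev_configure() outside isolated mode is still torn down through priv_parent_list_cleanup().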