X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxq.c;h=d5825fdd41799c669af840e4ab653953fbb6db75;hb=edad38fcd00e;hp=05b7c914682845036b49d5da0882a165b793fd35;hpb=0cdddf4d0626f001a936877aebcaf071090768e1;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 05b7c91468..d5825fdd41 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -36,27 +36,31 @@
 #include
 #include
 #include
+#include
 
 /* Verbs header. */
 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
 #ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-pedantic"
+#pragma GCC diagnostic ignored "-Wpedantic"
 #endif
 #include
+#include
+#include
 #ifdef PEDANTIC
-#pragma GCC diagnostic error "-pedantic"
+#pragma GCC diagnostic error "-Wpedantic"
 #endif
 
 /* DPDK headers don't like -pedantic. */
 #ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-pedantic"
+#pragma GCC diagnostic ignored "-Wpedantic"
 #endif
 #include
 #include
 #include
 #include
+#include
 #ifdef PEDANTIC
-#pragma GCC diagnostic error "-pedantic"
+#pragma GCC diagnostic error "-Wpedantic"
 #endif
 
 #include "mlx5.h"
@@ -105,7 +109,6 @@ const struct hash_rxq_init hash_rxq_init[] = {
 		},
 		.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
 	},
-#ifdef HAVE_FLOW_SPEC_IPV6
 	[HASH_RXQ_TCPV6] = {
 		.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
 				IBV_EXP_RX_HASH_DST_IPV6 |
@@ -144,7 +147,6 @@ const struct hash_rxq_init hash_rxq_init[] = {
 		},
 		.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
 	},
-#endif /* HAVE_FLOW_SPEC_IPV6 */
 	[HASH_RXQ_ETH] = {
 		.hash_fields = 0,
 		.dpdk_rss_hf = 0,
@@ -168,17 +170,11 @@ static const struct ind_table_init ind_table_init[] = {
 			1 << HASH_RXQ_TCPV4 |
 			1 << HASH_RXQ_UDPV4 |
 			1 << HASH_RXQ_IPV4 |
-#ifdef HAVE_FLOW_SPEC_IPV6
 			1 << HASH_RXQ_TCPV6 |
 			1 << HASH_RXQ_UDPV6 |
 			1 << HASH_RXQ_IPV6 |
-#endif /* HAVE_FLOW_SPEC_IPV6 */
 			0,
-#ifdef HAVE_FLOW_SPEC_IPV6
 		.hash_types_n = 6,
-#else /* HAVE_FLOW_SPEC_IPV6 */
-		.hash_types_n = 3,
-#endif /* HAVE_FLOW_SPEC_IPV6 */
 	},
 	{
 		.max_size = 1,
@@ -243,12 +239,8 @@ priv_flow_attr(struct priv *priv, struct ibv_exp_flow_attr *flow_attr,
 	init = &hash_rxq_init[type];
 	*flow_attr = (struct ibv_exp_flow_attr){
 		.type = IBV_EXP_FLOW_ATTR_NORMAL,
-#ifdef MLX5_FDIR_SUPPORT
 		/* Priorities < 3 are reserved for flow director. */
 		.priority = init->flow_priority + 3,
-#else /* MLX5_FDIR_SUPPORT */
-		.priority = init->flow_priority,
-#endif /* MLX5_FDIR_SUPPORT */
 		.num_of_specs = 0,
 		.port = priv->port,
 		.flags = 0,
@@ -279,7 +271,7 @@ priv_flow_attr(struct priv *priv, struct ibv_exp_flow_attr *flow_attr,
 static enum hash_rxq_type
 hash_rxq_type_from_pos(const struct ind_table_init *table, unsigned int pos)
 {
-	enum hash_rxq_type type = 0;
+	enum hash_rxq_type type = HASH_RXQ_TCPV4;
 
 	assert(pos < table->hash_types_n);
 	do {
@@ -385,8 +377,13 @@ priv_create_hash_rxqs(struct priv *priv)
 		      DEBUG("indirection table extended to assume %u WQs",
 			    priv->reta_idx_n);
 	}
-	for (i = 0; (i != priv->reta_idx_n); ++i)
-		wqs[i] = (*priv->rxqs)[(*priv->reta_idx)[i]]->wq;
+	for (i = 0; (i != priv->reta_idx_n); ++i) {
+		struct rxq_ctrl *rxq_ctrl;
+
+		rxq_ctrl = container_of((*priv->rxqs)[(*priv->reta_idx)[i]],
+					struct rxq_ctrl, rxq);
+		wqs[i] = rxq_ctrl->wq;
+	}
 	/* Get number of hash RX queues to configure. */
 	for (i = 0, hash_rxqs_n = 0; (i != ind_tables_n); ++i)
 		hash_rxqs_n += ind_table_init[i].hash_types_n;
@@ -589,9 +586,7 @@ priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type)
 	case HASH_RXQ_FLOW_TYPE_ALLMULTI:
 		return !!priv->allmulti_req;
 	case HASH_RXQ_FLOW_TYPE_BROADCAST:
-#ifdef HAVE_FLOW_SPEC_IPV6
 	case HASH_RXQ_FLOW_TYPE_IPV6MULTI:
-#endif /* HAVE_FLOW_SPEC_IPV6 */
 		/* If allmulti is enabled, broadcast and ipv6multi
 		 * are unnecessary. */
 		return !priv->allmulti_req;
@@ -616,9 +611,11 @@ priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type)
 int
 priv_rehash_flows(struct priv *priv)
 {
-	unsigned int i;
+	enum hash_rxq_flow_type i;
 
-	for (i = 0; (i != RTE_DIM((*priv->hash_rxqs)[0].special_flow)); ++i)
+	for (i = HASH_RXQ_FLOW_TYPE_PROMISC;
+	     i != RTE_DIM((*priv->hash_rxqs)[0].special_flow);
+	     ++i)
 		if (!priv_allow_flow_type(priv, i)) {
 			priv_special_flow_disable(priv, i);
 		} else {
@@ -649,29 +646,23 @@ priv_rehash_flows(struct priv *priv)
  */
 static int
 rxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n,
-	       struct rte_mbuf **pool)
+	       struct rte_mbuf *(*pool)[])
 {
+	const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
 	unsigned int i;
-	struct rxq_elt (*elts)[elts_n] =
-		rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0,
-				  rxq_ctrl->socket);
 	int ret = 0;
 
-	if (elts == NULL) {
-		ERROR("%p: can't allocate packets array", (void *)rxq_ctrl);
-		ret = ENOMEM;
-		goto error;
-	}
-	/* For each WR (packet). */
+	/* Iterate on segments. */
 	for (i = 0; (i != elts_n); ++i) {
-		struct rxq_elt *elt = &(*elts)[i];
-		struct ibv_sge *sge = &(*elts)[i].sge;
 		struct rte_mbuf *buf;
+		volatile struct mlx5_wqe_data_seg *scat =
+			&(*rxq_ctrl->rxq.wqes)[i];
 
 		if (pool != NULL) {
-			buf = *(pool++);
+			buf = (*pool)[i];
 			assert(buf != NULL);
 			rte_pktmbuf_reset(buf);
+			rte_pktmbuf_refcnt_update(buf, 1);
 		} else
 			buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
 		if (buf == NULL) {
@@ -680,40 +671,39 @@ rxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n,
 			ret = ENOMEM;
 			goto error;
 		}
-		elt->buf = buf;
 		/* Headroom is reserved by rte_pktmbuf_alloc(). */
 		assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
 		/* Buffer is supposed to be empty. */
 		assert(rte_pktmbuf_data_len(buf) == 0);
 		assert(rte_pktmbuf_pkt_len(buf) == 0);
-		/* sge->addr must be able to store a pointer. */
-		assert(sizeof(sge->addr) >= sizeof(uintptr_t));
-		/* SGE keeps its headroom. */
-		sge->addr = (uintptr_t)
-			((uint8_t *)buf->buf_addr + RTE_PKTMBUF_HEADROOM);
-		sge->length = (buf->buf_len - RTE_PKTMBUF_HEADROOM);
-		sge->lkey = rxq_ctrl->mr->lkey;
-		/* Redundant check for tailroom. */
-		assert(sge->length == rte_pktmbuf_tailroom(buf));
+		assert(!buf->next);
+		/* Only the first segment keeps headroom. */
+		if (i % sges_n)
+			SET_DATA_OFF(buf, 0);
+		PORT(buf) = rxq_ctrl->rxq.port_id;
+		DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
+		PKT_LEN(buf) = DATA_LEN(buf);
+		NB_SEGS(buf) = 1;
+		/* scat->addr must be able to store a pointer. */
+		assert(sizeof(scat->addr) >= sizeof(uintptr_t));
+		*scat = (struct mlx5_wqe_data_seg){
+			.addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)),
+			.byte_count = htonl(DATA_LEN(buf)),
+			.lkey = htonl(rxq_ctrl->mr->lkey),
+		};
+		(*rxq_ctrl->rxq.elts)[i] = buf;
 	}
-	DEBUG("%p: allocated and configured %u single-segment WRs",
-	      (void *)rxq_ctrl, elts_n);
-	rxq_ctrl->rxq.elts_n = elts_n;
-	rxq_ctrl->rxq.elts_head = 0;
-	rxq_ctrl->rxq.elts = elts;
+	DEBUG("%p: allocated and configured %u segments (max %u packets)",
+	      (void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n));
 	assert(ret == 0);
 	return 0;
 error:
-	if (elts != NULL) {
-		assert(pool == NULL);
-		for (i = 0; (i != RTE_DIM(*elts)); ++i) {
-			struct rxq_elt *elt = &(*elts)[i];
-			struct rte_mbuf *buf = elt->buf;
-
-			if (buf != NULL)
-				rte_pktmbuf_free_seg(buf);
-		}
-		rte_free(elts);
+	assert(pool == NULL);
+	elts_n = i;
+	for (i = 0; (i != elts_n); ++i) {
+		if ((*rxq_ctrl->rxq.elts)[i] != NULL)
+			rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
+		(*rxq_ctrl->rxq.elts)[i] = NULL;
 	}
 	DEBUG("%p: failed, freed everything", (void *)rxq_ctrl);
 	assert(ret > 0);
@@ -730,22 +720,16 @@
 static void
 rxq_free_elts(struct rxq_ctrl *rxq_ctrl)
 {
 	unsigned int i;
-	unsigned int elts_n = rxq_ctrl->rxq.elts_n;
-	struct rxq_elt (*elts)[elts_n] = rxq_ctrl->rxq.elts;
 
 	DEBUG("%p: freeing WRs", (void *)rxq_ctrl);
-	rxq_ctrl->rxq.elts_n = 0;
-	rxq_ctrl->rxq.elts = NULL;
-	if (elts == NULL)
+	if (rxq_ctrl->rxq.elts == NULL)
 		return;
-	for (i = 0; (i != RTE_DIM(*elts)); ++i) {
-		struct rxq_elt *elt = &(*elts)[i];
-		struct rte_mbuf *buf = elt->buf;
-		if (buf != NULL)
-			rte_pktmbuf_free_seg(buf);
+	for (i = 0; (i != (1u << rxq_ctrl->rxq.elts_n)); ++i) {
+		if ((*rxq_ctrl->rxq.elts)[i] != NULL)
+			rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
+		(*rxq_ctrl->rxq.elts)[i] = NULL;
 	}
-	rte_free(elts);
 }
 
 /**
@@ -763,42 +747,44 @@ rxq_cleanup(struct rxq_ctrl *rxq_ctrl)
 
 	DEBUG("cleaning up %p", (void *)rxq_ctrl);
 	rxq_free_elts(rxq_ctrl);
-	rxq_ctrl->rxq.poll = NULL;
-	rxq_ctrl->rxq.recv = NULL;
+	if (rxq_ctrl->fdir_queue != NULL)
+		priv_fdir_queue_destroy(rxq_ctrl->priv, rxq_ctrl->fdir_queue);
 	if (rxq_ctrl->if_wq != NULL) {
-		assert(rxq_ctrl->rxq.priv != NULL);
-		assert(rxq_ctrl->rxq.priv->ctx != NULL);
-		assert(rxq_ctrl->rxq.wq != NULL);
+		assert(rxq_ctrl->priv != NULL);
+		assert(rxq_ctrl->priv->ctx != NULL);
+		assert(rxq_ctrl->wq != NULL);
 		params = (struct ibv_exp_release_intf_params){
 			.comp_mask = 0,
 		};
-		claim_zero(ibv_exp_release_intf(rxq_ctrl->rxq.priv->ctx,
+		claim_zero(ibv_exp_release_intf(rxq_ctrl->priv->ctx,
 						rxq_ctrl->if_wq,
 						&params));
 	}
 	if (rxq_ctrl->if_cq != NULL) {
-		assert(rxq_ctrl->rxq.priv != NULL);
-		assert(rxq_ctrl->rxq.priv->ctx != NULL);
-		assert(rxq_ctrl->rxq.cq != NULL);
+		assert(rxq_ctrl->priv != NULL);
+		assert(rxq_ctrl->priv->ctx != NULL);
+		assert(rxq_ctrl->cq != NULL);
 		params = (struct ibv_exp_release_intf_params){
 			.comp_mask = 0,
 		};
-		claim_zero(ibv_exp_release_intf(rxq_ctrl->rxq.priv->ctx,
+		claim_zero(ibv_exp_release_intf(rxq_ctrl->priv->ctx,
 						rxq_ctrl->if_cq,
 						&params));
 	}
-	if (rxq_ctrl->rxq.wq != NULL)
-		claim_zero(ibv_exp_destroy_wq(rxq_ctrl->rxq.wq));
-	if (rxq_ctrl->rxq.cq != NULL)
-		claim_zero(ibv_destroy_cq(rxq_ctrl->rxq.cq));
+	if (rxq_ctrl->wq != NULL)
+		claim_zero(ibv_exp_destroy_wq(rxq_ctrl->wq));
+	if (rxq_ctrl->cq != NULL)
+		claim_zero(ibv_destroy_cq(rxq_ctrl->cq));
+	if (rxq_ctrl->channel != NULL)
+		claim_zero(ibv_destroy_comp_channel(rxq_ctrl->channel));
 	if (rxq_ctrl->rd != NULL) {
 		struct ibv_exp_destroy_res_domain_attr attr = {
 			.comp_mask = 0,
 		};
 
-		assert(rxq_ctrl->rxq.priv != NULL);
-		assert(rxq_ctrl->rxq.priv->ctx != NULL);
-		claim_zero(ibv_exp_destroy_res_domain(rxq_ctrl->rxq.priv->ctx,
+		assert(rxq_ctrl->priv != NULL);
+		assert(rxq_ctrl->priv->ctx != NULL);
+		claim_zero(ibv_exp_destroy_res_domain(rxq_ctrl->priv->ctx,
						      rxq_ctrl->rd,
						      &attr));
 	}
@@ -808,7 +794,7 @@ rxq_cleanup(struct rxq_ctrl *rxq_ctrl)
 }
 
 /**
- * Reconfigure a RX queue with new parameters.
+ * Reconfigure RX queue buffers.
  *
  * rxq_rehash() does not allocate mbufs, which, if not done from the right
  * thread (such as a control thread), may corrupt the pool.
@@ -825,110 +811,94 @@ rxq_cleanup(struct rxq_ctrl *rxq_ctrl)
 int
 rxq_rehash(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl)
 {
-	struct priv *priv = rxq_ctrl->rxq.priv;
-	struct rxq_ctrl tmpl = *rxq_ctrl;
-	unsigned int mbuf_n;
-	unsigned int desc_n;
-	struct rte_mbuf **pool;
-	unsigned int i, k;
+	unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
+	unsigned int i;
 	struct ibv_exp_wq_attr mod;
-	struct rxq_elt (*elts)[tmpl.rxq.elts_n];
 	int err;
 
-	DEBUG("%p: rehashing queue %p", (void *)dev, (void *)rxq_ctrl);
-	/* Number of descriptors and mbufs currently allocated. */
-	desc_n = tmpl.rxq.elts_n;
-	mbuf_n = desc_n;
-	/* Toggle RX checksum offload if hardware supports it. */
-	if (priv->hw_csum) {
-		tmpl.rxq.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
-		rxq_ctrl->rxq.csum = tmpl.rxq.csum;
-	}
-	if (priv->hw_csum_l2tun) {
-		tmpl.rxq.csum_l2tun =
-			!!dev->data->dev_conf.rxmode.hw_ip_checksum;
-		rxq_ctrl->rxq.csum_l2tun = tmpl.rxq.csum_l2tun;
-	}
+	DEBUG("%p: rehashing queue %p with %u SGE(s) per packet",
+	      (void *)dev, (void *)rxq_ctrl, 1 << rxq_ctrl->rxq.sges_n);
+	assert(!(elts_n % (1 << rxq_ctrl->rxq.sges_n)));
 	/* From now on, any failure will render the queue unusable.
 	 * Reinitialize WQ. */
 	mod = (struct ibv_exp_wq_attr){
 		.attr_mask = IBV_EXP_WQ_ATTR_STATE,
 		.wq_state = IBV_EXP_WQS_RESET,
 	};
-	err = ibv_exp_modify_wq(tmpl.rxq.wq, &mod);
+	err = ibv_exp_modify_wq(rxq_ctrl->wq, &mod);
 	if (err) {
 		ERROR("%p: cannot reset WQ: %s", (void *)dev, strerror(err));
 		assert(err > 0);
 		return err;
 	}
-	/* Allocate pool. */
-	pool = rte_malloc(__func__, (mbuf_n * sizeof(*pool)), 0);
-	if (pool == NULL) {
-		ERROR("%p: cannot allocate memory", (void *)dev);
-		return ENOBUFS;
-	}
 	/* Snatch mbufs from original queue. */
-	k = 0;
-	elts = rxq_ctrl->rxq.elts;
-	for (i = 0; (i != RTE_DIM(*elts)); ++i) {
-		struct rxq_elt *elt = &(*elts)[i];
-		struct rte_mbuf *buf = elt->buf;
+	claim_zero(rxq_alloc_elts(rxq_ctrl, elts_n, rxq_ctrl->rxq.elts));
+	for (i = 0; i != elts_n; ++i) {
+		struct rte_mbuf *buf = (*rxq_ctrl->rxq.elts)[i];
 
-		pool[k++] = buf;
+		assert(rte_mbuf_refcnt_read(buf) == 2);
+		rte_pktmbuf_free_seg(buf);
 	}
-	assert(k == mbuf_n);
-	tmpl.rxq.elts_n = 0;
-	tmpl.rxq.elts = NULL;
-	assert((void *)&tmpl.rxq.elts == NULL);
-	err = rxq_alloc_elts(&tmpl, desc_n, pool);
-	if (err) {
-		ERROR("%p: cannot reallocate WRs, aborting", (void *)dev);
-		rte_free(pool);
-		assert(err > 0);
-		return err;
-	}
-	assert(tmpl.rxq.elts_n == desc_n);
-	rte_free(pool);
-	/* Clean up original data. */
-	rxq_ctrl->rxq.elts_n = 0;
-	rte_free(rxq_ctrl->rxq.elts);
-	rxq_ctrl->rxq.elts = NULL;
 	/* Change queue state to ready. */
 	mod = (struct ibv_exp_wq_attr){
 		.attr_mask = IBV_EXP_WQ_ATTR_STATE,
 		.wq_state = IBV_EXP_WQS_RDY,
 	};
-	err = ibv_exp_modify_wq(tmpl.rxq.wq, &mod);
+	err = ibv_exp_modify_wq(rxq_ctrl->wq, &mod);
 	if (err) {
 		ERROR("%p: WQ state to IBV_EXP_WQS_RDY failed: %s",
 		      (void *)dev, strerror(err));
 		goto error;
 	}
-	/* Post SGEs. */
-	assert(tmpl.if_wq != NULL);
-	elts = tmpl.rxq.elts;
-	for (i = 0; (i != RTE_DIM(*elts)); ++i) {
-		err = tmpl.if_wq->recv_burst(
-			tmpl.rxq.wq,
-			&(*elts)[i].sge,
-			1);
-		if (err)
-			break;
-	}
-	if (err) {
-		ERROR("%p: failed to post SGEs with error %d",
-		      (void *)dev, err);
-		/* Set err because it does not contain a valid errno value. */
-		err = EIO;
-		goto error;
-	}
-	tmpl.rxq.recv = tmpl.if_wq->recv_burst;
+	/* Update doorbell counter. */
+	rxq_ctrl->rxq.rq_ci = elts_n >> rxq_ctrl->rxq.sges_n;
+	rte_wmb();
+	*rxq_ctrl->rxq.rq_db = htonl(rxq_ctrl->rxq.rq_ci);
 error:
-	*rxq_ctrl = tmpl;
 	assert(err >= 0);
 	return err;
 }
 
+/**
+ * Initialize RX queue.
+ *
+ * @param tmpl
+ *   Pointer to RX queue control template.
+ *
+ * @return
+ *   0 on success, errno value on failure.
+ */
+static inline int
+rxq_setup(struct rxq_ctrl *tmpl)
+{
+	struct ibv_cq *ibcq = tmpl->cq;
+	struct mlx5_cq *cq = to_mxxx(cq, cq);
+	struct mlx5_rwq *rwq = container_of(tmpl->wq, struct mlx5_rwq, wq);
+	struct rte_mbuf *(*elts)[1 << tmpl->rxq.elts_n] =
+		rte_calloc_socket("RXQ", 1, sizeof(*elts), 0, tmpl->socket);
+
+	if (cq->cqe_sz != RTE_CACHE_LINE_SIZE) {
+		ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
+		      "it should be set to %u", RTE_CACHE_LINE_SIZE);
+		return EINVAL;
+	}
+	if (elts == NULL)
+		return ENOMEM;
+	tmpl->rxq.rq_db = rwq->rq.db;
+	tmpl->rxq.cqe_n = log2above(ibcq->cqe);
+	tmpl->rxq.cq_ci = 0;
+	tmpl->rxq.rq_ci = 0;
+	tmpl->rxq.cq_db = cq->dbrec;
+	tmpl->rxq.wqes =
+		(volatile struct mlx5_wqe_data_seg (*)[])
+		(uintptr_t)rwq->rq.buff;
+	tmpl->rxq.cqes =
+		(volatile struct mlx5_cqe (*)[])
+		(uintptr_t)cq->active_buf->buf;
+	tmpl->rxq.elts = elts;
+	return 0;
+}
+
 /**
  * Configure a RX queue.
  *
@@ -949,16 +919,18 @@ error:
  *   0 on success, errno value on failure.
  */
 int
-rxq_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl, uint16_t desc,
-	  unsigned int socket, const struct rte_eth_rxconf *conf,
-	  struct rte_mempool *mp)
+rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
+	       uint16_t desc, unsigned int socket,
+	       const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
 {
 	struct priv *priv = dev->data->dev_private;
 	struct rxq_ctrl tmpl = {
+		.priv = priv,
 		.socket = socket,
 		.rxq = {
-			.priv = priv,
+			.elts_n = log2above(desc),
 			.mp = mp,
+			.rss_hash = priv->rxqs_n > 1,
 		},
 	};
 	struct ibv_exp_wq_attr mod;
@@ -967,17 +939,59 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl, uint16_t desc,
 		struct ibv_exp_cq_init_attr cq;
 		struct ibv_exp_res_domain_init_attr rd;
 		struct ibv_exp_wq_init_attr wq;
+		struct ibv_exp_cq_attr cq_attr;
 	} attr;
 	enum ibv_exp_query_intf_status status;
 	unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
-	struct rxq_elt (*elts)[desc];
+	unsigned int cqe_n = desc - 1;
+	struct rte_mbuf *(*elts)[desc] = NULL;
 	int ret = 0;
-	unsigned int i;
-	unsigned int cq_size = desc;
 
 	(void)conf; /* Thresholds configuration (ignored). */
-	if (desc == 0) {
-		ERROR("%p: invalid number of RX descriptors", (void *)dev);
+	/* Enable scattered packets support for this queue if necessary. */
+	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
+	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
+	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
+		tmpl.rxq.sges_n = 0;
+	} else if (dev->data->dev_conf.rxmode.enable_scatter) {
+		unsigned int size =
+			RTE_PKTMBUF_HEADROOM +
+			dev->data->dev_conf.rxmode.max_rx_pkt_len;
+		unsigned int sges_n;
+
+		/*
+		 * Determine the number of SGEs needed for a full packet
+		 * and round it to the next power of two.
+		 */
+		sges_n = log2above((size / mb_len) + !!(size % mb_len));
+		tmpl.rxq.sges_n = sges_n;
+		/* Make sure rxq.sges_n did not overflow. */
+		size = mb_len * (1 << tmpl.rxq.sges_n);
+		size -= RTE_PKTMBUF_HEADROOM;
+		if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
+			ERROR("%p: too many SGEs (%u) needed to handle"
+			      " requested maximum packet size %u",
+			      (void *)dev,
+			      1 << sges_n,
+			      dev->data->dev_conf.rxmode.max_rx_pkt_len);
+			return EOVERFLOW;
+		}
+	} else {
+		WARN("%p: the requested maximum Rx packet size (%u) is"
+		     " larger than a single mbuf (%u) and scattered"
+		     " mode has not been requested",
+		     (void *)dev,
+		     dev->data->dev_conf.rxmode.max_rx_pkt_len,
+		     mb_len - RTE_PKTMBUF_HEADROOM);
+	}
+	DEBUG("%p: maximum number of segments per packet: %u",
+	      (void *)dev, 1 << tmpl.rxq.sges_n);
+	if (desc % (1 << tmpl.rxq.sges_n)) {
+		ERROR("%p: number of RX queue descriptors (%u) is not a"
+		      " multiple of SGEs per packet (%u)",
+		      (void *)dev,
+		      desc,
+		      1 << tmpl.rxq.sges_n);
 		return EINVAL;
 	}
 	/* Toggle RX checksum offload if hardware supports it. */
@@ -986,7 +1000,6 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl, uint16_t desc,
 	if (priv->hw_csum_l2tun)
 		tmpl.rxq.csum_l2tun =
 			!!dev->data->dev_conf.rxmode.hw_ip_checksum;
-	(void)mb_len; /* I'll be back! */
 	/* Use the entire RX mempool as the memory region. */
 	tmpl.mr = mlx5_mp2mr(priv->pd, mp);
 	if (tmpl.mr == NULL) {
@@ -1008,13 +1021,28 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl, uint16_t desc,
 		      (void *)dev, strerror(ret));
 		goto error;
 	}
+	if (dev->data->dev_conf.intr_conf.rxq) {
+		tmpl.channel = ibv_create_comp_channel(priv->ctx);
+		if (tmpl.channel == NULL) {
+			dev->data->dev_conf.intr_conf.rxq = 0;
+			ret = ENOMEM;
+			ERROR("%p: Comp Channel creation failure: %s",
+			      (void *)dev, strerror(ret));
+			goto error;
+		}
+	}
 	attr.cq = (struct ibv_exp_cq_init_attr){
 		.comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
 		.res_domain = tmpl.rd,
 	};
-	tmpl.rxq.cq = ibv_exp_create_cq(priv->ctx, cq_size, NULL, NULL, 0,
-					&attr.cq);
-	if (tmpl.rxq.cq == NULL) {
+	if (priv->cqe_comp) {
+		attr.cq.comp_mask |= IBV_EXP_CQ_INIT_ATTR_FLAGS;
+		attr.cq.flags |= IBV_EXP_CQ_COMPRESSED_CQE;
+		cqe_n = (desc * 2) - 1; /* Double the number of CQEs. */
+	}
+	tmpl.cq = ibv_exp_create_cq(priv->ctx, cqe_n, NULL, tmpl.channel, 0,
+				    &attr.cq);
+	if (tmpl.cq == NULL) {
 		ret = ENOMEM;
 		ERROR("%p: CQ creation failure: %s",
 		      (void *)dev, strerror(ret));
@@ -1031,28 +1059,20 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl, uint16_t desc,
 		.wq_context = NULL, /* Could be useful in the future. */
 		.wq_type = IBV_EXP_WQT_RQ,
 		/* Max number of outstanding WRs. */
-		.max_recv_wr = ((priv->device_attr.max_qp_wr < (int)cq_size) ?
-				priv->device_attr.max_qp_wr :
-				(int)cq_size),
+		.max_recv_wr = desc >> tmpl.rxq.sges_n,
 		/* Max number of scatter/gather elements in a WR. */
-		.max_recv_sge = 1,
+		.max_recv_sge = 1 << tmpl.rxq.sges_n,
 		.pd = priv->pd,
-		.cq = tmpl.rxq.cq,
+		.cq = tmpl.cq,
 		.comp_mask =
			IBV_EXP_CREATE_WQ_RES_DOMAIN |
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
			IBV_EXP_CREATE_WQ_VLAN_OFFLOADS |
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
			0,
 		.res_domain = tmpl.rd,
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
 		.vlan_offloads = (tmpl.rxq.vlan_strip ?
				  IBV_EXP_RECEIVE_WQ_CVLAN_STRIP :
				  0),
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
 	};
-
-#ifdef HAVE_VERBS_FCS
 	/* By default, FCS (CRC) is stripped by hardware. */
 	if (dev->data->dev_conf.rxmode.hw_strip_crc) {
 		tmpl.rxq.crc_present = 0;
@@ -1073,9 +1093,6 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl, uint16_t desc,
 	      (void *)dev,
 	      tmpl.rxq.crc_present ? "disabled" : "enabled",
 	      tmpl.rxq.crc_present << 2);
-#endif /* HAVE_VERBS_FCS */
-
-#ifdef HAVE_VERBS_RX_END_PADDING
 	if (!mlx5_getenv_int("MLX5_PMD_ENABLE_PADDING"))
 		; /* Nothing else to do. */
 	else if (priv->hw_padding) {
@@ -1088,19 +1105,25 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl, uint16_t desc,
 		     " supported, make sure MLNX_OFED and firmware are"
 		     " up to date",
 		     (void *)dev);
-#endif /* HAVE_VERBS_RX_END_PADDING */
-
-	tmpl.rxq.wq = ibv_exp_create_wq(priv->ctx, &attr.wq);
-	if (tmpl.rxq.wq == NULL) {
+	tmpl.wq = ibv_exp_create_wq(priv->ctx, &attr.wq);
+	if (tmpl.wq == NULL) {
 		ret = (errno ? errno : EINVAL);
 		ERROR("%p: WQ creation failure: %s",
 		      (void *)dev, strerror(ret));
 		goto error;
 	}
-	ret = rxq_alloc_elts(&tmpl, desc, NULL);
-	if (ret) {
-		ERROR("%p: RXQ allocation failed: %s",
-		      (void *)dev, strerror(ret));
+	/*
+	 * Make sure number of WRs*SGEs match expectations since a queue
+	 * cannot allocate more than "desc" buffers.
+	 */
+	if (((int)attr.wq.max_recv_wr != (desc >> tmpl.rxq.sges_n)) ||
+	    ((int)attr.wq.max_recv_sge != (1 << tmpl.rxq.sges_n))) {
+		ERROR("%p: requested %u*%u but got %u*%u WRs*SGEs",
+		      (void *)dev,
+		      (desc >> tmpl.rxq.sges_n), (1 << tmpl.rxq.sges_n),
+		      attr.wq.max_recv_wr, attr.wq.max_recv_sge);
+		ret = EINVAL;
 		goto error;
 	}
 	/* Save port ID. */
@@ -1108,11 +1131,9 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl, uint16_t desc,
 	DEBUG("%p: RTE port ID: %u", (void *)rxq_ctrl, tmpl.rxq.port_id);
 	attr.params = (struct ibv_exp_query_intf_params){
 		.intf_scope = IBV_EXP_INTF_GLOBAL,
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
 		.intf_version = 1,
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
 		.intf = IBV_EXP_INTF_CQ,
-		.obj = tmpl.rxq.cq,
+		.obj = tmpl.cq,
 	};
 	tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
 	if (tmpl.if_cq == NULL) {
@@ -1123,7 +1144,7 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl, uint16_t desc,
 	attr.params = (struct ibv_exp_query_intf_params){
 		.intf_scope = IBV_EXP_INTF_GLOBAL,
 		.intf = IBV_EXP_INTF_WQ,
-		.obj = tmpl.rxq.wq,
+		.obj = tmpl.wq,
 	};
 	tmpl.if_wq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
 	if (tmpl.if_wq == NULL) {
@@ -1136,45 +1157,53 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl, uint16_t desc,
 		.attr_mask = IBV_EXP_WQ_ATTR_STATE,
 		.wq_state = IBV_EXP_WQS_RDY,
 	};
-	ret = ibv_exp_modify_wq(tmpl.rxq.wq, &mod);
+	ret = ibv_exp_modify_wq(tmpl.wq, &mod);
 	if (ret) {
 		ERROR("%p: WQ state to IBV_EXP_WQS_RDY failed: %s",
 		      (void *)dev, strerror(ret));
 		goto error;
 	}
-	/* Post SGEs. */
-	elts = tmpl.rxq.elts;
-	for (i = 0; (i != RTE_DIM(*elts)); ++i) {
-		ret = tmpl.if_wq->recv_burst(
-			tmpl.rxq.wq,
-			&(*elts)[i].sge,
-			1);
-		if (ret)
-			break;
+	ret = rxq_setup(&tmpl);
+	if (ret) {
+		ERROR("%p: cannot initialize RX queue structure: %s",
+		      (void *)dev, strerror(ret));
+		goto error;
 	}
+	/* Reuse buffers from original queue if possible. */
+	if (rxq_ctrl->rxq.elts_n) {
+		assert(1 << rxq_ctrl->rxq.elts_n == desc);
+		assert(rxq_ctrl->rxq.elts != tmpl.rxq.elts);
+		ret = rxq_alloc_elts(&tmpl, desc, rxq_ctrl->rxq.elts);
+	} else
+		ret = rxq_alloc_elts(&tmpl, desc, NULL);
 	if (ret) {
-		ERROR("%p: failed to post SGEs with error %d",
-		      (void *)dev, ret);
-		/* Set ret because it does not contain a valid errno value. */
-		ret = EIO;
+		ERROR("%p: RXQ allocation failed: %s",
+		      (void *)dev, strerror(ret));
 		goto error;
 	}
 	/* Clean up rxq in case we're reinitializing it. */
 	DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq_ctrl);
 	rxq_cleanup(rxq_ctrl);
+	/* Move mbuf pointers to dedicated storage area in RX queue. */
+	elts = (void *)(rxq_ctrl + 1);
+	rte_memcpy(elts, tmpl.rxq.elts, sizeof(*elts));
+#ifndef NDEBUG
+	memset(tmpl.rxq.elts, 0x55, sizeof(*elts));
+#endif
+	rte_free(tmpl.rxq.elts);
+	tmpl.rxq.elts = elts;
 	*rxq_ctrl = tmpl;
+	/* Update doorbell counter. */
+	rxq_ctrl->rxq.rq_ci = desc >> rxq_ctrl->rxq.sges_n;
+	rte_wmb();
+	*rxq_ctrl->rxq.rq_db = htonl(rxq_ctrl->rxq.rq_ci);
 	DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl);
 	assert(ret == 0);
-	/* Assign function in queue. */
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
-	rxq_ctrl->rxq.poll = rxq_ctrl->if_cq->poll_length_flags_cvlan;
-#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
-	rxq_ctrl->rxq.poll = rxq_ctrl->if_cq->poll_length_flags;
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
-	rxq_ctrl->rxq.recv = rxq_ctrl->if_wq->recv_burst;
 	return 0;
 error:
+	elts = tmpl.rxq.elts;
 	rxq_cleanup(&tmpl);
+	rte_free(elts);
 	assert(ret > 0);
 	return ret;
 }
@@ -1205,14 +1234,19 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 {
 	struct priv *priv = dev->data->dev_private;
 	struct rxq *rxq = (*priv->rxqs)[idx];
-	struct rxq_ctrl *rxq_ctrl;
+	struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
 	int ret;
 
 	if (mlx5_is_secondary())
 		return -E_RTE_SECONDARY;
 
 	priv_lock(priv);
-	rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+	if (!rte_is_power_of_2(desc)) {
+		desc = 1 << log2above(desc);
+		WARN("%p: increased number of descriptors in RX queue %u"
+		     " to the next power of two (%d)",
+		     (void *)dev, idx, desc);
+	}
 	DEBUG("%p: configuring queue %u for %u descriptors",
 	      (void *)dev, idx, desc);
 	if (idx >= priv->rxqs_n) {
@@ -1230,9 +1264,23 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		}
 		(*priv->rxqs)[idx] = NULL;
 		rxq_cleanup(rxq_ctrl);
+		/* Resize if rxq size is changed. */
+		if (rxq_ctrl->rxq.elts_n != log2above(desc)) {
+			rxq_ctrl = rte_realloc(rxq_ctrl,
+					       sizeof(*rxq_ctrl) +
+					       desc * sizeof(struct rte_mbuf *),
+					       RTE_CACHE_LINE_SIZE);
+			if (!rxq_ctrl) {
+				ERROR("%p: unable to reallocate queue index %u",
+				      (void *)dev, idx);
+				priv_unlock(priv);
+				return -ENOMEM;
+			}
+		}
 	} else {
-		rxq_ctrl = rte_calloc_socket("RXQ", 1, sizeof(*rxq_ctrl), 0,
-					     socket);
+		rxq_ctrl = rte_calloc_socket("RXQ", 1, sizeof(*rxq_ctrl) +
+					     desc * sizeof(struct rte_mbuf *),
+					     0, socket);
 		if (rxq_ctrl == NULL) {
 			ERROR("%p: unable to allocate queue index %u",
 			      (void *)dev, idx);
@@ -1240,7 +1288,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			return -ENOMEM;
 		}
 	}
-	ret = rxq_setup(dev, rxq_ctrl, desc, socket, conf, mp);
+	ret = rxq_ctrl_setup(dev, rxq_ctrl, desc, socket, conf, mp);
 	if (ret)
 		rte_free(rxq_ctrl);
 	else {
@@ -1249,7 +1297,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		      (void *)dev, (void *)rxq_ctrl);
 		(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
 		/* Update receive callback. */
-		dev->rx_pkt_burst = mlx5_rx_burst;
+		priv_select_rx_function(priv);
 	}
 	priv_unlock(priv);
 	return -ret;
@@ -1275,12 +1323,12 @@ mlx5_rx_queue_release(void *dpdk_rxq)
 	if (rxq == NULL)
 		return;
 	rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
-	priv = rxq->priv;
+	priv = rxq_ctrl->priv;
 	priv_lock(priv);
 	for (i = 0; (i != priv->rxqs_n); ++i)
 		if ((*priv->rxqs)[i] == rxq) {
 			DEBUG("%p: removing RX queue %p from list",
-			      (void *)priv->dev, (void *)rxq);
+			      (void *)priv->dev, (void *)rxq_ctrl);
 			(*priv->rxqs)[i] = NULL;
 			break;
 		}
@@ -1310,7 +1358,8 @@ mlx5_rx_burst_secondary_setup(void *dpdk_rxq, struct rte_mbuf **pkts,
 		   uint16_t pkts_n)
 {
 	struct rxq *rxq = dpdk_rxq;
-	struct priv *priv = mlx5_secondary_data_setup(rxq->priv);
+	struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+	struct priv *priv = mlx5_secondary_data_setup(rxq_ctrl->priv);
 	struct priv *primary_priv;
 	unsigned int index;
 
@@ -1328,3 +1377,113 @@ mlx5_rx_burst_secondary_setup(void *dpdk_rxq, struct rte_mbuf **pkts,
 	rxq = (*priv->rxqs)[index];
 	return priv->dev->rx_pkt_burst(rxq, pkts, pkts_n);
 }
+
+/**
+ * Fill epoll fd list for rxq interrupts.
+ *
+ * @param priv
+ *   Private structure.
+ *
+ * @return
+ *   0 on success, negative on failure.
+ */
+int
+priv_intr_efd_enable(struct priv *priv)
+{
+	unsigned int i;
+	unsigned int rxqs_n = priv->rxqs_n;
+	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+	struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+
+	if (n == 0)
+		return 0;
+	if (n < rxqs_n) {
+		WARN("rxqs num is larger than EAL max interrupt vector "
+		     "%u > %u unable to supprt rxq interrupts",
+		     rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+		return -EINVAL;
+	}
+	intr_handle->type = RTE_INTR_HANDLE_EXT;
+	for (i = 0; i != n; ++i) {
+		struct rxq *rxq = (*priv->rxqs)[i];
+		struct rxq_ctrl *rxq_ctrl =
+			container_of(rxq, struct rxq_ctrl, rxq);
+		int fd = rxq_ctrl->channel->fd;
+		int flags;
+		int rc;
+
+		flags = fcntl(fd, F_GETFL);
+		rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
+		if (rc < 0) {
+			WARN("failed to change rxq interrupt file "
+			     "descriptor %d for queue index %d", fd, i);
+			return -1;
+		}
+		intr_handle->efds[i] = fd;
+	}
+	intr_handle->nb_efd = n;
+	return 0;
+}
+
+/**
+ * Clean epoll fd list for rxq interrupts.
+ *
+ * @param priv
+ *   Private structure.
+ */
+void
+priv_intr_efd_disable(struct priv *priv)
+{
+	struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+
+	rte_intr_free_epoll_fd(intr_handle);
+}
+
+/**
+ * Create and init interrupt vector array.
+ *
+ * @param priv
+ *   Private structure.
+ *
+ * @return
+ *   0 on success, negative on failure.
+ */
+int
+priv_create_intr_vec(struct priv *priv)
+{
+	unsigned int rxqs_n = priv->rxqs_n;
+	unsigned int i;
+	struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+
+	if (rxqs_n == 0)
+		return 0;
+	intr_handle->intr_vec = (int *)
+		rte_malloc("intr_vec", rxqs_n * sizeof(int), 0);
+	if (intr_handle->intr_vec == NULL) {
+		WARN("Failed to allocate memory for intr_vec "
+		     "rxq interrupt will not be supported");
+		return -ENOMEM;
+	}
+	for (i = 0; i != rxqs_n; ++i) {
+		/* 1:1 mapping between rxq and interrupt. */
+		intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
+	}
+	return 0;
+}
+
+/**
+ * Destroy init interrupt vector array.
+ *
+ * @param priv
+ *   Private structure.
+ *
+ * @return
+ *   0 on success, negative on failure.
+ */
+void
+priv_destroy_intr_vec(struct priv *priv)
+{
+	struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+
+	rte_free(intr_handle->intr_vec);
+}
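
A minimal standalone sketch of the scatter/gather sizing rule introduced above in rxq_ctrl_setup(): the per-packet SGE count is rounded up to a power of two so that "desc" descriptors split evenly into WRs of (1 << sges_n) segments. This program is illustrative only and not part of the patch; log2above() is reimplemented locally, and the headroom, mbuf data room and maximum Rx packet length are hypothetical example values.

/*
 * Illustrative sketch (not part of the patch): how the driver derives the
 * number of SGEs per packet when scattered Rx is enabled.
 */
#include <stdio.h>

/* Smallest n such that (1 << n) >= v, mirroring the driver's log2above(). */
static unsigned int
log2above(unsigned int v)
{
	unsigned int l;
	unsigned int r;

	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
		r |= (v & 1);
	return l + r;
}

int
main(void)
{
	const unsigned int headroom = 128;        /* e.g. RTE_PKTMBUF_HEADROOM */
	const unsigned int mb_len = 2176;         /* e.g. mbuf data room size */
	const unsigned int max_rx_pkt_len = 9000; /* e.g. jumbo frame */
	unsigned int size = headroom + max_rx_pkt_len;
	unsigned int sges_n = log2above((size / mb_len) + !!(size % mb_len));
	unsigned int covered = mb_len * (1u << sges_n) - headroom;

	printf("SGEs per packet: %u\n", 1u << sges_n);
	printf("largest packet covered: %u bytes (requested %u)\n",
	       covered, max_rx_pkt_len);
	return covered < max_rx_pkt_len ? 1 : 0;
}

With these example numbers a 9000-byte frame needs five mbufs, which rounds up to eight SGEs per packet; this rounding is also why the patch requires the descriptor count to be a multiple of 1 << sges_n and a power of two.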