diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index b87eecc092..a1fdeef2a9 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -93,7 +93,6 @@ mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
 
 /**
  * Check whether Multi-Packet RQ is enabled for the device.
- * MPRQ can be enabled explicitly, or implicitly by enabling LRO.
  *
  * @param dev
  *   Pointer to Ethernet device.
@@ -124,21 +123,6 @@ mlx5_mprq_enabled(struct rte_eth_dev *dev)
 	return n == priv->rxqs_n;
 }
 
-/**
- * Check whether LRO is supported and enabled for the device.
- *
- * @param dev
- *   Pointer to Ethernet device.
- *
- * @return
- *   0 if disabled, 1 if enabled.
- */
-inline int
-mlx5_lro_on(struct rte_eth_dev *dev)
-{
-	return (MLX5_LRO_SUPPORTED(dev) && MLX5_LRO_ENABLED(dev));
-}
-
 /**
  * Allocate RX queue elements for Multi-Packet RQ.
  *
@@ -395,6 +379,8 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 			     DEV_RX_OFFLOAD_TCP_CKSUM);
 	if (config->hw_vlan_strip)
 		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+	if (MLX5_LRO_SUPPORTED(dev))
+		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
 	return offloads;
 }
 
@@ -402,19 +388,14 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 /**
  * Returns the per-port supported offloads.
  *
- * @param dev
- *   Pointer to Ethernet device.
- *
  * @return
  *   Supported Rx offloads.
  */
 uint64_t
-mlx5_get_rx_port_offloads(struct rte_eth_dev *dev)
+mlx5_get_rx_port_offloads(void)
 {
 	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
 
-	if (MLX5_LRO_SUPPORTED(dev))
-		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
 	return offloads;
 }
 
@@ -573,8 +554,10 @@ rxq_release_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
 		rte_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
 		rxq_ctrl->rxq.wqes = NULL;
 	}
-	if (rxq_ctrl->wq_umem)
+	if (rxq_ctrl->wq_umem) {
 		mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
+		rxq_ctrl->wq_umem = NULL;
+	}
 }
 
 /**
@@ -888,7 +871,8 @@ mlx5_ibv_cq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
 	cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
 		.comp_mask = 0,
 	};
-	if (priv->config.cqe_comp && !rxq_data->hw_timestamp) {
+	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
+	    !rxq_data->lro) {
 		cq_attr.mlx5.comp_mask |=
 				MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
@@ -910,6 +894,10 @@ mlx5_ibv_cq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
 			"port %u Rx CQE compression is disabled for HW"
 			" timestamp",
 			dev->data->port_id);
+	} else if (priv->config.cqe_comp && rxq_data->lro) {
+		DRV_LOG(DEBUG,
+			"port %u Rx CQE compression is disabled for LRO",
+			dev->data->port_id);
 	}
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
 	if (priv->config.cqe_pad) {
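
The offload hunks above move TCP LRO from the port-level mask into the per-queue mask returned by mlx5_get_rx_queue_offloads(). For context, a minimal application-side sketch of how the capability would then be discovered; the helper name is hypothetical and the DPDK 19.08-era ethdev API is assumed, with error handling elided:

#include <rte_ethdev.h>

/* Hypothetical helper: does this port advertise per-queue TCP LRO? */
static int
port_has_queue_lro(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	/* With this patch, mlx5 reports TCP_LRO in the per-queue mask. */
	return !!(dev_info.rx_queue_offload_capa & DEV_RX_OFFLOAD_TCP_LRO);
}
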
@@ -1088,7 +1076,7 @@ mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
 	struct mlx5_rxq_ctrl *rxq_ctrl =
 		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
 	struct mlx5_devx_create_rq_attr rq_attr;
-	uint32_t wqe_n = 1 << rxq_data->elts_n;
+	uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
 	uint32_t wq_size = 0;
 	uint32_t wqe_size = 0;
 	uint32_t log_wqe_size = 0;
@@ -1116,21 +1104,16 @@ mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
 				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
 		wqe_size = sizeof(struct mlx5_wqe_mprq);
 	} else {
-		int max_sge = 0;
-		int num_scatter = 0;
-
 		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
-		max_sge = 1 << rxq_data->sges_n;
-		num_scatter = RTE_MAX(max_sge, 1);
-		wqe_size = sizeof(struct mlx5_wqe_data_seg) * num_scatter;
+		wqe_size = sizeof(struct mlx5_wqe_data_seg);
 	}
-	log_wqe_size = log2above(wqe_size);
+	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
 	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
-	rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n;
+	rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
 	/* Calculate and allocate WQ memory space. */
 	wqe_size = 1 << log_wqe_size; /* round up power of two.*/
 	wq_size = wqe_n * wqe_size;
-	buf = rte_calloc_socket(__func__, 1, wq_size, RTE_CACHE_LINE_SIZE,
+	buf = rte_calloc_socket(__func__, 1, wq_size, MLX5_WQE_BUF_ALIGNMENT,
 				rxq_ctrl->socket);
 	if (!buf)
 		return NULL;
@@ -1242,6 +1225,7 @@ mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
 			goto error;
 		rxq_ctrl->dbr_offset = dbr_offset;
 		rxq_ctrl->dbr_umem_id = dbr_page->umem->umem_id;
+		rxq_ctrl->dbr_umem_id_valid = 1;
 		rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
 					       (uintptr_t)rxq_ctrl->dbr_offset);
 	}
@@ -1358,14 +1342,22 @@ mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
  * Callback function to initialize mbufs for Multi-Packet RQ.
  */
 static inline void
-mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg __rte_unused,
+mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
 		   void *_m, unsigned int i __rte_unused)
 {
 	struct mlx5_mprq_buf *buf = _m;
+	struct rte_mbuf_ext_shared_info *shinfo;
+	unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
+	unsigned int j;
 
 	memset(_m, 0, sizeof(*buf));
 	buf->mp = mp;
 	rte_atomic16_set(&buf->refcnt, 1);
+	for (j = 0; j != strd_n; ++j) {
+		shinfo = &buf->shinfos[j];
+		shinfo->free_cb = mlx5_mprq_buf_free_cb;
+		shinfo->fcb_opaque = buf;
+	}
 }
 
 /**
@@ -1460,7 +1452,8 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
 	}
 	assert(strd_num_n && strd_sz_n);
 	buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
-	obj_size = buf_len + sizeof(struct mlx5_mprq_buf);
+	obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
+		sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
 	/*
 	 * Received packets can be either memcpy'd or externally referenced. In
 	 * case that the packet is attached to an mbuf as an external buffer, as
@@ -1505,7 +1498,8 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
 	}
 	snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
 	mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
-				0, NULL, NULL, mlx5_mprq_buf_init, NULL,
+				0, NULL, NULL, mlx5_mprq_buf_init,
+				(void *)(uintptr_t)(1 << strd_num_n),
 				dev->device->numa_node, 0);
 	if (mp == NULL) {
 		DRV_LOG(ERR,
@@ -1530,6 +1524,42 @@ exit:
 	return 0;
 }
 
+#define MLX5_MAX_LRO_SIZE (UINT8_MAX * 256u)
+#define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
+					sizeof(struct rte_vlan_hdr) * 2 + \
+					sizeof(struct rte_ipv6_hdr)))
+#define MAX_TCP_OPTION_SIZE 40u
+#define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
+				 sizeof(struct rte_tcp_hdr) + \
+				 MAX_TCP_OPTION_SIZE))
+
+/**
+ * Adjust the maximum LRO message size.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param max_lro_size
+ *   The maximum size for LRO packet.
+ */
+static void
+mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint32_t max_lro_size)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	if (priv->config.hca_attr.lro_max_msg_sz_mode ==
+	    MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
+	    MLX5_MAX_TCP_HDR_OFFSET)
+		max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
+	max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
+	assert(max_lro_size >= 256u);
+	max_lro_size /= 256u;
+	if (priv->max_lro_msg_size)
+		priv->max_lro_msg_size =
+			RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
+	else
+		priv->max_lro_msg_size = max_lro_size;
+}
+
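
The TIR expects lro_max_msg_sz in 256-byte units, which is why the function above divides by 256 after clamping to MLX5_MAX_LRO_SIZE (UINT8_MAX * 256 = 65280 bytes). A standalone sketch of just that conversion, for illustration only (not driver code; macro names are local to the sketch):

#include <assert.h>
#include <stdint.h>

#define LRO_UNIT 256u
#define LRO_MAX_BYTES (UINT8_MAX * LRO_UNIT)	/* 65280 bytes */

/* Illustrative: byte limit -> 256-byte units, as the adjust function does. */
static uint32_t
lro_bytes_to_units(uint32_t max_lro_size)
{
	if (max_lro_size > LRO_MAX_BYTES)
		max_lro_size = LRO_MAX_BYTES;
	assert(max_lro_size >= LRO_UNIT);
	return max_lro_size / LRO_UNIT;	/* e.g. 9216 -> 36, 65280 -> 255 */
}
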
 /**
  * Create a DPDK Rx queue.
  *
@@ -1555,6 +1585,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
 	unsigned int mprq_stride_size;
 	struct mlx5_dev_config *config = &priv->config;
+	unsigned int strd_headroom_en;
 	/*
 	 * Always allocate extra slots, even if eventually
 	 * the vector Rx will not be used.
@@ -1563,8 +1594,24 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
 	uint64_t offloads = conf->offloads |
 			   dev->data->dev_conf.rxmode.offloads;
+	unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
 	const int mprq_en = mlx5_check_mprq_support(dev) > 0;
-
+	unsigned int max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+	unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
+							RTE_PKTMBUF_HEADROOM;
+	unsigned int max_lro_size = 0;
+	unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
+
+	if (non_scatter_min_mbuf_size > mb_len && !(offloads &
+						    DEV_RX_OFFLOAD_SCATTER)) {
+		DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
+			" configured and there is not enough mbuf space(%u)"
+			" to contain the maximum RX packet length(%u) with"
+			" head-room(%u)",
+			dev->data->port_id, idx, mb_len, max_rx_pkt_len,
+			RTE_PKTMBUF_HEADROOM);
+		rte_errno = ENOSPC;
+		return NULL;
+	}
 	tmpl = rte_calloc_socket("RXQ", 1,
 				 sizeof(*tmpl) +
 				 desc_n * sizeof(struct rte_mbuf *),
@@ -1581,6 +1628,21 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	tmpl->socket = socket;
 	if (dev->data->dev_conf.intr_conf.rxq)
 		tmpl->irq = 1;
+	/*
+	 * An LRO packet may consume all the stride memory, hence we cannot
+	 * guarantee head-room near the packet memory in the stride.
+	 * In this case scatter is, for sure, enabled and an empty mbuf may be
+	 * added at the start for the head-room.
+	 */
+	if (lro_on_queue && RTE_PKTMBUF_HEADROOM > 0 &&
+	    non_scatter_min_mbuf_size > mb_len) {
+		strd_headroom_en = 0;
+		mprq_stride_size = RTE_MIN(max_rx_pkt_len,
+					1u << config->mprq.max_stride_size_n);
+	} else {
+		strd_headroom_en = 1;
+		mprq_stride_size = non_scatter_min_mbuf_size;
+	}
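
The stride-headroom decision above can be read in isolation: headroom is kept inside the stride only when the worst-case packet plus headroom still fits in one mbuf, or when the queue is not doing LRO. A compact, self-contained restatement (a sketch; names are local to it, and RTE_PKTMBUF_HEADROOM is passed in as a plain parameter):

#include <stdbool.h>

/* Illustrative restatement of the strd_headroom_en choice; bytes throughout. */
static bool
stride_keeps_headroom(bool lro_on_queue, unsigned int headroom,
		      unsigned int max_rx_pkt_len, unsigned int mb_len)
{
	unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len + headroom;

	/* An LRO packet may fill its whole stride, so headroom cannot be
	 * reserved inside it; an empty head mbuf supplies it instead. */
	if (lro_on_queue && headroom > 0 &&
	    non_scatter_min_mbuf_size > mb_len)
		return false;
	return true;
}
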
 	/*
 	 * This Rx queue can be configured as a Multi-Packet RQ if all of the
 	 * following conditions are met:
@@ -1590,11 +1652,6 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	 * stride.
 	 * Otherwise, enable Rx scatter if necessary.
 	 */
-	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
-	mprq_stride_size =
-		dev->data->dev_conf.rxmode.max_rx_pkt_len +
-		sizeof(struct rte_mbuf_ext_shared_info) +
-		RTE_PKTMBUF_HEADROOM;
 	if (mprq_en &&
 	    desc > (1U << config->mprq.stride_num_n) &&
 	    mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
@@ -1606,50 +1663,49 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size),
 					      config->mprq.min_stride_size_n);
 		tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
-		tmpl->rxq.mprq_max_memcpy_len =
-			RTE_MIN(mb_len - RTE_PKTMBUF_HEADROOM,
+		tmpl->rxq.strd_headroom_en = strd_headroom_en;
+		tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
 				config->mprq.max_memcpy_len);
+		max_lro_size = RTE_MIN(max_rx_pkt_len,
+				       (1u << tmpl->rxq.strd_num_n) *
+				       (1u << tmpl->rxq.strd_sz_n));
 		DRV_LOG(DEBUG,
 			"port %u Rx queue %u: Multi-Packet RQ is enabled"
 			" strd_num_n = %u, strd_sz_n = %u",
 			dev->data->port_id, idx,
 			tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
-	} else if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
-		   (mb_len - RTE_PKTMBUF_HEADROOM)) {
+	} else if (max_rx_pkt_len <= first_mb_free_size) {
 		tmpl->rxq.sges_n = 0;
+		max_lro_size = max_rx_pkt_len;
 	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
-		unsigned int size =
-			RTE_PKTMBUF_HEADROOM +
-			dev->data->dev_conf.rxmode.max_rx_pkt_len;
+		unsigned int size = non_scatter_min_mbuf_size;
 		unsigned int sges_n;
 
+		if (lro_on_queue && first_mb_free_size <
+		    MLX5_MAX_LRO_HEADER_FIX) {
+			DRV_LOG(ERR, "Not enough space in the first segment(%u)"
+				" to include the max header size(%u) for LRO",
+				first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
+			rte_errno = ENOTSUP;
+			goto error;
+		}
 		/*
 		 * Determine the number of SGEs needed for a full packet
 		 * and round it to the next power of two.
 		 */
 		sges_n = log2above((size / mb_len) + !!(size % mb_len));
-		tmpl->rxq.sges_n = sges_n;
-		/* Make sure rxq.sges_n did not overflow. */
-		size = mb_len * (1 << tmpl->rxq.sges_n);
-		size -= RTE_PKTMBUF_HEADROOM;
-		if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
+		if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
 			DRV_LOG(ERR,
 				"port %u too many SGEs (%u) needed to handle"
-				" requested maximum packet size %u",
-				dev->data->port_id,
-				1 << sges_n,
-				dev->data->dev_conf.rxmode.max_rx_pkt_len);
-			rte_errno = EOVERFLOW;
+				" requested maximum packet size %u, the maximum"
+				" supported is %u", dev->data->port_id,
+				1 << sges_n, max_rx_pkt_len,
+				1u << MLX5_MAX_LOG_RQ_SEGS);
+			rte_errno = ENOTSUP;
 			goto error;
 		}
-	} else {
-		DRV_LOG(WARNING,
-			"port %u the requested maximum Rx packet size (%u) is"
-			" larger than a single mbuf (%u) and scattered mode has"
-			" not been requested",
-			dev->data->port_id,
-			dev->data->dev_conf.rxmode.max_rx_pkt_len,
-			mb_len - RTE_PKTMBUF_HEADROOM);
+		tmpl->rxq.sges_n = sges_n;
+		max_lro_size = max_rx_pkt_len;
 	}
 	if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
 		DRV_LOG(WARNING,
@@ -1671,6 +1727,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		rte_errno = EINVAL;
 		goto error;
 	}
+	mlx5_max_lro_msg_size_adjust(dev, max_lro_size);
 	/* Toggle RX checksum offload if hardware supports it. */
 	tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
 	tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
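
For the scattered path above, the SGE exponent is the number of mbufs needed for a worst-case packet, rounded up to a power of two and bounded by MLX5_MAX_LOG_RQ_SEGS. A small sketch of the same arithmetic with log2above() open-coded (illustrative only; the driver uses its own log2above()):

/* Illustrative: SGE exponent for a worst-case scattered packet.
 * Mirrors sges_n = log2above((size / mb_len) + !!(size % mb_len)). */
static unsigned int
sges_for_packet(unsigned int max_rx_pkt_len, unsigned int headroom,
		unsigned int mb_len)
{
	unsigned int size = headroom + max_rx_pkt_len;
	unsigned int segs = size / mb_len + !!(size % mb_len);
	unsigned int n = 0;

	while ((1u << n) < segs)	/* open-coded log2above() */
		n++;
	/* e.g. a 9000B packet + 128B headroom with 2048B mbufs:
	 * segs = 5, n = 3, i.e. 8 SGEs per WQE. */
	return n;
}
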
@@ -1678,13 +1735,14 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
 	/* By default, FCS (CRC) is stripped by hardware. */
 	tmpl->rxq.crc_present = 0;
+	tmpl->rxq.lro = lro_on_queue;
 	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
 		if (config->hw_fcs_strip) {
 			/*
 			 * RQs used for LRO-enabled TIRs should not be
 			 * configured to scatter the FCS.
 			 */
-			if (mlx5_lro_on(dev))
+			if (lro_on_queue)
 				DRV_LOG(WARNING,
 					"port %u CRC stripping has been "
 					"disabled but will still be performed "
@@ -1780,8 +1838,9 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
 	if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
 		rxq_ctrl->obj = NULL;
 	if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
-		claim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id,
-					    rxq_ctrl->dbr_offset));
+		if (rxq_ctrl->dbr_umem_id_valid)
+			claim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id,
+						    rxq_ctrl->dbr_offset));
 		mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
 		LIST_REMOVE(rxq_ctrl, next);
 		rte_free(rxq_ctrl);
@@ -2030,8 +2089,6 @@ mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
  *   Number of queues.
  * @param tunnel
  *   Tunnel type.
- * @param lro
- *   Flow rule is relevant for LRO, i.e. contains IPv4/IPv6 and TCP.
  *
  * @return
  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
@@ -2041,7 +2098,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 	      const uint8_t *rss_key, uint32_t rss_key_len,
 	      uint64_t hash_fields,
 	      const uint16_t *queues, uint32_t queues_n,
-	      int tunnel __rte_unused, int lro)
+	      int tunnel __rte_unused)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq;
@@ -2136,7 +2193,16 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 		}
 	} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
 		struct mlx5_devx_tir_attr tir_attr;
-
+		uint32_t i;
+		uint32_t lro = 1;
+
+		/* Enable TIR LRO only if all the queues were configured for it. */
+		for (i = 0; i < queues_n; ++i) {
+			if (!(*priv->rxqs)[queues[i]]->lro) {
+				lro = 0;
+				break;
+			}
+		}
 		memset(&tir_attr, 0, sizeof(tir_attr));
 		tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
 		tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
@@ -2151,8 +2217,10 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 		if (lro) {
 			tir_attr.lro_timeout_period_usecs =
 						priv->config.lro.timeout;
-			tir_attr.lro_max_msg_sz = 0xff;
-			tir_attr.lro_enable_mask = lro;
+			tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
+			tir_attr.lro_enable_mask =
+					MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+					MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
 		}
 		tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
 		if (!tir) {
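
Putting the pieces together, a queue ends up LRO-capable when the application requests DEV_RX_OFFLOAD_TCP_LRO and the TIR is then created over queues that all have rxq.lro set. A hypothetical application-side configuration that would exercise this path (a sketch only; it assumes the DPDK 19.08-era API, an already-created mempool mp, a single Rx-only queue for brevity, and a real application would also bound max_rx_pkt_len by dev_info and check every return code):

#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
setup_lro_rx_port(uint16_t port_id, struct rte_mempool *mp)
{
	struct rte_eth_conf conf = { 0 };
	int ret;

	/* LRO aggregates into strides or SGE chains, so request scatter and
	 * a jumbo max_rx_pkt_len; both bound max_lro_size in mlx5_rxq_new(). */
	conf.rxmode.offloads = DEV_RX_OFFLOAD_TCP_LRO |
			       DEV_RX_OFFLOAD_SCATTER |
			       DEV_RX_OFFLOAD_JUMBO_FRAME;
	conf.rxmode.max_rx_pkt_len = 9000;
	ret = rte_eth_dev_configure(port_id, 1 /* Rx */, 0 /* Tx */, &conf);
	if (ret < 0)
		return ret;
	/* The queue inherits the port-level offloads here; per-queue TCP_LRO
	 * could equally be passed via struct rte_eth_rxconf.offloads. */
	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id),
				     NULL, mp);
	if (ret < 0)
		return ret;
	return rte_eth_dev_start(port_id);
}
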