X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxq.c;h=a1fdeef2a9b2063e60ff95a914a512bd6e2e835f;hb=17ed314c6c0b269c92941e41aee3622f5726e1fa;hp=bd26ee2fe7ee43c045d3d22d070a4a3f4d8fb742;hpb=50c00baff763532a1397958baa5ac0521c71ed0e;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index bd26ee2fe7..a1fdeef2a9 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -93,7 +93,6 @@ mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
 
 /**
  * Check whether Multi-Packet RQ is enabled for the device.
- * MPRQ can be enabled explicitly, or implicitly by enabling LRO.
  *
  * @param dev
  *   Pointer to Ethernet device.
@@ -124,21 +123,6 @@ mlx5_mprq_enabled(struct rte_eth_dev *dev)
 	return n == priv->rxqs_n;
 }
 
-/**
- * Check whether LRO is supported and enabled for the device.
- *
- * @param dev
- *   Pointer to Ethernet device.
- *
- * @return
- *   0 if disabled, 1 if enabled.
- */
-inline int
-mlx5_lro_on(struct rte_eth_dev *dev)
-{
-	return (MLX5_LRO_SUPPORTED(dev) && MLX5_LRO_ENABLED(dev));
-}
-
 /**
  * Allocate RX queue elements for Multi-Packet RQ.
  *
@@ -395,6 +379,8 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 			     DEV_RX_OFFLOAD_TCP_CKSUM);
 	if (config->hw_vlan_strip)
 		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+	if (MLX5_LRO_SUPPORTED(dev))
+		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
 	return offloads;
 }
 
@@ -402,19 +388,14 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 /**
  * Returns the per-port supported offloads.
  *
- * @param dev
- *   Pointer to Ethernet device.
- *
  * @return
  *   Supported Rx offloads.
  */
 uint64_t
-mlx5_get_rx_port_offloads(struct rte_eth_dev *dev)
+mlx5_get_rx_port_offloads(void)
 {
 	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
 
-	if (MLX5_LRO_SUPPORTED(dev))
-		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
 	return offloads;
 }
 
@@ -890,7 +871,8 @@ mlx5_ibv_cq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
 	cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
 		.comp_mask = 0,
 	};
-	if (priv->config.cqe_comp && !rxq_data->hw_timestamp) {
+	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
+	    !rxq_data->lro) {
 		cq_attr.mlx5.comp_mask |=
 			MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
@@ -912,6 +894,10 @@ mlx5_ibv_cq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
 			"port %u Rx CQE compression is disabled for HW"
 			" timestamp",
 			dev->data->port_id);
+	} else if (priv->config.cqe_comp && rxq_data->lro) {
+		DRV_LOG(DEBUG,
+			"port %u Rx CQE compression is disabled for LRO",
+			dev->data->port_id);
 	}
 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
 	if (priv->config.cqe_pad) {
@@ -1090,7 +1076,7 @@ mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
 	struct mlx5_rxq_ctrl *rxq_ctrl =
 		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
 	struct mlx5_devx_create_rq_attr rq_attr;
-	uint32_t wqe_n = 1 << rxq_data->elts_n;
+	uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
 	uint32_t wq_size = 0;
 	uint32_t wqe_size = 0;
 	uint32_t log_wqe_size = 0;
@@ -1118,21 +1104,16 @@ mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
 				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
 		wqe_size = sizeof(struct mlx5_wqe_mprq);
 	} else {
-		int max_sge = 0;
-		int num_scatter = 0;
-
 		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
-		max_sge = 1 << rxq_data->sges_n;
-		num_scatter = RTE_MAX(max_sge, 1);
-		wqe_size = sizeof(struct mlx5_wqe_data_seg) * num_scatter;
+		wqe_size = sizeof(struct mlx5_wqe_data_seg);
 	}
-	log_wqe_size = log2above(wqe_size);
+	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
 	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
-	rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n;
+	rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
 	/* Calculate and allocate WQ memory space. */
 	wqe_size = 1 << log_wqe_size; /* round up power of two.*/
 	wq_size = wqe_n * wqe_size;
-	buf = rte_calloc_socket(__func__, 1, wq_size, RTE_CACHE_LINE_SIZE,
+	buf = rte_calloc_socket(__func__, 1, wq_size, MLX5_WQE_BUF_ALIGNMENT,
 				rxq_ctrl->socket);
 	if (!buf)
 		return NULL;
@@ -1547,6 +1528,11 @@ exit:
 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
 					sizeof(struct rte_vlan_hdr) * 2 + \
 					sizeof(struct rte_ipv6_hdr)))
+#define MAX_TCP_OPTION_SIZE 40u
+#define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
+				 sizeof(struct rte_tcp_hdr) + \
+				 MAX_TCP_OPTION_SIZE))
+
 /**
  * Adjust the maximum LRO massage size.
  *
@@ -1599,12 +1585,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
 	unsigned int mprq_stride_size;
 	struct mlx5_dev_config *config = &priv->config;
-	/*
-	 * LRO packet may consume all the stride memory, hence we cannot
-	 * guaranty head-room. A new striding RQ feature may be added in CX6 DX
-	 * to allow head-room and tail-room for the LRO packets.
-	 */
-	unsigned int strd_headroom_en = mlx5_lro_on(dev) ? 0 : 1;
+	unsigned int strd_headroom_en;
 	/*
 	 * Always allocate extra slots, even if eventually
 	 * the vector Rx will not be used.
@@ -1613,10 +1594,13 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
 	uint64_t offloads = conf->offloads |
 			   dev->data->dev_conf.rxmode.offloads;
+	unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
 	const int mprq_en = mlx5_check_mprq_support(dev) > 0;
 	unsigned int max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
 	unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
 							RTE_PKTMBUF_HEADROOM;
+	unsigned int max_lro_size = 0;
+	unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
 
 	if (non_scatter_min_mbuf_size > mb_len && !(offloads &
 						    DEV_RX_OFFLOAD_SCATTER)) {
@@ -1644,6 +1628,21 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	tmpl->socket = socket;
 	if (dev->data->dev_conf.intr_conf.rxq)
 		tmpl->irq = 1;
+	/*
+	 * LRO packet may consume all the stride memory, hence we cannot
+	 * guaranty head-room near the packet memory in the stride.
+	 * In this case scatter is, for sure, enabled and an empty mbuf may be
+	 * added in the start for the head-room.
+	 */
+	if (lro_on_queue && RTE_PKTMBUF_HEADROOM > 0 &&
+	    non_scatter_min_mbuf_size > mb_len) {
+		strd_headroom_en = 0;
+		mprq_stride_size = RTE_MIN(max_rx_pkt_len,
+					1u << config->mprq.max_stride_size_n);
+	} else {
+		strd_headroom_en = 1;
+		mprq_stride_size = non_scatter_min_mbuf_size;
+	}
 	/*
 	 * This Rx queue can be configured as a Multi-Packet RQ if all of the
 	 * following conditions are met:
@@ -1653,8 +1652,6 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	 *    stride.
 	 *  Otherwise, enable Rx scatter if necessary.
 	 */
-	mprq_stride_size = max_rx_pkt_len + RTE_PKTMBUF_HEADROOM *
-				strd_headroom_en;
 	if (mprq_en &&
 	    desc > (1U << config->mprq.stride_num_n) &&
 	    mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
@@ -1667,40 +1664,48 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 					      config->mprq.min_stride_size_n);
 		tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
 		tmpl->rxq.strd_headroom_en = strd_headroom_en;
-		tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(mb_len -
-			    RTE_PKTMBUF_HEADROOM, config->mprq.max_memcpy_len);
-		mlx5_max_lro_msg_size_adjust(dev, RTE_MIN(max_rx_pkt_len,
-			(1u << tmpl->rxq.strd_num_n) * (1u << tmpl->rxq.strd_sz_n)));
+		tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
+					config->mprq.max_memcpy_len);
+		max_lro_size = RTE_MIN(max_rx_pkt_len,
+				       (1u << tmpl->rxq.strd_num_n) *
+				       (1u << tmpl->rxq.strd_sz_n));
 		DRV_LOG(DEBUG,
 			"port %u Rx queue %u: Multi-Packet RQ is enabled"
 			" strd_num_n = %u, strd_sz_n = %u",
 			dev->data->port_id, idx,
 			tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
-	} else if (max_rx_pkt_len <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
+	} else if (max_rx_pkt_len <= first_mb_free_size) {
 		tmpl->rxq.sges_n = 0;
+		max_lro_size = max_rx_pkt_len;
 	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
 		unsigned int size = non_scatter_min_mbuf_size;
 		unsigned int sges_n;
 
+		if (lro_on_queue && first_mb_free_size <
+		    MLX5_MAX_LRO_HEADER_FIX) {
+			DRV_LOG(ERR, "Not enough space in the first segment(%u)"
+				" to include the max header size(%u) for LRO",
+				first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
+			rte_errno = ENOTSUP;
+			goto error;
+		}
 		/*
 		 * Determine the number of SGEs needed for a full packet
 		 * and round it to the next power of two.
 		 */
 		sges_n = log2above((size / mb_len) + !!(size % mb_len));
-		tmpl->rxq.sges_n = sges_n;
-		/* Make sure rxq.sges_n did not overflow. */
-		size = mb_len * (1 << tmpl->rxq.sges_n);
-		size -= RTE_PKTMBUF_HEADROOM;
-		if (size < max_rx_pkt_len) {
+		if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
 			DRV_LOG(ERR,
 				"port %u too many SGEs (%u) needed to handle"
-				" requested maximum packet size %u",
-				dev->data->port_id,
-				1 << sges_n,
-				max_rx_pkt_len);
-			rte_errno = EOVERFLOW;
+				" requested maximum packet size %u, the maximum"
+				" supported are %u", dev->data->port_id,
+				1 << sges_n, max_rx_pkt_len,
+				1u << MLX5_MAX_LOG_RQ_SEGS);
+			rte_errno = ENOTSUP;
 			goto error;
 		}
+		tmpl->rxq.sges_n = sges_n;
+		max_lro_size = max_rx_pkt_len;
 	}
 	if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
 		DRV_LOG(WARNING,
@@ -1722,6 +1727,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		rte_errno = EINVAL;
 		goto error;
 	}
+	mlx5_max_lro_msg_size_adjust(dev, max_lro_size);
 	/* Toggle RX checksum offload if hardware supports it. */
 	tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
 	tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
@@ -1729,13 +1735,14 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
 	/* By default, FCS (CRC) is stripped by hardware. */
 	tmpl->rxq.crc_present = 0;
+	tmpl->rxq.lro = lro_on_queue;
 	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
 		if (config->hw_fcs_strip) {
 			/*
 			 * RQs used for LRO-enabled TIRs should not be
 			 * configured to scatter the FCS.
 			 */
-			if (mlx5_lro_on(dev))
+			if (lro_on_queue)
 				DRV_LOG(WARNING,
 					"port %u CRC stripping has been "
 					"disabled but will still be performed "
@@ -2082,8 +2089,6 @@ mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
  *   Number of queues.
  * @param tunnel
  *   Tunnel type.
- * @param lro
- *   Flow rule is relevant for LRO, i.e. contains IPv4/IPv6 and TCP.
 *
  * @return
  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
@@ -2093,7 +2098,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 	      const uint8_t *rss_key, uint32_t rss_key_len,
 	      uint64_t hash_fields,
 	      const uint16_t *queues, uint32_t queues_n,
-	      int tunnel __rte_unused, int lro)
+	      int tunnel __rte_unused)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq;
@@ -2188,7 +2193,16 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 		}
 	} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
 		struct mlx5_devx_tir_attr tir_attr;
-
+		uint32_t i;
+		uint32_t lro = 1;
+
+		/* Enable TIR LRO only if all the queues were configured for. */
+		for (i = 0; i < queues_n; ++i) {
+			if (!(*priv->rxqs)[queues[i]]->lro) {
+				lro = 0;
+				break;
+			}
+		}
 		memset(&tir_attr, 0, sizeof(tir_attr));
 		tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
 		tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
@@ -2204,7 +2218,9 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 			tir_attr.lro_timeout_period_usecs =
 						priv->config.lro.timeout;
 			tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
-			tir_attr.lro_enable_mask = lro;
+			tir_attr.lro_enable_mask =
+				MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+				MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
 		}
 		tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
 		if (!tir) {
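
The hunks above turn TCP LRO from a port-level offload into a per-queue Rx offload: mlx5_get_rx_queue_offloads() now advertises DEV_RX_OFFLOAD_TCP_LRO, the Rx queue keeps a per-queue lro flag, and the DevX TIR enables LRO only when every queue behind it carries the offload. As a reference point, below is a minimal sketch of how an application might request that per-queue offload through the standard ethdev calls (rte_eth_dev_info_get, rte_eth_dev_configure, rte_eth_rx_queue_setup); the port id, queue id, descriptor count, max_rx_pkt_len value and mempool are illustrative assumptions, not taken from this patch.

#include <errno.h>
#include <stdint.h>

#include <rte_ethdev.h>
#include <rte_mempool.h>

/*
 * Sketch: enable TCP LRO on Rx queue 0 of a port whose PMD reports the
 * offload per queue (as mlx5 does after this change). "mb_pool" is assumed
 * to have been created elsewhere with rte_pktmbuf_pool_create().
 */
static int
setup_lro_rxq(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = {
		.rxmode = {
			/* LRO may aggregate beyond one mbuf: allow scatter. */
			.offloads = DEV_RX_OFFLOAD_SCATTER |
				    DEV_RX_OFFLOAD_JUMBO_FRAME,
			.max_rx_pkt_len = 9000, /* illustrative value */
		},
	};
	struct rte_eth_rxconf rxq_conf;
	int ret;

	rte_eth_dev_info_get(port_id, &dev_info);
	/* TCP LRO is now reported in the per-queue capability mask. */
	if (!(dev_info.rx_queue_offload_capa & DEV_RX_OFFLOAD_TCP_LRO))
		return -ENOTSUP;
	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret < 0)
		return ret;
	/* Request LRO on this queue only; other queues may leave it off. */
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads |= DEV_RX_OFFLOAD_TCP_LRO;
	return rte_eth_rx_queue_setup(port_id, 0, 512,
				      rte_eth_dev_socket_id(port_id),
				      &rxq_conf, mb_pool);
}

Note that a hash Rx queue (TIR) spanning several queues gets LRO enabled only if all of them were configured with the offload, which is why the per-queue flag replaces the old device-level mlx5_lro_on() check removed by this patch.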