const uint8_t use_app_rss_key =
!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
int ret = 0;
- unsigned int lro_on = mlx5_lro_on(dev);
if (use_app_rss_key &&
(dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
j = 0;
}
}
- if (lro_on && priv->config.cqe_comp) {
- /* CQE compressing is not supported for LRO CQEs. */
- DRV_LOG(WARNING, "Rx CQE compression isn't supported with LRO");
- priv->config.cqe_comp = 0;
- }
ret = mlx5_proc_priv_init(dev);
if (ret)
return ret;
info->max_tx_queues = max;
info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES;
info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev);
- info->rx_offload_capa = (mlx5_get_rx_port_offloads(dev) |
+ info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
info->rx_queue_offload_capa);
info->tx_offload_capa = mlx5_get_tx_port_offloads(dev);
info->if_index = mlx5_ifindex(dev);
return n == priv->rxqs_n;
}
-/**
- * Check whether LRO is supported and enabled for the device.
- *
- * @param dev
- * Pointer to Ethernet device.
- *
- * @return
- * 0 if disabled, 1 if enabled.
- */
-inline int
-mlx5_lro_on(struct rte_eth_dev *dev)
-{
- return (MLX5_LRO_SUPPORTED(dev) && MLX5_LRO_ENABLED(dev));
-}
-
/**
* Allocate RX queue elements for Multi-Packet RQ.
*
DEV_RX_OFFLOAD_TCP_CKSUM);
if (config->hw_vlan_strip)
offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ if (MLX5_LRO_SUPPORTED(dev))
+ offloads |= DEV_RX_OFFLOAD_TCP_LRO;
return offloads;
}
/**
* Returns the per-port supported offloads.
*
- * @param dev
- * Pointer to Ethernet device.
- *
* @return
* Supported Rx offloads.
*/
uint64_t
-mlx5_get_rx_port_offloads(struct rte_eth_dev *dev)
+mlx5_get_rx_port_offloads(void)
{
uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
- if (MLX5_LRO_SUPPORTED(dev))
- offloads |= DEV_RX_OFFLOAD_TCP_LRO;
return offloads;
}
cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
.comp_mask = 0,
};
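+ /* CQE compression is not supported for LRO CQEs. */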
- if (priv->config.cqe_comp && !rxq_data->hw_timestamp) {
+ if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
+ !rxq_data->lro) {
cq_attr.mlx5.comp_mask |=
MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
"port %u Rx CQE compression is disabled for HW"
" timestamp",
dev->data->port_id);
+ } else if (priv->config.cqe_comp && rxq_data->lro) {
+ DRV_LOG(DEBUG,
+ "port %u Rx CQE compression is disabled for LRO",
+ dev->data->port_id);
}
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
if (priv->config.cqe_pad) {
desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
uint64_t offloads = conf->offloads |
dev->data->dev_conf.rxmode.offloads;
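+ /* LRO is now controlled per Rx queue via the DEV_RX_OFFLOAD_TCP_LRO offload flag. */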
+ unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
const int mprq_en = mlx5_check_mprq_support(dev) > 0;
unsigned int max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
* In this case scatter is, for sure, enabled and an empty mbuf may be
* added in the start for the head-room.
*/
- if (mlx5_lro_on(dev) && RTE_PKTMBUF_HEADROOM > 0 &&
+ if (lro_on_queue && RTE_PKTMBUF_HEADROOM > 0 &&
non_scatter_min_mbuf_size > mb_len) {
strd_headroom_en = 0;
mprq_stride_size = RTE_MIN(max_rx_pkt_len,
unsigned int size = non_scatter_min_mbuf_size;
unsigned int sges_n;
- if (mlx5_lro_on(dev) && first_mb_free_size <
+ if (lro_on_queue && first_mb_free_size <
MLX5_MAX_LRO_HEADER_FIX) {
DRV_LOG(ERR, "Not enough space in the first segment(%u)"
" to include the max header size(%u) for LRO",
tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
/* By default, FCS (CRC) is stripped by hardware. */
tmpl->rxq.crc_present = 0;
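+ /* Remember the LRO setting for this queue; it is rechecked when creating the CQ and the TIR. */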
+ tmpl->rxq.lro = lro_on_queue;
if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
if (config->hw_fcs_strip) {
/*
* RQs used for LRO-enabled TIRs should not be
* configured to scatter the FCS.
*/
- if (mlx5_lro_on(dev))
+ if (lro_on_queue)
DRV_LOG(WARNING,
"port %u CRC stripping has been "
"disabled but will still be performed "
}
} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
struct mlx5_devx_tir_attr tir_attr;
-
+ uint32_t i;
+ uint32_t lro = 1;
+
+ /* Enable TIR LRO only if all the queues were configured for it. */
+ for (i = 0; i < queues_n; ++i) {
+ if (!(*priv->rxqs)[queues[i]]->lro) {
+ lro = 0;
+ break;
+ }
+ }
memset(&tir_attr, 0, sizeof(tir_attr));
tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
if (dev->data->dev_conf.lpbk_mode)
tir_attr.self_lb_block =
MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
- if (mlx5_lro_on(dev)) {
+ if (lro) {
tir_attr.lro_timeout_period_usecs =
priv->config.lro.timeout;
tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
unsigned int strd_shift_en:1; /* Enable 2bytes shift on a stride. */
unsigned int err_state:2; /* enum mlx5_rxq_err_state. */
unsigned int strd_headroom_en:1; /* Enable mbuf headroom in MPRQ. */
- unsigned int :2; /* Remaining bits. */
+ unsigned int lro:1; /* Enable LRO. */
+ unsigned int :1; /* Remaining bits. */
volatile uint32_t *rq_db;
volatile uint32_t *cq_db;
uint16_t port_id;
int mlx5_hrxq_verify(struct rte_eth_dev *dev);
struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);
void mlx5_hrxq_drop_release(struct rte_eth_dev *dev);
-uint64_t mlx5_get_rx_port_offloads(struct rte_eth_dev *dev);
+uint64_t mlx5_get_rx_port_offloads(void);
uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
-int mlx5_lro_on(struct rte_eth_dev *dev);
/* mlx5_txq.c */