net/mlx5: fix parameters defaults
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 5542193..3fd22cb 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -695,14 +695,14 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
                         DEV_TX_OFFLOAD_TCP_CKSUM);
        if (priv->tso)
                info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+       if (priv->tunnel_en)
+               info->tx_offload_capa |= (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+                                         DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+                                         DEV_TX_OFFLOAD_GRE_TNL_TSO);
        if (priv_get_ifname(priv, &ifname) == 0)
                info->if_index = if_nametoindex(ifname);
-       /* FIXME: RETA update/query API expects the callee to know the size of
-        * the indirection table, for this PMD the size varies depending on
-        * the number of RX queues, it becomes impossible to find the correct
-        * size if it is not fixed.
-        * The API should be updated to solve this problem. */
-       info->reta_size = priv->ind_table_max_size;
+       info->reta_size = priv->reta_idx_n ?
+               priv->reta_idx_n : priv->ind_table_max_size;
        info->hash_key_size = ((*priv->rss_conf) ?
                               (*priv->rss_conf)[0]->rss_key_len :
                               0);
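
Note on this hunk: the PMD now also advertises outer IPv4 checksum and VXLAN/GRE TSO when tunnel support is enabled, and dev_infos_get() reports the RETA size currently in use (priv->reta_idx_n), falling back to the device maximum only when no indirection table has been configured yet. Below is a minimal sketch of how an application might consume the reported size to query the table; query_reta(), port_id and MAX_RETA_SIZE are invented for the sketch, while the ethdev calls are the standard rte_eth_dev_info_get()/rte_eth_dev_rss_reta_query() pair of this DPDK era (uint8_t port ids).

#include <string.h>
#include <rte_common.h>
#include <rte_ethdev.h>

/* MAX_RETA_SIZE is an assumption for this sketch; the real bound is
 * whatever dev_info.reta_size reports. */
#define MAX_RETA_SIZE 512

static int
query_reta(uint8_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rss_reta_entry64 reta_conf[MAX_RETA_SIZE / RTE_RETA_GROUP_SIZE];
	uint16_t reta_size;
	unsigned int i;

	rte_eth_dev_info_get(port_id, &dev_info);
	reta_size = RTE_MIN(dev_info.reta_size, (uint16_t)MAX_RETA_SIZE);
	memset(reta_conf, 0, sizeof(reta_conf));
	/* Flag every entry up to the size the PMD reports as wanted. */
	for (i = 0; i != reta_size; ++i)
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
			1ULL << (i % RTE_RETA_GROUP_SIZE);
	return rte_eth_dev_rss_reta_query(port_id, reta_conf, reta_size);
}
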
@@ -991,7 +991,6 @@ recover:
                struct rxq *rxq = (*priv->rxqs)[i];
                struct rxq_ctrl *rxq_ctrl =
                        container_of(rxq, struct rxq_ctrl, rxq);
-               int sp;
                unsigned int mb_len;
                unsigned int tmp;
 
@@ -999,10 +998,9 @@ recover:
                        continue;
                mb_len = rte_pktmbuf_data_room_size(rxq->mp);
                assert(mb_len >= RTE_PKTMBUF_HEADROOM);
-               /* Toggle scattered support (sp) if necessary. */
-               sp = (max_frame_len > (mb_len - RTE_PKTMBUF_HEADROOM));
                /* Provide new values to rxq_setup(). */
-               dev->data->dev_conf.rxmode.jumbo_frame = sp;
+               dev->data->dev_conf.rxmode.jumbo_frame =
+                       (max_frame_len > ETHER_MAX_LEN);
                dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
                if (rehash)
                        ret = rxq_rehash(dev, rxq_ctrl);
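
Note on this hunk: jumbo_frame is now derived purely from the requested frame length against ETHER_MAX_LEN, while the decision to scatter received packets is left to the Rx queue setup path, which compares against the mbuf data room. A small illustrative sketch of the two now-independent conditions; the helper names are invented for the sketch.

#include <rte_ether.h>   /* ETHER_MAX_LEN in this DPDK era */
#include <rte_mbuf.h>    /* RTE_PKTMBUF_HEADROOM comes from the build config */

/* A frame is "jumbo" purely by its size. */
static inline int
frame_is_jumbo(uint32_t max_frame_len)
{
	return max_frame_len > ETHER_MAX_LEN;
}

/* Scattering is only needed when one mbuf cannot hold the frame. */
static inline int
frame_needs_scatter(uint32_t max_frame_len, uint32_t mb_len)
{
	return max_frame_len > (mb_len - RTE_PKTMBUF_HEADROOM);
}
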
@@ -1276,13 +1274,12 @@ mlx5_dev_link_status_handler(void *arg)
  *   Callback argument.
  */
 void
-mlx5_dev_interrupt_handler(struct rte_intr_handle *intr_handle, void *cb_arg)
+mlx5_dev_interrupt_handler(void *cb_arg)
 {
        struct rte_eth_dev *dev = cb_arg;
        struct priv *priv = dev->data->dev_private;
        int ret;
 
-       (void)intr_handle;
        priv_lock(priv);
        ret = priv_dev_link_status_handler(priv, dev);
        priv_unlock(priv);
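
Note on this hunk: the handler now matches the updated rte_intr_callback_fn prototype, which passes only the opaque callback argument; the interrupt handle is no longer part of the callback signature. Registration keeps its usual shape via rte_intr_callback_register() (declared in rte_interrupts.h); a sketch, assuming the priv->intr_handle field this driver already carries:

/* Registration sketch (error handling omitted); the callback receives
 * only the opaque argument, here the rte_eth_dev pointer. */
rte_intr_callback_register(&priv->intr_handle,
			   mlx5_dev_interrupt_handler, dev);
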
@@ -1586,7 +1583,11 @@ priv_select_tx_function(struct priv *priv)
 {
        priv->dev->tx_pkt_burst = mlx5_tx_burst;
        /* Select appropriate TX function. */
-       if (priv->mps && priv->txq_inline) {
+       if (priv->mps == MLX5_MPW_ENHANCED) {
+               priv->dev->tx_pkt_burst =
+                       mlx5_tx_burst_empw;
+               DEBUG("selected Enhanced MPW TX function");
+       } else if (priv->mps && priv->txq_inline) {
                priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
                DEBUG("selected MPW inline TX function");
        } else if (priv->mps) {