diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 92fbdab..e2a38d9 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -98,35 +98,42 @@ uint64_t
 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
-                            DEV_TX_OFFLOAD_VLAN_INSERT);
+       uint64_t offloads = (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+                            RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
        struct mlx5_dev_config *config = &priv->config;
 
        if (config->hw_csum)
-               offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
-                            DEV_TX_OFFLOAD_UDP_CKSUM |
-                            DEV_TX_OFFLOAD_TCP_CKSUM);
+               offloads |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+                            RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+                            RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
        if (config->tso)
-               offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+               offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
        if (config->tx_pp)
-               offloads |= DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP;
+               offloads |= RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP;
        if (config->swp) {
-               if (config->hw_csum)
-                       offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
-               if (config->tso)
-                       offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
-                                    DEV_TX_OFFLOAD_UDP_TNL_TSO);
+               if (config->swp & MLX5_SW_PARSING_CSUM_CAP)
+                       offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+               if (config->swp & MLX5_SW_PARSING_TSO_CAP)
+                       offloads |= (RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+                                    RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
        }
        if (config->tunnel_en) {
                if (config->hw_csum)
-                       offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
-               if (config->tso)
-                       offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-                                    DEV_TX_OFFLOAD_GRE_TNL_TSO |
-                                    DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+                       offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+               if (config->tso) {
+                       if (config->tunnel_en &
+                               MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)
+                               offloads |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
+                       if (config->tunnel_en &
+                               MLX5_TUNNELED_OFFLOADS_GRE_CAP)
+                               offloads |= RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO;
+                       if (config->tunnel_en &
+                               MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)
+                               offloads |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
+               }
        }
        if (!config->mprq.enabled)
-               offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+               offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
        return offloads;
 }
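
Note: with the capability split above, mlx5_get_tx_port_offloads() no
longer advertises every tunnel TSO flavor unconditionally, so applications
should check the reported capabilities before requesting one. A minimal
sketch against the public ethdev API; setup_tunnel_tso() and its
parameters are hypothetical:

    #include <rte_ethdev.h>

    /* Request VXLAN tunnel TSO only when the port advertises it. */
    static int
    setup_tunnel_tso(uint16_t port_id, struct rte_eth_conf *conf)
    {
            struct rte_eth_dev_info info;
            int ret = rte_eth_dev_info_get(port_id, &info);

            if (ret != 0)
                    return ret;
            if (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO)
                    conf->txmode.offloads |=
                            RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
            return 0;
    }
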
 
@@ -498,10 +505,10 @@ mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 static void
 txq_uar_ncattr_init(struct mlx5_txq_ctrl *txq_ctrl, size_t page_size)
 {
-       struct mlx5_priv *priv = txq_ctrl->priv;
+       struct mlx5_common_device *cdev = txq_ctrl->priv->sh->cdev;
        off_t cmd;
 
-       txq_ctrl->txq.db_heu = priv->config.dbnc == MLX5_TXDB_HEURISTIC;
+       txq_ctrl->txq.db_heu = cdev->config.dbnc == MLX5_TXDB_HEURISTIC;
        txq_ctrl->txq.db_nc = 0;
        /* Check the doorbell register mapping type. */
        cmd = txq_ctrl->uar_mmap_offset / page_size;
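
Note: only the source of the dbnc setting changes here (it moves from the
per-port private config to the shared mlx5_common_device); the
mapping-type decode itself is untouched. For illustration, a command
field packed into a UAR mmap offset can be extracted as below; the
UAR_CMD_* values are assumptions for the sketch, not the driver's actual
constants:

    #include <stddef.h>
    #include <sys/types.h>

    #define UAR_CMD_SHIFT 8    /* assumed bit position of the command */
    #define UAR_CMD_MASK  0xff /* assumed field width */

    /* Decode the mapping command encoded above the page-aligned offset. */
    static int
    uar_offset_to_cmd(off_t offset, size_t page_size)
    {
            off_t cmd = offset / page_size;

            return (int)((cmd >> UAR_CMD_SHIFT) & UAR_CMD_MASK);
    }
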
@@ -794,17 +801,17 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
        unsigned int inlen_mode; /* Minimal required Inline data. */
        unsigned int txqs_inline; /* Min Tx queues to enable inline. */
        uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
-       bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
-                                           DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-                                           DEV_TX_OFFLOAD_GRE_TNL_TSO |
-                                           DEV_TX_OFFLOAD_IP_TNL_TSO |
-                                           DEV_TX_OFFLOAD_UDP_TNL_TSO);
+       bool tso = txq_ctrl->txq.offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+                                           RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+                                           RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+                                           RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+                                           RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
        bool vlan_inline;
        unsigned int temp;
 
        txq_ctrl->txq.fast_free =
-               !!((txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
-                  !(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
+               !!((txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
+                  !(txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) &&
                   !config->mprq.enabled);
        if (config->txqs_inline == MLX5_ARG_UNSET)
                txqs_inline =
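
Note: fast_free stays mutually exclusive with multi-segment Tx and MPRQ
because the optimization frees all transmitted mbufs back to a single
mempool without inspecting segments or reference counts, which ethdev
requires the application to guarantee. A sketch of the application-side
opt-in, assuming the device exposes the flag per queue (otherwise it
belongs in the port-level txmode); setup_txq_fast_free() and its
parameters are hypothetical:

    #include <rte_ethdev.h>

    static int
    setup_txq_fast_free(uint16_t port_id, uint16_t nb_txd,
                        unsigned int socket)
    {
            struct rte_eth_dev_info info;
            struct rte_eth_txconf txconf;
            int ret = rte_eth_dev_info_get(port_id, &info);

            if (ret != 0)
                    return ret;
            txconf = info.default_txconf;
            if (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
                    txconf.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
            return rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket,
                                          &txconf);
    }
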
@@ -863,7 +870,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
         * tx_burst routine.
         */
        txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
-       vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
+       vlan_inline = (dev_txoff & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) &&
                      !config->hw_vlan_insert;
        /*
         * If there are few Tx queues it is prioritized
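
Note: vlan_inline selects software tag insertion (via inlined packet
data) when the HW cannot insert the tag itself; either way the per-packet
request from the application looks the same. A minimal sketch,
request_vlan() being hypothetical:

    #include <rte_mbuf.h>

    /* Ask the PMD to insert a VLAN tag on transmit; the PMD decides
     * between HW insertion and inlining the tag. */
    static void
    request_vlan(struct rte_mbuf *m, uint16_t vlan_tci)
    {
            m->ol_flags |= RTE_MBUF_F_TX_VLAN;
            m->vlan_tci = vlan_tci;
    }
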
@@ -971,11 +978,21 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
                                                    MLX5_MAX_TSO_HEADER);
                txq_ctrl->txq.tso_en = 1;
        }
-       txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
-       txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
-                                DEV_TX_OFFLOAD_UDP_TNL_TSO |
-                                DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
-                               txq_ctrl->txq.offloads) && config->swp;
+       if (((RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO & txq_ctrl->txq.offloads) &&
+           (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)) |
+          ((RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO & txq_ctrl->txq.offloads) &&
+           (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_GRE_CAP)) |
+          ((RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO & txq_ctrl->txq.offloads) &&
+           (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)) |
+          (config->swp & MLX5_SW_PARSING_TSO_CAP))
+               txq_ctrl->txq.tunnel_en = 1;
+       txq_ctrl->txq.swp_en = (((RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+                                 RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO) &
+                                 txq_ctrl->txq.offloads) && (config->swp &
+                                 MLX5_SW_PARSING_TSO_CAP)) |
+                               ((RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM &
+                                txq_ctrl->txq.offloads) && (config->swp &
+                                MLX5_SW_PARSING_CSUM_CAP));
 }
 
 /**
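
Note: the chained `|` over `&&` subexpressions above is well defined
because each `&&` evaluates to 0 or 1, but it reads awkwardly. Spelled
out, the new tunnel_en gating is equivalent to the sketch below;
txq_tunnel_tso_usable() is a hypothetical restatement, not the committed
code (pass txq_ctrl->txq.offloads, config->tunnel_en and config->swp):

    #include <stdbool.h>
    #include <stdint.h>

    static bool
    txq_tunnel_tso_usable(uint64_t offloads, uint32_t tunnel_en,
                          uint32_t swp)
    {
            bool vxlan = (offloads & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) &&
                         (tunnel_en & MLX5_TUNNELED_OFFLOADS_VXLAN_CAP);
            bool gre = (offloads & RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO) &&
                       (tunnel_en & MLX5_TUNNELED_OFFLOADS_GRE_CAP);
            bool geneve = (offloads & RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO) &&
                          (tunnel_en & MLX5_TUNNELED_OFFLOADS_GENEVE_CAP);
            bool swp_tso = (swp & MLX5_SW_PARSING_TSO_CAP) != 0;

            return vxlan || gre || geneve || swp_tso;
    }
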
@@ -1117,13 +1134,11 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                rte_errno = ENOMEM;
                return NULL;
        }
-       if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
-                              MLX5_MR_BTREE_CACHE_N, socket)) {
+       if (mlx5_mr_ctrl_init(&tmpl->txq.mr_ctrl,
+                             &priv->sh->cdev->mr_scache.dev_gen, socket)) {
                /* rte_errno is already set. */
                goto error;
        }
-       /* Save pointer of global generation number to check memory event. */
-       tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->share_cache.dev_gen;
        MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
        tmpl->txq.offloads = conf->offloads |
                             dev->data->dev_conf.txmode.offloads;
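
Note: mlx5_mr_ctrl_init() folds the btree setup and the dev_gen pointer
bookkeeping into one call against the shared MR cache of the common
device. Judging from the two removed lines, it presumably amounts to
roughly the following (a sketch, not the actual common-code
implementation):

    /* Inferred from the removed call sites; mr_ctrl_init_sketch() is
     * hypothetical. */
    static int
    mr_ctrl_init_sketch(struct mlx5_mr_ctrl *mr_ctrl,
                        uint32_t *dev_gen_ptr, int socket)
    {
            /* Save the global generation number to detect memory events. */
            mr_ctrl->dev_gen_ptr = dev_gen_ptr;
            return mlx5_mr_btree_init(&mr_ctrl->cache_bh,
                                      MLX5_MR_BTREE_CACHE_N, socket);
    }
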