net/mlx5: support Rx queue count API
[dpdk.git] / drivers/net/mlx5/mlx5.c
index ec6a482..ed1fcfc 100644
  */
 #define MLX5_TXQS_MIN_INLINE "txqs_min_inline"
 
+/*
+ * Device parameter to configure the number of TX queues threshold for
+ * enabling vectorized Tx.
+ */
+#define MLX5_TXQS_MAX_VEC "txqs_max_vec"
+
 /* Device parameter to enable multi-packet send WQEs. */
 #define MLX5_TXQ_MPW_EN "txq_mpw_en"
 
@@ -393,6 +399,7 @@ const struct eth_dev_ops mlx5_dev_ops = {
        .filter_ctrl = mlx5_dev_filter_ctrl,
        .rx_descriptor_status = mlx5_rx_descriptor_status,
        .tx_descriptor_status = mlx5_tx_descriptor_status,
+       .rx_queue_count = mlx5_rx_queue_count,
        .rx_queue_intr_enable = mlx5_rx_intr_enable,
        .rx_queue_intr_disable = mlx5_rx_intr_disable,
        .is_removed = mlx5_is_removed,
@@ -496,6 +503,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
                config->txq_inline = tmp;
        } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
                config->txqs_inline = tmp;
+       } else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) {
+               config->txqs_vec = tmp;
        } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
                config->mps = !!tmp;
        } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
@@ -543,6 +552,7 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
                MLX5_RXQS_MIN_MPRQ,
                MLX5_TXQ_INLINE,
                MLX5_TXQS_MIN_INLINE,
+               MLX5_TXQS_MAX_VEC,
                MLX5_TXQ_MPW_EN,
                MLX5_TXQ_MPW_HDR_DSEG_EN,
                MLX5_TXQ_MAX_INLINE_LEN,
@@ -704,8 +714,8 @@ mlx5_uar_init_secondary(struct rte_eth_dev *dev)
  *   Backing DPDK device.
  * @param ibv_dev
  *   Verbs device.
- * @param vf
- *   If nonzero, enable VF-specific features.
+ * @param config
+ *   Device configuration parameters.
  * @param[in] switch_info
  *   Switch properties of Ethernet device.
  *
@@ -719,7 +729,7 @@ mlx5_uar_init_secondary(struct rte_eth_dev *dev)
 static struct rte_eth_dev *
 mlx5_dev_spawn(struct rte_device *dpdk_dev,
               struct ibv_device *ibv_dev,
-              int vf,
+              struct mlx5_dev_config config,
               const struct mlx5_switch_info *switch_info)
 {
        struct ibv_context *ctx;
@@ -727,24 +737,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        struct ibv_port_attr port_attr;
        struct ibv_pd *pd = NULL;
        struct mlx5dv_context dv_attr = { .comp_mask = 0 };
-       struct mlx5_dev_config config = {
-               .vf = !!vf,
-               .cqe_pad = 0,
-               .mps = MLX5_ARG_UNSET,
-               .tx_vec_en = 1,
-               .rx_vec_en = 1,
-               .mpw_hdr_dseg = 0,
-               .txq_inline = MLX5_ARG_UNSET,
-               .txqs_inline = MLX5_ARG_UNSET,
-               .inline_max_packet_sz = MLX5_ARG_UNSET,
-               .vf_nl_en = 1,
-               .mprq = {
-                       .enabled = 0,
-                       .stride_num_n = MLX5_MPRQ_STRIDE_NUM_N,
-                       .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
-                       .min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
-               },
-       };
        struct rte_eth_dev *eth_dev = NULL;
        struct priv *priv = NULL;
        int err = 0;
@@ -1176,7 +1168,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        eth_dev->dev_ops = &mlx5_dev_ops;
        /* Register MAC address. */
        claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
-       if (vf && config.vf_nl_en)
+       if (config.vf && config.vf_nl_en)
                mlx5_nl_mac_addr_sync(eth_dev);
        priv->tcf_context = mlx5_flow_tcf_context_create();
        if (!priv->tcf_context) {
@@ -1345,7 +1337,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 {
        struct ibv_device **ibv_list;
        unsigned int n = 0;
-       int vf;
+       struct mlx5_dev_config dev_config;
        int ret;
 
        assert(pci_drv == &mlx5_driver);
@@ -1443,21 +1435,46 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
         */
        if (n)
                qsort(list, n, sizeof(*list), mlx5_dev_spawn_data_cmp);
+       /* Default configuration. */
+       dev_config = (struct mlx5_dev_config){
+               .mps = MLX5_ARG_UNSET,
+               .tx_vec_en = 1,
+               .rx_vec_en = 1,
+               .txq_inline = MLX5_ARG_UNSET,
+               .txqs_inline = MLX5_ARG_UNSET,
+               .txqs_vec = MLX5_ARG_UNSET,
+               .inline_max_packet_sz = MLX5_ARG_UNSET,
+               .vf_nl_en = 1,
+               .mprq = {
+                       .enabled = 0, /* Disabled by default. */
+                       .stride_num_n = MLX5_MPRQ_STRIDE_NUM_N,
+                       .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
+                       .min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
+               },
+       };
+       /* Device-specific configuration. */
        switch (pci_dev->id.device_id) {
+       case PCI_DEVICE_ID_MELLANOX_CONNECTX5BF:
+               dev_config.txqs_vec = MLX5_VPMD_MAX_TXQS_BLUEFIELD;
+               break;
        case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
-               vf = 1;
+               dev_config.vf = 1;
                break;
        default:
-               vf = 0;
+               break;
        }
+       /* Set architecture-dependent default value if unset. */
+       if (dev_config.txqs_vec == MLX5_ARG_UNSET)
+               dev_config.txqs_vec = MLX5_VPMD_MAX_TXQS;
        for (i = 0; i != n; ++i) {
                uint32_t restore;
 
-               list[i].eth_dev = mlx5_dev_spawn
-                       (&pci_dev->device, list[i].ibv_dev, vf, &list[i].info);
+               list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device,
+                                                list[i].ibv_dev, dev_config,
+                                                &list[i].info);
                if (!list[i].eth_dev) {
                        if (rte_errno != EBUSY && rte_errno != EEXIST)
                                break;