net/bnxt: support thor controller
author Lance Richardson <lance.richardson@broadcom.com>
Sun, 2 Jun 2019 17:42:44 +0000 (13:42 -0400)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Thu, 13 Jun 2019 14:54:30 +0000 (23:54 +0900)
This commit adds support to the bnxt PMD for devices
based on the BCM57508 "thor" Ethernet controller.

Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Signed-off-by: Lance Richardson <lance.richardson@broadcom.com>
15 files changed:
doc/guides/nics/bnxt.rst
doc/guides/rel_notes/release_19_08.rst
drivers/net/bnxt/bnxt.h
drivers/net/bnxt/bnxt_cpr.h
drivers/net/bnxt/bnxt_ethdev.c
drivers/net/bnxt/bnxt_hwrm.c
drivers/net/bnxt/bnxt_hwrm.h
drivers/net/bnxt/bnxt_ring.c
drivers/net/bnxt/bnxt_ring.h
drivers/net/bnxt/bnxt_rxq.c
drivers/net/bnxt/bnxt_rxq.h
drivers/net/bnxt/bnxt_rxr.c
drivers/net/bnxt/bnxt_txq.c
drivers/net/bnxt/bnxt_txq.h
drivers/net/bnxt/bnxt_txr.c

diff --git a/doc/guides/nics/bnxt.rst b/doc/guides/nics/bnxt.rst
index 697b97e..d88e021 100644
@@ -7,10 +7,10 @@ BNXT Poll Mode Driver
 The bnxt poll mode library (**librte_pmd_bnxt**) implements support for:
 
   * **Broadcom NetXtreme-C®/NetXtreme-E®/NetXtreme-S®
-    BCM5730X / BCM574XX / BCM58000 family of Ethernet Network Controllers**
+    BCM5730X / BCM574XX / BCM58000 / BCM575XX family of Ethernet Network Controllers**
 
-    These adapters support Standards compliant 10/25/50/100Gbps 30MPPS
-    full-duplex throughput.
+    Adapters based on this family of controllers support standards-compliant Ethernet
+    operation with link speeds of 10/25/50/100/200Gbps.
 
     Information about the NetXtreme family of adapters can be found in the
     `NetXtreme® Brand section
diff --git a/doc/guides/rel_notes/release_19_08.rst b/doc/guides/rel_notes/release_19_08.rst
index 7b36d64..6d518b3 100644
@@ -62,6 +62,12 @@ New Features
   * Added support for SSE vector mode
   * Updated HWRM API to version 1.10.0.74
 
+* **Added support for Broadcom NetXtreme-E BCM57500 Ethernet controllers.**
+
+  Added support to the bnxt PMD for the BCM57500 (a.k.a. "Thor") family
+  of Ethernet controllers. These controllers support link speeds up to
+  200Gbps, 50G PAM-4, and PCIe 4.0.
+
 * **Added memif PMD.**
 
   Added the new Shared Memory Packet Interface (``memif``) PMD.
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 9bb8d82..641790f 100644
@@ -239,6 +239,93 @@ struct bnxt_coal {
        uint16_t                        cmpl_aggr_dma_tmr_during_int;
 };
 
+/* 64-bit doorbell */
+#define DBR_XID_SFT                            32
+#define DBR_PATH_L2                            (0x1ULL << 56)
+#define DBR_TYPE_SQ                            (0x0ULL << 60)
+#define DBR_TYPE_SRQ                           (0x2ULL << 60)
+#define DBR_TYPE_CQ                            (0x4ULL << 60)
+#define DBR_TYPE_NQ                            (0xaULL << 60)
+
+#define BNXT_RSS_TBL_SIZE_THOR         512
+#define BNXT_RSS_ENTRIES_PER_CTX_THOR  64
+#define BNXT_MAX_RSS_CTXTS_THOR \
+       (BNXT_RSS_TBL_SIZE_THOR / BNXT_RSS_ENTRIES_PER_CTX_THOR)
+
+#define BNXT_MAX_TC    8
+#define BNXT_MAX_QUEUE 8
+#define BNXT_MAX_TC_Q  (BNXT_MAX_TC + 1)
+#define BNXT_MAX_Q     (bp->max_q + 1)
+#define BNXT_PAGE_SHFT 12
+#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHFT)
+#define MAX_CTX_PAGES  (BNXT_PAGE_SIZE / 8)
+
+#define PTU_PTE_VALID             0x1UL
+#define PTU_PTE_LAST              0x2UL
+#define PTU_PTE_NEXT_TO_LAST      0x4UL
+
+struct bnxt_ring_mem_info {
+       int                             nr_pages;
+       int                             page_size;
+       uint32_t                        flags;
+#define BNXT_RMEM_VALID_PTE_FLAG       1
+#define BNXT_RMEM_RING_PTE_FLAG                2
+
+       void                            **pg_arr;
+       rte_iova_t                      *dma_arr;
+       const struct rte_memzone        *mz;
+
+       uint64_t                        *pg_tbl;
+       rte_iova_t                      pg_tbl_map;
+       const struct rte_memzone        *pg_tbl_mz;
+
+       int                             vmem_size;
+       void                            **vmem;
+};
+
+struct bnxt_ctx_pg_info {
+       uint32_t        entries;
+       void            *ctx_pg_arr[MAX_CTX_PAGES];
+       rte_iova_t      ctx_dma_arr[MAX_CTX_PAGES];
+       struct bnxt_ring_mem_info ring_mem;
+};
+
+struct bnxt_ctx_mem_info {
+       uint32_t        qp_max_entries;
+       uint16_t        qp_min_qp1_entries;
+       uint16_t        qp_max_l2_entries;
+       uint16_t        qp_entry_size;
+       uint16_t        srq_max_l2_entries;
+       uint32_t        srq_max_entries;
+       uint16_t        srq_entry_size;
+       uint16_t        cq_max_l2_entries;
+       uint32_t        cq_max_entries;
+       uint16_t        cq_entry_size;
+       uint16_t        vnic_max_vnic_entries;
+       uint16_t        vnic_max_ring_table_entries;
+       uint16_t        vnic_entry_size;
+       uint32_t        stat_max_entries;
+       uint16_t        stat_entry_size;
+       uint16_t        tqm_entry_size;
+       uint32_t        tqm_min_entries_per_ring;
+       uint32_t        tqm_max_entries_per_ring;
+       uint32_t        mrav_max_entries;
+       uint16_t        mrav_entry_size;
+       uint16_t        tim_entry_size;
+       uint32_t        tim_max_entries;
+       uint8_t         tqm_entries_multiple;
+
+       uint32_t        flags;
+#define BNXT_CTX_FLAG_INITED    0x01
+
+       struct bnxt_ctx_pg_info qp_mem;
+       struct bnxt_ctx_pg_info srq_mem;
+       struct bnxt_ctx_pg_info cq_mem;
+       struct bnxt_ctx_pg_info vnic_mem;
+       struct bnxt_ctx_pg_info stat_mem;
+       struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TC_Q];
+};
+
 #define BNXT_HWRM_SHORT_REQ_LEN                sizeof(struct hwrm_short_input)
 struct bnxt {
        void                            *bar0;
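
Spelled out, the RSS sizing in the defines above gives (worked arithmetic, not additional patch content):

    BNXT_MAX_RSS_CTXTS_THOR = BNXT_RSS_TBL_SIZE_THOR / BNXT_RSS_ENTRIES_PER_CTX_THOR
                            = 512 / 64
                            = 8

i.e. a Thor function spreads its 512-entry RSS indirection table across at most eight hardware RSS contexts; and since Thor has no ring groups, bnxt_init_chip() in bnxt_ethdev.c (further down) reuses the ring-group array to hold those context IDs.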
@@ -262,6 +349,7 @@ struct bnxt {
 #define BNXT_FLAG_KONG_MB_EN   (1 << 10)
 #define BNXT_FLAG_TRUSTED_VF_EN        (1 << 11)
 #define BNXT_FLAG_DFLT_VNIC_SET        (1 << 12)
+#define BNXT_FLAG_THOR_CHIP    (1 << 13)
 #define BNXT_FLAG_NEW_RM       (1 << 30)
 #define BNXT_FLAG_INIT_DONE    (1U << 31)
 #define BNXT_PF(bp)            (!((bp)->flags & BNXT_FLAG_VF))
@@ -272,6 +360,9 @@ struct bnxt {
 #define BNXT_USE_CHIMP_MB      0 //For non-CFA commands, everything uses Chimp.
 #define BNXT_USE_KONG(bp)      ((bp)->flags & BNXT_FLAG_KONG_MB_EN)
 #define BNXT_VF_IS_TRUSTED(bp) ((bp)->flags & BNXT_FLAG_TRUSTED_VF_EN)
+#define BNXT_CHIP_THOR(bp)     ((bp)->flags & BNXT_FLAG_THOR_CHIP)
+#define BNXT_HAS_NQ(bp)                BNXT_CHIP_THOR(bp)
+#define BNXT_HAS_RING_GRPS(bp) (!BNXT_CHIP_THOR(bp))
 
        unsigned int            rx_nr_rings;
        unsigned int            rx_cp_nr_rings;
@@ -325,6 +416,9 @@ struct bnxt {
        struct bnxt_link_info   link_info;
        struct bnxt_cos_queue_info      cos_queue[BNXT_COS_QUEUE_COUNT];
        uint8_t                 tx_cosq_id;
+       uint8_t                 max_tc;
+       uint8_t                 max_lltc;
+       uint8_t                 max_q;
 
        uint16_t                fw_fid;
        uint8_t                 dflt_mac_addr[RTE_ETHER_ADDR_LEN];
@@ -332,11 +426,12 @@ struct bnxt {
        uint16_t                max_cp_rings;
        uint16_t                max_tx_rings;
        uint16_t                max_rx_rings;
+       uint16_t                max_nq_rings;
        uint16_t                max_l2_ctx;
        uint16_t                max_vnics;
        uint16_t                max_stat_ctx;
        uint16_t                vlan;
-       struct bnxt_pf_info             pf;
+       struct bnxt_pf_info     pf;
        uint8_t                 port_partition_type;
        uint8_t                 dev_stopped;
        uint8_t                 vxlan_port_cnt;
@@ -352,6 +447,7 @@ struct bnxt {
        uint8_t                 num_leds;
        struct bnxt_ptp_cfg     *ptp_cfg;
        uint16_t                vf_resv_strategy;
+       struct bnxt_ctx_mem_info        *ctx;
 };
 
 int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete);
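
The context-memory ("backing store") structures above describe host memory that Thor firmware uses for QP, SRQ, CQ, VNIC, statistics and TQM state. With BNXT_PAGE_SIZE = 4 KiB and 8-byte PTEs, MAX_CTX_PAGES works out to 512, so one page-table page can map 512 * 4 KiB = 2 MiB of backing store. A hypothetical helper (sketch only, not part of the patch) showing the page-count rounding that bnxt_alloc_ctx_mem_blk() in bnxt_ethdev.c performs:

    /* Sketch: pages needed for one context region.  Example: 8192 QP
     * entries of 256 B each give mem_size = 2 MiB -> 512 pages (the
     * MAX_CTX_PAGES bound); since nr_pages > 1, the driver also builds
     * a 512 * 8 = 4 KiB PTE page and points firmware at that table
     * rather than at the data pages directly. */
    static uint32_t bnxt_ctx_nr_pages(uint32_t mem_size)
    {
            return (mem_size + BNXT_PAGE_SIZE - 1) / BNXT_PAGE_SIZE;
    }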
diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
index 77a22d2..8c6a34b 100644
@@ -19,6 +19,10 @@ struct bnxt_db_info;
        (!!(rte_le_to_cpu_32(((struct cmpl_base *)(cmp))->info3_v) &    \
            CMPL_BASE_V) == !(v))
 
+#define NQ_CMP_VALID(nqcmp, raw_cons, ring)            \
+       (!!((nqcmp)->v & rte_cpu_to_le_32(NQ_CN_V)) ==  \
+        !((raw_cons) & ((ring)->ring_size)))
+
 #define CMP_TYPE(cmp)                                          \
        (((struct cmpl_base *)cmp)->type & CMPL_BASE_TYPE_MASK)
 
@@ -70,8 +74,12 @@ struct bnxt_db_info;
                    ((cpr)->cp_db.doorbell))
 
 struct bnxt_db_info {
-       void            *doorbell;
-       uint32_t        db_key32;
+       void                    *doorbell;
+       union {
+               uint64_t        db_key64;
+               uint32_t        db_key32;
+       };
+       bool                    db_64;
 };
 
 struct bnxt_ring;
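
The widened bnxt_db_info lets one structure serve both controller generations: db_64 selects between the legacy 32-bit key and the 64-bit Thor key built from the DBR_* values added to bnxt.h. A minimal sketch of the resulting doorbell write (hypothetical helper; assumes the producer index occupies the low bits of the key, as the DBR_XID_SFT/type layout implies):

    #include <rte_io.h>

    /* Sketch only: for a Thor send queue, db_key64 would carry
     * something like DBR_PATH_L2 | DBR_TYPE_SQ |
     * ((uint64_t)ring_xid << DBR_XID_SFT), with the producer index
     * OR'd in at ring time. */
    static inline void bnxt_db_ring(struct bnxt_db_info *db, uint32_t idx)
    {
            if (db->db_64)
                    rte_write64(db->db_key64 | idx, db->doorbell);
            else
                    rte_write32(db->db_key32 | idx, db->doorbell);
    }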
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 72679be..c0a84bb 100644
@@ -71,6 +71,10 @@ int bnxt_logtype_driver;
 #define BROADCOM_DEV_ID_57407_MF 0x16ea
 #define BROADCOM_DEV_ID_57414_MF 0x16ec
 #define BROADCOM_DEV_ID_57416_MF 0x16ee
+#define BROADCOM_DEV_ID_57508 0x1750
+#define BROADCOM_DEV_ID_57504 0x1751
+#define BROADCOM_DEV_ID_57502 0x1752
+#define BROADCOM_DEV_ID_57500_VF 0x1807
 #define BROADCOM_DEV_ID_58802 0xd802
 #define BROADCOM_DEV_ID_58804 0xd804
 #define BROADCOM_DEV_ID_58808 0x16f0
@@ -119,6 +123,10 @@ static const struct rte_pci_id bnxt_pci_id_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
+       { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) },
+       { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) },
+       { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) },
+       { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF) },
        { .vendor_id = 0, /* sentinel */ },
 };
 
@@ -224,6 +232,12 @@ static int bnxt_init_chip(struct bnxt *bp)
                bp->flags &= ~BNXT_FLAG_JUMBO;
        }
 
+       /* THOR does not support ring groups.
+        * But we will use the array to save RSS context IDs.
+        */
+       if (BNXT_CHIP_THOR(bp))
+               bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;
+
        rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
        if (rc) {
                PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
@@ -317,7 +331,7 @@ static int bnxt_init_chip(struct bnxt *bp)
                                    "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n",
                                    j, rxq->vnic, rxq->vnic->fw_grp_ids);
 
-                       if (rxq->rx_deferred_start)
+                       if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start)
                                rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
                }
 
@@ -573,22 +587,16 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
            eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
            bp->max_cp_rings ||
            eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
-           bp->max_stat_ctx ||
-           (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps ||
-           (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
-            bp->max_vnics < eth_dev->data->nb_rx_queues)) {
-               PMD_DRV_LOG(ERR,
-                       "Insufficient resources to support requested config\n");
-               PMD_DRV_LOG(ERR,
-                       "Num Queues Requested: Tx %d, Rx %d\n",
-                       eth_dev->data->nb_tx_queues,
-                       eth_dev->data->nb_rx_queues);
-               PMD_DRV_LOG(ERR,
-                       "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
-                       bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
-                       bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
-               return -ENOSPC;
-       }
+           bp->max_stat_ctx)
+               goto resource_error;
+
+       if (BNXT_HAS_RING_GRPS(bp) &&
+           (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
+               goto resource_error;
+
+       if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
+           bp->max_vnics < eth_dev->data->nb_rx_queues)
+               goto resource_error;
 
        bp->rx_cp_nr_rings = bp->rx_nr_rings;
        bp->tx_cp_nr_rings = bp->tx_nr_rings;
@@ -601,6 +609,19 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
                bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
        }
        return 0;
+
+resource_error:
+       PMD_DRV_LOG(ERR,
+                   "Insufficient resources to support requested config\n");
+       PMD_DRV_LOG(ERR,
+                   "Num Queues Requested: Tx %d, Rx %d\n",
+                   eth_dev->data->nb_tx_queues,
+                   eth_dev->data->nb_rx_queues);
+       PMD_DRV_LOG(ERR,
+                   "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
+                   bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
+                   bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
+       return -ENOSPC;
 }
 
 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
@@ -3265,7 +3286,8 @@ static bool bnxt_vf_pciid(uint16_t id)
            id == BROADCOM_DEV_ID_57414_VF ||
            id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
            id == BROADCOM_DEV_ID_STRATUS_NIC_VF2 ||
-           id == BROADCOM_DEV_ID_58802_VF)
+           id == BROADCOM_DEV_ID_58802_VF ||
+           id == BROADCOM_DEV_ID_57500_VF)
                return true;
        return false;
 }
@@ -3327,6 +3349,245 @@ init_err_disable:
        return rc;
 }
 
+static int bnxt_alloc_ctx_mem_blk(__rte_unused struct bnxt *bp,
+                                 struct bnxt_ctx_pg_info *ctx_pg,
+                                 uint32_t mem_size,
+                                 const char *suffix,
+                                 uint16_t idx)
+{
+       struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
+       const struct rte_memzone *mz = NULL;
+       char mz_name[RTE_MEMZONE_NAMESIZE];
+       rte_iova_t mz_phys_addr;
+       uint64_t valid_bits = 0;
+       uint32_t sz;
+       int i;
+
+       if (!mem_size)
+               return 0;
+
+       rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) /
+                        BNXT_PAGE_SIZE;
+       rmem->page_size = BNXT_PAGE_SIZE;
+       rmem->pg_arr = ctx_pg->ctx_pg_arr;
+       rmem->dma_arr = ctx_pg->ctx_dma_arr;
+       rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
+
+       valid_bits = PTU_PTE_VALID;
+
+       if (rmem->nr_pages > 1) {
+               snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_pg_tbl%s_%x",
+                        suffix, idx);
+               mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+               mz = rte_memzone_lookup(mz_name);
+               if (!mz) {
+                       mz = rte_memzone_reserve_aligned(mz_name,
+                                               rmem->nr_pages * 8,
+                                               SOCKET_ID_ANY,
+                                               RTE_MEMZONE_2MB |
+                                               RTE_MEMZONE_SIZE_HINT_ONLY |
+                                               RTE_MEMZONE_IOVA_CONTIG,
+                                               BNXT_PAGE_SIZE);
+                       if (mz == NULL)
+                               return -ENOMEM;
+               }
+
+               memset(mz->addr, 0, mz->len);
+               mz_phys_addr = mz->iova;
+               if ((unsigned long)mz->addr == mz_phys_addr) {
+                       PMD_DRV_LOG(WARNING,
+                               "Memzone physical address same as virtual.\n");
+                       PMD_DRV_LOG(WARNING,
+                                   "Using rte_mem_virt2iova()\n");
+                       mz_phys_addr = rte_mem_virt2iova(mz->addr);
+                       if (mz_phys_addr == RTE_BAD_IOVA) {
+                               PMD_DRV_LOG(ERR,
+                                       "unable to map addr to phys memory\n");
+                               return -ENOMEM;
+                       }
+               }
+               rte_mem_lock_page(((char *)mz->addr));
+
+               rmem->pg_tbl = mz->addr;
+               rmem->pg_tbl_map = mz_phys_addr;
+               rmem->pg_tbl_mz = mz;
+       }
+
+       snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x", suffix, idx);
+       mz = rte_memzone_lookup(mz_name);
+       if (!mz) {
+               mz = rte_memzone_reserve_aligned(mz_name,
+                                                mem_size,
+                                                SOCKET_ID_ANY,
+                                                RTE_MEMZONE_1GB |
+                                                RTE_MEMZONE_SIZE_HINT_ONLY |
+                                                RTE_MEMZONE_IOVA_CONTIG,
+                                                BNXT_PAGE_SIZE);
+               if (mz == NULL)
+                       return -ENOMEM;
+       }
+
+       memset(mz->addr, 0, mz->len);
+       mz_phys_addr = mz->iova;
+       if ((unsigned long)mz->addr == mz_phys_addr) {
+               PMD_DRV_LOG(WARNING,
+                           "Memzone physical address same as virtual.\n");
+               PMD_DRV_LOG(WARNING,
+                           "Using rte_mem_virt2iova()\n");
+               for (sz = 0; sz < mem_size; sz += BNXT_PAGE_SIZE)
+                       rte_mem_lock_page(((char *)mz->addr) + sz);
+               mz_phys_addr = rte_mem_virt2iova(mz->addr);
+               if (mz_phys_addr == RTE_BAD_IOVA) {
+                       PMD_DRV_LOG(ERR,
+                                   "unable to map addr to phys memory\n");
+                       return -ENOMEM;
+               }
+       }
+
+       for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) {
+               rte_mem_lock_page(((char *)mz->addr) + sz);
+               rmem->pg_arr[i] = ((char *)mz->addr) + sz;
+               rmem->dma_arr[i] = mz_phys_addr + sz;
+
+               if (rmem->nr_pages > 1) {
+                       if (i == rmem->nr_pages - 2 &&
+                           (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
+                               valid_bits |= PTU_PTE_NEXT_TO_LAST;
+                       else if (i == rmem->nr_pages - 1 &&
+                                (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
+                               valid_bits |= PTU_PTE_LAST;
+
+                       rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] |
+                                                          valid_bits);
+               }
+       }
+
+       rmem->mz = mz;
+       if (rmem->vmem_size)
+               rmem->vmem = (void **)mz->addr;
+       rmem->dma_arr[0] = mz_phys_addr;
+       return 0;
+}
+
+static void bnxt_free_ctx_mem(struct bnxt *bp)
+{
+       int i;
+
+       if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED))
+               return;
+
+       bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED;
+       rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz);
+       rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz);
+       rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz);
+       rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz);
+       rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz);
+       rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz);
+       rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz);
+       rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz);
+       rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz);
+       rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz);
+
+       for (i = 0; i < BNXT_MAX_Q; i++) {
+               if (bp->ctx->tqm_mem[i])
+                       rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz);
+       }
+
+       rte_free(bp->ctx);
+       bp->ctx = NULL;
+}
+
+#define roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))
+
+#define min_t(type, x, y) ({                    \
+       type __min1 = (x);                      \
+       type __min2 = (y);                      \
+       __min1 < __min2 ? __min1 : __min2; })
+
+#define max_t(type, x, y) ({                    \
+       type __max1 = (x);                      \
+       type __max2 = (y);                      \
+       __max1 > __max2 ? __max1 : __max2; })
+
+#define clamp_t(type, _x, min, max)     min_t(type, max_t(type, _x, min), max)
+
+int bnxt_alloc_ctx_mem(struct bnxt *bp)
+{
+       struct bnxt_ctx_pg_info *ctx_pg;
+       struct bnxt_ctx_mem_info *ctx;
+       uint32_t mem_size, ena, entries;
+       int i, rc;
+
+       rc = bnxt_hwrm_func_backing_store_qcaps(bp);
+       if (rc) {
+               PMD_DRV_LOG(ERR, "Query context mem capability failed\n");
+               return rc;
+       }
+       ctx = bp->ctx;
+       if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
+               return 0;
+
+       ctx_pg = &ctx->qp_mem;
+       ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
+       mem_size = ctx->qp_entry_size * ctx_pg->entries;
+       rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0);
+       if (rc)
+               return rc;
+
+       ctx_pg = &ctx->srq_mem;
+       ctx_pg->entries = ctx->srq_max_l2_entries;
+       mem_size = ctx->srq_entry_size * ctx_pg->entries;
+       rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0);
+       if (rc)
+               return rc;
+
+       ctx_pg = &ctx->cq_mem;
+       ctx_pg->entries = ctx->cq_max_l2_entries;
+       mem_size = ctx->cq_entry_size * ctx_pg->entries;
+       rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0);
+       if (rc)
+               return rc;
+
+       ctx_pg = &ctx->vnic_mem;
+       ctx_pg->entries = ctx->vnic_max_vnic_entries +
+               ctx->vnic_max_ring_table_entries;
+       mem_size = ctx->vnic_entry_size * ctx_pg->entries;
+       rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0);
+       if (rc)
+               return rc;
+
+       ctx_pg = &ctx->stat_mem;
+       ctx_pg->entries = ctx->stat_max_entries;
+       mem_size = ctx->stat_entry_size * ctx_pg->entries;
+       rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0);
+       if (rc)
+               return rc;
+
+       entries = ctx->qp_max_l2_entries;
+       entries = roundup(entries, ctx->tqm_entries_multiple);
+       entries = clamp_t(uint32_t, entries, ctx->tqm_min_entries_per_ring,
+                         ctx->tqm_max_entries_per_ring);
+       for (i = 0, ena = 0; i < BNXT_MAX_Q; i++) {
+               ctx_pg = ctx->tqm_mem[i];
+               /* use min tqm entries for now. */
+               ctx_pg->entries = entries;
+               mem_size = ctx->tqm_entry_size * ctx_pg->entries;
+               rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i);
+               if (rc)
+                       return rc;
+               ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
+       }
+
+       ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
+       rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
+       if (rc)
+               PMD_DRV_LOG(ERR,
+                           "Failed to configure context mem: rc = %d\n", rc);
+       else
+               ctx->flags |= BNXT_CTX_FLAG_INITED;
+
+       return 0;
+}
 
 #define ALLOW_FUNC(x)  \
        { \
@@ -3361,6 +3622,12 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
        if (bnxt_vf_pciid(pci_dev->id.device_id))
                bp->flags |= BNXT_FLAG_VF;
 
+       if (pci_dev->id.device_id == BROADCOM_DEV_ID_57508 ||
+           pci_dev->id.device_id == BROADCOM_DEV_ID_57504 ||
+           pci_dev->id.device_id == BROADCOM_DEV_ID_57502 ||
+           pci_dev->id.device_id == BROADCOM_DEV_ID_57500_VF)
+               bp->flags |= BNXT_FLAG_THOR_CHIP;
+
        rc = bnxt_init_board(eth_dev);
        if (rc) {
                PMD_DRV_LOG(ERR,
@@ -3497,13 +3764,6 @@ skip_ext_stats:
                PMD_DRV_LOG(ERR, "hwrm queue qportcfg failed\n");
                goto error_free;
        }
-
-       rc = bnxt_hwrm_func_qcfg(bp);
-       if (rc) {
-               PMD_DRV_LOG(ERR, "hwrm func qcfg failed\n");
-               goto error_free;
-       }
-
        /* Get the MAX capabilities for this function */
        rc = bnxt_hwrm_func_qcaps(bp);
        if (rc) {
@@ -3538,7 +3798,12 @@ skip_ext_stats:
        memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
        memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
 
-       if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
+       /* THOR does not support ring groups.
+        * But we will use the array to save RSS context IDs.
+        */
+       if (BNXT_CHIP_THOR(bp)) {
+               bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;
+       } else if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
                /* 1 ring is for default completion ring */
                PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
                rc = -ENOSPC;
@@ -3592,6 +3857,11 @@ skip_ext_stats:
                pci_dev->mem_resource[0].phys_addr,
                pci_dev->mem_resource[0].addr);
 
+       rc = bnxt_hwrm_func_qcfg(bp);
+       if (rc) {
+               PMD_DRV_LOG(ERR, "hwrm func qcfg failed\n");
+               goto error_free;
+       }
 
        if (BNXT_PF(bp)) {
                //if (bp->pf.active_vfs) {
@@ -3677,6 +3947,7 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
                bnxt_dev_close_op(eth_dev);
        if (bp->pf.vf_info)
                rte_free(bp->pf.vf_info);
+       bnxt_free_ctx_mem(bp);
        eth_dev->dev_ops = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;
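
The TQM sizing in bnxt_alloc_ctx_mem() above is easiest to follow with concrete numbers (made up for illustration; the real values come from the FUNC_BACKING_STORE_QCAPS response). With qp_max_l2_entries = 1000, tqm_entries_multiple = 32, tqm_min_entries_per_ring = 256 and tqm_max_entries_per_ring = 16384, using the helper macros defined above:

    entries = roundup(1000, 32);                      /* -> 1024    */
    entries = clamp_t(uint32_t, entries, 256, 16384); /* stays 1024 */
    /* each tqm_mem[i] block then holds 1024 entries of tqm_entry_size
     * bytes, and one TQM enable bit per queue accumulates into 'ena'. */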
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 45d37f1..29f2701 100644
@@ -29,6 +29,7 @@
 #define HWRM_CMD_TIMEOUT               6000000
 #define HWRM_SPEC_CODE_1_8_3           0x10803
 #define HWRM_VERSION_1_9_1             0x10901
+#define HWRM_VERSION_1_9_2             0x10903
 
 struct bnxt_plcmodes_cfg {
        uint32_t        flags;
@@ -62,6 +63,18 @@ static int page_roundup(size_t size)
        return 1 << page_getenum(size);
 }
 
+static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
+                                 uint8_t *pg_attr,
+                                 uint64_t *pg_dir)
+{
+       if (rmem->nr_pages > 1) {
+               *pg_attr = 1;
+               *pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
+       } else {
+               *pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
+       }
+}
+
 /*
  * HWRM Functions (sent to HWRM)
  * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
@@ -608,6 +621,10 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 
        rc = __bnxt_hwrm_func_qcaps(bp);
        if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
+               rc = bnxt_alloc_ctx_mem(bp);
+               if (rc)
+                       return rc;
+
                rc = bnxt_hwrm_func_resc_qcaps(bp);
                if (!rc)
                        bp->flags |= BNXT_FLAG_NEW_RM;
@@ -703,13 +720,16 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
 
        HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
 
-       req.enables = rte_cpu_to_le_32
-                       (HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
-                       HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
-                       HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
-                       HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
-                       HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS |
-                       HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS);
+       enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
+                 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
+                 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
+                 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
+                 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;
+
+       if (BNXT_HAS_RING_GRPS(bp)) {
+               enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
+               req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
+       }
 
        req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
        req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
@@ -717,14 +737,12 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
        req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
                                              bp->tx_nr_rings);
-       req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
        req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
        if (bp->vf_resv_strategy ==
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
-               enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
-                               HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
-                               HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
-               req.enables |= rte_cpu_to_le_32(enables);
+               enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
+                          HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
+                          HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
                req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
                req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
                req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
@@ -738,7 +756,11 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;
 
+       if (test && BNXT_HAS_RING_GRPS(bp))
+               flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;
+
        req.flags = rte_cpu_to_le_32(flags);
+       req.enables |= rte_cpu_to_le_32(enables);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -774,6 +796,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
                bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
                bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        }
+       bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
        bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
        if (bp->vf_resv_strategy >
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
@@ -1092,6 +1115,13 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
                        }
                }
        }
+
+       bp->max_tc = resp->max_configurable_queues;
+       bp->max_lltc = resp->max_configurable_lossless_queues;
+       if (bp->max_tc > BNXT_MAX_QUEUE)
+               bp->max_tc = BNXT_MAX_QUEUE;
+       bp->max_q = bp->max_tc;
+
        PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);
 
        return rc;
@@ -1106,6 +1136,8 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
        uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+       struct rte_mempool *mb_pool;
+       uint16_t rx_buf_size;
 
        HWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB);
 
@@ -1117,24 +1149,59 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
 
        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
+               req.ring_type = ring_type;
+               req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
+               req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
                req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
-               /* FALLTHROUGH */
+               if (stats_ctx_id != INVALID_STATS_CTX_ID)
+                       enables |=
+                       HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
+               break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
+               if (BNXT_CHIP_THOR(bp)) {
+                       mb_pool = bp->rx_queues[0]->mb_pool;
+                       rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
+                                     RTE_PKTMBUF_HEADROOM;
+                       req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
+                       enables |=
+                               HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
+               }
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
-                       HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
+                               HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
-               /*
-                * TODO: Some HWRM versions crash with
-                * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
-                */
+               if (BNXT_HAS_NQ(bp)) {
+                       /* Association of cp ring with nq */
+                       req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
+                       enables |=
+                               HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
+               }
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
+       case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
+               req.ring_type = ring_type;
+               req.page_size = BNXT_PAGE_SHFT;
+               req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
+               break;
+       case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
+               req.ring_type = ring_type;
+               req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);
+
+               mb_pool = bp->rx_queues[0]->mb_pool;
+               rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
+                             RTE_PKTMBUF_HEADROOM;
+               req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
+
+               req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
+               enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
+                          HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
+                          HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
+               break;
        default:
                PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
                        ring_type);
@@ -1156,12 +1223,23 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                        return rc;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                        PMD_DRV_LOG(ERR,
-                               "hwrm_ring_alloc rx failed. rc:%d\n", rc);
+                                   "hwrm_ring_alloc rx failed. rc:%d\n", rc);
+                       HWRM_UNLOCK();
+                       return rc;
+               case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
+                       PMD_DRV_LOG(ERR,
+                                   "hwrm_ring_alloc rx agg failed. rc:%d\n",
+                                   rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                        PMD_DRV_LOG(ERR,
-                               "hwrm_ring_alloc tx failed. rc:%d\n", rc);
+                                   "hwrm_ring_alloc tx failed. rc:%d\n", rc);
+                       HWRM_UNLOCK();
+                       return rc;
+               case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
+                       PMD_DRV_LOG(ERR,
+                                   "hwrm_ring_alloc nq failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                default:
@@ -1208,6 +1286,14 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,
                        PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
+               case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
+                       PMD_DRV_LOG(ERR,
+                                   "hwrm_ring_free nq failed. rc:%d\n", rc);
+                       return rc;
+               case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
+                       PMD_DRV_LOG(ERR,
+                                   "hwrm_ring_free agg failed. rc:%d\n", rc);
+                       return rc;
                default:
                        PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
                        return rc;
@@ -1332,6 +1418,9 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 
+       if (!BNXT_HAS_RING_GRPS(bp))
+               goto skip_ring_grps;
+
        /* map ring groups to this vnic */
        PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
                vnic->start_grp_id, vnic->end_grp_id);
@@ -1342,6 +1431,8 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
+
+skip_ring_grps:
        vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
                                RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
        HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);
@@ -1423,6 +1514,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t ctx_enable_flag = 0;
        struct bnxt_plcmodes_cfg pmodes;
+       uint32_t enables = 0;
 
        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
@@ -1435,9 +1527,22 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 
        HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);
 
+       if (BNXT_CHIP_THOR(bp)) {
+               struct bnxt_rx_queue *rxq = bp->eth_dev->data->rx_queues[0];
+               struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+               struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+
+               req.default_rx_ring_id =
+                       rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
+               req.default_cmpl_ring_id =
+                       rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
+               enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
+                         HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
+               goto config_mru;
+       }
+
        /* Only RSS support for now TBD: COS & LB */
-       req.enables =
-           rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
+       enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
        if (vnic->lb_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
        if (vnic->cos_rule != 0xffff)
@@ -1446,12 +1551,15 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
        }
-       req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
-       req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+       enables |= ctx_enable_flag;
        req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
        req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
        req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
        req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
+
+config_mru:
+       req.enables = rte_cpu_to_le_32(enables);
+       req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mru = rte_cpu_to_le_16(vnic->mru);
        /* Configure default VNIC only once. */
        if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
@@ -1672,6 +1780,9 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
        struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 
+       if (BNXT_CHIP_THOR(bp))
+               return 0;
+
        HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
 
        if (enable) {
@@ -1887,6 +1998,9 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
        uint16_t idx;
        uint32_t rc = 0;
 
+       if (!BNXT_HAS_RING_GRPS(bp))
+               return 0;
+
        for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
 
                if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
@@ -1900,6 +2014,18 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
        return rc;
 }
 
+static void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+{
+       struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+
+       bnxt_hwrm_ring_free(bp, cp_ring,
+                           HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
+       cp_ring->fw_ring_id = INVALID_HW_RING_ID;
+       memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
+                                    sizeof(*cpr->cp_desc_ring));
+       cpr->cp_raw_cons = 0;
+}
+
 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
 {
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
@@ -1935,6 +2061,8 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
        ring = rxr->ag_ring_struct;
        if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                bnxt_hwrm_ring_free(bp, ring,
+                                   BNXT_CHIP_THOR(bp) ?
+                                   HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
                                    HWRM_RING_FREE_INPUT_RING_TYPE_RX);
                ring->fw_ring_id = INVALID_HW_RING_ID;
                memset(rxr->ag_buf_ring, 0,
@@ -1943,8 +2071,11 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
                rxr->ag_prod = 0;
                bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
        }
-       if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+       if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
                bnxt_free_cp_ring(bp, cpr);
+               if (rxq->nq_ring)
+                       bnxt_free_nq_ring(bp, rxq->nq_ring);
+       }
 
        bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
 }
@@ -1975,6 +2106,8 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_free_cp_ring(bp, cpr);
                        cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
+                       if (txq->nq_ring)
+                               bnxt_free_nq_ring(bp, txq->nq_ring);
                }
        }
 
@@ -1989,6 +2122,9 @@ int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
        uint16_t i;
        uint32_t rc = 0;
 
+       if (!BNXT_HAS_RING_GRPS(bp))
+               return 0;
+
        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                rc = bnxt_hwrm_ring_grp_alloc(bp, i);
                if (rc)
@@ -2516,18 +2652,27 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
 {
        struct hwrm_func_cfg_input req = {0};
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+       uint32_t enables;
        int rc;
 
-       req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
-                       HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
-                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
-                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
-                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
-                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
-                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
-                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
-                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
-                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
+       enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
+                 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
+                 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
+                 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
+                 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
+                 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
+                 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
+                 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
+                 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;
+
+       if (BNXT_HAS_RING_GRPS(bp)) {
+               enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
+               req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
+       } else if (BNXT_HAS_NQ(bp)) {
+               enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
+               req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
+       }
+
        req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
        req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
        req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
@@ -2540,8 +2685,8 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
        req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
        req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
        req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
-       req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
        req.fid = rte_cpu_to_le_16(0xffff);
+       req.enables = rte_cpu_to_le_32(enables);
 
        HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
 
@@ -2711,6 +2856,7 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
        bp->pf.func_cfg_flags |=
                HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
        rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
+       rc = __bnxt_hwrm_func_qcaps(bp);
        return rc;
 }
 
@@ -3970,6 +4116,192 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
        return 0;
 }
 
+#define BNXT_RTE_MEMZONE_FLAG  (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
+int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
+{
+       struct hwrm_func_backing_store_qcaps_input req = {0};
+       struct hwrm_func_backing_store_qcaps_output *resp =
+               bp->hwrm_cmd_resp_addr;
+       int rc;
+
+       if (!BNXT_CHIP_THOR(bp) ||
+           bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
+           BNXT_VF(bp) ||
+           bp->ctx)
+               return 0;
+
+       HWRM_PREP(req, FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+       HWRM_CHECK_RESULT_SILENT();
+
+       if (!rc) {
+               struct bnxt_ctx_pg_info *ctx_pg;
+               struct bnxt_ctx_mem_info *ctx;
+               int total_alloc_len;
+               int i;
+
+               total_alloc_len = sizeof(*ctx);
+               ctx = rte_malloc("bnxt_ctx_mem", total_alloc_len,
+                                RTE_CACHE_LINE_SIZE);
+               if (!ctx) {
+                       rc = -ENOMEM;
+                       goto ctx_err;
+               }
+               memset(ctx, 0, total_alloc_len);
+
+               ctx_pg = rte_zmalloc("bnxt_ctx_pg_mem",
+                                    sizeof(*ctx_pg) * BNXT_MAX_Q,
+                                    RTE_CACHE_LINE_SIZE);
+               if (!ctx_pg) {
+                       rc = -ENOMEM;
+                       goto ctx_err;
+               }
+               for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++)
+                       ctx->tqm_mem[i] = ctx_pg;
+
+               bp->ctx = ctx;
+               ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
+               ctx->qp_min_qp1_entries =
+                       rte_le_to_cpu_16(resp->qp_min_qp1_entries);
+               ctx->qp_max_l2_entries =
+                       rte_le_to_cpu_16(resp->qp_max_l2_entries);
+               ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
+               ctx->srq_max_l2_entries =
+                       rte_le_to_cpu_16(resp->srq_max_l2_entries);
+               ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
+               ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
+               ctx->cq_max_l2_entries =
+                       rte_le_to_cpu_16(resp->cq_max_l2_entries);
+               ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
+               ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
+               ctx->vnic_max_vnic_entries =
+                       rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
+               ctx->vnic_max_ring_table_entries =
+                       rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
+               ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
+               ctx->stat_max_entries =
+                       rte_le_to_cpu_32(resp->stat_max_entries);
+               ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
+               ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
+               ctx->tqm_min_entries_per_ring =
+                       rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
+               ctx->tqm_max_entries_per_ring =
+                       rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
+               ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
+               if (!ctx->tqm_entries_multiple)
+                       ctx->tqm_entries_multiple = 1;
+               ctx->mrav_max_entries =
+                       rte_le_to_cpu_32(resp->mrav_max_entries);
+               ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
+               ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
+               ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
+       } else {
+               rc = 0;
+       }
+ctx_err:
+       HWRM_UNLOCK();
+       return rc;
+}
+
+int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
+{
+       struct hwrm_func_backing_store_cfg_input req = {0};
+       struct hwrm_func_backing_store_cfg_output *resp =
+               bp->hwrm_cmd_resp_addr;
+       struct bnxt_ctx_mem_info *ctx = bp->ctx;
+       struct bnxt_ctx_pg_info *ctx_pg;
+       uint32_t *num_entries;
+       uint64_t *pg_dir;
+       uint8_t *pg_attr;
+       uint32_t ena;
+       int i, rc;
+
+       if (!ctx)
+               return 0;
+
+       HWRM_PREP(req, FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
+       req.enables = rte_cpu_to_le_32(enables);
+
+       if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
+               ctx_pg = &ctx->qp_mem;
+               req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
+               req.qp_num_qp1_entries =
+                       rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
+               req.qp_num_l2_entries =
+                       rte_cpu_to_le_16(ctx->qp_max_l2_entries);
+               req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
+               bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+                                     &req.qpc_pg_size_qpc_lvl,
+                                     &req.qpc_page_dir);
+       }
+
+       if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
+               ctx_pg = &ctx->srq_mem;
+               req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
+               req.srq_num_l2_entries =
+                                rte_cpu_to_le_16(ctx->srq_max_l2_entries);
+               req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
+               bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+                                     &req.srq_pg_size_srq_lvl,
+                                     &req.srq_page_dir);
+       }
+
+       if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
+               ctx_pg = &ctx->cq_mem;
+               req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
+               req.cq_num_l2_entries =
+                               rte_cpu_to_le_16(ctx->cq_max_l2_entries);
+               req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
+               bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+                                     &req.cq_pg_size_cq_lvl,
+                                     &req.cq_page_dir);
+       }
+
+       if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
+               ctx_pg = &ctx->vnic_mem;
+               req.vnic_num_vnic_entries =
+                       rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
+               req.vnic_num_ring_table_entries =
+                       rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
+               req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
+               bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+                                     &req.vnic_pg_size_vnic_lvl,
+                                     &req.vnic_page_dir);
+       }
+
+       if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
+               ctx_pg = &ctx->stat_mem;
+               req.stat_num_entries = rte_cpu_to_le_32(ctx->stat_max_entries);
+               req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
+               bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+                                     &req.stat_pg_size_stat_lvl,
+                                     &req.stat_page_dir);
+       }
+
+       req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
+       num_entries = &req.tqm_sp_num_entries;
+       pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
+       pg_dir = &req.tqm_sp_page_dir;
+       ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
+       for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
+               if (!(enables & ena))
+                       continue;
+
+               req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
+
+               ctx_pg = ctx->tqm_mem[i];
+               *num_entries = rte_cpu_to_le_32(ctx_pg->entries);
+               bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
+       }
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+       HWRM_CHECK_RESULT();
+       HWRM_UNLOCK();
+       if (rc)
+               rc = -EIO;
+       return rc;
+}
+
 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
 {
        struct hwrm_port_qstats_ext_input req = {0};
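
One subtlety in bnxt_hwrm_func_backing_store_cfg() above: the TQM loop advances the num_entries/pg_attr/pg_dir pointers through nine consecutive per-ring field groups of the request (tqm_sp, then the per-queue TQM rings) while shifting the enable bit left once per slot. A standalone illustration of that progression (the constant's value is an assumption for illustration, not quoted from the HWRM headers):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed value, for illustration only; the real constant lives in
     * the HWRM headers. */
    #define ENABLES_TQM_SP 0x20u

    int main(void)
    {
            uint32_t ena = ENABLES_TQM_SP;
            int i;

            /* Slot 0 is TQM_SP; each left shift is expected to select
             * the next per-ring enable (TQM_RING0..TQM_RING7), matching
             * the 'ena |= ..._TQM_SP << i' accumulation done in
             * bnxt_alloc_ctx_mem(). */
            for (i = 0; i < 9; i++, ena <<= 1)
                    printf("slot %d -> enables bit 0x%03x\n",
                           i, (unsigned int)ena);
            return 0;
    }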
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 53d79f0..ffd99de 100644
@@ -36,6 +36,13 @@ struct bnxt_cp_ring_info;
 #define HWRM_SPEC_CODE_1_9_0           0x10900
 #define HWRM_SPEC_CODE_1_9_2           0x10902
 
+#define FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES              \
+       (HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP |        \
+       HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ |        \
+       HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ |         \
+       HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC |       \
+       HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT)
+
 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,
                                   struct bnxt_vnic_info *vnic);
 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic,
@@ -179,4 +186,7 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
                        struct bnxt_coal *coal, uint16_t ring_id);
 int bnxt_hwrm_check_vf_rings(struct bnxt *bp);
 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp);
+int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp);
+int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables);
+int bnxt_alloc_ctx_mem(struct bnxt *bp);
 #endif
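
Taken together with the bnxt_ethdev.c changes, the new prototypes wire up the following Thor initialization order (a condensed sketch derived from the hunks above, not additional patch content):

    bnxt_hwrm_func_qcaps()
      -> bnxt_alloc_ctx_mem()                       (HWRM spec >= 1.8.3)
           -> bnxt_hwrm_func_backing_store_qcaps()  (returns early unless a
                                                     Thor PF with spec >=
                                                     HWRM_VERSION_1_9_2 and
                                                     no ctx allocated yet)
           -> bnxt_alloc_ctx_mem_blk()              (per region: QP, SRQ, CQ,
                                                     VNIC, STAT, TQM blocks)
           -> bnxt_hwrm_func_backing_store_cfg()
      -> bnxt_hwrm_func_resc_qcaps()                (sets BNXT_FLAG_NEW_RM
                                                     on success)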
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 8473e4a..56bb463 100644
@@ -67,6 +67,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
                            struct bnxt_tx_queue *txq,
                            struct bnxt_rx_queue *rxq,
                            struct bnxt_cp_ring_info *cp_ring_info,
+                           struct bnxt_cp_ring_info *nq_ring_info,
                            const char *suffix)
 {
        struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;
@@ -78,49 +79,70 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
        uint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads;
        const struct rte_memzone *mz = NULL;
        char mz_name[RTE_MEMZONE_NAMESIZE];
+       rte_iova_t mz_phys_addr_base;
        rte_iova_t mz_phys_addr;
        int sz;
 
        int stats_len = (tx_ring_info || rx_ring_info) ?
            RTE_CACHE_LINE_ROUNDUP(sizeof(struct hwrm_stat_ctx_query_output) -
                                   sizeof (struct hwrm_resp_hdr)) : 0;
+       stats_len = RTE_ALIGN(stats_len, 128);
 
        int cp_vmem_start = stats_len;
        int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);
+       cp_vmem_len = RTE_ALIGN(cp_vmem_len, 128);
 
-       int tx_vmem_start = cp_vmem_start + cp_vmem_len;
+       /* The NQ is a completion-type ring; its vmem mirrors the cp ring's. */
+       int nq_vmem_len = BNXT_CHIP_THOR(bp) ?
+               RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size) : 0;
+       nq_vmem_len = RTE_ALIGN(nq_vmem_len, 128);
+
+       int nq_vmem_start = cp_vmem_start + cp_vmem_len;
+
+       int tx_vmem_start = nq_vmem_start + nq_vmem_len;
        int tx_vmem_len =
            tx_ring_info ? RTE_CACHE_LINE_ROUNDUP(tx_ring_info->
                                                tx_ring_struct->vmem_size) : 0;
+       tx_vmem_len = RTE_ALIGN(tx_vmem_len, 128);
 
        int rx_vmem_start = tx_vmem_start + tx_vmem_len;
        int rx_vmem_len = rx_ring_info ?
                RTE_CACHE_LINE_ROUNDUP(rx_ring_info->
                                                rx_ring_struct->vmem_size) : 0;
+       rx_vmem_len = RTE_ALIGN(rx_vmem_len, 128);
        int ag_vmem_start = 0;
        int ag_vmem_len = 0;
        int cp_ring_start =  0;
+       int nq_ring_start = 0;
 
        ag_vmem_start = rx_vmem_start + rx_vmem_len;
        ag_vmem_len = rx_ring_info ? RTE_CACHE_LINE_ROUNDUP(
                                rx_ring_info->ag_ring_struct->vmem_size) : 0;
        cp_ring_start = ag_vmem_start + ag_vmem_len;
+       cp_ring_start = RTE_ALIGN(cp_ring_start, 4096);
 
        int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
                                                 sizeof(struct cmpl_base));
+       cp_ring_len = RTE_ALIGN(cp_ring_len, 128);
+       nq_ring_start = cp_ring_start + cp_ring_len;
+       nq_ring_start = RTE_ALIGN(nq_ring_start, 4096);
+
+       int nq_ring_len = BNXT_CHIP_THOR(bp) ? cp_ring_len : 0;
 
-       int tx_ring_start = cp_ring_start + cp_ring_len;
+       int tx_ring_start = nq_ring_start + nq_ring_len;
        int tx_ring_len = tx_ring_info ?
            RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->ring_size *
                                   sizeof(struct tx_bd_long)) : 0;
+       tx_ring_len = RTE_ALIGN(tx_ring_len, 4096);
 
        int rx_ring_start = tx_ring_start + tx_ring_len;
        int rx_ring_len =  rx_ring_info ?
                RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
                sizeof(struct rx_prod_pkt_bd)) : 0;
+       rx_ring_len = RTE_ALIGN(rx_ring_len, 4096);
 
        int ag_ring_start = rx_ring_start + rx_ring_len;
        int ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR;
+       ag_ring_len = RTE_ALIGN(ag_ring_len, 4096);
 
        int ag_bitmap_start = ag_ring_start + ag_ring_len;
        int ag_bitmap_len =  rx_ring_info ?
@@ -154,14 +176,16 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
                        return -ENOMEM;
        }
        memset(mz->addr, 0, mz->len);
+       mz_phys_addr_base = mz->iova;
        mz_phys_addr = mz->iova;
-       if ((unsigned long)mz->addr == mz_phys_addr) {
+       if ((unsigned long)mz->addr == mz_phys_addr_base) {
                PMD_DRV_LOG(WARNING,
                        "Memzone physical address same as virtual.\n");
                PMD_DRV_LOG(WARNING,
                        "Using rte_mem_virt2iova()\n");
                for (sz = 0; sz < total_alloc_len; sz += getpagesize())
                        rte_mem_lock_page(((char *)mz->addr) + sz);
+               mz_phys_addr_base = rte_mem_virt2iova(mz->addr);
                mz_phys_addr = rte_mem_virt2iova(mz->addr);
                if (mz_phys_addr == 0) {
                        PMD_DRV_LOG(ERR,
@@ -255,6 +279,24 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
                cp_ring_info->hw_stats_map = mz_phys_addr;
        }
        cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
+
+       if (BNXT_HAS_NQ(bp)) {
+               struct bnxt_ring *nq_ring = nq_ring_info->cp_ring_struct;
+
+               nq_ring->bd = (char *)mz->addr + nq_ring_start;
+               nq_ring->bd_dma = mz_phys_addr + nq_ring_start;
+               nq_ring_info->cp_desc_ring = nq_ring->bd;
+               nq_ring_info->cp_desc_mapping = nq_ring->bd_dma;
+               nq_ring->mem_zone = (const void *)mz;
+
+               if (!nq_ring->bd)
+                       return -ENOMEM;
+               if (nq_ring->vmem_size)
+                       *nq_ring->vmem = (char *)mz->addr + nq_vmem_start;
+
+               nq_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
+       }
+
        return 0;
 }
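
The hunk above carves one memzone into consecutive regions: stats, then the
cp/NQ/tx/rx vmem areas, then the rings themselves, with 128-byte alignment
for the host-side structures and 4 KB alignment for ring storage. A
standalone sketch of the offset arithmetic (RTE_ALIGN re-created locally so
it compiles without DPDK; sizes are examples only):

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))  /* a: power of two */

    int main(void)
    {
            int stats_len = ALIGN(176, 128);     /* example size */
            int cp_vmem_start = stats_len;
            int cp_vmem_len = ALIGN(2048, 128);

            /* Thor only: an NQ vmem region follows the cp vmem region. */
            int nq_vmem_start = cp_vmem_start + cp_vmem_len;
            int nq_vmem_len = ALIGN(2048, 128);

            int tx_vmem_start = nq_vmem_start + nq_vmem_len;
            int cp_ring_start = ALIGN(tx_vmem_start /* + later regions */,
                                      4096);

            printf("cp_vmem@%d nq_vmem@%d tx_vmem@%d cp_ring@%d\n",
                   cp_vmem_start, nq_vmem_start, tx_vmem_start,
                   cp_ring_start);
            return 0;
    }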
 
@@ -279,43 +321,109 @@ static void bnxt_init_dflt_coal(struct bnxt_coal *coal)
 static void bnxt_set_db(struct bnxt *bp,
                        struct bnxt_db_info *db,
                        uint32_t ring_type,
-                       uint32_t map_idx)
+                       uint32_t map_idx,
+                       uint32_t fid)
 {
-       db->doorbell = (char *)bp->doorbell_base + map_idx * 0x80;
-       switch (ring_type) {
-       case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
-               db->db_key32 = DB_KEY_TX;
-               break;
-       case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
-               db->db_key32 = DB_KEY_RX;
-               break;
-       case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
-               db->db_key32 = DB_KEY_CP;
-               break;
+       if (BNXT_CHIP_THOR(bp)) {
+               if (BNXT_PF(bp))
+                       db->doorbell = (char *)bp->doorbell_base + 0x10000;
+               else
+                       db->doorbell = (char *)bp->doorbell_base + 0x4000;
+               switch (ring_type) {
+               case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
+                       db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
+                       break;
+               case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
+               case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
+                       db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
+                       break;
+               case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
+                       db->db_key64 = DBR_PATH_L2 | DBR_TYPE_CQ;
+                       break;
+               case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
+                       db->db_key64 = DBR_PATH_L2 | DBR_TYPE_NQ;
+                       break;
+               }
+               db->db_key64 |= (uint64_t)fid << DBR_XID_SFT;
+               db->db_64 = true;
+       } else {
+               db->doorbell = (char *)bp->doorbell_base + map_idx * 0x80;
+               switch (ring_type) {
+               case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
+                       db->db_key32 = DB_KEY_TX;
+                       break;
+               case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
+                       db->db_key32 = DB_KEY_RX;
+                       break;
+               case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
+                       db->db_key32 = DB_KEY_CP;
+                       break;
+               }
+               db->db_64 = false;
        }
 }
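
On Thor every doorbell becomes a single 64-bit write: the path and ring-type
selectors sit in the top byte, the firmware ring ID (the "xid") at bit 32,
and the producer/consumer index in the low bits. The key is composed once per
ring here in bnxt_set_db(); the fast path only ORs in the index. A standalone
sketch using the constants this patch adds to bnxt.h (ring ID and index are
example values):

    #include <stdint.h>
    #include <stdio.h>

    /* Constants as defined in bnxt.h by this patch. */
    #define DBR_XID_SFT     32
    #define DBR_PATH_L2     (0x1ULL << 56)
    #define DBR_TYPE_SQ     (0x0ULL << 60)

    int main(void)
    {
            uint16_t fw_ring_id = 0x123;    /* example firmware ring id */
            uint32_t prod = 42;             /* example producer index */

            /* Composed once per ring (see bnxt_set_db() above)... */
            uint64_t key = DBR_PATH_L2 | DBR_TYPE_SQ |
                           ((uint64_t)fw_ring_id << DBR_XID_SFT);

            /* ...then OR'd with the index on every doorbell write. */
            uint64_t db = key | prod;

            printf("doorbell: 0x%016llx\n", (unsigned long long)db);
            return 0;
    }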
 
 static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
-                               struct bnxt_cp_ring_info *cpr)
+                               struct bnxt_cp_ring_info *cpr,
+                               struct bnxt_cp_ring_info *nqr)
 {
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+       uint32_t nq_ring_id = HWRM_NA_SIGNATURE;
        uint8_t ring_type;
        int rc = 0;
 
        ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;
 
+       if (BNXT_HAS_NQ(bp)) {
+               if (nqr) {
+                       nq_ring_id = nqr->cp_ring_struct->fw_ring_id;
+               } else {
+                       PMD_DRV_LOG(ERR, "NQ ring is NULL\n");
+                       return -EINVAL;
+               }
+       }
+
        rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, queue_index,
-                                 HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);
+                                 HWRM_NA_SIGNATURE, nq_ring_id);
        if (rc)
                return rc;
 
        cpr->cp_cons = 0;
-       bnxt_set_db(bp, &cpr->cp_db, ring_type, queue_index);
+       bnxt_set_db(bp, &cpr->cp_db, ring_type, queue_index,
+                   cp_ring->fw_ring_id);
        bnxt_db_cq(cpr);
 
        return 0;
 }
 
+static int bnxt_alloc_nq_ring(struct bnxt *bp, int queue_index,
+                             struct bnxt_cp_ring_info *nqr,
+                             bool rx)
+{
+       struct bnxt_ring *nq_ring = nqr->cp_ring_struct;
+       uint8_t ring_type;
+       int rc = 0;
+
+       if (!BNXT_HAS_NQ(bp))
+               return -EINVAL;
+
+       ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;
+
+       rc = bnxt_hwrm_ring_alloc(bp, nq_ring, ring_type, queue_index,
+                                 HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);
+       if (rc)
+               return rc;
+
+       if (rx)
+               bp->grp_info[queue_index].cp_fw_ring_id = nq_ring->fw_ring_id;
+
+       bnxt_set_db(bp, &nqr->cp_db, ring_type, queue_index,
+                   nq_ring->fw_ring_id);
+       bnxt_db_nq(nqr);
+
+       return 0;
+}
+
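Because a Thor completion ring is bound to its notification queue at
allocation time (the NQ's firmware ring ID is passed in the ring-alloc
request), the NQ must exist before the CQ; bnxt_alloc_cmpl_ring() above fails
fast when the NQ is missing. A sketch of the required caller ordering,
assuming a hypothetical helper placed alongside the static functions in
bnxt_ring.c:

    /* Hypothetical helper (sketch only); relies on the static functions
     * defined above in this file. */
    static int alloc_cq_with_nq_sketch(struct bnxt *bp, int queue_index,
                                       struct bnxt_cp_ring_info *cpr,
                                       struct bnxt_cp_ring_info *nqr)
    {
            /* NQ first: its fw_ring_id is consumed by the CQ allocation. */
            if (BNXT_HAS_NQ(bp) &&
                bnxt_alloc_nq_ring(bp, queue_index, nqr, true))
                    return -EINVAL;

            return bnxt_alloc_cmpl_ring(bp, queue_index, cpr, nqr);
    }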
 static int bnxt_alloc_rx_ring(struct bnxt *bp, int queue_index)
 {
        struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
@@ -336,7 +444,7 @@ static int bnxt_alloc_rx_ring(struct bnxt *bp, int queue_index)
 
        rxr->rx_prod = 0;
        bp->grp_info[queue_index].rx_fw_ring_id = ring->fw_ring_id;
-       bnxt_set_db(bp, &rxr->rx_db, ring_type, queue_index);
+       bnxt_set_db(bp, &rxr->rx_db, ring_type, queue_index, ring->fw_ring_id);
        bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
 
        return 0;
@@ -354,7 +462,14 @@ static int bnxt_alloc_rx_agg_ring(struct bnxt *bp, int queue_index)
        uint8_t ring_type;
        int rc = 0;
 
-       ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;
+       ring->fw_rx_ring_id = rxr->rx_ring_struct->fw_ring_id;
+
+       if (BNXT_CHIP_THOR(bp)) {
+               ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG;
+               hw_stats_ctx_id = cpr->hw_stats_ctx_id;
+       } else {
+               ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;
+       }
 
        rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, map_idx,
                                  hw_stats_ctx_id, cp_ring->fw_ring_id);
@@ -364,7 +479,7 @@ static int bnxt_alloc_rx_agg_ring(struct bnxt *bp, int queue_index)
 
        rxr->ag_prod = 0;
        bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id;
-       bnxt_set_db(bp, &rxr->ag_db, ring_type, map_idx);
+       bnxt_set_db(bp, &rxr->ag_db, ring_type, map_idx, ring->fw_ring_id);
        bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
 
        return 0;
@@ -375,10 +490,16 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
        struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+       struct bnxt_cp_ring_info *nqr = rxq->nq_ring;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        int rc = 0;
 
-       if (bnxt_alloc_cmpl_ring(bp, queue_index, cpr))
+       if (BNXT_HAS_NQ(bp)) {
+               if (bnxt_alloc_nq_ring(bp, queue_index, nqr, true))
+                       goto err_out;
+       }
+
+       if (bnxt_alloc_cmpl_ring(bp, queue_index, cpr, nqr))
                goto err_out;
 
        bp->grp_info[queue_index].fw_stats_ctx = cpr->hw_stats_ctx_id;
@@ -444,12 +565,16 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                struct bnxt_rx_queue *rxq = bp->rx_queues[i];
                struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+               struct bnxt_cp_ring_info *nqr = rxq->nq_ring;
                struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
                struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
 
-               bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
+               if (BNXT_HAS_NQ(bp)) {
+                       if (bnxt_alloc_nq_ring(bp, i, nqr, true))
+                               goto err_out;
+               }
 
-               if (bnxt_alloc_cmpl_ring(bp, i, cpr))
+               if (bnxt_alloc_cmpl_ring(bp, i, cpr, nqr))
                        goto err_out;
 
                bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
@@ -492,11 +617,17 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
                struct bnxt_tx_queue *txq = bp->tx_queues[i];
                struct bnxt_cp_ring_info *cpr = txq->cp_ring;
                struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+               struct bnxt_cp_ring_info *nqr = txq->nq_ring;
                struct bnxt_tx_ring_info *txr = txq->tx_ring;
                struct bnxt_ring *ring = txr->tx_ring_struct;
                unsigned int idx = i + bp->rx_cp_nr_rings;
 
-               if (bnxt_alloc_cmpl_ring(bp, idx, cpr))
+               if (BNXT_HAS_NQ(bp)) {
+                       if (bnxt_alloc_nq_ring(bp, idx, nqr, false))
+                               goto err_out;
+               }
+
+               if (bnxt_alloc_cmpl_ring(bp, idx, cpr, nqr))
                        goto err_out;
 
                /* Tx ring */
@@ -508,7 +639,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
                if (rc)
                        goto err_out;
 
-               bnxt_set_db(bp, &txr->tx_db, ring_type, idx);
+               bnxt_set_db(bp, &txr->tx_db, ring_type, idx, ring->fw_ring_id);
                txq->index = idx;
                bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
        }
index 8cb0e8e..af2c576 100644 (file)
@@ -49,6 +49,7 @@ struct bnxt_ring {
        void                    **vmem;
 
        uint16_t                fw_ring_id; /* Ring id filled by Chimp FW */
+       uint16_t                fw_rx_ring_id;
        const void              *mem_zone;
 };
 
@@ -70,19 +71,40 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
                            struct bnxt_tx_queue *txq,
                            struct bnxt_rx_queue *rxq,
                            struct bnxt_cp_ring_info *cp_ring_info,
+                           struct bnxt_cp_ring_info *nq_ring_info,
                            const char *suffix);
 int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index);
 int bnxt_alloc_hwrm_rings(struct bnxt *bp);
 
 static inline void bnxt_db_write(struct bnxt_db_info *db, uint32_t idx)
 {
-       rte_write32(db->db_key32 | idx, db->doorbell);
+       if (db->db_64)
+               rte_write64_relaxed(db->db_key64 | idx, db->doorbell);
+       else
+               rte_write32(db->db_key32 | idx, db->doorbell);
+}
+
+static inline void bnxt_db_nq(struct bnxt_cp_ring_info *cpr)
+{
+       struct bnxt_db_info *db = &cpr->cp_db;
+
+       rte_smp_wmb();
+       if (likely(db->db_64))
+               rte_write64(db->db_key64 | DBR_TYPE_NQ |
+                           RING_CMP(cpr->cp_ring_struct, cpr->cp_raw_cons),
+                           db->doorbell);
 }
 
 static inline void bnxt_db_cq(struct bnxt_cp_ring_info *cpr)
 {
+       struct bnxt_db_info *db = &cpr->cp_db;
+       uint32_t idx = RING_CMP(cpr->cp_ring_struct, cpr->cp_raw_cons);
+
        rte_smp_wmb();
-       B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+       if (db->db_64)
+               rte_write64(db->db_key64 | idx, db->doorbell);
+       else
+               B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
 }
 
 #endif
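
The inline helpers above hide the 32- versus 64-bit doorbell split from the
fast path: a burst loop posts all descriptors first and issues one
bnxt_db_write() at the end. A hedged sketch of that usage (field names
borrowed from the TX ring structures; not the literal bnxt_txr.c code):

    #include <rte_atomic.h>
    #include "bnxt_ring.h"
    #include "bnxt_txr.h"

    /* Sketch only: publish nb_posted new TX descriptors with one
     * doorbell write, whatever the doorbell width. */
    static void tx_doorbell_sketch(struct bnxt_tx_ring_info *txr,
                                   uint16_t nb_posted)
    {
            txr->tx_prod = (txr->tx_prod + nb_posted) &
                           txr->tx_ring_struct->ring_mask;

            rte_smp_wmb();  /* descriptors visible before the doorbell */
            bnxt_db_write(&txr->tx_db, txr->tx_prod);
    }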
index 31ab38f..6919acb 100644 (file)
@@ -341,7 +341,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
        eth_dev->data->rx_queues[queue_idx] = rxq;
        /* Allocate RX ring hardware descriptors */
        if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring,
-                       "rxr")) {
+                       rxq->nq_ring, "rxr")) {
                PMD_DRV_LOG(ERR,
                        "ring_dma_zone_reserve for rx_ring failed!\n");
                bnxt_rx_queue_release_op(rxq);
@@ -424,15 +424,18 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
                vnic = rxq->vnic;
 
-               if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
-                       return 0;
+               if (BNXT_HAS_RING_GRPS(bp)) {
+                       if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
+                               return 0;
+
+                       vnic->fw_grp_ids[rx_queue_id] =
+                                       bp->grp_info[rx_queue_id].fw_grp_id;
+               }
 
                PMD_DRV_LOG(DEBUG,
                            "vnic = %p fw_grp_id = %d\n",
                            vnic, bp->grp_info[rx_queue_id].fw_grp_id);
 
-               vnic->fw_grp_ids[rx_queue_id] =
-                                       bp->grp_info[rx_queue_id].fw_grp_id;
                rc = bnxt_vnic_rss_configure(bp, vnic);
        }
 
@@ -469,7 +472,8 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
                vnic = rxq->vnic;
-               vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
+               if (BNXT_HAS_RING_GRPS(bp))
+                       vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
                rc = bnxt_vnic_rss_configure(bp, vnic);
        }
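
Thor devices have no RX ring groups, so the fw_grp_ids table is only
maintained on earlier chips; the BNXT_HAS_RING_GRPS() guards added above keep
the RSS reconfiguration path common to both. A sketch of the combined
start/stop guard pattern (hypothetical helper; declarations assumed from the
bnxt headers):

    #include <stdbool.h>
    #include "bnxt.h"

    /* Sketch only: ring-group bookkeeping is pre-Thor, RSS reconfig
     * runs on both generations. */
    static int rx_queue_rss_update_sketch(struct bnxt *bp,
                                          struct bnxt_vnic_info *vnic,
                                          uint16_t rx_queue_id, bool started)
    {
            if (BNXT_HAS_RING_GRPS(bp))
                    vnic->fw_grp_ids[rx_queue_id] = started ?
                            bp->grp_info[rx_queue_id].fw_grp_id :
                            INVALID_HW_RING_ID;

            return bnxt_vnic_rss_configure(bp, vnic);
    }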
 
index 7c6b4de..b5e42d0 100644 (file)
@@ -39,6 +39,7 @@ struct bnxt_rx_queue {
        uint32_t                        rx_buf_use_size;  /* useable size */
        struct bnxt_rx_ring_info        *rx_ring;
        struct bnxt_cp_ring_info        *cp_ring;
+       struct bnxt_cp_ring_info        *nq_ring;
        rte_atomic64_t          rx_mbuf_alloc_fail;
        const struct rte_memzone *mz;
 };
index 75d2c76..44303f3 100644 (file)
@@ -637,6 +637,7 @@ void bnxt_free_rx_rings(struct bnxt *bp)
 int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
 {
        struct bnxt_cp_ring_info *cpr;
+       struct bnxt_cp_ring_info *nqr;
        struct bnxt_rx_ring_info *rxr;
        struct bnxt_ring *ring;
 
@@ -685,6 +686,32 @@ int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
        ring->vmem_size = 0;
        ring->vmem = NULL;
 
+       if (BNXT_HAS_NQ(rxq->bp)) {
+               nqr = rte_zmalloc_socket("bnxt_rx_ring_cq",
+                                        sizeof(struct bnxt_cp_ring_info),
+                                        RTE_CACHE_LINE_SIZE, socket_id);
+               if (nqr == NULL)
+                       return -ENOMEM;
+
+               rxq->nq_ring = nqr;
+
+               ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
+                                         sizeof(struct bnxt_ring),
+                                         RTE_CACHE_LINE_SIZE, socket_id);
+               if (ring == NULL)
+                       return -ENOMEM;
+
+               nqr->cp_ring_struct = ring;
+               ring->ring_size =
+                       rte_align32pow2(rxr->rx_ring_struct->ring_size *
+                                       (2 + AGG_RING_SIZE_FACTOR));
+               ring->ring_mask = ring->ring_size - 1;
+               ring->bd = (void *)nqr->cp_desc_ring;
+               ring->bd_dma = nqr->cp_desc_mapping;
+               ring->vmem_size = 0;
+               ring->vmem = NULL;
+       }
+
        /* Allocate Aggregator rings */
        ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
                                   sizeof(struct bnxt_ring),
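
The RX notification queue allocated above is deliberately oversized: it must
absorb events for the RX ring, the aggregation ring (AGG_RING_SIZE_FACTOR
aggregation buffers per RX entry), and completions, hence
ring_size * (2 + AGG_RING_SIZE_FACTOR) rounded up to a power of two. A
standalone sketch of the computation (the AGG_RING_SIZE_FACTOR value is an
assumption here; rte_align32pow2() is re-created locally):

    #include <stdint.h>
    #include <stdio.h>

    #define AGG_RING_SIZE_FACTOR 2  /* assumed value, see bnxt_rxr.h */

    /* Round up to the next power of two, as rte_align32pow2() does. */
    static uint32_t align32pow2(uint32_t x)
    {
            x--;
            x |= x >> 1;  x |= x >> 2;  x |= x >> 4;
            x |= x >> 8;  x |= x >> 16;
            return x + 1;
    }

    int main(void)
    {
            uint32_t rx_ring_size = 256;    /* example */
            uint32_t nq_size =
                    align32pow2(rx_ring_size * (2 + AGG_RING_SIZE_FACTOR));

            printf("rx=%u -> nq=%u\n", rx_ring_size, nq_size);
            return 0;
    }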
index 665bfbc..43b3496 100644 (file)
@@ -134,7 +134,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 
        /* Allocate TX ring hardware descriptors */
        if (bnxt_alloc_rings(bp, queue_idx, txq, NULL, txq->cp_ring,
-                       "txr")) {
+                       txq->nq_ring, "txr")) {
                PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
                bnxt_tx_queue_release_op(txq);
                rc = -ENOMEM;
index a0d4678..9190e3f 100644 (file)
@@ -32,6 +32,7 @@ struct bnxt_tx_queue {
 
        unsigned int            cp_nr_rings;
        struct bnxt_cp_ring_info        *cp_ring;
+       struct bnxt_cp_ring_info        *nq_ring;
        const struct rte_memzone *mz;
        struct rte_mbuf **free;
 };
index 4bb177f..124186e 100644 (file)
@@ -57,6 +57,7 @@ int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq)
 int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
 {
        struct bnxt_cp_ring_info *cpr;
+       struct bnxt_cp_ring_info *nqr;
        struct bnxt_tx_ring_info *txr;
        struct bnxt_ring *ring;
 
@@ -100,6 +101,30 @@ int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
        ring->vmem_size = 0;
        ring->vmem = NULL;
 
+       if (BNXT_HAS_NQ(txq->bp)) {
+               nqr = rte_zmalloc_socket("bnxt_tx_ring_nq",
+                                        sizeof(struct bnxt_cp_ring_info),
+                                        RTE_CACHE_LINE_SIZE, socket_id);
+               if (nqr == NULL)
+                       return -ENOMEM;
+
+               txq->nq_ring = nqr;
+
+               ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
+                                         sizeof(struct bnxt_ring),
+                                         RTE_CACHE_LINE_SIZE, socket_id);
+               if (ring == NULL)
+                       return -ENOMEM;
+
+               nqr->cp_ring_struct = ring;
+               ring->ring_size = txr->tx_ring_struct->ring_size;
+               ring->ring_mask = ring->ring_size - 1;
+               ring->bd = (void *)nqr->cp_desc_ring;
+               ring->bd_dma = nqr->cp_desc_mapping;
+               ring->vmem_size = 0;
+               ring->vmem = NULL;
+       }
+
        return 0;
 }
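
Finally, note that every ring here keeps ring_mask = ring_size - 1, which
requires power-of-two sizes; the TX NQ simply inherits the TX ring's size,
which already satisfies that. The payoff is branch-free index wrap-around,
sketched standalone below:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t ring_size = 512;       /* must be a power of two */
            uint32_t ring_mask = ring_size - 1;
            uint32_t prod = 510;

            /* Advancing by 5 wraps cleanly via the mask, no modulo. */
            prod = (prod + 5) & ring_mask;
            printf("prod=%u\n", prod);      /* prints 3 */
            return 0;
    }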