net/bnxt: support CoS classification
author    Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
          Fri, 4 Oct 2019 03:48:58 +0000 (20:48 -0700)
committer Ferruh Yigit <ferruh.yigit@intel.com>
          Tue, 8 Oct 2019 10:14:32 +0000 (12:14 +0200)
Class of Service (CoS) is a way to manage multiple types of traffic
over a network and offer different levels of service to applications.
The CoS classification (priority to CoS queue mapping) is determined
by the user and configured through the PF driver. The DPDK driver
queries this configuration and maps the CoS queue IDs to the
different VNICs. This patch adds that support.
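
In outline, bnxt_init_chip() walks the Rx CoS queue table reported by
firmware and assigns each valid queue ID to the next VNIC. A minimal
sketch of that mapping, simplified from the patch (error handling
omitted; 0xff marks an unused table entry):

    /* Assign each valid Rx CoS queue ID to the next VNIC in order. */
    for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
            if (bp->rx_cos_queue[i].id == 0xff)
                    continue;
            bp->vnic_info[j++].cos_queue_id = bp->rx_cos_queue[i].id;
            bp->rx_cosq_cnt++;
    }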

Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Santoshkumar Karanappa Rastapur <santosh.rastapur@broadcom.com>
Reviewed-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
drivers/net/bnxt/bnxt.h
drivers/net/bnxt/bnxt_ethdev.c
drivers/net/bnxt/bnxt_hwrm.c
drivers/net/bnxt/bnxt_hwrm.h
drivers/net/bnxt/bnxt_ring.c
drivers/net/bnxt/bnxt_rxq.c
drivers/net/bnxt/bnxt_vnic.h
drivers/net/bnxt/hsi_struct_def_dpdk.h

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index ad97e0e..5cfe5ee 100644
@@ -470,8 +470,10 @@ struct bnxt {
 
        uint32_t                flow_flags;
 #define BNXT_FLOW_FLAG_L2_HDR_SRC_FILTER_EN    BIT(0)
-
        pthread_mutex_t         flow_lock;
+
+       uint32_t                vnic_cap_flags;
+#define BNXT_VNIC_CAP_COS_CLASSIFY     BIT(0)
        unsigned int            rx_nr_rings;
        unsigned int            rx_cp_nr_rings;
        unsigned int            rx_num_qs_per_vnic;
@@ -523,8 +525,10 @@ struct bnxt {
        uint16_t                        hwrm_max_ext_req_len;
 
        struct bnxt_link_info   link_info;
-       struct bnxt_cos_queue_info      cos_queue[BNXT_COS_QUEUE_COUNT];
-       uint8_t                 tx_cosq_id;
+       struct bnxt_cos_queue_info      rx_cos_queue[BNXT_COS_QUEUE_COUNT];
+       struct bnxt_cos_queue_info      tx_cos_queue[BNXT_COS_QUEUE_COUNT];
+       uint8_t                 tx_cosq_id[BNXT_COS_QUEUE_COUNT];
+       uint8_t                 rx_cosq_cnt;
        uint8_t                 max_tc;
        uint8_t                 max_lltc;
        uint8_t                 max_q;
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 4fc182b..9adcd94 100644
@@ -308,6 +308,25 @@ static int bnxt_init_chip(struct bnxt *bp)
                goto err_out;
        }
 
+       if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
+               goto skip_cosq_cfg;
+
+       for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
+               if (bp->rx_cos_queue[i].id != 0xff) {
+                       struct bnxt_vnic_info *vnic = &bp->vnic_info[j++];
+
+                       if (!vnic) {
+                               PMD_DRV_LOG(ERR,
+                                           "Num pools more than FW profile\n");
+                               rc = -EINVAL;
+                               goto err_out;
+                       }
+                       vnic->cos_queue_id = bp->rx_cos_queue[i].id;
+                       bp->rx_cosq_cnt++;
+               }
+       }
+
+skip_cosq_cfg:
        rc = bnxt_mq_rx_configure(bp);
        if (rc) {
                PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
@@ -4540,7 +4559,7 @@ static int bnxt_init_fw(struct bnxt *bp)
        if (rc)
                return -EIO;
 
-       rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp);
+       rc = bnxt_hwrm_vnic_qcaps(bp);
        if (rc)
                return rc;
 
@@ -4548,16 +4567,19 @@ static int bnxt_init_fw(struct bnxt *bp)
        if (rc)
                return rc;
 
-       /* Get the MAX capabilities for this function */
+       /* Get the MAX capabilities for this function.
+        * This function also allocates context memory for TQM rings and
+        * informs the firmware about this allocated backing store memory.
+        */
        rc = bnxt_hwrm_func_qcaps(bp);
        if (rc)
                return rc;
 
-       rc = bnxt_hwrm_vnic_qcaps(bp);
+       rc = bnxt_hwrm_func_qcfg(bp, &mtu);
        if (rc)
                return rc;
 
-       rc = bnxt_hwrm_func_qcfg(bp, &mtu);
+       rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp);
        if (rc)
                return rc;
 
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 9f30214..76ef004 100644
@@ -700,6 +700,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
        return rc;
 }
 
+/* VNIC cap covers capability of all VNICs. So no need to pass vnic_id */
 int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
 {
        int rc = 0;
@@ -714,6 +715,12 @@ int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
 
        HWRM_CHECK_RESULT();
 
+       if (rte_le_to_cpu_32(resp->flags) &
+           HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
+               bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY;
+               PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n");
+       }
+
        bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);
 
        HWRM_UNLOCK();
@@ -1199,11 +1206,13 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       uint32_t dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
        int i;
 
+get_rx_info:
        HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
 
-       req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
+       req.flags = rte_cpu_to_le_32(dir);
        /* HWRM Version >= 1.9.1 */
        if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
                req.drv_qmap_cap =
@@ -1212,30 +1221,51 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
 
        HWRM_CHECK_RESULT();
 
-#define GET_QUEUE_INFO(x) \
-       bp->cos_queue[x].id = resp->queue_id##x; \
-       bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
-
-       GET_QUEUE_INFO(0);
-       GET_QUEUE_INFO(1);
-       GET_QUEUE_INFO(2);
-       GET_QUEUE_INFO(3);
-       GET_QUEUE_INFO(4);
-       GET_QUEUE_INFO(5);
-       GET_QUEUE_INFO(6);
-       GET_QUEUE_INFO(7);
+       if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
+               GET_TX_QUEUE_INFO(0);
+               GET_TX_QUEUE_INFO(1);
+               GET_TX_QUEUE_INFO(2);
+               GET_TX_QUEUE_INFO(3);
+               GET_TX_QUEUE_INFO(4);
+               GET_TX_QUEUE_INFO(5);
+               GET_TX_QUEUE_INFO(6);
+               GET_TX_QUEUE_INFO(7);
+       } else  {
+               GET_RX_QUEUE_INFO(0);
+               GET_RX_QUEUE_INFO(1);
+               GET_RX_QUEUE_INFO(2);
+               GET_RX_QUEUE_INFO(3);
+               GET_RX_QUEUE_INFO(4);
+               GET_RX_QUEUE_INFO(5);
+               GET_RX_QUEUE_INFO(6);
+               GET_RX_QUEUE_INFO(7);
+       }
 
        HWRM_UNLOCK();
 
+       if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX)
+               goto done;
+
        if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
-               bp->tx_cosq_id = bp->cos_queue[0].id;
+               bp->tx_cosq_id[0] = bp->tx_cos_queue[0].id;
        } else {
+               int j;
+
                /* iterate and find the COSq profile to use for Tx */
-               for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
-                       if (bp->cos_queue[i].profile ==
-                               HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
-                               bp->tx_cosq_id = bp->cos_queue[i].id;
-                               break;
+               if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
+                       for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
+                               if (bp->tx_cos_queue[i].id != 0xff)
+                                       bp->tx_cosq_id[j++] =
+                                               bp->tx_cos_queue[i].id;
+                       }
+               } else {
+                       for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
+                               if (bp->tx_cos_queue[i].profile ==
+                                       HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
+                                       bp->tx_cosq_id[0] =
+                                               bp->tx_cos_queue[i].id;
+                                       break;
+                               }
                        }
                }
        }
@@ -1246,15 +1276,20 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
                bp->max_tc = BNXT_MAX_QUEUE;
        bp->max_q = bp->max_tc;
 
-       PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);
+       if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
+               dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX;
+               goto get_rx_info;
+       }
 
+done:
        return rc;
 }
 
 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
-                        uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
+                        uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
+                        uint16_t tx_cosq_id)
 {
        int rc = 0;
        uint32_t enables = 0;
@@ -1276,7 +1311,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
-               req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
+               req.queue_id = rte_cpu_to_le_16(tx_cosq_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
@@ -1682,6 +1717,11 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
        }
+       if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
+               ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID;
+               req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id);
+       }
+
        enables |= ctx_enable_flag;
        req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
        req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 8912a4e..fcbce60 100644
@@ -52,6 +52,16 @@ HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED
        HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC |       \
        HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT)
 
+#define GET_TX_QUEUE_INFO(x) \
+       bp->tx_cos_queue[x].id = resp->queue_id##x; \
+       bp->tx_cos_queue[x].profile =   \
+               resp->queue_id##x##_service_profile
+
+#define GET_RX_QUEUE_INFO(x) \
+       bp->rx_cos_queue[x].id = resp->queue_id##x; \
+       bp->rx_cos_queue[x].profile =   \
+               resp->queue_id##x##_service_profile
+
 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,
                                   struct bnxt_vnic_info *vnic);
 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic,
@@ -90,7 +100,8 @@ int bnxt_hwrm_set_async_event_cr(struct bnxt *bp);
 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
-                        uint32_t stats_ctx_id, uint32_t cmpl_ring_id);
+                        uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
+                        uint16_t tx_cosq_id);
 int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type);
 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx);
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 7b6d87c..c83d300 100644
@@ -424,7 +424,7 @@ static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
        }
 
        rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, cp_ring_index,
-                                 HWRM_NA_SIGNATURE, nq_ring_id);
+                                 HWRM_NA_SIGNATURE, nq_ring_id, 0);
        if (rc)
                return rc;
 
@@ -450,7 +450,7 @@ static int bnxt_alloc_nq_ring(struct bnxt *bp, int queue_index,
        ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;
 
        rc = bnxt_hwrm_ring_alloc(bp, nq_ring, ring_type, nq_ring_index,
-                                 HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);
+                                 HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, 0);
        if (rc)
                return rc;
 
@@ -475,7 +475,7 @@ static int bnxt_alloc_rx_ring(struct bnxt *bp, int queue_index)
 
        rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type,
                                  queue_index, cpr->hw_stats_ctx_id,
-                                 cp_ring->fw_ring_id);
+                                 cp_ring->fw_ring_id, 0);
        if (rc)
                return rc;
 
@@ -510,7 +510,7 @@ static int bnxt_alloc_rx_agg_ring(struct bnxt *bp, int queue_index)
        }
 
        rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, map_idx,
-                                 hw_stats_ctx_id, cp_ring->fw_ring_id);
+                                 hw_stats_ctx_id, cp_ring->fw_ring_id, 0);
 
        if (rc)
                return rc;
@@ -701,6 +701,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
                struct bnxt_tx_ring_info *txr = txq->tx_ring;
                struct bnxt_ring *ring = txr->tx_ring_struct;
                unsigned int idx = i + bp->rx_cp_nr_rings;
+               uint16_t tx_cosq_id = 0;
 
                if (BNXT_HAS_NQ(bp)) {
                        if (bnxt_alloc_nq_ring(bp, idx, nqr))
@@ -710,12 +711,17 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
                if (bnxt_alloc_cmpl_ring(bp, idx, cpr, nqr))
                        goto err_out;
 
+               if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)
+                       tx_cosq_id = bp->tx_cosq_id[i < bp->max_lltc ? i : 0];
+               else
+                       tx_cosq_id = bp->tx_cosq_id[0];
                /* Tx ring */
                ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_TX;
                rc = bnxt_hwrm_ring_alloc(bp, ring,
                                          ring_type,
                                          i, cpr->hw_stats_ctx_id,
-                                         cp_ring->fw_ring_id);
+                                         cp_ring->fw_ring_id,
+                                         tx_cosq_id);
                if (rc)
                        goto err_out;
 
@@ -747,7 +753,7 @@ int bnxt_alloc_async_cp_ring(struct bnxt *bp)
                ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;
 
        rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, 0,
-                                 HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);
+                                 HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, 0);
 
        if (rc)
                return rc;
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 03b115d..5d291cb 100644
@@ -76,6 +76,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
                switch (dev_conf->rxmode.mq_mode) {
                case ETH_MQ_RX_VMDQ_RSS:
                case ETH_MQ_RX_VMDQ_ONLY:
+               case ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* FALLTHROUGH */
                        /* ETH_8/64_POOLs */
                        pools = conf->nb_queue_pools;
@@ -91,7 +92,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
                                pools = max_pools;
                        break;
                case ETH_MQ_RX_RSS:
-                       pools = 1;
+                       pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
                        break;
                default:
                        PMD_DRV_LOG(ERR, "Unsupported mq_mod %d\n",
diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h
index de34b21..4f760e0 100644
@@ -45,6 +45,7 @@ struct bnxt_vnic_info {
        uint16_t        cos_rule;
        uint16_t        lb_rule;
        uint16_t        rx_queue_cnt;
+       uint16_t        cos_queue_id;
        bool            vlan_strip;
        bool            func_default;
        bool            bd_stall;
diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h
index 26d12cf..c45d088 100644
@@ -21157,7 +21157,7 @@ struct hwrm_vnic_free_output {
  *****************/
 
 
-/* hwrm_vnic_cfg_input (size:320b/40B) */
+/* hwrm_vnic_cfg_input (size:384b/48B) */
 struct hwrm_vnic_cfg_input {
        /* The HWRM command request type. */
        uint16_t        req_type;
@@ -21300,6 +21300,9 @@ struct hwrm_vnic_cfg_input {
         */
        #define HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID \
                UINT32_C(0x40)
+       /* This bit must be '1' for the queue_id field to be configured. */
+       #define HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID \
+               UINT32_C(0x80)
        /* Logical vnic ID */
        uint16_t        vnic_id;
        /*
@@ -21345,6 +21348,19 @@ struct hwrm_vnic_cfg_input {
         * be chosen if packet does not match any RSS rules.
         */
        uint16_t        default_cmpl_ring_id;
+       /*
+        * When specified, only incoming packets classified to the specified CoS
+        * queue ID will be arriving on this VNIC.  Packet priority to CoS mapping
+        * rules can be specified using HWRM_QUEUE_PRI2COS_CFG.  In this mode,
+        * ntuple filters with VNIC destination specified are invalid since they
+        * conflict with the CoS to VNIC steering rules in this mode.
+        *
+        * If this field is not specified, packet to VNIC steering will be
+        * subject to the standard L2 filter rules and any additional ntuple
+        * filter rules with destination VNIC specified.
+        */
+       uint16_t        queue_id;
+       uint8_t unused0[6];
 } __attribute__((packed));
 
 /* hwrm_vnic_cfg_output (size:128b/16B) */
@@ -21640,6 +21656,16 @@ struct hwrm_vnic_qcaps_output {
         */
        #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP \
                UINT32_C(0x80)
+       /*
+        * When this bit is '1', it indicates that firmware supports the
+        * ability to steer incoming packets from one CoS queue to one
+        * VNIC.  This optional feature can then be enabled
+        * using HWRM_VNIC_CFG on any VNIC.  This feature is only
+        * available when NVM option “enable_cos_classfication” is set
+        * to 1.  If set to '0', firmware does not support this feature.
+        */
+       #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP \
+               UINT32_C(0x100)
        /*
         * This field advertises the maximum concurrent TPA aggregations
         * supported by the VNIC on new devices that support TPA v2.
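
For reference, once firmware reports the COS_ASSIGNMENT_CAP flag above,
the PMD opts a VNIC into CoS steering by setting the new enables bit
and queue_id in the HWRM_VNIC_CFG request, as bnxt_hwrm_vnic_cfg()
does in this patch (the patch folds the flag into ctx_enable_flag
first). A minimal sketch:

    if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
            /* Only packets classified to this CoS queue reach the VNIC. */
            enables |= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID;
            req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id);
    }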