net/bnxt: support redirecting tunnel packets to VF
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 29f2701..09fee3d 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -590,6 +590,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+       bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
@@ -1638,9 +1639,11 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
        return rc;
 }
 
-int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
+                            struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
 {
        int rc = 0;
+       uint16_t ctx_id;
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;
@@ -1648,38 +1651,40 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
-
        HWRM_CHECK_RESULT();
 
-       vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
+       ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
+       if (!BNXT_HAS_RING_GRPS(bp))
+               vnic->fw_grp_ids[ctx_idx] = ctx_id;
+       else if (ctx_idx == 0)
+               vnic->rss_rule = ctx_id;
+
        HWRM_UNLOCK();
-       PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
 
        return rc;
 }
 
-int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
+                           struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
 {
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
                                                bp->hwrm_cmd_resp_addr;
 
-       if (vnic->rss_rule == (uint16_t)HWRM_NA_SIGNATURE) {
+       if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
                PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
                return rc;
        }
        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
 
-       req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
+       req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();
 
-       vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
-
        return rc;
 }
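
With the new ctx_idx argument, context handling differs by chip: on chips
without HW ring groups (Thor), bnxt_hwrm_vnic_ctx_alloc() records one
context id per index in vnic->fw_grp_ids[], while non-Thor chips keep a
single context in vnic->rss_rule (ctx_idx 0). A minimal sketch of how a
VNIC setup path might drive the new API; the helper name is an assumption,
not part of this patch:

/* Sketch only: bnxt_setup_vnic_ctxs() is illustrative. */
static int bnxt_setup_vnic_ctxs(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc, i;

	if (!BNXT_CHIP_THOR(bp))
		/* Non-Thor: one COS/LB context, stored in rss_rule. */
		return bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);

	/* Thor: one context per RSS ring-table pair; the alloc helper
	 * stores each returned id in vnic->fw_grp_ids[i].
	 */
	for (i = 0; i < bp->max_ring_grps; i++) {
		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i);
		if (rc)
			return rc;
	}
	return 0;
}

The teardown side in bnxt_free_all_hwrm_resources() below mirrors this:
per-index frees on Thor, a single rss_rule free otherwise.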
 
@@ -1711,6 +1716,47 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        return rc;
 }
 
+static int
+bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+       int i;
+       int rc = 0;
+       int nr_ctxs = bp->max_ring_grps;
+       struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
+       struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+       if (!(vnic->rss_table && vnic->hash_type))
+               return 0;
+
+       HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
+
+       req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+       req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
+       req.hash_mode_flags = vnic->hash_mode;
+
+       req.hash_key_tbl_addr =
+           rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
+
+       for (i = 0; i < nr_ctxs; i++) {
+               req.ring_grp_tbl_addr =
+                       rte_cpu_to_le_64(vnic->rss_table_dma_addr +
+                                        i * HW_HASH_INDEX_SIZE);
+               req.ring_table_pair_index = i;
+               req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
+
+               rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
+                                           BNXT_USE_CHIMP_MB);
+
+               HWRM_CHECK_RESULT();
+               if (rc)
+                       break;
+       }
+
+       HWRM_UNLOCK();
+
+       return rc;
+}
+
 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
                           struct bnxt_vnic_info *vnic)
 {
@@ -1718,6 +1764,9 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
        struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 
+       if (BNXT_CHIP_THOR(bp))
+               return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
+
        HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
 
        req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
@@ -2247,7 +2296,7 @@ void bnxt_free_tunnel_ports(struct bnxt *bp)
 
 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
 {
-       int i;
+       int i, j;
 
        if (bp->vnic_info == NULL)
                return;
@@ -2263,7 +2312,16 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
 
                bnxt_clear_hwrm_vnic_filters(bp, vnic);
 
-               bnxt_hwrm_vnic_ctx_free(bp, vnic);
+                       if (BNXT_CHIP_THOR(bp)) {
+                       for (j = 0; j < vnic->num_lb_ctxts; j++) {
+                               bnxt_hwrm_vnic_ctx_free(bp, vnic,
+                                                       vnic->fw_grp_ids[j]);
+                               vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
+                       }
+               } else {
+                       bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
+                       vnic->rss_rule = INVALID_HW_RING_ID;
+               }
 
                bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
 
@@ -4037,32 +4095,105 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
        return 0;
 }
 
-int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+static int
+bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 {
-       unsigned int rss_idx, fw_idx, i;
+       struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+       uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
+       struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
+       int nr_ctxs = bp->max_ring_grps;
+       struct bnxt_rx_queue **rxqs = bp->rx_queues;
+       uint16_t *ring_tbl = vnic->rss_table;
+       int max_rings = bp->rx_nr_rings;
+       int i, j, k, cnt;
+       int rc = 0;
 
-       if (vnic->rss_table && vnic->hash_type) {
-               /*
-                * Fill the RSS hash & redirection table with
-                * ring group ids for all VNICs
-                */
-               for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
-                       rss_idx++, fw_idx++) {
-                       for (i = 0; i < bp->rx_cp_nr_rings; i++) {
-                               fw_idx %= bp->rx_cp_nr_rings;
-                               if (vnic->fw_grp_ids[fw_idx] !=
-                                   INVALID_HW_RING_ID)
+       HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
+
+       req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+       req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
+       req.hash_mode_flags = vnic->hash_mode;
+
+       req.ring_grp_tbl_addr =
+           rte_cpu_to_le_64(vnic->rss_table_dma_addr);
+       req.hash_key_tbl_addr =
+           rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
+
+       for (i = 0, k = 0; i < nr_ctxs; i++) {
+               struct bnxt_rx_ring_info *rxr;
+               struct bnxt_cp_ring_info *cpr;
+
+               req.ring_table_pair_index = i;
+               req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
+
+               for (j = 0; j < 64; j++) {
+                       uint16_t ring_id;
+
+                       /* Find next active ring. */
+                       for (cnt = 0; cnt < max_rings; cnt++) {
+                               if (rx_queue_state[k] !=
+                                               RTE_ETH_QUEUE_STATE_STOPPED)
                                        break;
-                               fw_idx++;
+                               if (++k == max_rings)
+                                       k = 0;
                        }
-                       if (i == bp->rx_cp_nr_rings)
+
+                       /* Return if no rings are active. */
+                       if (cnt == max_rings)
                                return 0;
-                       vnic->rss_table[rss_idx] =
-                               vnic->fw_grp_ids[fw_idx];
+
+                       /* Add rx/cp ring pair to RSS table. */
+                       rxr = rxqs[k]->rx_ring;
+                       cpr = rxqs[k]->cp_ring;
+
+                       ring_id = rxr->rx_ring_struct->fw_ring_id;
+                       *ring_tbl++ = rte_cpu_to_le_16(ring_id);
+                       ring_id = cpr->cp_ring_struct->fw_ring_id;
+                       *ring_tbl++ = rte_cpu_to_le_16(ring_id);
+
+                       if (++k == max_rings)
+                               k = 0;
                }
-               return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+               rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
+                                           BNXT_USE_CHIMP_MB);
+
+               HWRM_CHECK_RESULT();
+               if (rc)
+                       break;
        }
-       return 0;
+
+       HWRM_UNLOCK();
+
+       return rc;
+}
+
+int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+       unsigned int rss_idx, fw_idx, i;
+
+       if (!(vnic->rss_table && vnic->hash_type))
+               return 0;
+
+       if (BNXT_CHIP_THOR(bp))
+               return bnxt_vnic_rss_configure_thor(bp, vnic);
+
+       /*
+        * Fill the RSS hash & redirection table with
+        * ring group ids for all VNICs
+        */
+       for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
+               rss_idx++, fw_idx++) {
+               for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+                       fw_idx %= bp->rx_cp_nr_rings;
+                       if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
+                               break;
+                       fw_idx++;
+               }
+               if (i == bp->rx_cp_nr_rings)
+                       return 0;
+               vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
+       }
+       return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
 }
 
 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
@@ -4095,6 +4226,37 @@ static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
        req->flags = rte_cpu_to_le_16(flags);
 }
 
+static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp,
+               struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
+{
+       struct hwrm_ring_aggint_qcaps_input req = {0};
+       struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+       uint32_t enables;
+       uint16_t flags;
+       int rc;
+
+       HWRM_PREP(req, RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+       if (rc)
+               goto out;
+
+       agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
+       agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;
+
+       flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
+               HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
+       agg_req->flags = rte_cpu_to_le_16(flags);
+       enables =
+        HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
+        HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
+       agg_req->enables = rte_cpu_to_le_32(enables);
+
+out:
+       HWRM_CHECK_RESULT();
+       HWRM_UNLOCK();
+       return rc;
+}
+
 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
                        struct bnxt_coal *coal, uint16_t ring_id)
 {
@@ -4103,12 +4265,17 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
                                                bp->hwrm_cmd_resp_addr;
        int rc;
 
-       /* Set ring coalesce parameters only for Stratus 100G NIC */
-       if (!bnxt_stratus_device(bp))
+       /* Set ring coalesce parameters only for Thor and Stratus NICs */
+       if (BNXT_CHIP_THOR(bp)) {
+               if (bnxt_hwrm_set_coal_params_thor(bp, &req))
+                       return -1;
+       } else if (bnxt_stratus_device(bp)) {
+               bnxt_hwrm_set_coal_params(coal, &req);
+       } else {
                return 0;
+       }
 
        HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS, BNXT_USE_CHIMP_MB);
-       bnxt_hwrm_set_coal_params(coal, &req);
        req.ring_id = rte_cpu_to_le_16(ring_id);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_CHECK_RESULT();
@@ -4345,3 +4512,85 @@ int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
 
        return rc;
 }
+
+int
+bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
+{
+       struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0};
+       struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp =
+               bp->hwrm_cmd_resp_addr;
+       int rc = 0;
+
+       HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_KONG(bp));
+       req.tunnel_type = type;
+       req.dest_fid = bp->fw_fid;
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+       HWRM_CHECK_RESULT();
+
+       HWRM_UNLOCK();
+
+       return rc;
+}
+
+int
+bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
+{
+       struct hwrm_cfa_redirect_tunnel_type_free_input req = {0};
+       struct hwrm_cfa_redirect_tunnel_type_free_output *resp =
+               bp->hwrm_cmd_resp_addr;
+       int rc = 0;
+
+       HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_KONG(bp));
+       req.tunnel_type = type;
+       req.dest_fid = bp->fw_fid;
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+       HWRM_CHECK_RESULT();
+
+       HWRM_UNLOCK();
+
+       return rc;
+}
+
+int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
+{
+       struct hwrm_cfa_redirect_query_tunnel_type_input req = {0};
+       struct hwrm_cfa_redirect_query_tunnel_type_output *resp =
+               bp->hwrm_cmd_resp_addr;
+       int rc = 0;
+
+       HWRM_PREP(req, CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_KONG(bp));
+       req.src_fid = bp->fw_fid;
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+       HWRM_CHECK_RESULT();
+
+       if (type)
+               *type = resp->tunnel_mask;
+
+       HWRM_UNLOCK();
+
+       return rc;
+}
+
+int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
+                                  uint16_t *dst_fid)
+{
+       struct hwrm_cfa_redirect_tunnel_type_info_input req = {0};
+       struct hwrm_cfa_redirect_tunnel_type_info_output *resp =
+               bp->hwrm_cmd_resp_addr;
+       int rc = 0;
+
+       HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_KONG(bp));
+       req.src_fid = bp->fw_fid;
+       req.tunnel_type = tun_type;
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+       HWRM_CHECK_RESULT();
+
+       if (dst_fid)
+               *dst_fid = resp->dest_fid;
+
+       PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", resp->dest_fid);
+
+       HWRM_UNLOCK();
+
+       return rc;
+}
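
Taken together, the four new CFA redirect helpers implement the feature in
the subject line: a function (typically a VF) can query which tunnel types
are already redirected, look up the current destination fid for a type, and
allocate a redirect so that tunnel packets of that type are steered to its
own fid (the helpers fill dest_fid/src_fid from bp->fw_fid). A minimal
caller sketch; the wrapper name, the tun_mask_bit parameter and the error
handling are assumptions, and the real flow-offload integration lives
outside this file:

/* Sketch: claim one tunnel type for this function (PF or VF).
 * tun_type is an HWRM CFA tunnel-type value and tun_mask_bit its bit
 * in the tunnel_mask returned by the query helper.
 */
static int bnxt_claim_tunnel_type(struct bnxt *bp, uint8_t tun_type,
				  uint32_t tun_mask_bit)
{
	uint32_t redirected = 0;
	uint16_t dst_fid = 0;
	int rc;

	/* Bitmask of tunnel types already redirected for this port. */
	rc = bnxt_hwrm_tunnel_redirect_query(bp, &redirected);
	if (rc)
		return rc;

	if (redirected & tun_mask_bit) {
		rc = bnxt_hwrm_tunnel_redirect_info(bp, tun_type, &dst_fid);
		if (rc)
			return rc;
		/* Already steered: either to us (nothing to do) or to
		 * another function, which must release it first via
		 * bnxt_hwrm_tunnel_redirect_free().
		 */
		return dst_fid == bp->fw_fid ? 0 : -EBUSY;
	}

	/* The alloc helper sets dest_fid = bp->fw_fid, i.e. this VF/PF. */
	return bnxt_hwrm_tunnel_redirect(bp, tun_type);
}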