net/bnxt: fix multi adapter
diff --git a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
index 6502913..85c9cbb 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
@@ -35,7 +35,7 @@ ulp_fc_mgr_shadow_mem_alloc(struct hw_fc_mem_info *parms, int size)
        rte_mem_lock_page(parms->mem_va);
 
        parms->mem_pa = (void *)(uintptr_t)rte_mem_virt2phy(parms->mem_va);
-       if (parms->mem_pa == (void *)(uintptr_t)RTE_BAD_IOVA) {
+       if (parms->mem_pa == (void *)RTE_BAD_IOVA) {
                BNXT_TF_DBG(ERR, "Allocate failed mem_pa\n");
                return -ENOMEM;
        }
@@ -93,6 +93,13 @@ ulp_fc_mgr_init(struct bnxt_ulp_context *ctxt)
        /* Add the FC info tbl to the ulp context. */
        bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, ulp_fc_info);
 
+       ulp_fc_info->num_counters = dparms->flow_count_db_entries;
+       if (!ulp_fc_info->num_counters) {
+               /* No need for software counters, call fw directly */
+               BNXT_TF_DBG(DEBUG, "Sw flow counter support not enabled\n");
+               return 0;
+       }
+
        sw_acc_cntr_tbl_sz = sizeof(struct sw_acc_counter) *
                                dparms->flow_count_db_entries;
 
@@ -132,6 +139,7 @@ int32_t
 ulp_fc_mgr_deinit(struct bnxt_ulp_context *ctxt)
 {
        struct bnxt_ulp_fc_info *ulp_fc_info;
+       struct hw_fc_mem_info *shd_info;
        int i;
 
        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
@@ -143,11 +151,15 @@ ulp_fc_mgr_deinit(struct bnxt_ulp_context *ctxt)
 
        pthread_mutex_destroy(&ulp_fc_info->fc_lock);
 
-       for (i = 0; i < TF_DIR_MAX; i++)
-               rte_free(ulp_fc_info->sw_acc_tbl[i]);
+       if (ulp_fc_info->num_counters) {
+               for (i = 0; i < TF_DIR_MAX; i++)
+                       rte_free(ulp_fc_info->sw_acc_tbl[i]);
 
-       for (i = 0; i < TF_DIR_MAX; i++)
-               ulp_fc_mgr_shadow_mem_free(&ulp_fc_info->shadow_hw_tbl[i]);
+               for (i = 0; i < TF_DIR_MAX; i++) {
+                       shd_info = &ulp_fc_info->shadow_hw_tbl[i];
+                       ulp_fc_mgr_shadow_mem_free(shd_info);
+               }
+       }
 
        rte_free(ulp_fc_info);
 
@@ -169,7 +181,10 @@ bool ulp_fc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt)
 
        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
 
-       return !!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD);
+       if (ulp_fc_info)
+               return !!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD);
+
+       return false;
 }
 
 /*
@@ -186,10 +201,9 @@ ulp_fc_mgr_thread_start(struct bnxt_ulp_context *ctxt)
 
        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
 
-       if (!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD)) {
+       if (ulp_fc_info && !(ulp_fc_info->flags & ULP_FLAG_FC_THREAD)) {
                rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
-                                 ulp_fc_mgr_alarm_cb,
-                                 (void *)ctxt);
+                                 ulp_fc_mgr_alarm_cb, (void *)ctxt->cfg_data);
                ulp_fc_info->flags |= ULP_FLAG_FC_THREAD;
        }
 
@@ -211,7 +225,7 @@ void ulp_fc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
                return;
 
        ulp_fc_info->flags &= ~ULP_FLAG_FC_THREAD;
-       rte_eal_alarm_cancel(ulp_fc_mgr_alarm_cb, (void *)ctxt);
+       rte_eal_alarm_cancel(ulp_fc_mgr_alarm_cb, ctxt->cfg_data);
 }
 
 /*
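
Both arming and cancelling the poll alarm now pass ctxt->cfg_data as the callback
argument. EAL identifies a pending alarm by its (callback, argument) pair, so the
pointer handed to rte_eal_alarm_set() must also be the one given to
rte_eal_alarm_cancel(); the callback then re-acquires the ULP context from that
argument (see ulp_fc_mgr_alarm_cb() further down). A minimal self-rearming alarm
sketch of that pairing; poll_cb, poll_start, poll_stop and POLL_US are
illustrative names, not driver APIs:

    #include <rte_alarm.h>
    #include <rte_cycles.h>   /* US_PER_S */

    #define POLL_US (US_PER_S * 1)   /* one-second period, like ULP_FC_TIMER */

    /* Self-rearming callback: it must re-register itself with the very same
     * argument it was armed with, which is what ulp_fc_mgr_alarm_cb() does
     * with 'arg' (the cfg_data pointer) in the hunks below.
     */
    static void
    poll_cb(void *arg)
    {
            /* ... read counters for the object identified by 'arg' ... */
            rte_eal_alarm_set(POLL_US, poll_cb, arg);
    }

    static void
    poll_start(void *key)
    {
            rte_eal_alarm_set(POLL_US, poll_cb, key);
    }

    static void
    poll_stop(void *key)
    {
            /* Cancellation matches on both callback and argument, so the
             * same 'key' used in poll_start() has to be passed here.
             */
            rte_eal_alarm_cancel(poll_cb, key);
    }
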
@@ -283,6 +297,74 @@ ulp_bulk_get_flow_stats(struct tf *tfp,
        return rc;
 }
 
+static int32_t
+ulp_fc_tf_flow_stat_get(struct bnxt_ulp_context *ctxt,
+                       struct ulp_flow_db_res_params *res,
+                       struct rte_flow_query_count *qcount)
+{
+       struct tf *tfp;
+       struct bnxt_ulp_device_params *dparms;
+       struct tf_get_tbl_entry_parms parms = { 0 };
+       struct tf_set_tbl_entry_parms   sparms = { 0 };
+       enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;
+       uint64_t stats = 0;
+       uint32_t dev_id = 0;
+       int32_t rc = 0;
+
+       tfp = bnxt_ulp_cntxt_tfp_get(ctxt, BNXT_ULP_SHARED_SESSION_NO);
+       if (!tfp) {
+               BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
+               return -EINVAL;
+       }
+
+       if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
+               BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
+               bnxt_ulp_cntxt_entry_release();
+               return -EINVAL;
+       }
+
+       dparms = bnxt_ulp_device_params_get(dev_id);
+       if (!dparms) {
+               BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
+               bnxt_ulp_cntxt_entry_release();
+               return -EINVAL;
+       }
+       parms.dir = res->direction;
+       parms.type = stype;
+       parms.idx = res->resource_hndl;
+       parms.data_sz_in_bytes = sizeof(uint64_t);
+       parms.data = (uint8_t *)&stats;
+       rc = tf_get_tbl_entry(tfp, &parms);
+       if (rc) {
+               PMD_DRV_LOG(ERR,
+                           "Get failed for id:0x%x rc:%d\n",
+                           parms.idx, rc);
+               return rc;
+       }
+       qcount->hits = FLOW_CNTR_PKTS(stats, dparms);
+       if (qcount->hits)
+               qcount->hits_set = 1;
+       qcount->bytes = FLOW_CNTR_BYTES(stats, dparms);
+       if (qcount->bytes)
+               qcount->bytes_set = 1;
+
+       if (qcount->reset) {
+               stats = 0;
+               sparms.dir = res->direction;
+               sparms.type = stype;
+               sparms.idx = res->resource_hndl;
+               sparms.data = (uint8_t *)&stats;
+               sparms.data_sz_in_bytes = sizeof(uint64_t);
+               rc = tf_set_tbl_entry(tfp, &sparms);
+               if (rc) {
+                       PMD_DRV_LOG(ERR, "Set failed for id:0x%x rc:%d\n",
+                                   sparms.idx, rc);
+                       return rc;
+               }
+       }
+       return rc;
+}
+
 static int ulp_get_single_flow_stat(struct bnxt_ulp_context *ctxt,
                                    struct tf *tfp,
                                    struct bnxt_ulp_fc_info *fc_info,
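
The new ulp_fc_tf_flow_stat_get() services a counter query straight from the
TruFlow stats table when no software accumulation is configured, and it only
marks hits_set/bytes_set for non-zero values. On the application side this path
is reached through the standard rte_flow query API; a minimal caller sketch,
where the helper name is illustrative and port_id/flow are assumed to come from
an earlier rte_flow_create() on a bnxt port:

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_flow.h>

    static int
    print_flow_counters(uint16_t port_id, struct rte_flow *flow)
    {
            struct rte_flow_query_count count = { 0 };
            const struct rte_flow_action action = {
                    .type = RTE_FLOW_ACTION_TYPE_COUNT,
            };
            struct rte_flow_error error = { 0 };
            int ret;

            /* For the bnxt TruFlow PMD this lands in
             * ulp_fc_mgr_query_count_get().
             */
            ret = rte_flow_query(port_id, flow, &action, &count, &error);
            if (ret)
                    return ret;

            /* Only trust fields the PMD flagged as set. */
            if (count.hits_set)
                    printf("hits:  %" PRIu64 "\n", count.hits);
            if (count.bytes_set)
                    printf("bytes: %" PRIu64 "\n", count.bytes);
            return 0;
    }
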
@@ -314,28 +396,26 @@ static int ulp_get_single_flow_stat(struct bnxt_ulp_context *ctxt,
                return rc;
        }
 
-       /* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
+       /* PKT/BYTE COUNT SHIFT/MASK are device specific */
        sw_cntr_indx = hw_cntr_id - fc_info->shadow_hw_tbl[dir].start_idx;
        sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][sw_cntr_indx];
+
        /* Some dpdk applications may accumulate the flow counters while some
         * may not. In cases where the application is accumulating the counters
         * the PMD need not do the accumulation itself and viceversa to report
         * the correct flow counters.
         */
-       if (ctxt->cfg_data->accum_stats) {
-               sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats, dparms);
-               sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats, dparms);
-       } else {
-               sw_acc_tbl_entry->pkt_count = FLOW_CNTR_PKTS(stats, dparms);
-               sw_acc_tbl_entry->byte_count = FLOW_CNTR_BYTES(stats, dparms);
-       }
+       sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats, dparms);
+       sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats, dparms);
 
        /* Update the parent counters if it is child flow */
-       if (sw_acc_tbl_entry->parent_flow_id) {
+       if (sw_acc_tbl_entry->pc_flow_idx & FLOW_CNTR_PC_FLOW_VALID) {
+               uint32_t pc_idx;
+
                /* Update the parent counters */
                t_sw = sw_acc_tbl_entry;
-               if (ulp_flow_db_parent_flow_count_update(ctxt,
-                                                        t_sw->parent_flow_id,
+               pc_idx = t_sw->pc_flow_idx & ~FLOW_CNTR_PC_FLOW_VALID;
+               if (ulp_flow_db_parent_flow_count_update(ctxt, pc_idx,
                                                         t_sw->pkt_count,
                                                         t_sw->byte_count)) {
                        PMD_DRV_LOG(ERR, "Error updating parent counters\n");
@@ -359,30 +439,43 @@ ulp_fc_mgr_alarm_cb(void *arg)
        int rc = 0;
        unsigned int j;
        enum tf_dir i;
-       struct bnxt_ulp_context *ctxt = arg;
+       struct bnxt_ulp_context *ctxt;
        struct bnxt_ulp_fc_info *ulp_fc_info;
        struct bnxt_ulp_device_params *dparms;
        struct tf *tfp;
        uint32_t dev_id, hw_cntr_id = 0, num_entries = 0;
 
+       ctxt = bnxt_ulp_cntxt_entry_acquire(arg);
+       if (ctxt == NULL) {
+               BNXT_TF_DBG(INFO, "could not get the ulp context lock\n");
+               rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
+                                 ulp_fc_mgr_alarm_cb, arg);
+               return;
+       }
+
        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
-       if (!ulp_fc_info)
+       if (!ulp_fc_info) {
+               bnxt_ulp_cntxt_entry_release();
                return;
+       }
 
        if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
                BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
+               bnxt_ulp_cntxt_entry_release();
                return;
        }
 
        dparms = bnxt_ulp_device_params_get(dev_id);
        if (!dparms) {
                BNXT_TF_DBG(DEBUG, "Failed to device parms\n");
+               bnxt_ulp_cntxt_entry_release();
                return;
        }
 
-       tfp = bnxt_ulp_cntxt_tfp_get(ctxt);
+       tfp = bnxt_ulp_cntxt_tfp_get(ctxt, BNXT_ULP_SHARED_SESSION_NO);
        if (!tfp) {
                BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
+               bnxt_ulp_cntxt_entry_release();
                return;
        }
 
@@ -396,6 +489,7 @@ ulp_fc_mgr_alarm_cb(void *arg)
        if (!ulp_fc_info->num_entries) {
                pthread_mutex_unlock(&ulp_fc_info->fc_lock);
                ulp_fc_mgr_thread_cancel(ctxt);
+               bnxt_ulp_cntxt_entry_release();
                return;
        }
        /*
@@ -434,12 +528,13 @@ ulp_fc_mgr_alarm_cb(void *arg)
 
        if (rc) {
                ulp_fc_mgr_thread_cancel(ctxt);
+               bnxt_ulp_cntxt_entry_release();
                return;
        }
 out:
+       bnxt_ulp_cntxt_entry_release();
        rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
-                         ulp_fc_mgr_alarm_cb,
-                         (void *)ctxt);
+                         ulp_fc_mgr_alarm_cb, arg);
 }
 
 /*
@@ -459,7 +554,10 @@ bool ulp_fc_mgr_start_idx_isset(struct bnxt_ulp_context *ctxt, enum tf_dir dir)
 
        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
 
-       return ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set;
+       if (ulp_fc_info)
+               return ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set;
+
+       return false;
 }
 
 /*
@@ -514,6 +612,9 @@ int32_t ulp_fc_mgr_cntr_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
        if (!ulp_fc_info)
                return -EIO;
 
+       if (!ulp_fc_info->num_counters)
+               return 0;
+
        pthread_mutex_lock(&ulp_fc_info->fc_lock);
        sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = true;
@@ -546,12 +647,16 @@ int32_t ulp_fc_mgr_cntr_reset(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
        if (!ulp_fc_info)
                return -EIO;
 
+       if (!ulp_fc_info->num_counters)
+               return 0;
+
        pthread_mutex_lock(&ulp_fc_info->fc_lock);
        sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = false;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = 0;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pkt_count = 0;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].byte_count = 0;
+       ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pc_flow_idx = 0;
        ulp_fc_info->num_entries--;
        pthread_mutex_unlock(&ulp_fc_info->fc_lock);
 
@@ -582,6 +687,8 @@ int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
        uint32_t hw_cntr_id = 0, sw_cntr_idx = 0;
        struct sw_acc_counter *sw_acc_tbl_entry;
        bool found_cntr_resource = false;
+       bool found_parent_flow = false;
+       uint32_t pc_idx = 0;
 
        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
@@ -601,12 +708,16 @@ int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
                     (params.resource_sub_type ==
                      BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT ||
                      params.resource_sub_type ==
-                     BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_EXT_COUNT ||
-                     params.resource_sub_type ==
-                     BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT_ACC)) {
+                     BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_EXT_COUNT)) {
                        found_cntr_resource = true;
                        break;
                }
+               if (params.resource_func ==
+                   BNXT_ULP_RESOURCE_FUNC_PARENT_FLOW) {
+                       found_parent_flow = true;
+                       pc_idx = params.resource_hndl;
+               }
+
        } while (!rc && nxt_resource_index);
 
        bnxt_ulp_cntxt_release_fdb_lock(ctxt);
@@ -616,8 +727,12 @@ int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
 
        dir = params.direction;
        hw_cntr_id = params.resource_hndl;
-       if (params.resource_sub_type ==
+       if (!found_parent_flow &&
+           params.resource_sub_type ==
                        BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT) {
+               if (!ulp_fc_info->num_counters)
+                       return ulp_fc_tf_flow_stat_get(ctxt, &params, count);
+
                /* TODO:
                 * Think about optimizing with try_lock later
                 */
@@ -636,14 +751,17 @@ int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
                        sw_acc_tbl_entry->byte_count = 0;
                }
                pthread_mutex_unlock(&ulp_fc_info->fc_lock);
-       } else if (params.resource_sub_type ==
-                       BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT_ACC) {
+       } else if (found_parent_flow &&
+                  params.resource_sub_type ==
+                       BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT) {
                /* Get stats from the parent child table */
-               ulp_flow_db_parent_flow_count_get(ctxt, flow_id,
+               ulp_flow_db_parent_flow_count_get(ctxt, pc_idx,
                                                  &count->hits, &count->bytes,
                                                  count->reset);
-               count->hits_set = 1;
-               count->bytes_set = 1;
+               if (count->hits)
+                       count->hits_set = 1;
+               if (count->bytes)
+                       count->bytes_set = 1;
        } else {
                /* TBD: Handle External counters */
                rc = -EINVAL;
@@ -661,13 +779,13 @@ int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
  *
  * hw_cntr_id [in] The HW flow counter ID
  *
- * fid [in] parent flow id
+ * pc_idx [in] parent child db index
  *
  */
 int32_t ulp_fc_mgr_cntr_parent_flow_set(struct bnxt_ulp_context *ctxt,
                                        enum tf_dir dir,
                                        uint32_t hw_cntr_id,
-                                       uint32_t fid)
+                                       uint32_t pc_idx)
 {
        struct bnxt_ulp_fc_info *ulp_fc_info;
        uint32_t sw_cntr_idx;
@@ -680,10 +798,11 @@ int32_t ulp_fc_mgr_cntr_parent_flow_set(struct bnxt_ulp_context *ctxt,
        pthread_mutex_lock(&ulp_fc_info->fc_lock);
        sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
        if (ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid) {
-               ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].parent_flow_id = fid;
+               pc_idx |= FLOW_CNTR_PC_FLOW_VALID;
+               ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pc_flow_idx = pc_idx;
        } else {
                BNXT_TF_DBG(ERR, "Failed to set parent flow id %x:%x\n",
-                           hw_cntr_id, fid);
+                           hw_cntr_id, pc_idx);
                rc = -ENOENT;
        }
        pthread_mutex_unlock(&ulp_fc_info->fc_lock);
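
The sw_acc_counter field changes from parent_flow_id to pc_flow_idx: it now
stores an index into the parent-child flow DB rather than a flow id, and
validity is carried by the FLOW_CNTR_PC_FLOW_VALID bit rather than by a non-zero
value, so index 0 remains usable. ulp_get_single_flow_stat() tests the flag and
strips it before calling ulp_flow_db_parent_flow_count_update(). A sketch of the
tag/untag scheme; the helper names and the bit value are illustrative, the real
FLOW_CNTR_PC_FLOW_VALID comes from the driver headers:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative flag bit; any bit above the largest parent-child DB
     * index works for this tagging scheme.
     */
    #define PC_FLOW_VALID_BIT (UINT32_C(1) << 31)

    static inline uint32_t
    pc_idx_encode(uint32_t pc_idx)
    {
            /* Mark the stored index as valid, as done in
             * ulp_fc_mgr_cntr_parent_flow_set() above.
             */
            return pc_idx | PC_FLOW_VALID_BIT;
    }

    static inline bool
    pc_idx_is_valid(uint32_t field)
    {
            return (field & PC_FLOW_VALID_BIT) != 0;
    }

    static inline uint32_t
    pc_idx_decode(uint32_t field)
    {
            /* Strip the flag to recover the parent-child DB index. */
            return field & ~PC_FLOW_VALID_BIT;
    }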