diff --git a/drivers/net/sfc/sfc_mae_counter.c b/drivers/net/sfc/sfc_mae_counter.c
index b0cb815..418caff 100644
--- a/drivers/net/sfc/sfc_mae_counter.c
+++ b/drivers/net/sfc/sfc_mae_counter.c
@@ -99,6 +99,8 @@ sfc_mae_counter_enable(struct sfc_adapter *sa,
                       &p->value.pkts_bytes.int128, __ATOMIC_RELAXED);
        p->generation_count = generation_count;
 
+       p->ft_group_hit_counter = counterp->ft_group_hit_counter;
+
        /*
         * The flag is set at the very end of add operation and reset
         * at the beginning of delete operation. Release ordering is
@@ -210,6 +212,14 @@ sfc_mae_counter_increment(struct sfc_adapter *sa,
        __atomic_store(&p->value.pkts_bytes,
                       &cnt_val.pkts_bytes, __ATOMIC_RELAXED);
 
+       if (p->ft_group_hit_counter != NULL) {
+               uint64_t ft_group_hit_counter;
+
+               ft_group_hit_counter = *p->ft_group_hit_counter + pkts;
+               __atomic_store_n(p->ft_group_hit_counter, ft_group_hit_counter,
+                                __ATOMIC_RELAXED);
+       }
+
        sfc_info(sa, "update MAE counter #%u: pkts+%" PRIu64 "=%" PRIu64
                 ", bytes+%" PRIu64 "=%" PRIu64, mae_counter_id,
                 pkts, cnt_val.pkts, bytes, cnt_val.bytes);
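
The update path above assumes a single writer (the counter service lcore), so relaxed atomics are sufficient: readers only need a torn-free load, and the optional group hit counter is mirrored with the same plain-read/atomic-store pattern. A minimal, self-contained sketch of that pattern, using illustrative names rather than the driver's own, might look like this:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-ins for the driver's counter state. */
struct counter_sketch {
	uint64_t pkts;                /* written only by the service core */
	uint64_t *group_hit_counter;  /* optional shared per-group hits   */
};

/* Single writer: add a packet delta and mirror it into the group counter. */
static void
counter_sketch_increment(struct counter_sketch *c, uint64_t pkts)
{
	uint64_t new_pkts = __atomic_load_n(&c->pkts, __ATOMIC_RELAXED) + pkts;

	__atomic_store_n(&c->pkts, new_pkts, __ATOMIC_RELAXED);

	if (c->group_hit_counter != NULL) {
		/* Plain read is fine: no other thread writes this value. */
		uint64_t hits = *c->group_hit_counter + pkts;

		__atomic_store_n(c->group_hit_counter, hits, __ATOMIC_RELAXED);
	}
}

/* Any other thread: a relaxed atomic load yields a torn-free snapshot. */
static uint64_t
counter_sketch_read(const struct counter_sketch *c)
{
	return __atomic_load_n(&c->pkts, __ATOMIC_RELAXED);
}
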
@@ -793,3 +803,58 @@ fail_counter_stream:
 
        return rc;
 }
+
+int
+sfc_mae_counter_get(struct sfc_mae_counters *counters,
+                   const struct sfc_mae_counter_id *counter,
+                   struct rte_flow_query_count *data)
+{
+       struct sfc_flow_tunnel *ft = counter->ft;
+       uint64_t non_reset_jump_hit_counter;
+       struct sfc_mae_counter *p;
+       union sfc_pkts_bytes value;
+
+       SFC_ASSERT(counter->mae_id.id < counters->n_mae_counters);
+       p = &counters->mae_counters[counter->mae_id.id];
+
+       /*
+        * Ordering is relaxed since it is the only operation on counter value.
+        * And it does not depend on different stores/loads in other threads.
+        * Paired with relaxed ordering in counter increment.
+        */
+       value.pkts_bytes.int128 = __atomic_load_n(&p->value.pkts_bytes.int128,
+                                                 __ATOMIC_RELAXED);
+
+       data->hits_set = 1;
+       data->hits = value.pkts - p->reset.pkts;
+
+       if (ft != NULL) {
+               data->hits += ft->group_hit_counter;
+               non_reset_jump_hit_counter = data->hits;
+               data->hits -= ft->reset_jump_hit_counter;
+       } else {
+               data->bytes_set = 1;
+               data->bytes = value.bytes - p->reset.bytes;
+       }
+
+       if (data->reset != 0) {
+               if (ft != NULL) {
+                       ft->reset_jump_hit_counter = non_reset_jump_hit_counter;
+               } else {
+                       p->reset.pkts = value.pkts;
+                       p->reset.bytes = value.bytes;
+               }
+       }
+
+       return 0;
+}
+
+bool
+sfc_mae_counter_stream_enabled(struct sfc_adapter *sa)
+{
+       if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0 ||
+           sfc_get_service_lcore(SOCKET_ID_ANY) == RTE_MAX_LCORE)
+               return B_FALSE;
+       else
+               return B_TRUE;
+}
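
For context, sfc_mae_counter_get() is reached through the generic rte_flow query path: the PMD fills struct rte_flow_query_count, and the reset bit latches a new baseline (reset_jump_hit_counter for tunnel-offload flows, reset.pkts/bytes otherwise). An application-side usage sketch, not part of this patch, could be:

#include <inttypes.h>
#include <stdio.h>

#include <rte_flow.h>

/* Query the COUNT action of an already offloaded flow on the given port. */
static int
query_flow_hits(uint16_t port_id, struct rte_flow *flow)
{
	const struct rte_flow_action count_action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_query_count query = {
		.reset = 1,	/* ask the PMD to latch a new baseline */
	};
	struct rte_flow_error error;
	int rc;

	rc = rte_flow_query(port_id, flow, &count_action, &query, &error);
	if (rc != 0)
		return rc;

	if (query.hits_set)
		printf("hits: %" PRIu64 "\n", query.hits);
	if (query.bytes_set)
		printf("bytes: %" PRIu64 "\n", query.bytes);

	return 0;
}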