1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2020-2021 Xilinx, Inc.
6 #include <rte_common.h>
7 #include <rte_service_component.h>
10 #include "efx_regs_counters_pkt_format.h"
15 #include "sfc_mae_counter.h"
16 #include "sfc_service.h"
/*
 * Pick a service lcore for the MAE counter service.
 * Try the adapter's own NUMA node first; if none is available there,
 * fall back to any socket. Returns RTE_MAX_LCORE on total failure.
 * NOTE(review): source chunk is decimated — some interior lines
 * (declarations, returns) are not visible here.
 */
19 sfc_mae_counter_get_service_lcore(struct sfc_adapter *sa)
23 cid = sfc_get_service_lcore(sa->socket_id);
24 if (cid != RTE_MAX_LCORE)
/* Local-socket lookup failed: retry without a NUMA constraint */
27 if (sa->socket_id != SOCKET_ID_ANY)
28 cid = sfc_get_service_lcore(SOCKET_ID_ANY);
30 if (cid == RTE_MAX_LCORE) {
31 sfc_warn(sa, "failed to get service lcore for counter service");
/* Got an lcore, but on a different socket than requested — warn only */
32 } else if (sa->socket_id != SOCKET_ID_ANY) {
34 "failed to get service lcore for counter service at socket %d, but got at socket %u",
35 sa->socket_id, rte_lcore_to_socket_id(cid));
/*
 * Report whether a dedicated counter RxQ is needed/usable:
 * requires MAE support in the NIC config and an available service lcore.
 */
41 sfc_mae_counter_rxq_required(struct sfc_adapter *sa)
43 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
45 if (encp->enc_mae_supported == B_FALSE)
48 if (sfc_mae_counter_get_service_lcore(sa) == RTE_MAX_LCORE)
/*
 * Allocate one MAE counter in the FW and publish it in the registry.
 * On success the counter's reset snapshot is taken from its current
 * value and the slot is marked in-use (release store pairs with the
 * acquire load in the increment path). Errors unwind via goto labels.
 * Fix: "®->counters" was mojibake for "&reg->counters" (the HTML
 * entity "&reg;" got decoded during extraction).
 */
55 sfc_mae_counter_enable(struct sfc_adapter *sa,
56 struct sfc_mae_counter_id *counterp)
58 struct sfc_mae_counter_registry *reg = &sa->mae.counter_registry;
59 struct sfc_mae_counters *counters = &reg->counters;
60 struct sfc_mae_counter *p;
61 efx_counter_t mae_counter;
62 uint32_t generation_count;
67 * The actual count of counters allocated is ignored since a failure
68 * to allocate a single counter is indicated by non-zero return code.
70 rc = efx_mae_counters_alloc(sa->nic, 1, &unused, &mae_counter,
73 sfc_err(sa, "failed to alloc MAE counter: %s",
75 goto fail_mae_counter_alloc;
/* FW-assigned ID must index into the pre-sized mae_counters array */
78 if (mae_counter.id >= counters->n_mae_counters) {
80 * ID of a counter is expected to be within the range
81 * between 0 and the maximum count of counters to always
82 * fit into a pre-allocated array size of maximum counter ID.
84 sfc_err(sa, "MAE counter ID is out of expected range");
86 goto fail_counter_id_range;
89 counterp->mae_id = mae_counter;
91 p = &counters->mae_counters[mae_counter.id];
94 * Ordering is relaxed since it is the only operation on counter value.
95 * And it does not depend on different stores/loads in other threads.
96 * Paired with relaxed ordering in counter increment.
98 __atomic_store(&p->reset.pkts_bytes.int128,
99 &p->value.pkts_bytes.int128, __ATOMIC_RELAXED);
100 p->generation_count = generation_count;
102 p->ft_group_hit_counter = counterp->ft_group_hit_counter;
105 * The flag is set at the very end of add operation and reset
106 * at the beginning of delete operation. Release ordering is
107 * paired with acquire ordering on load in counter increment operation.
109 __atomic_store_n(&p->inuse, true, __ATOMIC_RELEASE);
111 sfc_info(sa, "enabled MAE counter #%u with reset pkts=%" PRIu64
112 " bytes=%" PRIu64, mae_counter.id,
113 p->reset.pkts, p->reset.bytes);
117 fail_counter_id_range:
/* Best-effort: return the just-allocated counter to the FW pool */
118 (void)efx_mae_counters_free(sa->nic, 1, &unused, &mae_counter, NULL);
120 fail_mae_counter_alloc:
121 sfc_log_init(sa, "failed: %s", rte_strerror(rc));
/*
 * Free a previously enabled MAE counter. The in-use flag is cleared
 * first (release store pairs with the acquire load in the increment
 * path) so stale stream updates are discarded, then the FW resource
 * is released. The ID is invalidated regardless of free() outcome.
 * Fix: "®->counters" was mojibake for "&reg->counters" (the HTML
 * entity "&reg;" got decoded during extraction).
 */
126 sfc_mae_counter_disable(struct sfc_adapter *sa,
127 struct sfc_mae_counter_id *counter)
129 struct sfc_mae_counter_registry *reg = &sa->mae.counter_registry;
130 struct sfc_mae_counters *counters = &reg->counters;
131 struct sfc_mae_counter *p;
/* Nothing to do if the counter was never allocated / already freed */
135 if (counter->mae_id.id == EFX_MAE_RSRC_ID_INVALID)
138 SFC_ASSERT(counter->mae_id.id < counters->n_mae_counters);
140 * The flag is set at the very end of add operation and reset
141 * at the beginning of delete operation. Release ordering is
142 * paired with acquire ordering on load in counter increment operation.
144 p = &counters->mae_counters[counter->mae_id.id];
145 __atomic_store_n(&p->inuse, false, __ATOMIC_RELEASE);
147 rc = efx_mae_counters_free(sa->nic, 1, &unused, &counter->mae_id, NULL);
149 sfc_err(sa, "failed to free MAE counter %u: %s",
150 counter->mae_id.id, rte_strerror(rc));
152 sfc_info(sa, "disabled MAE counter #%u with reset pkts=%" PRIu64
153 " bytes=%" PRIu64, counter->mae_id.id,
154 p->reset.pkts, p->reset.bytes);
157 * Do this regardless of what efx_mae_counters_free() return value is.
158 * If there's some error, the resulting resource leakage is bad, but
159 * nothing sensible can be done in this case.
161 counter->mae_id.id = EFX_MAE_RSRC_ID_INVALID;
/*
 * Apply one pkts/bytes delta from the counter stream to the counter
 * identified by mae_counter_id. Updates are dropped (and accounted in
 * xstats) when the slot is not in use or the update predates the
 * slot's current generation (stale update for a reallocated ID).
 */
167 sfc_mae_counter_increment(struct sfc_adapter *sa,
168 struct sfc_mae_counters *counters,
169 uint32_t mae_counter_id,
170 uint32_t generation_count,
171 uint64_t pkts, uint64_t bytes)
173 struct sfc_mae_counter *p = &counters->mae_counters[mae_counter_id];
174 struct sfc_mae_counters_xstats *xstats = &counters->xstats;
175 union sfc_pkts_bytes cnt_val;
179 * Acquire ordering is paired with release ordering in counter add
180 * and delete operations.
182 __atomic_load(&p->inuse, &inuse, __ATOMIC_ACQUIRE);
185 * Two possible cases include:
186 * 1) Counter is just allocated. Too early counter update
187 * cannot be processed properly.
188 * 2) Stale update of freed and not reallocated counter.
191 xstats->not_inuse_update++;
195 if (unlikely(generation_count < p->generation_count)) {
197 * It is a stale update for the reallocated counter
198 * (i.e., freed and the same ID allocated again).
200 xstats->realloc_update++;
/* Single-writer accumulation: read-modify-write is safe here */
204 cnt_val.pkts = p->value.pkts + pkts;
205 cnt_val.bytes = p->value.bytes + bytes;
208 * Ordering is relaxed since it is the only operation on counter value.
209 * And it does not depend on different stores/loads in other threads.
210 * Paired with relaxed ordering on counter reset.
212 __atomic_store(&p->value.pkts_bytes,
213 &cnt_val.pkts_bytes, __ATOMIC_RELAXED);
/* Optionally mirror the packet count into a flow-tunnel group hit counter */
215 if (p->ft_group_hit_counter != NULL) {
216 uint64_t ft_group_hit_counter;
218 ft_group_hit_counter = *p->ft_group_hit_counter + pkts;
219 __atomic_store_n(p->ft_group_hit_counter, ft_group_hit_counter,
223 sfc_info(sa, "update MAE counter #%u: pkts+%" PRIu64 "=%" PRIu64
224 ", bytes+%" PRIu64 "=%" PRIu64, mae_counter_id,
225 pkts, cnt_val.pkts, bytes, cnt_val.bytes);
/*
 * Parse one counter-stream packet (packetiser format, version 2) and
 * feed each contained counter record to sfc_mae_counter_increment().
 * The packet is validated step by step: single segment, minimum
 * length, header version/identifier, header/payload offsets, and
 * total length against the advertised counter count; any mismatch
 * discards the whole packet.
 */
229 sfc_mae_parse_counter_packet(struct sfc_adapter *sa,
230 struct sfc_mae_counter_registry *counter_registry,
231 const struct rte_mbuf *m)
233 uint32_t generation_count;
234 const efx_xword_t *hdr;
235 const efx_oword_t *counters_data;
236 unsigned int version;
238 unsigned int header_offset;
239 unsigned int payload_offset;
240 unsigned int counter_count;
241 unsigned int required_len;
244 if (unlikely(m->nb_segs != 1)) {
245 sfc_err(sa, "unexpectedly scattered MAE counters packet (%u segments)",
250 if (unlikely(m->data_len < ER_RX_SL_PACKETISER_HEADER_WORD_SIZE)) {
251 sfc_err(sa, "too short MAE counters packet (%u bytes)",
257 * The generation count is located in the Rx prefix in the USER_MARK
258 * field which is written into hash.fdir.hi field of an mbuf. See
259 * SF-123581-TC SmartNIC Datapath Offloads section 4.7.5 Counters.
261 generation_count = m->hash.fdir.hi;
263 hdr = rte_pktmbuf_mtod(m, const efx_xword_t *);
265 version = EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_VERSION);
266 if (unlikely(version != ERF_SC_PACKETISER_HEADER_VERSION_2)) {
267 sfc_err(sa, "unexpected MAE counters packet version %u",
272 id = EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_IDENTIFIER);
273 if (unlikely(id != ERF_SC_PACKETISER_HEADER_IDENTIFIER_AR)) {
274 sfc_err(sa, "unexpected MAE counters source identifier %u", id);
278 /* Packet layout definitions assume fixed header offset in fact */
280 EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_HEADER_OFFSET);
281 if (unlikely(header_offset !=
282 ERF_SC_PACKETISER_HEADER_HEADER_OFFSET_DEFAULT)) {
283 sfc_err(sa, "unexpected MAE counters packet header offset %u",
289 EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_PAYLOAD_OFFSET);
291 counter_count = EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_COUNT);
293 required_len = payload_offset +
294 counter_count * sizeof(counters_data[0]);
295 if (unlikely(required_len > m->data_len)) {
296 sfc_err(sa, "truncated MAE counters packet: %u counters, packet length is %u vs %u required",
297 counter_count, m->data_len, required_len);
299 * In theory it is possible process available counters data,
300 * but such condition is really unexpected and it is
301 * better to treat entire packet as corrupted.
306 /* Ensure that counters data is 32-bit aligned */
307 if (unlikely(payload_offset % sizeof(uint32_t) != 0)) {
308 sfc_err(sa, "unsupported MAE counters payload offset %u, must be 32-bit aligned",
312 RTE_BUILD_BUG_ON(sizeof(counters_data[0]) !=
313 ER_RX_SL_PACKETISER_PAYLOAD_WORD_SIZE);
316 rte_pktmbuf_mtod_offset(m, const efx_oword_t *, payload_offset);
318 sfc_info(sa, "update %u MAE counters with gc=%u",
319 counter_count, generation_count);
321 for (i = 0; i < counter_count; ++i) {
322 uint32_t packet_count_lo;
323 uint32_t packet_count_hi;
324 uint32_t byte_count_lo;
325 uint32_t byte_count_hi;
328 * Use 32-bit field accessors below since counters data
329 * is not 64-bit aligned.
330 * 32-bit alignment is checked above taking into account
331 * that start of packet data is 32-bit aligned
332 * (cache-line size aligned in fact).
335 EFX_OWORD_FIELD32(counters_data[i],
336 ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_LO);
338 EFX_OWORD_FIELD32(counters_data[i],
339 ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_HI);
341 EFX_OWORD_FIELD32(counters_data[i],
342 ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_LO);
344 EFX_OWORD_FIELD32(counters_data[i],
345 ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_HI);
/* Recombine the split LO/HI fields into 64-bit pkts/bytes deltas */
346 sfc_mae_counter_increment(sa,
347 &counter_registry->counters,
348 EFX_OWORD_FIELD32(counters_data[i],
349 ERF_SC_PACKETISER_PAYLOAD_COUNTER_INDEX),
351 (uint64_t)packet_count_lo |
352 ((uint64_t)packet_count_hi <<
353 ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_LO_WIDTH),
354 (uint64_t)byte_count_lo |
355 ((uint64_t)byte_count_hi <<
356 ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_LO_WIDTH));
/*
 * Service callback: poll the counter RxQ, parse each received
 * counter-stream packet, free the mbufs, and — when the stream uses
 * credits — return credits to the FW once enough buffers have been
 * pushed back to the RxQ (refill-level batching).
 */
361 sfc_mae_counter_routine(void *arg)
363 struct sfc_adapter *sa = arg;
364 struct sfc_mae_counter_registry *counter_registry =
365 &sa->mae.counter_registry;
366 struct rte_mbuf *mbufs[SFC_MAE_COUNTER_RX_BURST];
367 unsigned int pushed_diff;
373 n = counter_registry->rx_pkt_burst(counter_registry->rx_dp, mbufs,
374 SFC_MAE_COUNTER_RX_BURST);
376 for (i = 0; i < n; i++)
377 sfc_mae_parse_counter_packet(sa, counter_registry, mbufs[i]);
379 rte_pktmbuf_free_bulk(mbufs, n);
381 if (!counter_registry->use_credits)
/* Credits are granted per buffer pushed back to the RxQ since last grant */
384 pushed = sfc_rx_get_pushed(sa, counter_registry->rx_dp);
385 pushed_diff = pushed - counter_registry->pushed_n_buffers;
387 if (pushed_diff >= SFC_COUNTER_RXQ_REFILL_LEVEL) {
388 rc = efx_mae_counters_stream_give_credits(sa->nic, pushed_diff);
390 counter_registry->pushed_n_buffers = pushed;
393 * FIXME: counters might be important for the
394 * application. Handle the error in order to recover
397 SFC_GENERIC_LOG(DEBUG, "Give credits failed: %s",
/*
 * Stop and unregister the counter service: clear both run states,
 * poll (up to wait_ms iterations) for the routine to become inactive,
 * then unmap it from its lcore and unregister the component.
 */
406 sfc_mae_counter_service_unregister(struct sfc_adapter *sa)
408 struct sfc_mae_counter_registry *registry =
409 &sa->mae.counter_registry;
410 const unsigned int wait_ms = 10000;
413 rte_service_runstate_set(registry->service_id, 0);
414 rte_service_component_runstate_set(registry->service_id, 0);
417 * Wait for the counter routine to finish the last iteration.
418 * Give up on timeout.
420 for (i = 0; i < wait_ms; i++) {
421 if (rte_service_may_be_active(registry->service_id) == 0)
/* Timed out: proceed with teardown anyway, just warn */
427 sfc_warn(sa, "failed to wait for counter service to stop");
429 rte_service_map_lcore_set(registry->service_id,
430 registry->service_core_id, 0);
432 rte_service_component_unregister(registry->service_id);
/* Shortcut to the shared RxQ info entry of the dedicated counter RxQ */
435 static struct sfc_rxq_info *
436 sfc_counter_rxq_info_get(struct sfc_adapter *sa)
438 return &sfc_sa2shared(sa)->rxq_info[sa->counter_rxq.sw_index];
/*
 * Register and start the counter polling service: fill the registry
 * (Rx burst callback, datapath queue, credit bookkeeping), obtain a
 * service lcore (local socket preferred, any-socket fallback), start
 * the core, register/map/run the service component. Errors unwind in
 * reverse order through the fail_* labels at the bottom.
 */
442 sfc_mae_counter_service_register(struct sfc_adapter *sa,
443 uint32_t counter_stream_flags)
445 struct rte_service_spec service;
446 char counter_service_name[sizeof(service.name)] = "counter_service";
447 struct sfc_mae_counter_registry *counter_registry =
448 &sa->mae.counter_registry;
453 sfc_log_init(sa, "entry");
455 /* Prepare service info */
456 memset(&service, 0, sizeof(service));
457 rte_strscpy(service.name, counter_service_name, sizeof(service.name));
458 service.socket_id = sa->socket_id;
459 service.callback = sfc_mae_counter_routine;
460 service.callback_userdata = sa;
461 counter_registry->rx_pkt_burst = sa->eth_dev->rx_pkt_burst;
462 counter_registry->rx_dp = sfc_counter_rxq_info_get(sa)->dp;
463 counter_registry->pushed_n_buffers = 0;
464 counter_registry->use_credits = counter_stream_flags &
465 EFX_MAE_COUNTERS_STREAM_OUT_USES_CREDITS;
467 cid = sfc_get_service_lcore(sa->socket_id);
468 if (cid == RTE_MAX_LCORE && sa->socket_id != SOCKET_ID_ANY) {
469 /* Warn and try to allocate on any NUMA node */
471 "failed to get service lcore for counter service at socket %d",
474 cid = sfc_get_service_lcore(SOCKET_ID_ANY);
476 if (cid == RTE_MAX_LCORE) {
478 sfc_err(sa, "failed to get service lcore for counter service");
479 goto fail_get_service_lcore;
482 /* Service core may be in "stopped" state, start it */
483 rc = rte_service_lcore_start(cid);
484 if (rc != 0 && rc != -EALREADY) {
485 sfc_err(sa, "failed to start service core for counter service: %s",
488 goto fail_start_core;
491 /* Register counter service */
492 rc = rte_service_component_register(&service, &sid);
495 sfc_err(sa, "failed to register counter service component");
499 /* Map the service with the service core */
500 rc = rte_service_map_lcore_set(sid, cid, 1);
503 sfc_err(sa, "failed to map lcore for counter service: %s",
508 /* Run the service */
509 rc = rte_service_component_runstate_set(sid, 1);
512 sfc_err(sa, "failed to run counter service component: %s",
514 goto fail_component_runstate_set;
516 rc = rte_service_runstate_set(sid, 1);
519 sfc_err(sa, "failed to run counter service");
520 goto fail_runstate_set;
/* Success: remember IDs so unregister can tear everything down */
523 counter_registry->service_core_id = cid;
524 counter_registry->service_id = sid;
526 sfc_log_init(sa, "done");
531 rte_service_component_runstate_set(sid, 0);
533 fail_component_runstate_set:
534 rte_service_map_lcore_set(sid, cid, 0);
537 rte_service_component_unregister(sid);
541 fail_get_service_lcore:
542 sfc_log_init(sa, "failed: %s", rte_strerror(rc));
/*
 * Allocate the zero-initialized array backing up to nb_counters_max
 * MAE counter slots and record its capacity.
 */
548 sfc_mae_counters_init(struct sfc_mae_counters *counters,
549 uint32_t nb_counters_max)
553 SFC_GENERIC_LOG(DEBUG, "%s: entry", __func__);
555 counters->mae_counters = rte_zmalloc("sfc_mae_counters",
556 sizeof(*counters->mae_counters) * nb_counters_max, 0);
557 if (counters->mae_counters == NULL) {
559 SFC_GENERIC_LOG(ERR, "%s: failed: %s", __func__,
564 counters->n_mae_counters = nb_counters_max;
566 SFC_GENERIC_LOG(DEBUG, "%s: done", __func__);
/* Release the counters array; NULL the pointer to guard against reuse */
572 sfc_mae_counters_fini(struct sfc_mae_counters *counters)
574 rte_free(counters->mae_counters);
575 counters->mae_counters = NULL;
/*
 * Attach the dedicated counter RxQ: create its private mbuf pool
 * (sized for the stream packet size, no mbuf private area) and record
 * the sw_index / mempool in sa->counter_rxq. A no-op when the shared
 * adapter state says no counter RxQ was allocated.
 */
579 sfc_mae_counter_rxq_attach(struct sfc_adapter *sa)
581 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
582 char name[RTE_MEMPOOL_NAMESIZE];
583 struct rte_mempool *mp;
584 unsigned int n_elements;
585 unsigned int cache_size;
586 /* The mempool is internal and private area is not required */
587 const uint16_t priv_size = 0;
588 const uint16_t data_room_size = RTE_PKTMBUF_HEADROOM +
589 SFC_MAE_COUNTER_STREAM_PACKET_SIZE;
592 sfc_log_init(sa, "entry");
594 if (!sas->counters_rxq_allocated) {
595 sfc_log_init(sa, "counter queue is not supported - skip");
600 * At least one element in the ring is always unused to distinguish
601 * between empty and full ring cases.
603 n_elements = SFC_COUNTER_RXQ_RX_DESC_COUNT - 1;
606 * The cache must have sufficient space to put received buckets
607 * before they're reused on refill.
609 cache_size = rte_align32pow2(SFC_COUNTER_RXQ_REFILL_LEVEL +
610 SFC_MAE_COUNTER_RX_BURST - 1);
612 if (snprintf(name, sizeof(name), "counter_rxq-pool-%u", sas->port_id) >=
614 sfc_err(sa, "failed: counter RxQ mempool name is too long");
620 * It could be single-producer single-consumer ring mempool which
621 * requires minimal barriers. However, cache size and refill/burst
622 * policy are aligned, therefore it does not matter which
623 * mempool backend is chosen since backend is unused.
625 mp = rte_pktmbuf_pool_create(name, n_elements, cache_size,
626 priv_size, data_room_size, sa->socket_id);
628 sfc_err(sa, "failed to create counter RxQ mempool");
633 sa->counter_rxq.sw_index = sfc_counters_rxq_sw_index(sas);
634 sa->counter_rxq.mp = mp;
635 sa->counter_rxq.state |= SFC_COUNTER_RXQ_ATTACHED;
637 sfc_log_init(sa, "done");
643 sfc_log_init(sa, "failed: %s", rte_strerror(rc));
/*
 * Undo sfc_mae_counter_rxq_attach(): free the mbuf pool and clear the
 * ATTACHED state bit. Skips cleanly when unsupported or not attached.
 */
649 sfc_mae_counter_rxq_detach(struct sfc_adapter *sa)
651 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
653 sfc_log_init(sa, "entry");
655 if (!sas->counters_rxq_allocated) {
656 sfc_log_init(sa, "counter queue is not supported - skip");
660 if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_ATTACHED) == 0) {
661 sfc_log_init(sa, "counter queue is not attached - skip");
665 rte_mempool_free(sa->counter_rxq.mp);
666 sa->counter_rxq.mp = NULL;
667 sa->counter_rxq.state &= ~SFC_COUNTER_RXQ_ATTACHED;
669 sfc_log_init(sa, "done");
/*
 * Initialize the attached counter RxQ: clamp the descriptor count to
 * the adapter's min/max, set queue info with the USER_MARK flag
 * (needed to deliver the generation count in the Rx prefix), and init
 * the queue with the pool created at attach time.
 */
673 sfc_mae_counter_rxq_init(struct sfc_adapter *sa)
675 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
676 const struct rte_eth_rxconf rxconf = {
677 .rx_free_thresh = SFC_COUNTER_RXQ_REFILL_LEVEL,
680 uint16_t nb_rx_desc = SFC_COUNTER_RXQ_RX_DESC_COUNT;
683 sfc_log_init(sa, "entry");
685 if (!sas->counters_rxq_allocated) {
686 sfc_log_init(sa, "counter queue is not supported - skip");
690 if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_ATTACHED) == 0) {
691 sfc_log_init(sa, "counter queue is not attached - skip");
/* Keep descriptor count within the NIC-supported range */
695 nb_rx_desc = RTE_MIN(nb_rx_desc, sa->rxq_max_entries);
696 nb_rx_desc = RTE_MAX(nb_rx_desc, sa->rxq_min_entries);
698 rc = sfc_rx_qinit_info(sa, sa->counter_rxq.sw_index,
699 EFX_RXQ_FLAG_USER_MARK);
701 goto fail_counter_rxq_init_info;
703 rc = sfc_rx_qinit(sa, sa->counter_rxq.sw_index, nb_rx_desc,
704 sa->socket_id, &rxconf, sa->counter_rxq.mp);
706 sfc_err(sa, "failed to init counter RxQ");
707 goto fail_counter_rxq_init;
710 sa->counter_rxq.state |= SFC_COUNTER_RXQ_INITIALIZED;
712 sfc_log_init(sa, "done");
716 fail_counter_rxq_init:
717 fail_counter_rxq_init_info:
718 sfc_log_init(sa, "failed: %s", rte_strerror(rc));
/*
 * Finalize the counter RxQ (reverse of rxq_init). Skips cleanly when
 * unsupported or never initialized.
 */
724 sfc_mae_counter_rxq_fini(struct sfc_adapter *sa)
726 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
728 sfc_log_init(sa, "entry");
730 if (!sas->counters_rxq_allocated) {
731 sfc_log_init(sa, "counter queue is not supported - skip");
735 if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0) {
736 sfc_log_init(sa, "counter queue is not initialized - skip");
740 sfc_rx_qfini(sa, sa->counter_rxq.sw_index);
742 sfc_log_init(sa, "done");
/*
 * Stop the counter subsystem: unregister the polling service first so
 * no routine touches the queue, then stop the FW counters stream.
 * Idempotent via the counter_rxq_running flag.
 */
746 sfc_mae_counter_stop(struct sfc_adapter *sa)
748 struct sfc_mae *mae = &sa->mae;
750 sfc_log_init(sa, "entry");
752 if (!mae->counter_rxq_running) {
753 sfc_log_init(sa, "counter queue is not running - skip");
757 sfc_mae_counter_service_unregister(sa);
758 efx_mae_counters_stream_stop(sa->nic, sa->counter_rxq.sw_index, NULL);
760 mae->counter_rxq_running = false;
762 sfc_log_init(sa, "done");
/*
 * Start the counter subsystem: start the FW counters stream on the
 * counter RxQ, then register the polling service with the flags the
 * stream reported (e.g. whether credits are used). The stream is
 * stopped again if service registration fails. Idempotent via the
 * counter_rxq_running flag.
 */
766 sfc_mae_counter_start(struct sfc_adapter *sa)
768 struct sfc_mae *mae = &sa->mae;
772 SFC_ASSERT(sa->counter_rxq.state & SFC_COUNTER_RXQ_ATTACHED);
774 if (mae->counter_rxq_running)
777 sfc_log_init(sa, "entry");
779 rc = efx_mae_counters_stream_start(sa->nic, sa->counter_rxq.sw_index,
780 SFC_MAE_COUNTER_STREAM_PACKET_SIZE,
781 0 /* No flags required */, &flags);
783 sfc_err(sa, "failed to start MAE counters stream: %s",
785 goto fail_counter_stream;
788 sfc_log_init(sa, "stream start flags: 0x%x", flags);
790 rc = sfc_mae_counter_service_register(sa, flags);
792 goto fail_service_register;
794 mae->counter_rxq_running = true;
798 fail_service_register:
799 efx_mae_counters_stream_stop(sa->nic, sa->counter_rxq.sw_index, NULL);
802 sfc_log_init(sa, "failed: %s", rte_strerror(rc));
/*
 * Fill rte_flow_query_count data for a counter: report hits/bytes
 * relative to the last reset snapshot, folding in flow-tunnel group
 * hit counters when the counter belongs to a tunnel (ft != NULL —
 * presumably; the ft-related branches are partly outside this view).
 * When data->reset is requested, the current value becomes the new
 * reset snapshot.
 */
808 sfc_mae_counter_get(struct sfc_mae_counters *counters,
809 const struct sfc_mae_counter_id *counter,
810 struct rte_flow_query_count *data)
812 struct sfc_flow_tunnel *ft = counter->ft;
813 uint64_t non_reset_jump_hit_counter;
814 struct sfc_mae_counter *p;
815 union sfc_pkts_bytes value;
817 SFC_ASSERT(counter->mae_id.id < counters->n_mae_counters);
818 p = &counters->mae_counters[counter->mae_id.id];
821 * Ordering is relaxed since it is the only operation on counter value.
822 * And it does not depend on different stores/loads in other threads.
823 * Paired with relaxed ordering in counter increment.
825 value.pkts_bytes.int128 = __atomic_load_n(&p->value.pkts_bytes.int128,
829 data->hits = value.pkts - p->reset.pkts;
832 data->hits += ft->group_hit_counter;
833 non_reset_jump_hit_counter = data->hits;
834 data->hits -= ft->reset_jump_hit_counter;
837 data->bytes = value.bytes - p->reset.bytes;
840 if (data->reset != 0) {
842 ft->reset_jump_hit_counter = non_reset_jump_hit_counter;
844 p->reset.pkts = value.pkts;
845 p->reset.bytes = value.bytes;
853 sfc_mae_counter_stream_enabled(struct sfc_adapter *sa)
855 if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0 ||
856 sfc_get_service_lcore(SOCKET_ID_ANY) == RTE_MAX_LCORE)