/* drivers/net/sfc/sfc_mae_counter.c */
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2020-2021 Xilinx, Inc.
 */

#include <rte_common.h>
#include <rte_service_component.h>

#include "efx.h"
#include "efx_regs_counters_pkt_format.h"

#include "sfc_ev.h"
#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_mae_counter.h"
#include "sfc_service.h"

static uint32_t
sfc_mae_counter_get_service_lcore(struct sfc_adapter *sa)
{
	uint32_t cid;

	cid = sfc_get_service_lcore(sa->socket_id);
	if (cid != RTE_MAX_LCORE)
		return cid;

	if (sa->socket_id != SOCKET_ID_ANY)
		cid = sfc_get_service_lcore(SOCKET_ID_ANY);

	if (cid == RTE_MAX_LCORE) {
		sfc_warn(sa, "failed to get service lcore for counter service");
	} else if (sa->socket_id != SOCKET_ID_ANY) {
		sfc_warn(sa,
			"failed to get service lcore for counter service at socket %d, but got at socket %u",
			sa->socket_id, rte_lcore_to_socket_id(cid));
	}
	return cid;
}

bool
sfc_mae_counter_rxq_required(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

	if (encp->enc_mae_supported == B_FALSE)
		return false;

	if (sfc_mae_counter_get_service_lcore(sa) == RTE_MAX_LCORE)
		return false;

	return true;
}

int
sfc_mae_counter_enable(struct sfc_adapter *sa,
		       struct sfc_mae_counter_id *counterp)
{
	struct sfc_mae_counter_registry *reg = &sa->mae.counter_registry;
	struct sfc_mae_counters *counters = &reg->counters;
	struct sfc_mae_counter *p;
	efx_counter_t mae_counter;
	uint32_t generation_count;
	uint32_t unused;
	int rc;

	/*
	 * The actual count of counters allocated is ignored since a failure
	 * to allocate a single counter is indicated by a non-zero return code.
	 */
	rc = efx_mae_counters_alloc(sa->nic, 1, &unused, &mae_counter,
				    &generation_count);
	if (rc != 0) {
		sfc_err(sa, "failed to alloc MAE counter: %s",
			rte_strerror(rc));
		goto fail_mae_counter_alloc;
	}

	if (mae_counter.id >= counters->n_mae_counters) {
		/*
		 * A counter ID is expected to fall within the range from 0
		 * to the maximum counter count, so that it always fits into
		 * the pre-allocated array of that size.
		 */
		sfc_err(sa, "MAE counter ID is out of expected range");
		rc = EFAULT;
		goto fail_counter_id_range;
	}

	counterp->mae_id = mae_counter;

	p = &counters->mae_counters[mae_counter.id];

	/*
	 * Relaxed ordering is sufficient since this is the only operation
	 * on the counter value and it does not depend on stores/loads done
	 * by other threads. Paired with relaxed ordering in the counter
	 * increment operation.
	 */
	__atomic_store(&p->reset.pkts_bytes.int128,
		       &p->value.pkts_bytes.int128, __ATOMIC_RELAXED);
	p->generation_count = generation_count;

	p->ft_group_hit_counter = counterp->ft_group_hit_counter;

	/*
	 * The flag is set at the very end of the add operation and reset
	 * at the beginning of the delete operation. Release ordering is
	 * paired with acquire ordering on the load in the counter increment
	 * operation.
	 */
	__atomic_store_n(&p->inuse, true, __ATOMIC_RELEASE);

	sfc_info(sa, "enabled MAE counter #%u with reset pkts=%" PRIu64
		 " bytes=%" PRIu64, mae_counter.id,
		 p->reset.pkts, p->reset.bytes);

	return 0;

fail_counter_id_range:
	(void)efx_mae_counters_free(sa->nic, 1, &unused, &mae_counter, NULL);

fail_mae_counter_alloc:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));
	return rc;
}
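
/*
 * Illustrative sketch, not part of the driver build: the lock-free
 * pkts/bytes publication used above relies on a single 16-byte atomic
 * access so that the packet and byte counts always stay mutually
 * consistent without a lock. The union below is a hypothetical stand-in
 * for union sfc_pkts_bytes; its member names are invented for this example.
 */
#if 0
union example_pkts_bytes {
	struct {
		uint64_t pkts;
		uint64_t bytes;
	};
	__extension__ __int128 int128;
};

/* Take a consistent snapshot of both fields in one relaxed 128-bit load. */
static union example_pkts_bytes
example_counter_snapshot(const union example_pkts_bytes *src)
{
	union example_pkts_bytes v;

	v.int128 = __atomic_load_n(&src->int128, __ATOMIC_RELAXED);
	return v;
}
#endif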

int
sfc_mae_counter_disable(struct sfc_adapter *sa,
			struct sfc_mae_counter_id *counter)
{
	struct sfc_mae_counter_registry *reg = &sa->mae.counter_registry;
	struct sfc_mae_counters *counters = &reg->counters;
	struct sfc_mae_counter *p;
	uint32_t unused;
	int rc;

	if (counter->mae_id.id == EFX_MAE_RSRC_ID_INVALID)
		return 0;

	SFC_ASSERT(counter->mae_id.id < counters->n_mae_counters);
	/*
	 * The flag is set at the very end of the add operation and reset
	 * at the beginning of the delete operation. Release ordering is
	 * paired with acquire ordering on the load in the counter increment
	 * operation.
	 */
	p = &counters->mae_counters[counter->mae_id.id];
	__atomic_store_n(&p->inuse, false, __ATOMIC_RELEASE);

	rc = efx_mae_counters_free(sa->nic, 1, &unused, &counter->mae_id, NULL);
	if (rc != 0)
		sfc_err(sa, "failed to free MAE counter %u: %s",
			counter->mae_id.id, rte_strerror(rc));

	sfc_info(sa, "disabled MAE counter #%u with reset pkts=%" PRIu64
		 " bytes=%" PRIu64, counter->mae_id.id,
		 p->reset.pkts, p->reset.bytes);

	/*
	 * Do this regardless of the efx_mae_counters_free() return value.
	 * If it fails, the resulting resource leakage is bad, but nothing
	 * sensible can be done about it in this case.
	 */
	counter->mae_id.id = EFX_MAE_RSRC_ID_INVALID;

	return rc;
}
165
166 static void
167 sfc_mae_counter_increment(struct sfc_adapter *sa,
168                           struct sfc_mae_counters *counters,
169                           uint32_t mae_counter_id,
170                           uint32_t generation_count,
171                           uint64_t pkts, uint64_t bytes)
172 {
173         struct sfc_mae_counter *p = &counters->mae_counters[mae_counter_id];
174         struct sfc_mae_counters_xstats *xstats = &counters->xstats;
175         union sfc_pkts_bytes cnt_val;
176         bool inuse;
177
178         /*
179          * Acquire ordering is paired with release ordering in counter add
180          * and delete operations.
181          */
182         __atomic_load(&p->inuse, &inuse, __ATOMIC_ACQUIRE);
183         if (!inuse) {
184                 /*
185                  * Two possible cases include:
186                  * 1) Counter is just allocated. Too early counter update
187                  *    cannot be processed properly.
188                  * 2) Stale update of freed and not reallocated counter.
189                  *    There is no point in processing that update.
190                  */
191                 xstats->not_inuse_update++;
192                 return;
193         }
194
195         if (unlikely(generation_count < p->generation_count)) {
196                 /*
197                  * It is a stale update for the reallocated counter
198                  * (i.e., freed and the same ID allocated again).
199                  */
200                 xstats->realloc_update++;
201                 return;
202         }
203
204         cnt_val.pkts = p->value.pkts + pkts;
205         cnt_val.bytes = p->value.bytes + bytes;
206
207         /*
208          * Ordering is relaxed since it is the only operation on counter value.
209          * And it does not depend on different stores/loads in other threads.
210          * Paired with relaxed ordering on counter reset.
211          */
212         __atomic_store(&p->value.pkts_bytes,
213                        &cnt_val.pkts_bytes, __ATOMIC_RELAXED);
214
215         if (p->ft_group_hit_counter != NULL) {
216                 uint64_t ft_group_hit_counter;
217
218                 ft_group_hit_counter = *p->ft_group_hit_counter + pkts;
219                 __atomic_store_n(p->ft_group_hit_counter, ft_group_hit_counter,
220                                  __ATOMIC_RELAXED);
221         }
222
223         sfc_info(sa, "update MAE counter #%u: pkts+%" PRIu64 "=%" PRIu64
224                  ", bytes+%" PRIu64 "=%" PRIu64, mae_counter_id,
225                  pkts, cnt_val.pkts, bytes, cnt_val.bytes);
226 }
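
/*
 * Illustrative sketch, not part of the driver build: the inuse flag
 * protocol used above, reduced to a minimal producer/consumer pair.
 * A release store publishes all counter state written before the flag;
 * an acquire load guarantees that state is visible once the flag reads
 * true. All names below are hypothetical and exist only for this example.
 */
#if 0
struct example_counter {
	uint64_t generation_count;
	bool inuse;
};

/* Writer: fill in counter state, then publish it with a release store. */
static void
example_counter_publish(struct example_counter *c, uint64_t gc)
{
	c->generation_count = gc;
	__atomic_store_n(&c->inuse, true, __ATOMIC_RELEASE);
}

/* Reader: an acquire load of the flag makes the writer's stores visible. */
static bool
example_counter_read_gc(const struct example_counter *c, uint64_t *gc)
{
	bool inuse;

	__atomic_load(&c->inuse, &inuse, __ATOMIC_ACQUIRE);
	if (!inuse)
		return false;

	*gc = c->generation_count;
	return true;
}
#endif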

static void
sfc_mae_parse_counter_packet(struct sfc_adapter *sa,
			     struct sfc_mae_counter_registry *counter_registry,
			     const struct rte_mbuf *m)
{
	uint32_t generation_count;
	const efx_xword_t *hdr;
	const efx_oword_t *counters_data;
	unsigned int version;
	unsigned int id;
	unsigned int header_offset;
	unsigned int payload_offset;
	unsigned int counter_count;
	unsigned int required_len;
	unsigned int i;

	if (unlikely(m->nb_segs != 1)) {
		sfc_err(sa, "unexpectedly scattered MAE counters packet (%u segments)",
			m->nb_segs);
		return;
	}

	if (unlikely(m->data_len < ER_RX_SL_PACKETISER_HEADER_WORD_SIZE)) {
		sfc_err(sa, "too short MAE counters packet (%u bytes)",
			m->data_len);
		return;
	}

	/*
	 * The generation count is located in the Rx prefix in the USER_MARK
	 * field, which is written into the hash.fdir.hi field of an mbuf. See
	 * SF-123581-TC SmartNIC Datapath Offloads section 4.7.5 Counters.
	 */
	generation_count = m->hash.fdir.hi;

	hdr = rte_pktmbuf_mtod(m, const efx_xword_t *);

	version = EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_VERSION);
	if (unlikely(version != ERF_SC_PACKETISER_HEADER_VERSION_2)) {
		sfc_err(sa, "unexpected MAE counters packet version %u",
			version);
		return;
	}

	id = EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_IDENTIFIER);
	if (unlikely(id != ERF_SC_PACKETISER_HEADER_IDENTIFIER_AR)) {
		sfc_err(sa, "unexpected MAE counters source identifier %u", id);
		return;
	}

	/* In fact, the packet layout definitions assume a fixed header offset */
	header_offset =
		EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_HEADER_OFFSET);
	if (unlikely(header_offset !=
		     ERF_SC_PACKETISER_HEADER_HEADER_OFFSET_DEFAULT)) {
		sfc_err(sa, "unexpected MAE counters packet header offset %u",
			header_offset);
		return;
	}

	payload_offset =
		EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_PAYLOAD_OFFSET);

	counter_count = EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_COUNT);

	required_len = payload_offset +
			counter_count * sizeof(counters_data[0]);
	if (unlikely(required_len > m->data_len)) {
		sfc_err(sa, "truncated MAE counters packet: %u counters, packet length is %u vs %u required",
			counter_count, m->data_len, required_len);
		/*
		 * In theory it is possible to process the available counters
		 * data, but such a condition is really unexpected, so it is
		 * better to treat the entire packet as corrupted.
		 */
		return;
	}

	/* Ensure that counters data is 32-bit aligned */
	if (unlikely(payload_offset % sizeof(uint32_t) != 0)) {
		sfc_err(sa, "unsupported MAE counters payload offset %u, must be 32-bit aligned",
			payload_offset);
		return;
	}
	RTE_BUILD_BUG_ON(sizeof(counters_data[0]) !=
			ER_RX_SL_PACKETISER_PAYLOAD_WORD_SIZE);

	counters_data =
		rte_pktmbuf_mtod_offset(m, const efx_oword_t *, payload_offset);

	sfc_info(sa, "update %u MAE counters with gc=%u",
		 counter_count, generation_count);

	for (i = 0; i < counter_count; ++i) {
		uint32_t packet_count_lo;
		uint32_t packet_count_hi;
		uint32_t byte_count_lo;
		uint32_t byte_count_hi;

		/*
		 * Use 32-bit field accessors below since the counters data
		 * is not 64-bit aligned.
		 * 32-bit alignment is checked above, taking into account
		 * that the start of packet data is 32-bit aligned
		 * (cache-line size aligned in fact).
		 */
		packet_count_lo =
			EFX_OWORD_FIELD32(counters_data[i],
				ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_LO);
		packet_count_hi =
			EFX_OWORD_FIELD32(counters_data[i],
				ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_HI);
		byte_count_lo =
			EFX_OWORD_FIELD32(counters_data[i],
				ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_LO);
		byte_count_hi =
			EFX_OWORD_FIELD32(counters_data[i],
				ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_HI);
		sfc_mae_counter_increment(sa,
			&counter_registry->counters,
			EFX_OWORD_FIELD32(counters_data[i],
				ERF_SC_PACKETISER_PAYLOAD_COUNTER_INDEX),
			generation_count,
			(uint64_t)packet_count_lo |
			((uint64_t)packet_count_hi <<
			 ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_LO_WIDTH),
			(uint64_t)byte_count_lo |
			((uint64_t)byte_count_hi <<
			 ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_LO_WIDTH));
	}
}
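
/*
 * Illustrative arithmetic, not part of the driver build: how the lo/hi
 * halves read above combine into one 64-bit value. The 40-bit split used
 * in the example below is hypothetical; the real widths come from the
 * ERF_SC_PACKETISER_PAYLOAD_*_LO_WIDTH definitions.
 */
#if 0
static uint64_t
example_join_counter(uint32_t lo, uint32_t hi, unsigned int lo_width)
{
	return (uint64_t)lo | ((uint64_t)hi << lo_width);
}

/*
 * For a hypothetical 40-bit counter split as 32 low bits + 8 high bits:
 * example_join_counter(0xffffffff, 0x1, 32) == 0x1ffffffff.
 */
#endif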

static int32_t
sfc_mae_counter_routine(void *arg)
{
	struct sfc_adapter *sa = arg;
	struct sfc_mae_counter_registry *counter_registry =
		&sa->mae.counter_registry;
	struct rte_mbuf *mbufs[SFC_MAE_COUNTER_RX_BURST];
	unsigned int pushed_diff;
	unsigned int pushed;
	unsigned int i;
	uint16_t n;
	int rc;

	n = counter_registry->rx_pkt_burst(counter_registry->rx_dp, mbufs,
					   SFC_MAE_COUNTER_RX_BURST);

	for (i = 0; i < n; i++)
		sfc_mae_parse_counter_packet(sa, counter_registry, mbufs[i]);

	rte_pktmbuf_free_bulk(mbufs, n);

	if (!counter_registry->use_credits)
		return 0;

	pushed = sfc_rx_get_pushed(sa, counter_registry->rx_dp);
	pushed_diff = pushed - counter_registry->pushed_n_buffers;

	if (pushed_diff >= SFC_COUNTER_RXQ_REFILL_LEVEL) {
		rc = efx_mae_counters_stream_give_credits(sa->nic, pushed_diff);
		if (rc == 0) {
			counter_registry->pushed_n_buffers = pushed;
		} else {
			/*
			 * FIXME: counters might be important for the
			 * application. Handle the error in order to recover
			 * from the failure.
			 */
			SFC_GENERIC_LOG(DEBUG, "Give credits failed: %s",
					rte_strerror(rc));
		}
	}

	return 0;
}
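
/*
 * Illustrative sketch, not part of the driver build: the credit accounting
 * above subtracts two free-running unsigned counters. Modulo-2^32 unsigned
 * arithmetic keeps the difference correct even after the "pushed" counter
 * wraps, provided fewer than 2^32 buffers are pushed between samples.
 * The function name is invented for this example.
 */
#if 0
#include <stdint.h>

static uint32_t
example_pushed_diff(uint32_t pushed_now, uint32_t pushed_then)
{
	/* Wrap-safe: evaluates modulo 2^32. */
	return pushed_now - pushed_then;
}

/* example_pushed_diff(3, UINT32_MAX - 1) == 5 */
#endif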

static void
sfc_mae_counter_service_unregister(struct sfc_adapter *sa)
{
	struct sfc_mae_counter_registry *registry =
		&sa->mae.counter_registry;
	const unsigned int wait_ms = 10000;
	unsigned int i;

	rte_service_runstate_set(registry->service_id, 0);
	rte_service_component_runstate_set(registry->service_id, 0);

	/*
	 * Wait for the counter routine to finish the last iteration.
	 * Give up on timeout.
	 */
	for (i = 0; i < wait_ms; i++) {
		if (rte_service_may_be_active(registry->service_id) == 0)
			break;

		rte_delay_ms(1);
	}
	if (i == wait_ms)
		sfc_warn(sa, "failed to wait for counter service to stop");

	rte_service_map_lcore_set(registry->service_id,
				  registry->service_core_id, 0);

	rte_service_component_unregister(registry->service_id);
}

static struct sfc_rxq_info *
sfc_counter_rxq_info_get(struct sfc_adapter *sa)
{
	return &sfc_sa2shared(sa)->rxq_info[sa->counter_rxq.sw_index];
}

static int
sfc_mae_counter_service_register(struct sfc_adapter *sa,
				 uint32_t counter_stream_flags)
{
	struct rte_service_spec service;
	char counter_service_name[sizeof(service.name)] = "counter_service";
	struct sfc_mae_counter_registry *counter_registry =
		&sa->mae.counter_registry;
	uint32_t cid;
	uint32_t sid;
	int rc;

	sfc_log_init(sa, "entry");

	/* Prepare service info */
	memset(&service, 0, sizeof(service));
	rte_strscpy(service.name, counter_service_name, sizeof(service.name));
	service.socket_id = sa->socket_id;
	service.callback = sfc_mae_counter_routine;
	service.callback_userdata = sa;
	counter_registry->rx_pkt_burst = sa->eth_dev->rx_pkt_burst;
	counter_registry->rx_dp = sfc_counter_rxq_info_get(sa)->dp;
	counter_registry->pushed_n_buffers = 0;
	counter_registry->use_credits = counter_stream_flags &
		EFX_MAE_COUNTERS_STREAM_OUT_USES_CREDITS;

	cid = sfc_get_service_lcore(sa->socket_id);
	if (cid == RTE_MAX_LCORE && sa->socket_id != SOCKET_ID_ANY) {
		/* Warn and try to allocate on any NUMA node */
		sfc_warn(sa,
			"failed to get service lcore for counter service at socket %d",
			sa->socket_id);

		cid = sfc_get_service_lcore(SOCKET_ID_ANY);
	}
	if (cid == RTE_MAX_LCORE) {
		rc = ENOTSUP;
		sfc_err(sa, "failed to get service lcore for counter service");
		goto fail_get_service_lcore;
	}

	/* Service core may be in "stopped" state, start it */
	rc = rte_service_lcore_start(cid);
	if (rc != 0 && rc != -EALREADY) {
		sfc_err(sa, "failed to start service core for counter service: %s",
			rte_strerror(-rc));
		rc = ENOTSUP;
		goto fail_start_core;
	}

	/* Register counter service */
	rc = rte_service_component_register(&service, &sid);
	if (rc != 0) {
		rc = ENOEXEC;
		sfc_err(sa, "failed to register counter service component");
		goto fail_register;
	}

	/* Map the service with the service core */
	rc = rte_service_map_lcore_set(sid, cid, 1);
	if (rc != 0) {
		rc = -rc;
		sfc_err(sa, "failed to map lcore for counter service: %s",
			rte_strerror(rc));
		goto fail_map_lcore;
	}

	/* Run the service */
	rc = rte_service_component_runstate_set(sid, 1);
	if (rc < 0) {
		rc = -rc;
		sfc_err(sa, "failed to run counter service component: %s",
			rte_strerror(rc));
		goto fail_component_runstate_set;
	}
	rc = rte_service_runstate_set(sid, 1);
	if (rc < 0) {
		rc = -rc;
		sfc_err(sa, "failed to run counter service");
		goto fail_runstate_set;
	}

	counter_registry->service_core_id = cid;
	counter_registry->service_id = sid;

	sfc_log_init(sa, "done");

	return 0;

fail_runstate_set:
	rte_service_component_runstate_set(sid, 0);

fail_component_runstate_set:
	rte_service_map_lcore_set(sid, cid, 0);

fail_map_lcore:
	rte_service_component_unregister(sid);

fail_register:
fail_start_core:
fail_get_service_lcore:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));

	return rc;
}

int
sfc_mae_counters_init(struct sfc_mae_counters *counters,
		      uint32_t nb_counters_max)
{
	int rc;

	SFC_GENERIC_LOG(DEBUG, "%s: entry", __func__);

	counters->mae_counters = rte_zmalloc("sfc_mae_counters",
		sizeof(*counters->mae_counters) * nb_counters_max, 0);
	if (counters->mae_counters == NULL) {
		rc = ENOMEM;
		SFC_GENERIC_LOG(ERR, "%s: failed: %s", __func__,
				rte_strerror(rc));
		return rc;
	}

	counters->n_mae_counters = nb_counters_max;

	SFC_GENERIC_LOG(DEBUG, "%s: done", __func__);

	return 0;
}

void
sfc_mae_counters_fini(struct sfc_mae_counters *counters)
{
	rte_free(counters->mae_counters);
	counters->mae_counters = NULL;
}

int
sfc_mae_counter_rxq_attach(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	char name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *mp;
	unsigned int n_elements;
	unsigned int cache_size;
	/* The mempool is internal and a private area is not required */
	const uint16_t priv_size = 0;
	const uint16_t data_room_size = RTE_PKTMBUF_HEADROOM +
		SFC_MAE_COUNTER_STREAM_PACKET_SIZE;
	int rc;

	sfc_log_init(sa, "entry");

	if (!sas->counters_rxq_allocated) {
		sfc_log_init(sa, "counter queue is not supported - skip");
		return 0;
	}

	/*
	 * At least one element in the ring is always unused to distinguish
	 * between the empty and full ring cases.
	 */
	n_elements = SFC_COUNTER_RXQ_RX_DESC_COUNT - 1;

	/*
	 * The cache must have sufficient space to hold received buffers
	 * before they are reused on refill.
	 */
	cache_size = rte_align32pow2(SFC_COUNTER_RXQ_REFILL_LEVEL +
				     SFC_MAE_COUNTER_RX_BURST - 1);

	if (snprintf(name, sizeof(name), "counter_rxq-pool-%u", sas->port_id) >=
	    (int)sizeof(name)) {
		sfc_err(sa, "failed: counter RxQ mempool name is too long");
		rc = ENAMETOOLONG;
		goto fail_long_name;
	}

	/*
	 * A single-producer single-consumer ring mempool, which requires
	 * only minimal barriers, could be used here. However, the cache
	 * size is aligned with the refill/burst policy, so the mempool
	 * backend is effectively unused and it does not matter which
	 * backend is chosen.
	 */
	mp = rte_pktmbuf_pool_create(name, n_elements, cache_size,
				     priv_size, data_room_size, sa->socket_id);
	if (mp == NULL) {
		sfc_err(sa, "failed to create counter RxQ mempool");
		rc = rte_errno;
		goto fail_mp_create;
	}

	sa->counter_rxq.sw_index = sfc_counters_rxq_sw_index(sas);
	sa->counter_rxq.mp = mp;
	sa->counter_rxq.state |= SFC_COUNTER_RXQ_ATTACHED;

	sfc_log_init(sa, "done");

	return 0;

fail_mp_create:
fail_long_name:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));

	return rc;
}
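
/*
 * Illustrative sketch, not part of the driver build: the cache sizing rule
 * used above, with hypothetical numbers. For a refill level of 64 and a
 * burst of 32, rte_align32pow2(64 + 32 - 1) == rte_align32pow2(95) == 128,
 * so a full refill plus an in-flight burst fits in the per-lcore cache.
 */
#if 0
#include <rte_common.h>

static uint32_t
example_cache_size(uint32_t refill_level, uint32_t burst)
{
	/* Round up to the next power of two. */
	return rte_align32pow2(refill_level + burst - 1);
}
#endif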

void
sfc_mae_counter_rxq_detach(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);

	sfc_log_init(sa, "entry");

	if (!sas->counters_rxq_allocated) {
		sfc_log_init(sa, "counter queue is not supported - skip");
		return;
	}

	if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_ATTACHED) == 0) {
		sfc_log_init(sa, "counter queue is not attached - skip");
		return;
	}

	rte_mempool_free(sa->counter_rxq.mp);
	sa->counter_rxq.mp = NULL;
	sa->counter_rxq.state &= ~SFC_COUNTER_RXQ_ATTACHED;

	sfc_log_init(sa, "done");
}

int
sfc_mae_counter_rxq_init(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	const struct rte_eth_rxconf rxconf = {
		.rx_free_thresh = SFC_COUNTER_RXQ_REFILL_LEVEL,
		.rx_drop_en = 1,
	};
	uint16_t nb_rx_desc = SFC_COUNTER_RXQ_RX_DESC_COUNT;
	int rc;

	sfc_log_init(sa, "entry");

	if (!sas->counters_rxq_allocated) {
		sfc_log_init(sa, "counter queue is not supported - skip");
		return 0;
	}

	if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_ATTACHED) == 0) {
		sfc_log_init(sa, "counter queue is not attached - skip");
		return 0;
	}

	nb_rx_desc = RTE_MIN(nb_rx_desc, sa->rxq_max_entries);
	nb_rx_desc = RTE_MAX(nb_rx_desc, sa->rxq_min_entries);

	rc = sfc_rx_qinit_info(sa, sa->counter_rxq.sw_index,
			       EFX_RXQ_FLAG_USER_MARK);
	if (rc != 0)
		goto fail_counter_rxq_init_info;

	rc = sfc_rx_qinit(sa, sa->counter_rxq.sw_index, nb_rx_desc,
			  sa->socket_id, &rxconf, sa->counter_rxq.mp);
	if (rc != 0) {
		sfc_err(sa, "failed to init counter RxQ");
		goto fail_counter_rxq_init;
	}

	sa->counter_rxq.state |= SFC_COUNTER_RXQ_INITIALIZED;

	sfc_log_init(sa, "done");

	return 0;

fail_counter_rxq_init:
fail_counter_rxq_init_info:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));

	return rc;
}

void
sfc_mae_counter_rxq_fini(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);

	sfc_log_init(sa, "entry");

	if (!sas->counters_rxq_allocated) {
		sfc_log_init(sa, "counter queue is not supported - skip");
		return;
	}

	if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0) {
		sfc_log_init(sa, "counter queue is not initialized - skip");
		return;
	}

	sfc_rx_qfini(sa, sa->counter_rxq.sw_index);

	sfc_log_init(sa, "done");
}

void
sfc_mae_counter_stop(struct sfc_adapter *sa)
{
	struct sfc_mae *mae = &sa->mae;

	sfc_log_init(sa, "entry");

	if (!mae->counter_rxq_running) {
		sfc_log_init(sa, "counter queue is not running - skip");
		return;
	}

	sfc_mae_counter_service_unregister(sa);
	efx_mae_counters_stream_stop(sa->nic, sa->counter_rxq.sw_index, NULL);

	mae->counter_rxq_running = false;

	sfc_log_init(sa, "done");
}

int
sfc_mae_counter_start(struct sfc_adapter *sa)
{
	struct sfc_mae *mae = &sa->mae;
	uint32_t flags;
	int rc;

	SFC_ASSERT(sa->counter_rxq.state & SFC_COUNTER_RXQ_ATTACHED);

	if (mae->counter_rxq_running)
		return 0;

	sfc_log_init(sa, "entry");

	rc = efx_mae_counters_stream_start(sa->nic, sa->counter_rxq.sw_index,
					   SFC_MAE_COUNTER_STREAM_PACKET_SIZE,
					   0 /* No flags required */, &flags);
	if (rc != 0) {
		sfc_err(sa, "failed to start MAE counters stream: %s",
			rte_strerror(rc));
		goto fail_counter_stream;
	}

	sfc_log_init(sa, "stream start flags: 0x%x", flags);

	rc = sfc_mae_counter_service_register(sa, flags);
	if (rc != 0)
		goto fail_service_register;

	mae->counter_rxq_running = true;

	return 0;

fail_service_register:
	efx_mae_counters_stream_stop(sa->nic, sa->counter_rxq.sw_index, NULL);

fail_counter_stream:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));

	return rc;
}

int
sfc_mae_counter_get(struct sfc_mae_counters *counters,
		    const struct sfc_mae_counter_id *counter,
		    struct rte_flow_query_count *data)
{
	struct sfc_flow_tunnel *ft = counter->ft;
	uint64_t non_reset_jump_hit_counter;
	struct sfc_mae_counter *p;
	union sfc_pkts_bytes value;

	SFC_ASSERT(counter->mae_id.id < counters->n_mae_counters);
	p = &counters->mae_counters[counter->mae_id.id];

	/*
	 * Relaxed ordering is sufficient since this is the only operation
	 * on the counter value and it does not depend on stores/loads done
	 * by other threads. Paired with relaxed ordering in the counter
	 * increment operation.
	 */
	value.pkts_bytes.int128 = __atomic_load_n(&p->value.pkts_bytes.int128,
						  __ATOMIC_RELAXED);

	data->hits_set = 1;
	data->hits = value.pkts - p->reset.pkts;

	if (ft != NULL) {
		data->hits += ft->group_hit_counter;
		non_reset_jump_hit_counter = data->hits;
		data->hits -= ft->reset_jump_hit_counter;
	} else {
		data->bytes_set = 1;
		data->bytes = value.bytes - p->reset.bytes;
	}

	if (data->reset != 0) {
		if (ft != NULL) {
			ft->reset_jump_hit_counter = non_reset_jump_hit_counter;
		} else {
			p->reset.pkts = value.pkts;
			p->reset.bytes = value.bytes;
		}
	}

	return 0;
}
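
/*
 * Illustrative sketch, not part of the driver build: the values computed
 * by sfc_mae_counter_get() surface to applications through the generic
 * rte_flow_query() call with a COUNT action. The helper name and its
 * parameters are invented for this example.
 */
#if 0
#include <stdio.h>
#include <inttypes.h>
#include <rte_flow.h>

static int
example_query_flow_count(uint16_t port_id, struct rte_flow *flow,
			 const struct rte_flow_action *count_action)
{
	struct rte_flow_query_count data = { .reset = 0 };
	struct rte_flow_error error;
	int rc;

	rc = rte_flow_query(port_id, flow, count_action, &data, &error);
	if (rc == 0 && data.hits_set)
		printf("hits=%" PRIu64 "\n", data.hits);

	return rc;
}
#endif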

bool
sfc_mae_counter_stream_enabled(struct sfc_adapter *sa)
{
	if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0 ||
	    sfc_get_service_lcore(SOCKET_ID_ANY) == RTE_MAX_LCORE)
		return B_FALSE;
	else
		return B_TRUE;
}