net/sfc: support flow action count in transfer rules
drivers/net/sfc/sfc_mae_counter.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2020-2021 Xilinx, Inc.
 */

#include <rte_common.h>
#include <rte_service_component.h>

#include "efx.h"
#include "efx_regs_counters_pkt_format.h"

#include "sfc_ev.h"
#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_mae_counter.h"
#include "sfc_service.h"

static uint32_t
sfc_mae_counter_get_service_lcore(struct sfc_adapter *sa)
{
        uint32_t cid;

        cid = sfc_get_service_lcore(sa->socket_id);
        if (cid != RTE_MAX_LCORE)
                return cid;

        if (sa->socket_id != SOCKET_ID_ANY)
                cid = sfc_get_service_lcore(SOCKET_ID_ANY);

        if (cid == RTE_MAX_LCORE) {
                sfc_warn(sa, "failed to get service lcore for counter service");
        } else if (sa->socket_id != SOCKET_ID_ANY) {
                sfc_warn(sa,
                        "failed to get service lcore for counter service at socket %d, but got at socket %u",
                        sa->socket_id, rte_lcore_to_socket_id(cid));
        }
        return cid;
}

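/*
 * Check whether a dedicated Rx queue for MAE counter packets is needed:
 * the NIC must support MAE and a service lcore must be available to run
 * the counter polling routine.
 */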
bool
sfc_mae_counter_rxq_required(struct sfc_adapter *sa)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

        if (encp->enc_mae_supported == B_FALSE)
                return false;

        if (sfc_mae_counter_get_service_lcore(sa) == RTE_MAX_LCORE)
                return false;

        return true;
}

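/*
 * Allocate a MAE counter, reset its SW state and mark it as in use so
 * that updates from the counter stream start to be applied to it.
 */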
int
sfc_mae_counter_enable(struct sfc_adapter *sa,
                       struct sfc_mae_counter_id *counterp)
{
        struct sfc_mae_counter_registry *reg = &sa->mae.counter_registry;
        struct sfc_mae_counters *counters = &reg->counters;
        struct sfc_mae_counter *p;
        efx_counter_t mae_counter;
        uint32_t generation_count;
        uint32_t unused;
        int rc;

        /*
         * The actual count of counters allocated is ignored since a failure
         * to allocate even a single counter is indicated by a non-zero
         * return code.
         */
        rc = efx_mae_counters_alloc(sa->nic, 1, &unused, &mae_counter,
                                    &generation_count);
        if (rc != 0) {
                sfc_err(sa, "failed to alloc MAE counter: %s",
                        rte_strerror(rc));
                goto fail_mae_counter_alloc;
        }

        if (mae_counter.id >= counters->n_mae_counters) {
                /*
                 * Counter IDs are expected to fall in the range from 0 up to
                 * the maximum count of counters so that they always fit into
                 * the array pre-allocated for the maximum counter ID.
                 */
                sfc_err(sa, "MAE counter ID is out of expected range");
                rc = EFAULT;
                goto fail_counter_id_range;
        }

        counterp->mae_id = mae_counter;

        p = &counters->mae_counters[mae_counter.id];

        /*
         * Relaxed ordering is sufficient since this is the only operation
         * on the counter value and it does not depend on stores/loads done
         * by other threads. Paired with the relaxed ordering in the counter
         * increment.
         */
        __atomic_store(&p->reset.pkts_bytes.int128,
                       &p->value.pkts_bytes.int128, __ATOMIC_RELAXED);
        p->generation_count = generation_count;

        /*
         * The flag is set at the very end of the add operation and reset
         * at the beginning of the delete operation. Release ordering is
         * paired with acquire ordering on the load in the counter increment
         * operation.
         */
        __atomic_store_n(&p->inuse, true, __ATOMIC_RELEASE);

        sfc_info(sa, "enabled MAE counter #%u with reset pkts=%" PRIu64
                 " bytes=%" PRIu64, mae_counter.id,
                 p->reset.pkts, p->reset.bytes);

        return 0;

fail_counter_id_range:
        (void)efx_mae_counters_free(sa->nic, 1, &unused, &mae_counter, NULL);

fail_mae_counter_alloc:
        sfc_log_init(sa, "failed: %s", rte_strerror(rc));
        return rc;
}

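/*
 * Mark the counter as not in use so that stream updates are no longer
 * applied to it, then free the HW resource.
 */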
int
sfc_mae_counter_disable(struct sfc_adapter *sa,
                        struct sfc_mae_counter_id *counter)
{
        struct sfc_mae_counter_registry *reg = &sa->mae.counter_registry;
        struct sfc_mae_counters *counters = &reg->counters;
        struct sfc_mae_counter *p;
        uint32_t unused;
        int rc;

        if (counter->mae_id.id == EFX_MAE_RSRC_ID_INVALID)
                return 0;

        SFC_ASSERT(counter->mae_id.id < counters->n_mae_counters);
        /*
         * The flag is set at the very end of the add operation and reset
         * at the beginning of the delete operation. Release ordering is
         * paired with acquire ordering on the load in the counter increment
         * operation.
         */
        p = &counters->mae_counters[counter->mae_id.id];
        __atomic_store_n(&p->inuse, false, __ATOMIC_RELEASE);

        rc = efx_mae_counters_free(sa->nic, 1, &unused, &counter->mae_id, NULL);
        if (rc != 0)
                sfc_err(sa, "failed to free MAE counter %u: %s",
                        counter->mae_id.id, rte_strerror(rc));

        sfc_info(sa, "disabled MAE counter #%u with reset pkts=%" PRIu64
                 " bytes=%" PRIu64, counter->mae_id.id,
                 p->reset.pkts, p->reset.bytes);

        /*
         * Do this regardless of the efx_mae_counters_free() return value.
         * On error the resulting resource leakage is unfortunate, but
         * nothing sensible can be done about it at this point.
         */
        counter->mae_id.id = EFX_MAE_RSRC_ID_INVALID;

        return rc;
}

static void
sfc_mae_counter_increment(struct sfc_adapter *sa,
                          struct sfc_mae_counters *counters,
                          uint32_t mae_counter_id,
                          uint32_t generation_count,
                          uint64_t pkts, uint64_t bytes)
{
        struct sfc_mae_counter *p = &counters->mae_counters[mae_counter_id];
        struct sfc_mae_counters_xstats *xstats = &counters->xstats;
        union sfc_pkts_bytes cnt_val;
        bool inuse;

        /*
         * Acquire ordering is paired with release ordering in counter add
         * and delete operations.
         */
        __atomic_load(&p->inuse, &inuse, __ATOMIC_ACQUIRE);
        if (!inuse) {
                /*
                 * Two cases are possible:
                 * 1) The counter has just been allocated; an update that
                 *    arrives too early cannot be processed properly.
                 * 2) A stale update for a counter that was freed and has not
                 *    been reallocated; there is no point in processing it.
                 */
                xstats->not_inuse_update++;
                return;
        }

        if (unlikely(generation_count < p->generation_count)) {
                /*
                 * It is a stale update for the reallocated counter
                 * (i.e., freed and the same ID allocated again).
                 */
                xstats->realloc_update++;
                return;
        }

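        /*
         * Counter updates in the stream are deltas relative to the previous
         * report, so accumulate them into the 64-bit SW value.
         */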
        cnt_val.pkts = p->value.pkts + pkts;
        cnt_val.bytes = p->value.bytes + bytes;

        /*
         * Relaxed ordering is sufficient since this is the only operation
         * on the counter value and it does not depend on stores/loads done
         * by other threads. Paired with the relaxed ordering on the counter
         * reset.
         */
        __atomic_store(&p->value.pkts_bytes,
                       &cnt_val.pkts_bytes, __ATOMIC_RELAXED);

        sfc_info(sa, "update MAE counter #%u: pkts+%" PRIu64 "=%" PRIu64
                 ", bytes+%" PRIu64 "=%" PRIu64, mae_counter_id,
                 pkts, cnt_val.pkts, bytes, cnt_val.bytes);
}

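/*
 * Parse a single packet of the counter stream: validate the packetiser
 * header and apply each counter update the payload carries.
 */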
static void
sfc_mae_parse_counter_packet(struct sfc_adapter *sa,
                             struct sfc_mae_counter_registry *counter_registry,
                             const struct rte_mbuf *m)
{
        uint32_t generation_count;
        const efx_xword_t *hdr;
        const efx_oword_t *counters_data;
        unsigned int version;
        unsigned int id;
        unsigned int header_offset;
        unsigned int payload_offset;
        unsigned int counter_count;
        unsigned int required_len;
        unsigned int i;

        if (unlikely(m->nb_segs != 1)) {
                sfc_err(sa, "unexpectedly scattered MAE counters packet (%u segments)",
                        m->nb_segs);
                return;
        }

        if (unlikely(m->data_len < ER_RX_SL_PACKETISER_HEADER_WORD_SIZE)) {
                sfc_err(sa, "too short MAE counters packet (%u bytes)",
                        m->data_len);
                return;
        }

        /*
         * The generation count is located in the USER_MARK field of the Rx
         * prefix, which is written into the hash.fdir.hi field of an mbuf.
         * See SF-123581-TC SmartNIC Datapath Offloads section 4.7.5 Counters.
         */
        generation_count = m->hash.fdir.hi;

        hdr = rte_pktmbuf_mtod(m, const efx_xword_t *);

        version = EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_VERSION);
        if (unlikely(version != ERF_SC_PACKETISER_HEADER_VERSION_2)) {
                sfc_err(sa, "unexpected MAE counters packet version %u",
                        version);
                return;
        }

        id = EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_IDENTIFIER);
        if (unlikely(id != ERF_SC_PACKETISER_HEADER_IDENTIFIER_AR)) {
                sfc_err(sa, "unexpected MAE counters source identifier %u", id);
                return;
        }

        /* The packet layout definitions in fact assume a fixed header offset */
        header_offset =
                EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_HEADER_OFFSET);
        if (unlikely(header_offset !=
                     ERF_SC_PACKETISER_HEADER_HEADER_OFFSET_DEFAULT)) {
                sfc_err(sa, "unexpected MAE counters packet header offset %u",
                        header_offset);
                return;
        }

        payload_offset =
                EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_PAYLOAD_OFFSET);

        counter_count = EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_COUNT);

        required_len = payload_offset +
                        counter_count * sizeof(counters_data[0]);
        if (unlikely(required_len > m->data_len)) {
                sfc_err(sa, "truncated MAE counters packet: %u counters, packet length is %u vs %u required",
                        counter_count, m->data_len, required_len);
                /*
                 * In theory it would be possible to process the counters
                 * data that is available, but such a condition is really
                 * unexpected and it is better to treat the entire packet
                 * as corrupted.
                 */
                return;
        }

        /* Ensure that counters data is 32-bit aligned */
        if (unlikely(payload_offset % sizeof(uint32_t) != 0)) {
                sfc_err(sa, "unsupported MAE counters payload offset %u, must be 32-bit aligned",
                        payload_offset);
                return;
        }
        RTE_BUILD_BUG_ON(sizeof(counters_data[0]) !=
                        ER_RX_SL_PACKETISER_PAYLOAD_WORD_SIZE);

        counters_data =
                rte_pktmbuf_mtod_offset(m, const efx_oword_t *, payload_offset);

        sfc_info(sa, "update %u MAE counters with gc=%u",
                 counter_count, generation_count);

        for (i = 0; i < counter_count; ++i) {
                uint32_t packet_count_lo;
                uint32_t packet_count_hi;
                uint32_t byte_count_lo;
                uint32_t byte_count_hi;

                /*
                 * Use 32-bit field accessors below since the counters data
                 * is not 64-bit aligned. 32-bit alignment was checked above,
                 * taking into account that the start of packet data is
                 * 32-bit aligned (cache-line aligned in fact).
                 */
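                /*
                 * Each count is split into LO and HI fields in the payload;
                 * the full value is reassembled below by shifting the HI
                 * part left by the width of the LO field.
                 */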
                packet_count_lo =
                        EFX_OWORD_FIELD32(counters_data[i],
                                ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_LO);
                packet_count_hi =
                        EFX_OWORD_FIELD32(counters_data[i],
                                ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_HI);
                byte_count_lo =
                        EFX_OWORD_FIELD32(counters_data[i],
                                ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_LO);
                byte_count_hi =
                        EFX_OWORD_FIELD32(counters_data[i],
                                ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_HI);
                sfc_mae_counter_increment(sa,
                        &counter_registry->counters,
                        EFX_OWORD_FIELD32(counters_data[i],
                                ERF_SC_PACKETISER_PAYLOAD_COUNTER_INDEX),
                        generation_count,
                        (uint64_t)packet_count_lo |
                        ((uint64_t)packet_count_hi <<
                         ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_LO_WIDTH),
                        (uint64_t)byte_count_lo |
                        ((uint64_t)byte_count_hi <<
                         ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_LO_WIDTH));
        }
}

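/*
 * Counter service routine: poll the dedicated Rx queue, parse the
 * received counter stream packets and, if credit-based flow control is
 * in use, give Rx buffer credits back to the FW.
 */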
static int32_t
sfc_mae_counter_routine(void *arg)
{
        struct sfc_adapter *sa = arg;
        struct sfc_mae_counter_registry *counter_registry =
                &sa->mae.counter_registry;
        struct rte_mbuf *mbufs[SFC_MAE_COUNTER_RX_BURST];
        unsigned int pushed_diff;
        unsigned int pushed;
        unsigned int i;
        uint16_t n;
        int rc;

        n = counter_registry->rx_pkt_burst(counter_registry->rx_dp, mbufs,
                                           SFC_MAE_COUNTER_RX_BURST);

        for (i = 0; i < n; i++)
                sfc_mae_parse_counter_packet(sa, counter_registry, mbufs[i]);

        rte_pktmbuf_free_bulk(mbufs, n);

        if (!counter_registry->use_credits)
                return 0;

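        /*
         * With credit-based flow control, the FW sends counter stream
         * packets only while it has credits. Replenish the credits with
         * the number of Rx buffers pushed since the last replenishment,
         * once the refill level is reached, to amortise the cost of the
         * request.
         */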
        pushed = sfc_rx_get_pushed(sa, counter_registry->rx_dp);
        pushed_diff = pushed - counter_registry->pushed_n_buffers;

        if (pushed_diff >= SFC_COUNTER_RXQ_REFILL_LEVEL) {
                rc = efx_mae_counters_stream_give_credits(sa->nic, pushed_diff);
                if (rc == 0) {
                        counter_registry->pushed_n_buffers = pushed;
                } else {
                        /*
                         * FIXME: counters might be important for the
                         * application. Handle the error in order to
                         * recover from the failure.
                         */
                        SFC_GENERIC_LOG(DEBUG, "Give credits failed: %s",
                                        rte_strerror(rc));
                }
        }

        return 0;
}
394
395 static void
396 sfc_mae_counter_service_unregister(struct sfc_adapter *sa)
397 {
398         struct sfc_mae_counter_registry *registry =
399                 &sa->mae.counter_registry;
400         const unsigned int wait_ms = 10000;
401         unsigned int i;
402
403         rte_service_runstate_set(registry->service_id, 0);
404         rte_service_component_runstate_set(registry->service_id, 0);
405
406         /*
407          * Wait for the counter routine to finish the last iteration.
408          * Give up on timeout.
409          */
410         for (i = 0; i < wait_ms; i++) {
411                 if (rte_service_may_be_active(registry->service_id) == 0)
412                         break;
413
414                 rte_delay_ms(1);
415         }
416         if (i == wait_ms)
417                 sfc_warn(sa, "failed to wait for counter service to stop");
418
419         rte_service_map_lcore_set(registry->service_id,
420                                   registry->service_core_id, 0);
421
422         rte_service_component_unregister(registry->service_id);
423 }
424
425 static struct sfc_rxq_info *
426 sfc_counter_rxq_info_get(struct sfc_adapter *sa)
427 {
428         return &sfc_sa2shared(sa)->rxq_info[sa->counter_rxq.sw_index];
429 }
430
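/*
 * Register a DPDK service that runs sfc_mae_counter_routine() on a
 * service lcore to poll the counter Rx queue, and start it.
 */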
static int
sfc_mae_counter_service_register(struct sfc_adapter *sa,
                                 uint32_t counter_stream_flags)
{
        struct rte_service_spec service;
        char counter_service_name[sizeof(service.name)] = "counter_service";
        struct sfc_mae_counter_registry *counter_registry =
                &sa->mae.counter_registry;
        uint32_t cid;
        uint32_t sid;
        int rc;

        sfc_log_init(sa, "entry");

        /* Prepare service info */
        memset(&service, 0, sizeof(service));
        rte_strscpy(service.name, counter_service_name, sizeof(service.name));
        service.socket_id = sa->socket_id;
        service.callback = sfc_mae_counter_routine;
        service.callback_userdata = sa;
        counter_registry->rx_pkt_burst = sa->eth_dev->rx_pkt_burst;
        counter_registry->rx_dp = sfc_counter_rxq_info_get(sa)->dp;
        counter_registry->pushed_n_buffers = 0;
        counter_registry->use_credits = counter_stream_flags &
                EFX_MAE_COUNTERS_STREAM_OUT_USES_CREDITS;

        cid = sfc_get_service_lcore(sa->socket_id);
        if (cid == RTE_MAX_LCORE && sa->socket_id != SOCKET_ID_ANY) {
                /* Warn and try to allocate on any NUMA node */
                sfc_warn(sa,
                        "failed to get service lcore for counter service at socket %d",
                        sa->socket_id);

                cid = sfc_get_service_lcore(SOCKET_ID_ANY);
        }
        if (cid == RTE_MAX_LCORE) {
                rc = ENOTSUP;
                sfc_err(sa, "failed to get service lcore for counter service");
                goto fail_get_service_lcore;
        }

        /* Service core may be in "stopped" state, start it */
        rc = rte_service_lcore_start(cid);
        if (rc != 0 && rc != -EALREADY) {
                sfc_err(sa, "failed to start service core for counter service: %s",
                        rte_strerror(-rc));
                rc = ENOTSUP;
                goto fail_start_core;
        }

        /* Register counter service */
        rc = rte_service_component_register(&service, &sid);
        if (rc != 0) {
                rc = ENOEXEC;
                sfc_err(sa, "failed to register counter service component");
                goto fail_register;
        }

        /* Map the service to the service core */
        rc = rte_service_map_lcore_set(sid, cid, 1);
        if (rc != 0) {
                rc = -rc;
                sfc_err(sa, "failed to map lcore for counter service: %s",
                        rte_strerror(rc));
                goto fail_map_lcore;
        }

        /* Run the service */
        rc = rte_service_component_runstate_set(sid, 1);
        if (rc < 0) {
                rc = -rc;
                sfc_err(sa, "failed to run counter service component: %s",
                        rte_strerror(rc));
                goto fail_component_runstate_set;
        }
        rc = rte_service_runstate_set(sid, 1);
        if (rc < 0) {
                rc = -rc;
                sfc_err(sa, "failed to run counter service");
                goto fail_runstate_set;
        }

        counter_registry->service_core_id = cid;
        counter_registry->service_id = sid;

        sfc_log_init(sa, "done");

        return 0;

fail_runstate_set:
        rte_service_component_runstate_set(sid, 0);

fail_component_runstate_set:
        rte_service_map_lcore_set(sid, cid, 0);

fail_map_lcore:
        rte_service_component_unregister(sid);

fail_register:
fail_start_core:
fail_get_service_lcore:
        sfc_log_init(sa, "failed: %s", rte_strerror(rc));

        return rc;
}

int
sfc_mae_counters_init(struct sfc_mae_counters *counters,
                      uint32_t nb_counters_max)
{
        int rc;

        SFC_GENERIC_LOG(DEBUG, "%s: entry", __func__);

        counters->mae_counters = rte_zmalloc("sfc_mae_counters",
                sizeof(*counters->mae_counters) * nb_counters_max, 0);
        if (counters->mae_counters == NULL) {
                rc = ENOMEM;
                SFC_GENERIC_LOG(ERR, "%s: failed: %s", __func__,
                                rte_strerror(rc));
                return rc;
        }

        counters->n_mae_counters = nb_counters_max;

        SFC_GENERIC_LOG(DEBUG, "%s: done", __func__);

        return 0;
}

void
sfc_mae_counters_fini(struct sfc_mae_counters *counters)
{
        rte_free(counters->mae_counters);
        counters->mae_counters = NULL;
}

int
sfc_mae_counter_rxq_attach(struct sfc_adapter *sa)
{
        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
        char name[RTE_MEMPOOL_NAMESIZE];
        struct rte_mempool *mp;
        unsigned int n_elements;
        unsigned int cache_size;
        /* The mempool is internal and the private area is not required */
        const uint16_t priv_size = 0;
        const uint16_t data_room_size = RTE_PKTMBUF_HEADROOM +
                SFC_MAE_COUNTER_STREAM_PACKET_SIZE;
        int rc;

        sfc_log_init(sa, "entry");

        if (!sas->counters_rxq_allocated) {
                sfc_log_init(sa, "counter queue is not supported - skip");
                return 0;
        }

        /*
         * At least one element in the ring is always unused to distinguish
         * between empty and full ring cases.
         */
        n_elements = SFC_COUNTER_RXQ_RX_DESC_COUNT - 1;

        /*
         * The cache must have sufficient space to hold the received buffers
         * before they are reused on refill.
         */
        cache_size = rte_align32pow2(SFC_COUNTER_RXQ_REFILL_LEVEL +
                                     SFC_MAE_COUNTER_RX_BURST - 1);
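        /*
         * For instance, assuming a refill level of 64 buffers and a burst
         * of 32 (both are build-time constants of the driver), this rounds
         * 64 + 32 - 1 = 95 up to a cache of 128 elements.
         */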

        if (snprintf(name, sizeof(name), "counter_rxq-pool-%u", sas->port_id) >=
            (int)sizeof(name)) {
                sfc_err(sa, "failed: counter RxQ mempool name is too long");
                rc = ENAMETOOLONG;
                goto fail_long_name;
        }

        /*
         * A single-producer single-consumer ring mempool, which requires
         * only minimal barriers, could be used here. However, the cache
         * size and the refill/burst policy are aligned, so it does not
         * matter which mempool backend is chosen, since the backend is
         * effectively unused.
         */
        mp = rte_pktmbuf_pool_create(name, n_elements, cache_size,
                                     priv_size, data_room_size, sa->socket_id);
        if (mp == NULL) {
                sfc_err(sa, "failed to create counter RxQ mempool");
                rc = rte_errno;
                goto fail_mp_create;
        }

        sa->counter_rxq.sw_index = sfc_counters_rxq_sw_index(sas);
        sa->counter_rxq.mp = mp;
        sa->counter_rxq.state |= SFC_COUNTER_RXQ_ATTACHED;

        sfc_log_init(sa, "done");

        return 0;

fail_mp_create:
fail_long_name:
        sfc_log_init(sa, "failed: %s", rte_strerror(rc));

        return rc;
}

void
sfc_mae_counter_rxq_detach(struct sfc_adapter *sa)
{
        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);

        sfc_log_init(sa, "entry");

        if (!sas->counters_rxq_allocated) {
                sfc_log_init(sa, "counter queue is not supported - skip");
                return;
        }

        if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_ATTACHED) == 0) {
                sfc_log_init(sa, "counter queue is not attached - skip");
                return;
        }

        rte_mempool_free(sa->counter_rxq.mp);
        sa->counter_rxq.mp = NULL;
        sa->counter_rxq.state &= ~SFC_COUNTER_RXQ_ATTACHED;

        sfc_log_init(sa, "done");
}

int
sfc_mae_counter_rxq_init(struct sfc_adapter *sa)
{
        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
        const struct rte_eth_rxconf rxconf = {
                .rx_free_thresh = SFC_COUNTER_RXQ_REFILL_LEVEL,
                .rx_drop_en = 1,
        };
        uint16_t nb_rx_desc = SFC_COUNTER_RXQ_RX_DESC_COUNT;
        int rc;

        sfc_log_init(sa, "entry");

        if (!sas->counters_rxq_allocated) {
                sfc_log_init(sa, "counter queue is not supported - skip");
                return 0;
        }

        if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_ATTACHED) == 0) {
                sfc_log_init(sa, "counter queue is not attached - skip");
                return 0;
        }

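        /* Clamp the Rx descriptor count to the limits supported by the NIC */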
        nb_rx_desc = RTE_MIN(nb_rx_desc, sa->rxq_max_entries);
        nb_rx_desc = RTE_MAX(nb_rx_desc, sa->rxq_min_entries);

        rc = sfc_rx_qinit_info(sa, sa->counter_rxq.sw_index,
                               EFX_RXQ_FLAG_USER_MARK);
        if (rc != 0)
                goto fail_counter_rxq_init_info;

        rc = sfc_rx_qinit(sa, sa->counter_rxq.sw_index, nb_rx_desc,
                          sa->socket_id, &rxconf, sa->counter_rxq.mp);
        if (rc != 0) {
                sfc_err(sa, "failed to init counter RxQ");
                goto fail_counter_rxq_init;
        }

        sa->counter_rxq.state |= SFC_COUNTER_RXQ_INITIALIZED;

        sfc_log_init(sa, "done");

        return 0;

fail_counter_rxq_init:
fail_counter_rxq_init_info:
        sfc_log_init(sa, "failed: %s", rte_strerror(rc));

        return rc;
}

void
sfc_mae_counter_rxq_fini(struct sfc_adapter *sa)
{
        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);

        sfc_log_init(sa, "entry");

        if (!sas->counters_rxq_allocated) {
                sfc_log_init(sa, "counter queue is not supported - skip");
                return;
        }

        if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0) {
                sfc_log_init(sa, "counter queue is not initialized - skip");
                return;
        }

        sfc_rx_qfini(sa, sa->counter_rxq.sw_index);

        sfc_log_init(sa, "done");
}

void
sfc_mae_counter_stop(struct sfc_adapter *sa)
{
        struct sfc_mae *mae = &sa->mae;

        sfc_log_init(sa, "entry");

        if (!mae->counter_rxq_running) {
                sfc_log_init(sa, "counter queue is not running - skip");
                return;
        }

        sfc_mae_counter_service_unregister(sa);
        efx_mae_counters_stream_stop(sa->nic, sa->counter_rxq.sw_index, NULL);

        mae->counter_rxq_running = false;

        sfc_log_init(sa, "done");
}

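/*
 * Start the MAE counter stream on the dedicated Rx queue and register
 * the polling service. A no-op if the stream is already running.
 */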
int
sfc_mae_counter_start(struct sfc_adapter *sa)
{
        struct sfc_mae *mae = &sa->mae;
        uint32_t flags;
        int rc;

        SFC_ASSERT(sa->counter_rxq.state & SFC_COUNTER_RXQ_ATTACHED);

        if (mae->counter_rxq_running)
                return 0;

        sfc_log_init(sa, "entry");

        rc = efx_mae_counters_stream_start(sa->nic, sa->counter_rxq.sw_index,
                                           SFC_MAE_COUNTER_STREAM_PACKET_SIZE,
                                           0 /* No flags required */, &flags);
        if (rc != 0) {
                sfc_err(sa, "failed to start MAE counters stream: %s",
                        rte_strerror(rc));
                goto fail_counter_stream;
        }

        sfc_log_init(sa, "stream start flags: 0x%x", flags);

        rc = sfc_mae_counter_service_register(sa, flags);
        if (rc != 0)
                goto fail_service_register;

        mae->counter_rxq_running = true;

        return 0;

fail_service_register:
        efx_mae_counters_stream_stop(sa->nic, sa->counter_rxq.sw_index, NULL);

fail_counter_stream:
        sfc_log_init(sa, "failed: %s", rte_strerror(rc));

        return rc;
}