/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2020-2021 Xilinx, Inc.
 */

#include <rte_common.h>
#include <rte_service_component.h>

#include "efx.h"
#include "efx_regs_counters_pkt_format.h"

#include "sfc_ev.h"
#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_mae_counter.h"
#include "sfc_service.h"
static uint32_t
sfc_mae_counter_get_service_lcore(struct sfc_adapter *sa)
{
	uint32_t cid;

	cid = sfc_get_service_lcore(sa->socket_id);
	if (cid != RTE_MAX_LCORE)
		return cid;

	if (sa->socket_id != SOCKET_ID_ANY)
		cid = sfc_get_service_lcore(SOCKET_ID_ANY);

	if (cid == RTE_MAX_LCORE) {
		sfc_warn(sa, "failed to get service lcore for counter service");
	} else if (sa->socket_id != SOCKET_ID_ANY) {
		sfc_info(sa,
			 "failed to get service lcore for counter service at socket %d, but got at socket %u",
			 sa->socket_id, rte_lcore_to_socket_id(cid));
	}

	return cid;
}
bool
sfc_mae_counter_rxq_required(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

	if (encp->enc_mae_supported == B_FALSE)
		return false;

	if (sfc_mae_counter_get_service_lcore(sa) == RTE_MAX_LCORE)
		return false;

	return true;
}
int
sfc_mae_counter_enable(struct sfc_adapter *sa,
		       struct sfc_mae_counter_id *counterp)
{
	struct sfc_mae_counter_registry *reg = &sa->mae.counter_registry;
	struct sfc_mae_counters *counters = &reg->counters;
	struct sfc_mae_counter *p;
	efx_counter_t mae_counter;
	uint32_t generation_count;
	uint32_t unused;
	int rc;

	/*
	 * The actual count of counters allocated is ignored since a failure
	 * to allocate a single counter is indicated by a non-zero return code.
	 */
	rc = efx_mae_counters_alloc(sa->nic, 1, &unused, &mae_counter,
				    &generation_count);
	if (rc != 0) {
		sfc_err(sa, "failed to alloc MAE counter: %s",
			rte_strerror(rc));
		goto fail_mae_counter_alloc;
	}

	if (mae_counter.id >= counters->n_mae_counters) {
		/*
		 * A counter ID is expected to be within the range from 0 to
		 * the maximum count of counters, so that it always fits into
		 * the array pre-allocated for the maximum counter ID.
		 */
		sfc_err(sa, "MAE counter ID is out of expected range");
		rc = EFAULT;
		goto fail_counter_id_range;
	}

	counterp->mae_id = mae_counter;

	p = &counters->mae_counters[mae_counter.id];

	/*
	 * Ordering is relaxed since it is the only operation on the counter
	 * value, and it does not depend on different stores/loads in other
	 * threads. Paired with relaxed ordering in counter increment.
	 */
	__atomic_store(&p->reset.pkts_bytes.int128,
		       &p->value.pkts_bytes.int128, __ATOMIC_RELAXED);
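
	/*
	 * Remember the generation count reported for this allocation so that
	 * stale stream updates for a previous incarnation of the same counter
	 * ID can be detected in sfc_mae_counter_increment().
	 */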
	p->generation_count = generation_count;

	/*
	 * The flag is set at the very end of the add operation and reset
	 * at the beginning of the delete operation. Release ordering is
	 * paired with acquire ordering on load in the counter increment
	 * operation.
	 */
	__atomic_store_n(&p->inuse, true, __ATOMIC_RELEASE);

	sfc_info(sa, "enabled MAE counter #%u with reset pkts=%" PRIu64
		 " bytes=%" PRIu64, mae_counter.id,
		 p->reset.pkts, p->reset.bytes);

	return 0;

fail_counter_id_range:
	(void)efx_mae_counters_free(sa->nic, 1, &unused, &mae_counter, NULL);

fail_mae_counter_alloc:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));
	return rc;
}
int
sfc_mae_counter_disable(struct sfc_adapter *sa,
			struct sfc_mae_counter_id *counter)
{
	struct sfc_mae_counter_registry *reg = &sa->mae.counter_registry;
	struct sfc_mae_counters *counters = &reg->counters;
	struct sfc_mae_counter *p;
	uint32_t unused;
	int rc;

	if (counter->mae_id.id == EFX_MAE_RSRC_ID_INVALID)
		return 0;

	SFC_ASSERT(counter->mae_id.id < counters->n_mae_counters);
	/*
	 * The flag is set at the very end of the add operation and reset
	 * at the beginning of the delete operation. Release ordering is
	 * paired with acquire ordering on load in the counter increment
	 * operation.
	 */
	p = &counters->mae_counters[counter->mae_id.id];
	__atomic_store_n(&p->inuse, false, __ATOMIC_RELEASE);

	rc = efx_mae_counters_free(sa->nic, 1, &unused, &counter->mae_id, NULL);
	if (rc != 0)
		sfc_err(sa, "failed to free MAE counter %u: %s",
			counter->mae_id.id, rte_strerror(rc));

	sfc_info(sa, "disabled MAE counter #%u with reset pkts=%" PRIu64
		 " bytes=%" PRIu64, counter->mae_id.id,
		 p->reset.pkts, p->reset.bytes);

	/*
	 * Do this regardless of the efx_mae_counters_free() return value.
	 * If there is an error, the resulting resource leakage is bad, but
	 * nothing sensible can be done about it in this case.
	 */
	counter->mae_id.id = EFX_MAE_RSRC_ID_INVALID;

	return 0;
}
static void
sfc_mae_counter_increment(struct sfc_adapter *sa,
			  struct sfc_mae_counters *counters,
			  uint32_t mae_counter_id,
			  uint32_t generation_count,
			  uint64_t pkts, uint64_t bytes)
{
	struct sfc_mae_counter *p = &counters->mae_counters[mae_counter_id];
	struct sfc_mae_counters_xstats *xstats = &counters->xstats;
	union sfc_pkts_bytes cnt_val;
	bool inuse;

	/*
	 * Acquire ordering is paired with release ordering in counter add
	 * and delete operations.
	 */
	__atomic_load(&p->inuse, &inuse, __ATOMIC_ACQUIRE);
	if (unlikely(!inuse)) {
		/*
		 * Two possible cases are:
		 * 1) The counter is just allocated. A too-early counter
		 *    update cannot be processed properly.
		 * 2) A stale update of a counter that was freed and not
		 *    reallocated. There is no point in processing that
		 *    update.
		 */
		xstats->not_inuse_update++;
		return;
	}

	if (unlikely(generation_count < p->generation_count)) {
		/*
		 * It is a stale update for the reallocated counter
		 * (i.e., freed and the same ID allocated again).
		 */
		xstats->realloc_update++;
		return;
	}
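
	/*
	 * Counter values are only updated from the counter service routine,
	 * so the plain read of the current value below is safe; the atomic
	 * store makes the new value visible to readers.
	 */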
	cnt_val.pkts = p->value.pkts + pkts;
	cnt_val.bytes = p->value.bytes + bytes;

	/*
	 * Ordering is relaxed since it is the only operation on the counter
	 * value, and it does not depend on different stores/loads in other
	 * threads. Paired with relaxed ordering on counter reset.
	 */
	__atomic_store(&p->value.pkts_bytes,
		       &cnt_val.pkts_bytes, __ATOMIC_RELAXED);

	sfc_info(sa, "update MAE counter #%u: pkts+%" PRIu64 "=%" PRIu64
		 ", bytes+%" PRIu64 "=%" PRIu64, mae_counter_id,
		 pkts, cnt_val.pkts, bytes, cnt_val.bytes);
}
static void
sfc_mae_parse_counter_packet(struct sfc_adapter *sa,
			     struct sfc_mae_counter_registry *counter_registry,
			     const struct rte_mbuf *m)
{
	uint32_t generation_count;
	const efx_xword_t *hdr;
	const efx_oword_t *counters_data;
	unsigned int version;
	unsigned int id;
	unsigned int header_offset;
	unsigned int payload_offset;
	unsigned int counter_count;
	unsigned int required_len;
	unsigned int i;

	if (unlikely(m->nb_segs != 1)) {
		sfc_err(sa, "unexpectedly scattered MAE counters packet (%u segments)",
			m->nb_segs);
		return;
	}

	if (unlikely(m->data_len < ER_RX_SL_PACKETISER_HEADER_WORD_SIZE)) {
		sfc_err(sa, "too short MAE counters packet (%u bytes)",
			m->data_len);
		return;
	}

	/*
	 * The generation count is located in the Rx prefix in the USER_MARK
	 * field which is written into the hash.fdir.hi field of the mbuf. See
	 * SF-123581-TC SmartNIC Datapath Offloads section 4.7.5 Counters.
	 */
	generation_count = m->hash.fdir.hi;

	hdr = rte_pktmbuf_mtod(m, const efx_xword_t *);

	version = EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_VERSION);
	if (unlikely(version != ERF_SC_PACKETISER_HEADER_VERSION_2)) {
		sfc_err(sa, "unexpected MAE counters packet version %u",
			version);
		return;
	}

	id = EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_IDENTIFIER);
	if (unlikely(id != ERF_SC_PACKETISER_HEADER_IDENTIFIER_AR)) {
		sfc_err(sa, "unexpected MAE counters source identifier %u", id);
		return;
	}

	/* Packet layout definitions assume a fixed header offset in fact */
	header_offset =
		EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_HEADER_OFFSET);
	if (unlikely(header_offset !=
		     ERF_SC_PACKETISER_HEADER_HEADER_OFFSET_DEFAULT)) {
		sfc_err(sa, "unexpected MAE counters packet header offset %u",
			header_offset);
		return;
	}

	payload_offset =
		EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_PAYLOAD_OFFSET);

	counter_count = EFX_XWORD_FIELD(*hdr, ERF_SC_PACKETISER_HEADER_COUNT);

	required_len = payload_offset +
		       counter_count * sizeof(counters_data[0]);
	if (unlikely(required_len > m->data_len)) {
		sfc_err(sa, "truncated MAE counters packet: %u counters, packet length is %u vs %u required",
			counter_count, m->data_len, required_len);
		/*
		 * In theory it is possible to process the available counters
		 * data, but such a condition is really unexpected, and it is
		 * better to treat the entire packet as corrupted.
		 */
		return;
	}

	/* Ensure that counters data is 32-bit aligned */
	if (unlikely(payload_offset % sizeof(uint32_t) != 0)) {
		sfc_err(sa, "unsupported MAE counters payload offset %u, must be 32-bit aligned",
			payload_offset);
		return;
	}
	RTE_BUILD_BUG_ON(sizeof(counters_data[0]) !=
			 ER_RX_SL_PACKETISER_PAYLOAD_WORD_SIZE);

	counters_data =
		rte_pktmbuf_mtod_offset(m, const efx_oword_t *, payload_offset);

	sfc_info(sa, "update %u MAE counters with gc=%u",
		 counter_count, generation_count);
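
	/*
	 * Each payload word carries a counter index together with packet and
	 * byte counts split into 32-bit low parts and high remainders; the
	 * halves are recombined into 64-bit values below.
	 */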
	for (i = 0; i < counter_count; ++i) {
		uint32_t packet_count_lo;
		uint32_t packet_count_hi;
		uint32_t byte_count_lo;
		uint32_t byte_count_hi;

		/*
		 * Use 32-bit field accessors below since counters data
		 * is not 64-bit aligned.
		 * 32-bit alignment is checked above taking into account
		 * that the start of packet data is 32-bit aligned
		 * (cache-line size aligned in fact).
		 */
		packet_count_lo =
			EFX_OWORD_FIELD32(counters_data[i],
				ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_LO);
		packet_count_hi =
			EFX_OWORD_FIELD32(counters_data[i],
				ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_HI);
		byte_count_lo =
			EFX_OWORD_FIELD32(counters_data[i],
				ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_LO);
		byte_count_hi =
			EFX_OWORD_FIELD32(counters_data[i],
				ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_HI);
		sfc_mae_counter_increment(sa,
			&counter_registry->counters,
			EFX_OWORD_FIELD32(counters_data[i],
				ERF_SC_PACKETISER_PAYLOAD_COUNTER_INDEX),
			generation_count,
			(uint64_t)packet_count_lo |
			((uint64_t)packet_count_hi <<
			 ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_LO_WIDTH),
			(uint64_t)byte_count_lo |
			((uint64_t)byte_count_hi <<
			 ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_LO_WIDTH));
	}
}
static int32_t
sfc_mae_counter_routine(void *arg)
{
	struct sfc_adapter *sa = arg;
	struct sfc_mae_counter_registry *counter_registry =
		&sa->mae.counter_registry;
	struct rte_mbuf *mbufs[SFC_MAE_COUNTER_RX_BURST];
	unsigned int pushed_diff;
	unsigned int pushed;
	unsigned int i;
	uint16_t n;
	int rc;

	n = counter_registry->rx_pkt_burst(counter_registry->rx_dp, mbufs,
					   SFC_MAE_COUNTER_RX_BURST);

	for (i = 0; i < n; i++)
		sfc_mae_parse_counter_packet(sa, counter_registry, mbufs[i]);

	rte_pktmbuf_free_bulk(mbufs, n);

	if (!counter_registry->use_credits)
		return 0;
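
	/*
	 * In credits mode, the FW sends counter stream packets only while it
	 * has credits; grant new credits for the Rx buffers pushed to the
	 * ring since the last update once the refill level is reached.
	 */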
	pushed = sfc_rx_get_pushed(sa, counter_registry->rx_dp);
	pushed_diff = pushed - counter_registry->pushed_n_buffers;

	if (pushed_diff >= SFC_COUNTER_RXQ_REFILL_LEVEL) {
		rc = efx_mae_counters_stream_give_credits(sa->nic, pushed_diff);
		if (rc == 0) {
			counter_registry->pushed_n_buffers = pushed;
		} else {
			/*
			 * FIXME: counters might be important for the
			 * application. Handle the error in order to recover
			 * from the failure.
			 */
			SFC_GENERIC_LOG(DEBUG, "Give credits failed: %s",
					rte_strerror(rc));
		}
	}

	return 0;
}
static void
sfc_mae_counter_service_unregister(struct sfc_adapter *sa)
{
	struct sfc_mae_counter_registry *registry =
		&sa->mae.counter_registry;
	const unsigned int wait_ms = 10000;
	unsigned int i;
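
	/*
	 * Drop both the application-level and component-level run states so
	 * that the service is no longer dispatched before it is unmapped
	 * from its lcore and unregistered.
	 */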
	rte_service_runstate_set(registry->service_id, 0);
	rte_service_component_runstate_set(registry->service_id, 0);

	/*
	 * Wait for the counter routine to finish the last iteration.
	 * Give up on timeout.
	 */
	for (i = 0; i < wait_ms; i++) {
		if (rte_service_may_be_active(registry->service_id) == 0)
			break;

		rte_delay_ms(1);
	}
	if (i == wait_ms)
		sfc_warn(sa, "failed to wait for counter service to stop");

	rte_service_map_lcore_set(registry->service_id,
				  registry->service_core_id, 0);

	rte_service_component_unregister(registry->service_id);
}
static struct sfc_rxq_info *
sfc_counter_rxq_info_get(struct sfc_adapter *sa)
{
	return &sfc_sa2shared(sa)->rxq_info[sa->counter_rxq.sw_index];
}
static int
sfc_mae_counter_service_register(struct sfc_adapter *sa,
				 uint32_t counter_stream_flags)
{
	struct rte_service_spec service;
	char counter_service_name[sizeof(service.name)] = "counter_service";
	struct sfc_mae_counter_registry *counter_registry =
		&sa->mae.counter_registry;
	uint32_t cid;
	uint32_t sid;
	int rc;

	sfc_log_init(sa, "entry");

	/* Prepare service info */
	memset(&service, 0, sizeof(service));
	rte_strscpy(service.name, counter_service_name, sizeof(service.name));
	service.socket_id = sa->socket_id;
	service.callback = sfc_mae_counter_routine;
	service.callback_userdata = sa;
	counter_registry->rx_pkt_burst = sa->eth_dev->rx_pkt_burst;
	counter_registry->rx_dp = sfc_counter_rxq_info_get(sa)->dp;
	counter_registry->pushed_n_buffers = 0;
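	/*
	 * Whether the FW expects credits is reported by the stream start
	 * flags; see the credit handling in sfc_mae_counter_routine().
	 */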
	counter_registry->use_credits = counter_stream_flags &
		EFX_MAE_COUNTERS_STREAM_OUT_USES_CREDITS;
	cid = sfc_get_service_lcore(sa->socket_id);
	if (cid == RTE_MAX_LCORE && sa->socket_id != SOCKET_ID_ANY) {
		/* Warn and try to allocate on any NUMA node */
		sfc_warn(sa,
			 "failed to get service lcore for counter service at socket %d",
			 sa->socket_id);

		cid = sfc_get_service_lcore(SOCKET_ID_ANY);
	}
	if (cid == RTE_MAX_LCORE) {
		rc = EBUSY;
		sfc_err(sa, "failed to get service lcore for counter service");
		goto fail_get_service_lcore;
	}

	/* The service core may be in the "stopped" state, start it */
	rc = rte_service_lcore_start(cid);
	if (rc != 0 && rc != -EALREADY) {
		sfc_err(sa, "failed to start service core for counter service: %s",
			rte_strerror(-rc));
		rc = ENOTSUP;
		goto fail_start_core;
	}

	/* Register the counter service */
	rc = rte_service_component_register(&service, &sid);
	if (rc != 0) {
		rc = ENOEXEC;
		sfc_err(sa, "failed to register counter service component");
		goto fail_register;
	}

	/* Map the service with the service core */
	rc = rte_service_map_lcore_set(sid, cid, 1);
	if (rc != 0) {
		rc = -rc;
		sfc_err(sa, "failed to map lcore for counter service: %s",
			rte_strerror(rc));
		goto fail_map_lcore;
	}

	/* Run the service */
	rc = rte_service_component_runstate_set(sid, 1);
	if (rc < 0) {
		rc = -rc;
		sfc_err(sa, "failed to run counter service component: %s",
			rte_strerror(rc));
		goto fail_component_runstate_set;
	}
	rc = rte_service_runstate_set(sid, 1);
	if (rc < 0) {
		rc = -rc;
		sfc_err(sa, "failed to run counter service");
		goto fail_runstate_set;
	}

	counter_registry->service_core_id = cid;
	counter_registry->service_id = sid;

	sfc_log_init(sa, "done");

	return 0;

fail_runstate_set:
	rte_service_component_runstate_set(sid, 0);

fail_component_runstate_set:
	rte_service_map_lcore_set(sid, cid, 0);

fail_map_lcore:
	rte_service_component_unregister(sid);

fail_register:
fail_start_core:
fail_get_service_lcore:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));
	return rc;
}
int
sfc_mae_counters_init(struct sfc_mae_counters *counters,
		      uint32_t nb_counters_max)
{
	int rc;

	SFC_GENERIC_LOG(DEBUG, "%s: entry", __func__);

	counters->mae_counters = rte_zmalloc("sfc_mae_counters",
		sizeof(*counters->mae_counters) * nb_counters_max, 0);
	if (counters->mae_counters == NULL) {
		rc = ENOMEM;
		SFC_GENERIC_LOG(ERR, "%s: failed: %s", __func__,
				rte_strerror(rc));
		return rc;
	}

	counters->n_mae_counters = nb_counters_max;

	SFC_GENERIC_LOG(DEBUG, "%s: done", __func__);

	return 0;
}
void
sfc_mae_counters_fini(struct sfc_mae_counters *counters)
{
	rte_free(counters->mae_counters);
	counters->mae_counters = NULL;
}
int
sfc_mae_counter_rxq_attach(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	char name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *mp;
	unsigned int n_elements;
	unsigned int cache_size;
	/* The mempool is internal and a private area is not required */
	const uint16_t priv_size = 0;
	/*
	 * Each mbuf must be able to hold a whole counter stream packet
	 * since sfc_mae_parse_counter_packet() rejects multi-segment mbufs.
	 */
	const uint16_t data_room_size = RTE_PKTMBUF_HEADROOM +
		SFC_MAE_COUNTER_STREAM_PACKET_SIZE;
	int rc;

	sfc_log_init(sa, "entry");

	if (!sas->counters_rxq_allocated) {
		sfc_log_init(sa, "counter queue is not supported - skip");
		return 0;
	}

	/*
	 * At least one element in the ring is always unused to distinguish
	 * between empty and full ring cases.
	 */
	n_elements = SFC_COUNTER_RXQ_RX_DESC_COUNT - 1;

	/*
	 * The cache must have sufficient space to put received buckets
	 * before they're reused on refill.
	 */
	cache_size = rte_align32pow2(SFC_COUNTER_RXQ_REFILL_LEVEL +
				     SFC_MAE_COUNTER_RX_BURST - 1);

	if (snprintf(name, sizeof(name), "counter_rxq-pool-%u", sas->port_id) >=
	    (int)sizeof(name)) {
		sfc_err(sa, "failed: counter RxQ mempool name is too long");
		rc = ENAMETOOLONG;
		goto fail_long_name;
	}

	/*
	 * It could be a single-producer single-consumer ring mempool which
	 * requires minimal barriers. However, the cache size and refill/burst
	 * policy are aligned, therefore it does not matter which
	 * mempool backend is chosen since the backend is unused.
	 */
	mp = rte_pktmbuf_pool_create(name, n_elements, cache_size,
				     priv_size, data_room_size, sa->socket_id);
	if (mp == NULL) {
		sfc_err(sa, "failed to create counter RxQ mempool");
		rc = ENOMEM;
		goto fail_mp_create;
	}

	sa->counter_rxq.sw_index = sfc_counters_rxq_sw_index(sas);
	sa->counter_rxq.mp = mp;
	sa->counter_rxq.state |= SFC_COUNTER_RXQ_ATTACHED;

	sfc_log_init(sa, "done");

	return 0;

fail_mp_create:
fail_long_name:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));

	return rc;
}
void
sfc_mae_counter_rxq_detach(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);

	sfc_log_init(sa, "entry");

	if (!sas->counters_rxq_allocated) {
		sfc_log_init(sa, "counter queue is not supported - skip");
		return;
	}

	if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_ATTACHED) == 0) {
		sfc_log_init(sa, "counter queue is not attached - skip");
		return;
	}

	rte_mempool_free(sa->counter_rxq.mp);
	sa->counter_rxq.mp = NULL;
	sa->counter_rxq.state &= ~SFC_COUNTER_RXQ_ATTACHED;

	sfc_log_init(sa, "done");
}
int
sfc_mae_counter_rxq_init(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	const struct rte_eth_rxconf rxconf = {
		.rx_free_thresh = SFC_COUNTER_RXQ_REFILL_LEVEL,
		.rx_drop_en = 1,
	};
	uint16_t nb_rx_desc = SFC_COUNTER_RXQ_RX_DESC_COUNT;
	int rc;

	sfc_log_init(sa, "entry");

	if (!sas->counters_rxq_allocated) {
		sfc_log_init(sa, "counter queue is not supported - skip");
		return 0;
	}

	if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_ATTACHED) == 0) {
		sfc_log_init(sa, "counter queue is not attached - skip");
		return 0;
	}

	nb_rx_desc = RTE_MIN(nb_rx_desc, sa->rxq_max_entries);
	nb_rx_desc = RTE_MAX(nb_rx_desc, sa->rxq_min_entries);
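
	/*
	 * The counter RxQ needs the USER_MARK flag since the Rx prefix
	 * USER_MARK field carries the generation count picked up from
	 * mbuf hash.fdir.hi in sfc_mae_parse_counter_packet().
	 */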
	rc = sfc_rx_qinit_info(sa, sa->counter_rxq.sw_index,
			       EFX_RXQ_FLAG_USER_MARK);
	if (rc != 0)
		goto fail_counter_rxq_init_info;

	rc = sfc_rx_qinit(sa, sa->counter_rxq.sw_index, nb_rx_desc,
			  sa->socket_id, &rxconf, sa->counter_rxq.mp);
	if (rc != 0) {
		sfc_err(sa, "failed to init counter RxQ");
		goto fail_counter_rxq_init;
	}

	sa->counter_rxq.state |= SFC_COUNTER_RXQ_INITIALIZED;

	sfc_log_init(sa, "done");

	return 0;

fail_counter_rxq_init:
fail_counter_rxq_init_info:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));

	return rc;
}
void
sfc_mae_counter_rxq_fini(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);

	sfc_log_init(sa, "entry");

	if (!sas->counters_rxq_allocated) {
		sfc_log_init(sa, "counter queue is not supported - skip");
		return;
	}

	if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0) {
		sfc_log_init(sa, "counter queue is not initialized - skip");
		return;
	}

	sfc_rx_qfini(sa, sa->counter_rxq.sw_index);

	sfc_log_init(sa, "done");
}
void
sfc_mae_counter_stop(struct sfc_adapter *sa)
{
	struct sfc_mae *mae = &sa->mae;

	sfc_log_init(sa, "entry");

	if (!mae->counter_rxq_running) {
		sfc_log_init(sa, "counter queue is not running - skip");
		return;
	}

	sfc_mae_counter_service_unregister(sa);
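	/* The service no longer polls the counter RxQ; stop the FW stream */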
	efx_mae_counters_stream_stop(sa->nic, sa->counter_rxq.sw_index, NULL);

	mae->counter_rxq_running = false;

	sfc_log_init(sa, "done");
}
int
sfc_mae_counter_start(struct sfc_adapter *sa)
{
	struct sfc_mae *mae = &sa->mae;
	uint32_t flags;
	int rc;

	SFC_ASSERT(sa->counter_rxq.state & SFC_COUNTER_RXQ_ATTACHED);

	if (mae->counter_rxq_running)
		return 0;

	sfc_log_init(sa, "entry");
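
	/*
	 * Ask the FW to start streaming counter update packets to the
	 * dedicated counter RxQ; the returned flags report whether the
	 * stream uses credits.
	 */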
	rc = efx_mae_counters_stream_start(sa->nic, sa->counter_rxq.sw_index,
					   SFC_MAE_COUNTER_STREAM_PACKET_SIZE,
					   0 /* No flags required */, &flags);
	if (rc != 0) {
		sfc_err(sa, "failed to start MAE counters stream: %s",
			rte_strerror(rc));
		goto fail_counter_stream;
	}

	sfc_log_init(sa, "stream start flags: 0x%x", flags);

	rc = sfc_mae_counter_service_register(sa, flags);
	if (rc != 0)
		goto fail_service_register;

	mae->counter_rxq_running = true;

	return 0;

fail_service_register:
	efx_mae_counters_stream_stop(sa->nic, sa->counter_rxq.sw_index, NULL);

fail_counter_stream:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));

	return rc;
}
int
sfc_mae_counter_get(struct sfc_mae_counters *counters,
		    const struct sfc_mae_counter_id *counter,
		    struct rte_flow_query_count *data)
{
	struct sfc_mae_counter *p;
	union sfc_pkts_bytes value;

	SFC_ASSERT(counter->mae_id.id < counters->n_mae_counters);
	p = &counters->mae_counters[counter->mae_id.id];

	/*
	 * Ordering is relaxed since it is the only operation on the counter
	 * value, and it does not depend on different stores/loads in other
	 * threads. Paired with relaxed ordering in counter increment.
	 */
	value.pkts_bytes.int128 = __atomic_load_n(&p->value.pkts_bytes.int128,
						  __ATOMIC_RELAXED);

	data->hits_set = 1;
	data->bytes_set = 1;
	data->hits = value.pkts - p->reset.pkts;
	data->bytes = value.bytes - p->reset.bytes;
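
	/*
	 * Queries report values relative to the last reset baseline; on a
	 * reset request the current value simply becomes the new baseline.
	 */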
	if (data->reset != 0) {
		p->reset.pkts = value.pkts;
		p->reset.bytes = value.bytes;
	}

	return 0;
}