#include <rte_alarm.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
+#include <rte_eal_paging.h>
+
+#include <mlx5_malloc.h>
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_common_os.h"
+static const char * const mlx5_txpp_stat_names[] = {
+ "txpp_err_miss_int", /* Missed service interrupt. */
+ "txpp_err_rearm_queue", /* Rearm Queue errors. */
+ "txpp_err_clock_queue", /* Clock Queue errors. */
+ "txpp_err_ts_past", /* Timestamp in the past. */
+ "txpp_err_ts_future", /* Timestamp in the distant future. */
+ "txpp_jitter", /* Timestamp jitter (one Clock Queue completion). */
+ "txpp_wander", /* Timestamp jitter (half of Clock Queue completions). */
+ "txpp_sync_lost", /* Scheduling synchronization lost. */
+};
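+
+/*
+ * The array above fixes only the relative order of the pacing counters;
+ * their absolute xstats indexes depend on how many statistics the
+ * preceding modules report (the n_used offset used below), so
+ * applications should look these counters up by name, not by index.
+ */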
+
/* Destroy Event Queue Notification Channel. */
static void
mlx5_txpp_destroy_eqn(struct mlx5_dev_ctx_shared *sh)
rte_errno = errno;
return -errno;
}
- if (!sh->txpp.pp->index) {
+ if (!((struct mlx5dv_pp *)sh->txpp.pp)->index) {
DRV_LOG(ERR, "Zero packet pacing index allocated.");
mlx5_txpp_free_pp_index(sh);
rte_errno = ENOTSUP;
return -ENOTSUP;
}
- sh->txpp.pp_id = sh->txpp.pp->index;
+ sh->txpp.pp_id = ((struct mlx5dv_pp *)(sh->txpp.pp))->index;
return 0;
#else
RTE_SET_USED(sh);
if (wq->sq_umem)
claim_zero(mlx5_glue->devx_umem_dereg(wq->sq_umem));
if (wq->sq_buf)
- rte_free((void *)(uintptr_t)wq->sq_buf);
+ mlx5_free((void *)(uintptr_t)wq->sq_buf);
if (wq->cq)
claim_zero(mlx5_devx_cmd_destroy(wq->cq));
if (wq->cq_umem)
claim_zero(mlx5_glue->devx_umem_dereg(wq->cq_umem));
if (wq->cq_buf)
- rte_free((void *)(uintptr_t)wq->cq_buf);
+ mlx5_free((void *)(uintptr_t)wq->cq_buf);
memset(wq, 0, sizeof(*wq));
}
mlx5_txpp_destroy_send_queue(wq);
if (sh->txpp.tsa) {
- rte_free(sh->txpp.tsa);
+ mlx5_free(sh->txpp.tsa);
sh->txpp.tsa = NULL;
}
}
uint32_t w32[2];
uint64_t w64;
} cs;
+ void *reg_addr;
wq->sq_ci = ci + 1;
cs.w32[0] = rte_cpu_to_be_32(rte_be_to_cpu_32
/* Make sure the doorbell record is updated. */
rte_wmb();
/* Write to doorbell register to start processing. */
- __mlx5_uar_write64_relaxed(cs.w64, sh->tx_uar->reg_addr, NULL);
+ reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
+ __mlx5_uar_write64_relaxed(cs.w64, reg_addr, NULL);
rte_wmb();
}
struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
struct mlx5_devx_cq_attr cq_attr = { 0 };
struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
- size_t page_size = sysconf(_SC_PAGESIZE);
+ size_t page_size;
uint32_t umem_size, umem_dbrec;
int ret;
+ page_size = rte_mem_page_size();
+ if (page_size == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ return -ENOMEM;
+ }
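+ /*
+ * rte_mem_page_size() is the EAL replacement for the Linux-only
+ * sysconf(_SC_PAGESIZE) call; it reports failure as (size_t)-1,
+ * which is checked above.
+ */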
/* Allocate memory buffer for CQEs and doorbell record. */
umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_REARM_CQ_SIZE;
umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
umem_size += MLX5_DBR_SIZE;
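+ /*
+ * mlx5_malloc() with MLX5_MEM_RTE | MLX5_MEM_ZERO mirrors the replaced
+ * rte_zmalloc_socket(): a zeroed, rte_malloc-backed buffer with the
+ * requested alignment, allocated on the given NUMA node.
+ */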
- wq->cq_buf = rte_zmalloc_socket(__func__, umem_size,
- page_size, sh->numa_node);
+ wq->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
+ page_size, sh->numa_node);
if (!wq->cq_buf) {
DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
return -ENOMEM;
/* Create completion queue object for Rearm Queue. */
cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
- cq_attr.uar_page_id = sh->tx_uar->page_id;
+ cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
cq_attr.eqn = sh->txpp.eqn;
cq_attr.q_umem_valid = 1;
cq_attr.q_umem_offset = 0;
umem_size = MLX5_WQE_SIZE * wq->sq_size;
umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
umem_size += MLX5_DBR_SIZE;
- wq->sq_buf = rte_zmalloc_socket(__func__, umem_size,
- page_size, sh->numa_node);
+ wq->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
+ page_size, sh->numa_node);
if (!wq->sq_buf) {
DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
rte_errno = ENOMEM;
sq_attr.tis_num = sh->tis->id;
sq_attr.cqn = wq->cq->id;
sq_attr.cd_master = 1;
- sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
+ sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
sq_attr.wq_attr.pd = sh->pdn;
sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
struct mlx5_devx_cq_attr cq_attr = { 0 };
struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
- size_t page_size = sysconf(_SC_PAGESIZE);
+ size_t page_size;
uint32_t umem_size, umem_dbrec;
int ret;
- sh->txpp.tsa = rte_zmalloc_socket(__func__,
- MLX5_TXPP_REARM_SQ_SIZE *
- sizeof(struct mlx5_txpp_ts),
- 0, sh->numa_node);
+ page_size = rte_mem_page_size();
+ if (page_size == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ return -ENOMEM;
+ }
+ sh->txpp.tsa = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+ MLX5_TXPP_REARM_SQ_SIZE *
+ sizeof(struct mlx5_txpp_ts),
+ 0, sh->numa_node);
if (!sh->txpp.tsa) {
DRV_LOG(ERR, "Failed to allocate memory for CQ stats.");
return -ENOMEM;
umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_CLKQ_SIZE;
umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
umem_size += MLX5_DBR_SIZE;
- wq->cq_buf = rte_zmalloc_socket(__func__, umem_size,
+ wq->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
page_size, sh->numa_node);
if (!wq->cq_buf) {
DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
cq_attr.use_first_only = 1;
cq_attr.overrun_ignore = 1;
- cq_attr.uar_page_id = sh->tx_uar->page_id;
+ cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
cq_attr.eqn = sh->txpp.eqn;
cq_attr.q_umem_valid = 1;
cq_attr.q_umem_offset = 0;
- cq_attr.q_umem_id = wq->cq_umem->umem_id;
+ cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
cq_attr.db_umem_valid = 1;
cq_attr.db_umem_offset = umem_dbrec;
- cq_attr.db_umem_id = wq->cq_umem->umem_id;
+ cq_attr.db_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
cq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_CLKQ_SIZE);
cq_attr.log_page_size = rte_log2_u32(page_size);
wq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
umem_size = MLX5_WQE_SIZE * wq->sq_size;
umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
umem_size += MLX5_DBR_SIZE;
- wq->sq_buf = rte_zmalloc_socket(__func__, umem_size,
- page_size, sh->numa_node);
+ wq->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
+ page_size, sh->numa_node);
if (!wq->sq_buf) {
DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
rte_errno = ENOMEM;
sq_attr.cqn = wq->cq->id;
sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
sq_attr.wq_attr.cd_slave = 1;
- sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
+ sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
sq_attr.wq_attr.pd = sh->pdn;
sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
sq_attr.wq_attr.dbr_umem_valid = 1;
sq_attr.wq_attr.dbr_addr = umem_dbrec;
- sq_attr.wq_attr.dbr_umem_id = wq->sq_umem->umem_id;
+ sq_attr.wq_attr.dbr_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
sq_attr.wq_attr.wq_umem_valid = 1;
- sq_attr.wq_attr.wq_umem_id = wq->sq_umem->umem_id;
+ sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
/* umem_offset must be zero for static_sq_wq queue. */
sq_attr.wq_attr.wq_umem_offset = 0;
wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
static inline void
mlx5_txpp_cq_arm(struct mlx5_dev_ctx_shared *sh)
{
+ void *base_addr;
struct mlx5_txpp_wq *aq = &sh->txpp.rearm_queue;
uint32_t arm_sn = aq->arm_sn << MLX5_CQ_SQN_OFFSET;
uint32_t db_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | aq->cq_ci;
uint64_t db_be = rte_cpu_to_be_64(((uint64_t)db_hi << 32) | aq->cq->id);
- uint32_t *addr = RTE_PTR_ADD(sh->tx_uar->base_addr, MLX5_CQ_DOORBELL);
+ uint32_t *addr;
+
+ base_addr = mlx5_os_get_devx_uar_base_addr(sh->tx_uar);
+ addr = RTE_PTR_ADD(base_addr, MLX5_CQ_DOORBELL);
rte_compiler_barrier();
aq->cq_dbrec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(db_hi);
mlx5_txpp_start_service(struct mlx5_dev_ctx_shared *sh)
{
uint16_t event_nums[1] = {0};
- int flags;
int ret;
+ int fd;
+
+ rte_atomic32_set(&sh->txpp.err_miss_int, 0);
+ rte_atomic32_set(&sh->txpp.err_rearm_queue, 0);
+ rte_atomic32_set(&sh->txpp.err_clock_queue, 0);
+ rte_atomic32_set(&sh->txpp.err_ts_past, 0);
+ rte_atomic32_set(&sh->txpp.err_ts_future, 0);
/* Attach interrupt handler to process Rearm Queue completions. */
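+ /*
+ * mlx5_os_set_nonblock_channel_fd() wraps the former
+ * fcntl(F_GETFL/F_SETFL, O_NONBLOCK) sequence behind the OS
+ * abstraction layer.
+ */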
- flags = fcntl(sh->txpp.echan->fd, F_GETFL);
- ret = fcntl(sh->txpp.echan->fd, F_SETFL, flags | O_NONBLOCK);
+ fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
+ ret = mlx5_os_set_nonblock_channel_fd(fd);
if (ret) {
DRV_LOG(ERR, "Failed to change event channel FD.");
rte_errno = errno;
return -rte_errno;
}
memset(&sh->txpp.intr_handle, 0, sizeof(sh->txpp.intr_handle));
- sh->txpp.intr_handle.fd = sh->txpp.echan->fd;
+ fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
+ sh->txpp.intr_handle.fd = fd;
sh->txpp.intr_handle.type = RTE_INTR_HANDLE_EXT;
if (rte_intr_callback_register(&sh->txpp.intr_handle,
mlx5_txpp_interrupt_handler, sh)) {
MLX5_ASSERT(!ret);
RTE_SET_USED(ret);
}
+
+/*
+ * Read the current clock counter of an Ethernet device.
+ *
+ * This returns the current raw clock value of an Ethernet device. It is
+ * a raw number of ticks, with no given time reference.
+ * The value returned here is from the same clock as the one
+ * filling the timestamp field of Rx/Tx packets when using the hardware
+ * timestamp offload. Therefore it can be used to compute a precise
+ * conversion of the device clock to real time.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param timestamp
+ * Pointer to the uint64_t that holds the raw clock value.
+ *
+ * @return
+ * - 0: Success.
+ * - -ENOTSUP: The function is not supported in this mode. Requires
+ * packet pacing module configured and started (tx_pp devarg).
+ */
+int
+mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ int ret;
+
+ if (sh->txpp.refcnt) {
+ struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
+ struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;
+ union {
+ rte_int128_t u128;
+ struct mlx5_cqe_ts cts;
+ } to;
+ uint64_t ts;
+
+ mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
+ if (to.cts.op_own >> 4) {
+ DRV_LOG(DEBUG, "Clock Queue error sync lost.");
+ rte_atomic32_inc(&sh->txpp.err_clock_queue);
+ sh->txpp.sync_lost = 1;
+ return -EIO;
+ }
+ ts = rte_be_to_cpu_64(to.cts.timestamp);
+ ts = mlx5_txpp_convert_rx_ts(sh, ts);
+ *timestamp = ts;
+ return 0;
+ }
+ /* Not supported in isolated mode - kernel does not see the CQEs. */
+ if (priv->isolated || rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -ENOTSUP;
+ ret = mlx5_read_clock(dev, timestamp);
+ return ret;
+}
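+
+/*
+ * Illustrative usage, not part of the driver: applications reach this
+ * handler through the generic rte_eth_read_clock() API and can pair two
+ * readings taken a known delay apart to estimate the device clock
+ * frequency. port_id and the 100 ms delay are example values.
+ *
+ * uint64_t c1, c2, freq_hz;
+ *
+ * if (rte_eth_read_clock(port_id, &c1) == 0) {
+ * rte_delay_ms(100);
+ * rte_eth_read_clock(port_id, &c2);
+ * freq_hz = (c2 - c1) * 10;
+ * }
+ */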
+
+/**
+ * DPDK callback to clear device extended statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success and stats is reset, negative errno value otherwise and
+ * rte_errno is set.
+ */
+int
+mlx5_txpp_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+
+ rte_atomic32_set(&sh->txpp.err_miss_int, 0);
+ rte_atomic32_set(&sh->txpp.err_rearm_queue, 0);
+ rte_atomic32_set(&sh->txpp.err_clock_queue, 0);
+ rte_atomic32_set(&sh->txpp.err_ts_past, 0);
+ rte_atomic32_set(&sh->txpp.err_ts_future, 0);
+ return 0;
+}
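+
+/*
+ * Illustrative usage, based on the assumption that the port-level
+ * xstats reset handler chains into this routine: an application clears
+ * these counters with the generic call below; port_id is an example
+ * value.
+ *
+ * rte_eth_xstats_reset(port_id);
+ */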
+
+/**
+ * Routine to retrieve names of extended device statistics
+ * for packet send scheduling. It appends the specific stats names
+ * after the parts filled by preceding modules (eth stats, etc.).
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[out] xstats_names
+ * Buffer to insert names into.
+ * @param n
+ * Number of names.
+ * @param n_used
+ * Number of names filled by preceding statistics modules.
+ *
+ * @return
+ * Number of xstats names.
+ */
+int
+mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int n, unsigned int n_used)
+{
+ unsigned int n_txpp = RTE_DIM(mlx5_txpp_stat_names);
+ unsigned int i;
+
+ if (n >= n_used + n_txpp && xstats_names) {
+ for (i = 0; i < n_txpp; ++i) {
+ strncpy(xstats_names[i + n_used].name,
+ mlx5_txpp_stat_names[i],
+ RTE_ETH_XSTATS_NAME_SIZE);
+ xstats_names[i + n_used].name
+ [RTE_ETH_XSTATS_NAME_SIZE - 1] = 0;
+ }
+ }
+ return n_used + n_txpp;
+}
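+
+/*
+ * Returning n_used + n_txpp even when the buffer is too small follows
+ * the xstats sizing contract: a first call with a NULL buffer yields
+ * the required array size. A sketch of the usual two-pass pattern,
+ * with port_id as an example value:
+ *
+ * int n = rte_eth_xstats_get_names(port_id, NULL, 0);
+ * struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
+ *
+ * if (names != NULL)
+ * rte_eth_xstats_get_names(port_id, names, n);
+ */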
+
+static inline void
+mlx5_txpp_read_tsa(struct mlx5_dev_txpp *txpp,
+ struct mlx5_txpp_ts *tsa, uint16_t idx)
+{
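+ /*
+ * Lock-free consistency check: ts and ci_ts both carry the same
+ * timestamp in their low (64 - MLX5_CQ_INDEX_WIDTH) bits, while
+ * ci_ts keeps the completion index in the high bits. If the low
+ * bits differ, or either word changes between the reads, the
+ * service thread has updated the slot concurrently and the read
+ * is retried.
+ */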
+ do {
+ int64_t ts, ci;
+
+ ts = rte_atomic64_read(&txpp->tsa[idx].ts);
+ ci = rte_atomic64_read(&txpp->tsa[idx].ci_ts);
+ rte_compiler_barrier();
+ if ((ci ^ ts) << MLX5_CQ_INDEX_WIDTH != 0)
+ continue;
+ if (rte_atomic64_read(&txpp->tsa[idx].ts) != ts)
+ continue;
+ if (rte_atomic64_read(&txpp->tsa[idx].ci_ts) != ci)
+ continue;
+ rte_atomic64_set(&tsa->ts, ts);
+ rte_atomic64_set(&tsa->ci_ts, ci);
+ return;
+ } while (true);
+}
+
+/*
+ * Jitter reflects the clock change between two neighboring
+ * Clock Queue completions: over a distance of dci completions the
+ * timestamps are expected to advance by dci * tick, and the jitter
+ * is the absolute deviation from that expectation.
+ */
+static uint64_t
+mlx5_txpp_xstats_jitter(struct mlx5_dev_txpp *txpp)
+{
+ struct mlx5_txpp_ts tsa0, tsa1;
+ int64_t dts, dci;
+ uint16_t ts_p;
+
+ if (txpp->ts_n < 2) {
+ /* Not enough reports gathered yet. */
+ return 0;
+ }
+ do {
+ int ts_0, ts_1;
+
+ ts_p = txpp->ts_p;
+ rte_compiler_barrier();
+ ts_0 = ts_p - 2;
+ if (ts_0 < 0)
+ ts_0 += MLX5_TXPP_REARM_SQ_SIZE;
+ ts_1 = ts_p - 1;
+ if (ts_1 < 0)
+ ts_1 += MLX5_TXPP_REARM_SQ_SIZE;
+ mlx5_txpp_read_tsa(txpp, &tsa0, ts_0);
+ mlx5_txpp_read_tsa(txpp, &tsa1, ts_1);
+ rte_compiler_barrier();
+ } while (ts_p != txpp->ts_p);
+ /* We have two neighbor reports; calculate the jitter. */
+ dts = rte_atomic64_read(&tsa1.ts) - rte_atomic64_read(&tsa0.ts);
+ dci = (rte_atomic64_read(&tsa1.ci_ts) >> (64 - MLX5_CQ_INDEX_WIDTH)) -
+ (rte_atomic64_read(&tsa0.ci_ts) >> (64 - MLX5_CQ_INDEX_WIDTH));
+ if (dci < 0)
+ dci += 1 << MLX5_CQ_INDEX_WIDTH;
+ dci *= txpp->tick;
+ return (dts > dci) ? dts - dci : dci - dts;
+}
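+
+/*
+ * Example: with tick = 1000 device units and two completions one CQE
+ * apart (dci = 1), a measured dts of 1003 gives a jitter of 3 units.
+ */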
+
+/*
+ * Wander reflects the long-term clock change over
+ * half the length of the Clock Queue completion ring.
+ */
+static uint64_t
+mlx5_txpp_xstats_wander(struct mlx5_dev_txpp *txpp)
+{
+ struct mlx5_txpp_ts tsa0, tsa1;
+ int64_t dts, dci;
+ uint16_t ts_p;
+
+ if (txpp->ts_n < MLX5_TXPP_REARM_SQ_SIZE) {
+ /* Not enough reports gathered yet. */
+ return 0;
+ }
+ do {
+ int ts_0, ts_1;
+
+ ts_p = txpp->ts_p;
+ rte_compiler_barrier();
+ ts_0 = ts_p - MLX5_TXPP_REARM_SQ_SIZE / 2 - 1;
+ if (ts_0 < 0)
+ ts_0 += MLX5_TXPP_REARM_SQ_SIZE;
+ ts_1 = ts_p - 1;
+ if (ts_1 < 0)
+ ts_1 += MLX5_TXPP_REARM_SQ_SIZE;
+ mlx5_txpp_read_tsa(txpp, &tsa0, ts_0);
+ mlx5_txpp_read_tsa(txpp, &tsa1, ts_1);
+ rte_compiler_barrier();
+ } while (ts_p != txpp->ts_p);
+ /* We have two reports half a ring apart; calculate the wander. */
+ dts = rte_atomic64_read(&tsa1.ts) - rte_atomic64_read(&tsa0.ts);
+ dci = (rte_atomic64_read(&tsa1.ci_ts) >> (64 - MLX5_CQ_INDEX_WIDTH)) -
+ (rte_atomic64_read(&tsa0.ci_ts) >> (64 - MLX5_CQ_INDEX_WIDTH));
+ dci += 1 << MLX5_CQ_INDEX_WIDTH;
+ dci *= txpp->tick;
+ return (dts > dci) ? dts - dci : dci - dts;
+}
+
+/**
+ * Routine to retrieve extended device statistics
+ * for packet send scheduling. It appends the specific statistics
+ * after the parts filled by preceding modules (eth stats, etc.).
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] stats
+ * Pointer to rte extended stats table.
+ * @param n
+ * The size of the stats table.
+ * @param n_used
+ * Number of stats filled by preceding statistics modules.
+ *
+ * @return
+ * Number of extended stats on success and stats is filled,
+ * negative on error and rte_errno is set.
+ */
+int
+mlx5_txpp_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *stats,
+ unsigned int n, unsigned int n_used)
+{
+ unsigned int n_txpp = RTE_DIM(mlx5_txpp_stat_names);
+
+ if (n >= n_used + n_txpp && stats) {
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ unsigned int i;
+
+ for (i = 0; i < n_txpp; ++i)
+ stats[n_used + i].id = n_used + i;
+ stats[n_used + 0].value =
+ rte_atomic32_read(&sh->txpp.err_miss_int);
+ stats[n_used + 1].value =
+ rte_atomic32_read(&sh->txpp.err_rearm_queue);
+ stats[n_used + 2].value =
+ rte_atomic32_read(&sh->txpp.err_clock_queue);
+ stats[n_used + 3].value =
+ rte_atomic32_read(&sh->txpp.err_ts_past);
+ stats[n_used + 4].value =
+ rte_atomic32_read(&sh->txpp.err_ts_future);
+ stats[n_used + 5].value = mlx5_txpp_xstats_jitter(&sh->txpp);
+ stats[n_used + 6].value = mlx5_txpp_xstats_wander(&sh->txpp);
+ stats[n_used + 7].value = sh->txpp.sync_lost;
+ }
+ return n_used + n_txpp;
+}
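+
+/*
+ * Illustrative usage, not part of the driver: reading the pacing
+ * counters from an application. The indexes are not fixed, so the
+ * stats are matched by name; port_id is an example value.
+ *
+ * int i, n = rte_eth_xstats_get(port_id, NULL, 0);
+ * struct rte_eth_xstat *st = calloc(n, sizeof(*st));
+ * struct rte_eth_xstat_name *nm = calloc(n, sizeof(*nm));
+ *
+ * rte_eth_xstats_get_names(port_id, nm, n);
+ * rte_eth_xstats_get(port_id, st, n);
+ * for (i = 0; i < n; i++)
+ * if (strncmp(nm[i].name, "txpp_", 5) == 0)
+ * printf("%s: %" PRIu64 "\n", nm[i].name, st[i].value);
+ */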