X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_txpp.c;h=8aad92f13734d2367f0cf18172939bbb5ea73be5;hb=cb99500de915040339f294b6ad2fc8edb4085b19;hp=95b91278a2ecedda7f15dec36fe1133209a7bf9d;hpb=085ff447f01c52c3595ea3196511b97bf54866f0;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index 95b91278a2..8aad92f137 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -10,43 +10,45 @@
 #include
 #include
 #include
+#include <rte_eal_paging.h>
+
+#include <mlx5_malloc.h>
 
 #include "mlx5.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_common_os.h"
 
+static const char * const mlx5_txpp_stat_names[] = {
+        "txpp_err_miss_int", /* Missed service interrupt. */
+        "txpp_err_rearm_queue", /* Rearm Queue errors. */
+        "txpp_err_clock_queue", /* Clock Queue errors. */
+        "txpp_err_ts_past", /* Timestamp in the past. */
+        "txpp_err_ts_future", /* Timestamp in the distant future. */
+        "txpp_jitter", /* Timestamp jitter (one Clock Queue completion). */
+        "txpp_wander", /* Timestamp jitter (half of Clock Queue completions). */
+        "txpp_sync_lost", /* Scheduling synchronization lost. */
+};
+
 /* Destroy Event Queue Notification Channel. */
 static void
-mlx5_txpp_destroy_eqn(struct mlx5_dev_ctx_shared *sh)
+mlx5_txpp_destroy_event_channel(struct mlx5_dev_ctx_shared *sh)
 {
         if (sh->txpp.echan) {
                 mlx5_glue->devx_destroy_event_channel(sh->txpp.echan);
                 sh->txpp.echan = NULL;
         }
-        sh->txpp.eqn = 0;
 }
 
 /* Create Event Queue Notification Channel. */
 static int
-mlx5_txpp_create_eqn(struct mlx5_dev_ctx_shared *sh)
+mlx5_txpp_create_event_channel(struct mlx5_dev_ctx_shared *sh)
 {
-        uint32_t lcore;
-
         MLX5_ASSERT(!sh->txpp.echan);
-        lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
-        if (mlx5_glue->devx_query_eqn(sh->ctx, lcore, &sh->txpp.eqn)) {
-                rte_errno = errno;
-                DRV_LOG(ERR, "Failed to query EQ number %d.", rte_errno);
-                sh->txpp.eqn = 0;
-                return -rte_errno;
-        }
         sh->txpp.echan = mlx5_glue->devx_create_event_channel(sh->ctx,
                         MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
         if (!sh->txpp.echan) {
-                sh->txpp.eqn = 0;
                 rte_errno = errno;
-                DRV_LOG(ERR, "Failed to create event channel %d.",
-                        rte_errno);
+                DRV_LOG(ERR, "Failed to create event channel %d.", rte_errno);
                 return -rte_errno;
         }
         return 0;
@@ -99,13 +101,13 @@ mlx5_txpp_alloc_pp_index(struct mlx5_dev_ctx_shared *sh)
                 rte_errno = errno;
                 return -errno;
         }
-        if (!sh->txpp.pp->index) {
+        if (!((struct mlx5dv_pp *)sh->txpp.pp)->index) {
                 DRV_LOG(ERR, "Zero packet pacing index allocated.");
                 mlx5_txpp_free_pp_index(sh);
                 rte_errno = ENOTSUP;
                 return -ENOTSUP;
         }
-        sh->txpp.pp_id = sh->txpp.pp->index;
+        sh->txpp.pp_id = ((struct mlx5dv_pp *)(sh->txpp.pp))->index;
         return 0;
 #else
         RTE_SET_USED(sh);
@@ -123,13 +125,13 @@ mlx5_txpp_destroy_send_queue(struct mlx5_txpp_wq *wq)
         if (wq->sq_umem)
                 claim_zero(mlx5_glue->devx_umem_dereg(wq->sq_umem));
         if (wq->sq_buf)
-                rte_free((void *)(uintptr_t)wq->sq_buf);
+                mlx5_free((void *)(uintptr_t)wq->sq_buf);
         if (wq->cq)
                 claim_zero(mlx5_devx_cmd_destroy(wq->cq));
         if (wq->cq_umem)
                 claim_zero(mlx5_glue->devx_umem_dereg(wq->cq_umem));
         if (wq->cq_buf)
-                rte_free((void *)(uintptr_t)wq->cq_buf);
+                mlx5_free((void *)(uintptr_t)wq->cq_buf);
         memset(wq, 0, sizeof(*wq));
 }
 
@@ -148,7 +150,7 @@ mlx5_txpp_destroy_clock_queue(struct mlx5_dev_ctx_shared *sh)
 
         mlx5_txpp_destroy_send_queue(wq);
         if (sh->txpp.tsa) {
-                rte_free(sh->txpp.tsa);
+                mlx5_free(sh->txpp.tsa);
                 sh->txpp.tsa = NULL;
         }
 }
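Note on the allocator switch in the hunks above: every rte_zmalloc_socket()/rte_free() pair in this file becomes mlx5_malloc()/mlx5_free(), keeping the zeroing and NUMA placement semantics via the MLX5_MEM_RTE | MLX5_MEM_ZERO flags. A minimal sketch of the pairing, using only the mlx5_malloc.h API this patch relies on (the txpp_alloc_dbrec helper name is illustrative, not part of the patch):

    #include <mlx5_malloc.h>

    /* Zeroed allocation from rte memory on a given NUMA node;
     * must be released with mlx5_free(), never rte_free(). */
    static void *
    txpp_alloc_dbrec(size_t size, size_t align, int socket)
    {
            return mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
                               size, align, socket);
    }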
@@ -161,6 +163,7 @@ mlx5_txpp_doorbell_rearm_queue(struct mlx5_dev_ctx_shared *sh, uint16_t ci)
                 uint32_t w32[2];
                 uint64_t w64;
         } cs;
+        void *reg_addr;
 
         wq->sq_ci = ci + 1;
         cs.w32[0] = rte_cpu_to_be_32(rte_be_to_cpu_32
@@ -172,7 +175,8 @@ mlx5_txpp_doorbell_rearm_queue(struct mlx5_dev_ctx_shared *sh, uint16_t ci)
         /* Make sure the doorbell record is updated. */
         rte_wmb();
         /* Write to doorbell register to start processing. */
-        __mlx5_uar_write64_relaxed(cs.w64, sh->tx_uar->reg_addr, NULL);
+        reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
+        __mlx5_uar_write64_relaxed(cs.w64, reg_addr, NULL);
         rte_wmb();
 }
 
@@ -236,16 +240,21 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
         struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
         struct mlx5_devx_cq_attr cq_attr = { 0 };
         struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
-        size_t page_size = sysconf(_SC_PAGESIZE);
+        size_t page_size;
         uint32_t umem_size, umem_dbrec;
         int ret;
 
+        page_size = rte_mem_page_size();
+        if (page_size == (size_t)-1) {
+                DRV_LOG(ERR, "Failed to get mem page size");
+                return -ENOMEM;
+        }
         /* Allocate memory buffer for CQEs and doorbell record. */
         umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_REARM_CQ_SIZE;
         umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
         umem_size += MLX5_DBR_SIZE;
-        wq->cq_buf = rte_zmalloc_socket(__func__, umem_size,
-                                        page_size, sh->numa_node);
+        wq->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
+                                 page_size, sh->numa_node);
         if (!wq->cq_buf) {
                 DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
                 return -ENOMEM;
@@ -263,8 +272,8 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
         /* Create completion queue object for Rearm Queue. */
         cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
                             MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
-        cq_attr.uar_page_id = sh->tx_uar->page_id;
-        cq_attr.eqn = sh->txpp.eqn;
+        cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
+        cq_attr.eqn = sh->eqn;
         cq_attr.q_umem_valid = 1;
         cq_attr.q_umem_offset = 0;
         cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
@@ -293,8 +302,8 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
         umem_size = MLX5_WQE_SIZE * wq->sq_size;
         umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
         umem_size += MLX5_DBR_SIZE;
-        wq->sq_buf = rte_zmalloc_socket(__func__, umem_size,
-                                        page_size, sh->numa_node);
+        wq->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
+                                 page_size, sh->numa_node);
         if (!wq->sq_buf) {
                 DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
                 rte_errno = ENOMEM;
@@ -316,7 +325,7 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
         sq_attr.tis_num = sh->tis->id;
         sq_attr.cqn = wq->cq->id;
         sq_attr.cd_master = 1;
-        sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
+        sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
         sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
         sq_attr.wq_attr.pd = sh->pdn;
         sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
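The Rearm Queue constructor above and the Clock Queue constructor below now obtain the page size through the EAL paging API instead of sysconf(), and rte_mem_page_size() signals failure with (size_t)-1 rather than zero. A standalone sketch of the check this patch repeats in both constructors (the txpp_page_size helper name is illustrative):

    #include <errno.h>
    #include <rte_eal_paging.h>

    /* Returns 0 and stores the page size, or -ENOMEM, following
     * the same convention the patch uses on failure. */
    static int
    txpp_page_size(size_t *psz)
    {
            size_t sz = rte_mem_page_size();

            if (sz == (size_t)-1)
                    return -ENOMEM;
            *psz = sz;
            return 0;
    }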
@@ -459,14 +468,19 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
         struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
         struct mlx5_devx_cq_attr cq_attr = { 0 };
         struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
-        size_t page_size = sysconf(_SC_PAGESIZE);
+        size_t page_size;
         uint32_t umem_size, umem_dbrec;
         int ret;
 
-        sh->txpp.tsa = rte_zmalloc_socket(__func__,
-                                          MLX5_TXPP_REARM_SQ_SIZE *
-                                          sizeof(struct mlx5_txpp_ts),
-                                          0, sh->numa_node);
+        page_size = rte_mem_page_size();
+        if (page_size == (size_t)-1) {
+                DRV_LOG(ERR, "Failed to get mem page size");
+                return -ENOMEM;
+        }
+        sh->txpp.tsa = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+                                   MLX5_TXPP_REARM_SQ_SIZE *
+                                   sizeof(struct mlx5_txpp_ts),
+                                   0, sh->numa_node);
         if (!sh->txpp.tsa) {
                 DRV_LOG(ERR, "Failed to allocate memory for CQ stats.");
                 return -ENOMEM;
         }
@@ -477,7 +491,7 @@
         umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_CLKQ_SIZE;
         umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
         umem_size += MLX5_DBR_SIZE;
-        wq->cq_buf = rte_zmalloc_socket(__func__, umem_size,
+        wq->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
                                  page_size, sh->numa_node);
         if (!wq->cq_buf) {
                 DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
@@ -498,14 +512,14 @@
                             MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
         cq_attr.use_first_only = 1;
         cq_attr.overrun_ignore = 1;
-        cq_attr.uar_page_id = sh->tx_uar->page_id;
-        cq_attr.eqn = sh->txpp.eqn;
+        cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
+        cq_attr.eqn = sh->eqn;
         cq_attr.q_umem_valid = 1;
         cq_attr.q_umem_offset = 0;
-        cq_attr.q_umem_id = wq->cq_umem->umem_id;
+        cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
         cq_attr.db_umem_valid = 1;
         cq_attr.db_umem_offset = umem_dbrec;
-        cq_attr.db_umem_id = wq->cq_umem->umem_id;
+        cq_attr.db_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
         cq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_CLKQ_SIZE);
         cq_attr.log_page_size = rte_log2_u32(page_size);
         wq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
@@ -532,8 +546,8 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
         umem_size = MLX5_WQE_SIZE * wq->sq_size;
         umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
         umem_size += MLX5_DBR_SIZE;
-        wq->sq_buf = rte_zmalloc_socket(__func__, umem_size,
-                                        page_size, sh->numa_node);
+        wq->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
+                                 page_size, sh->numa_node);
         if (!wq->sq_buf) {
                 DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
                 rte_errno = ENOMEM;
@@ -563,16 +577,16 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
         sq_attr.cqn = wq->cq->id;
         sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
         sq_attr.wq_attr.cd_slave = 1;
-        sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
+        sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
         sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
         sq_attr.wq_attr.pd = sh->pdn;
         sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
         sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
         sq_attr.wq_attr.dbr_umem_valid = 1;
         sq_attr.wq_attr.dbr_addr = umem_dbrec;
-        sq_attr.wq_attr.dbr_umem_id = wq->sq_umem->umem_id;
+        sq_attr.wq_attr.dbr_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
         sq_attr.wq_attr.wq_umem_valid = 1;
-        sq_attr.wq_attr.wq_umem_id = wq->sq_umem->umem_id;
+        sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
         /* umem_offset must be zero for static_sq_wq queue. */
         sq_attr.wq_attr.wq_umem_offset = 0;
         wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
@@ -606,11 +620,14 @@ error:
 static inline void
 mlx5_txpp_cq_arm(struct mlx5_dev_ctx_shared *sh)
 {
+        void *base_addr;
+
         struct mlx5_txpp_wq *aq = &sh->txpp.rearm_queue;
         uint32_t arm_sn = aq->arm_sn << MLX5_CQ_SQN_OFFSET;
         uint32_t db_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | aq->cq_ci;
         uint64_t db_be = rte_cpu_to_be_64(((uint64_t)db_hi << 32) | aq->cq->id);
-        uint32_t *addr = RTE_PTR_ADD(sh->tx_uar->base_addr, MLX5_CQ_DOORBELL);
+        base_addr = mlx5_os_get_devx_uar_base_addr(sh->tx_uar);
+        uint32_t *addr = RTE_PTR_ADD(base_addr, MLX5_CQ_DOORBELL);
 
         rte_compiler_barrier();
         aq->cq_dbrec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(db_hi);
@@ -625,6 +642,32 @@ mlx5_txpp_cq_arm(struct mlx5_dev_ctx_shared *sh)
         aq->arm_sn++;
 }
 
+#if defined(RTE_ARCH_X86_64)
+static inline int
+mlx5_atomic128_compare_exchange(rte_int128_t *dst,
+                                rte_int128_t *exp,
+                                const rte_int128_t *src)
+{
+        uint8_t res;
+
+        asm volatile (MPLOCKED
+                      "cmpxchg16b %[dst];"
+                      " sete %[res]"
+                      : [dst] "=m" (dst->val[0]),
+                        "=a" (exp->val[0]),
+                        "=d" (exp->val[1]),
+                        [res] "=r" (res)
+                      : "b" (src->val[0]),
+                        "c" (src->val[1]),
+                        "a" (exp->val[0]),
+                        "d" (exp->val[1]),
+                        "m" (dst->val[0])
+                      : "memory");
+
+        return res;
+}
+#endif
+
 static inline void
 mlx5_atomic_read_cqe(rte_int128_t *from, rte_int128_t *ts)
 {
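The mlx5_atomic128_compare_exchange() helper added above gives an atomic 16-byte read on x86-64: executing lock cmpxchg16b with a zeroed expected/source pair either leaves the destination untouched (it already equaled zero) or loads its current contents into the expected operand, so the caller always ends up with a consistent snapshot. A hedged C11-style equivalent using GCC's __int128 built-ins (this is not what the patch compiles; it needs -mcx16 and is illustrative only):

    #include <stdint.h>
    #include <string.h>

    typedef struct { uint64_t val[2]; } u128_t;

    /* Snapshot 16 bytes atomically: on mismatch (the usual case) the
     * current value lands in 'expected'; on match the location is
     * rewritten with zero, which equals its old value, so the memory
     * is effectively unmodified either way. */
    static inline void
    atomic128_read(u128_t *src, u128_t *out)
    {
            __int128 expected = 0;

            __atomic_compare_exchange_n((__int128 *)src, &expected, 0,
                                        0, __ATOMIC_RELAXED,
                                        __ATOMIC_RELAXED);
            memcpy(out, &expected, sizeof(*out));
    }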
@@ -633,31 +676,33 @@ mlx5_atomic_read_cqe(rte_int128_t *from, rte_int128_t *ts)
          * update by hardware with the specified rate. We have to
          * read the timestamp and WQE completion index atomically.
          */
-#if defined(RTE_ARCH_X86_64) || defined(RTE_ARCH_ARM64)
+#if defined(RTE_ARCH_X86_64)
         rte_int128_t src;
 
         memset(&src, 0, sizeof(src));
         *ts = src;
         /* if (*from == *ts) *from = *src else *ts = *from; */
-        rte_atomic128_cmp_exchange(from, ts, &src, 0,
-                                   __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+        mlx5_atomic128_compare_exchange(from, ts, &src);
 #else
-        rte_atomic64_t *cqe = (rte_atomic64_t *)from;
+        uint64_t *cqe = (uint64_t *)from;
 
-        /* Power architecture does not support 16B compare-and-swap. */
+        /*
+         * Power architecture does not support 16B compare-and-swap.
+         * ARM implements it in software, so the code below is more relevant.
+         */
         for (;;) {
-                int64_t tm, op;
-                int64_t *ps;
+                uint64_t tm, op;
+                uint64_t *ps;
 
                 rte_compiler_barrier();
-                tm = rte_atomic64_read(cqe + 0);
-                op = rte_atomic64_read(cqe + 1);
+                tm = __atomic_load_n(cqe + 0, __ATOMIC_RELAXED);
+                op = __atomic_load_n(cqe + 1, __ATOMIC_RELAXED);
                 rte_compiler_barrier();
-                if (tm != rte_atomic64_read(cqe + 0))
+                if (tm != __atomic_load_n(cqe + 0, __ATOMIC_RELAXED))
                         continue;
-                if (op != rte_atomic64_read(cqe + 1))
+                if (op != __atomic_load_n(cqe + 1, __ATOMIC_RELAXED))
                         continue;
-                ps = (int64_t *)ts;
+                ps = (uint64_t *)ts;
                 ps[0] = tm;
                 ps[1] = op;
                 return;
@@ -673,8 +718,8 @@ mlx5_txpp_cache_timestamp(struct mlx5_dev_ctx_shared *sh,
         ci = ci << (64 - MLX5_CQ_INDEX_WIDTH);
         ci |= (ts << MLX5_CQ_INDEX_WIDTH) >> MLX5_CQ_INDEX_WIDTH;
         rte_compiler_barrier();
-        rte_atomic64_set(&sh->txpp.ts.ts, ts);
-        rte_atomic64_set(&sh->txpp.ts.ci_ts, ci);
+        __atomic_store_n(&sh->txpp.ts.ts, ts, __ATOMIC_RELAXED);
+        __atomic_store_n(&sh->txpp.ts.ci_ts, ci, __ATOMIC_RELAXED);
         rte_wmb();
 }
 
@@ -696,7 +741,8 @@ mlx5_txpp_update_timestamp(struct mlx5_dev_ctx_shared *sh)
         mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
         if (to.cts.op_own >> 4) {
                 DRV_LOG(DEBUG, "Clock Queue error sync lost.");
-                rte_atomic32_inc(&sh->txpp.err_clock_queue);
+                __atomic_fetch_add(&sh->txpp.err_clock_queue,
+                                   1, __ATOMIC_RELAXED);
                 sh->txpp.sync_lost = 1;
                 return;
         }
@@ -741,7 +787,10 @@ mlx5_txpp_gather_timestamp(struct mlx5_dev_ctx_shared *sh)
         if (!sh->txpp.clock_queue.sq_ci && !sh->txpp.ts_n)
                 return;
         MLX5_ASSERT(sh->txpp.ts_p < MLX5_TXPP_REARM_SQ_SIZE);
-        sh->txpp.tsa[sh->txpp.ts_p] = sh->txpp.ts;
+        __atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ts,
+                         sh->txpp.ts.ts, __ATOMIC_RELAXED);
+        __atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
+                         sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
         if (++sh->txpp.ts_p >= MLX5_TXPP_REARM_SQ_SIZE)
                 sh->txpp.ts_p = 0;
         if (sh->txpp.ts_n < MLX5_TXPP_REARM_SQ_SIZE)
@@ -782,7 +831,8 @@ mlx5_txpp_handle_rearm_queue(struct mlx5_dev_ctx_shared *sh)
         /* Check whether we have missed interrupts. */
         if (cq_ci - wq->cq_ci != 1) {
                 DRV_LOG(DEBUG, "Rearm Queue missed interrupt.");
-                rte_atomic32_inc(&sh->txpp.err_miss_int);
+                __atomic_fetch_add(&sh->txpp.err_miss_int,
+                                   1, __ATOMIC_RELAXED);
                 /* Check sync lost on wqe index. */
                 if (cq_ci - wq->cq_ci >=
                         (((1UL << MLX5_WQ_INDEX_WIDTH) /
@@ -797,7 +847,8 @@ mlx5_txpp_handle_rearm_queue(struct mlx5_dev_ctx_shared *sh)
         /* Fire new requests to Rearm Queue. */
         if (error) {
                 DRV_LOG(DEBUG, "Rearm Queue error sync lost.");
-                rte_atomic32_inc(&sh->txpp.err_rearm_queue);
+                __atomic_fetch_add(&sh->txpp.err_rearm_queue,
+                                   1, __ATOMIC_RELAXED);
                 sh->txpp.sync_lost = 1;
         }
 }
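The #else branch of mlx5_atomic_read_cqe() above, like mlx5_txpp_read_tsa() added later in this patch, relies on torn-read detection: load both 64-bit halves, then re-read and retry until a concurrent writer provably did not intervene. A distilled sketch of the retry pattern with hypothetical field names:

    #include <stdint.h>
    #include <rte_atomic.h>

    struct ts_pair {
            uint64_t ts;   /* timestamp half */
            uint64_t ci;   /* completion index half */
    };

    /* Spin until two consecutive reads of both halves agree, i.e.
     * the snapshot is not torn by the concurrent writer. */
    static inline struct ts_pair
    ts_pair_read(const struct ts_pair *p)
    {
            struct ts_pair snap;

            for (;;) {
                    snap.ts = __atomic_load_n(&p->ts, __ATOMIC_RELAXED);
                    snap.ci = __atomic_load_n(&p->ci, __ATOMIC_RELAXED);
                    rte_compiler_barrier();
                    if (snap.ts == __atomic_load_n(&p->ts,
                                                   __ATOMIC_RELAXED) &&
                        snap.ci == __atomic_load_n(&p->ci,
                                                   __ATOMIC_RELAXED))
                            return snap;
            }
    }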
@@ -857,24 +908,25 @@ static int
 mlx5_txpp_start_service(struct mlx5_dev_ctx_shared *sh)
 {
         uint16_t event_nums[1] = {0};
-        int flags;
         int ret;
+        int fd;
 
-        rte_atomic32_set(&sh->txpp.err_miss_int, 0);
-        rte_atomic32_set(&sh->txpp.err_rearm_queue, 0);
-        rte_atomic32_set(&sh->txpp.err_clock_queue, 0);
-        rte_atomic32_set(&sh->txpp.err_ts_past, 0);
-        rte_atomic32_set(&sh->txpp.err_ts_future, 0);
+        sh->txpp.err_miss_int = 0;
+        sh->txpp.err_rearm_queue = 0;
+        sh->txpp.err_clock_queue = 0;
+        sh->txpp.err_ts_past = 0;
+        sh->txpp.err_ts_future = 0;
         /* Attach interrupt handler to process Rearm Queue completions. */
-        flags = fcntl(sh->txpp.echan->fd, F_GETFL);
-        ret = fcntl(sh->txpp.echan->fd, F_SETFL, flags | O_NONBLOCK);
+        fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
+        ret = mlx5_os_set_nonblock_channel_fd(fd);
         if (ret) {
                 DRV_LOG(ERR, "Failed to change event channel FD.");
                 rte_errno = errno;
                 return -rte_errno;
         }
         memset(&sh->txpp.intr_handle, 0, sizeof(sh->txpp.intr_handle));
-        sh->txpp.intr_handle.fd = sh->txpp.echan->fd;
+        fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
+        sh->txpp.intr_handle.fd = fd;
         sh->txpp.intr_handle.type = RTE_INTR_HANDLE_EXT;
         if (rte_intr_callback_register(&sh->txpp.intr_handle,
                                        mlx5_txpp_interrupt_handler, sh)) {
@@ -921,7 +973,7 @@ mlx5_txpp_create(struct mlx5_dev_ctx_shared *sh, struct mlx5_priv *priv)
         sh->txpp.test = !!(tx_pp < 0);
         sh->txpp.skew = priv->config.tx_skew;
         sh->txpp.freq = priv->config.hca_attr.dev_freq_khz;
-        ret = mlx5_txpp_create_eqn(sh);
+        ret = mlx5_txpp_create_event_channel(sh);
         if (ret)
                 goto exit;
         ret = mlx5_txpp_alloc_pp_index(sh);
@@ -942,7 +994,7 @@ exit:
                 mlx5_txpp_destroy_rearm_queue(sh);
                 mlx5_txpp_destroy_clock_queue(sh);
                 mlx5_txpp_free_pp_index(sh);
-                mlx5_txpp_destroy_eqn(sh);
+                mlx5_txpp_destroy_event_channel(sh);
                 sh->txpp.tick = 0;
                 sh->txpp.test = 0;
                 sh->txpp.skew = 0;
@@ -964,7 +1016,7 @@ mlx5_txpp_destroy(struct mlx5_dev_ctx_shared *sh)
         mlx5_txpp_destroy_rearm_queue(sh);
         mlx5_txpp_destroy_clock_queue(sh);
         mlx5_txpp_free_pp_index(sh);
-        mlx5_txpp_destroy_eqn(sh);
+        mlx5_txpp_destroy_event_channel(sh);
         sh->txpp.tick = 0;
         sh->txpp.test = 0;
         sh->txpp.skew = 0;
@@ -1058,3 +1110,275 @@ mlx5_txpp_stop(struct rte_eth_dev *dev)
         MLX5_ASSERT(!ret);
         RTE_SET_USED(ret);
 }
+
+/*
+ * Read the current clock counter of an Ethernet device
+ *
+ * This returns the current raw clock value of an Ethernet device. It is
+ * a raw amount of ticks, with no given time reference.
+ * The value returned here is from the same clock as the one
+ * filling the timestamp field of Rx/Tx packets when using hardware timestamp
+ * offload. Therefore it can be used to compute a precise conversion of
+ * the device clock to the real time.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param timestamp
+ *   Pointer to the uint64_t that holds the raw clock value.
+ *
+ * @return
+ *   - 0: Success.
+ *   - -ENOTSUP: The function is not supported in this mode. Requires
+ *     packet pacing module configured and started (tx_pp devarg).
+ */
+int
+mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp)
+{
+        struct mlx5_priv *priv = dev->data->dev_private;
+        struct mlx5_dev_ctx_shared *sh = priv->sh;
+        int ret;
+
+        if (sh->txpp.refcnt) {
+                struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
+                struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;
+                union {
+                        rte_int128_t u128;
+                        struct mlx5_cqe_ts cts;
+                } to;
+                uint64_t ts;
+
+                mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
+                if (to.cts.op_own >> 4) {
+                        DRV_LOG(DEBUG, "Clock Queue error sync lost.");
+                        __atomic_fetch_add(&sh->txpp.err_clock_queue,
+                                           1, __ATOMIC_RELAXED);
+                        sh->txpp.sync_lost = 1;
+                        return -EIO;
+                }
+                ts = rte_be_to_cpu_64(to.cts.timestamp);
+                ts = mlx5_txpp_convert_rx_ts(sh, ts);
+                *timestamp = ts;
+                return 0;
+        }
+        /* Not supported in isolated mode - kernel does not see the CQEs. */
+        if (priv->isolated || rte_eal_process_type() != RTE_PROC_PRIMARY)
+                return -ENOTSUP;
+        ret = mlx5_read_clock(dev, timestamp);
+        return ret;
+}
+
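mlx5_txpp_read_clock() above ends up behind the generic rte_eth_read_clock() API, so an application can sample the raw NIC clock against the host clock and derive a conversion rate. A minimal sketch, assuming port_id is a started mlx5 port with tx_pp configured:

    #include <rte_ethdev.h>
    #include <rte_cycles.h>

    /* Rough device-ticks-per-second estimate: sample the raw clock
     * one second apart. Returns 0.0 when the port cannot report it. */
    static double
    dev_ticks_per_sec(uint16_t port_id)
    {
            uint64_t c0, c1;

            if (rte_eth_read_clock(port_id, &c0) != 0)
                    return 0.0;
            rte_delay_us_sleep(1000000);
            if (rte_eth_read_clock(port_id, &c1) != 0)
                    return 0.0;
            return (double)(c1 - c0);
    }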
+/**
+ * DPDK callback to clear device extended statistics.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ *
+ * @return
+ *   0 on success and stats are reset, negative errno value otherwise and
+ *   rte_errno is set.
+ */
+int mlx5_txpp_xstats_reset(struct rte_eth_dev *dev)
+{
+        struct mlx5_priv *priv = dev->data->dev_private;
+        struct mlx5_dev_ctx_shared *sh = priv->sh;
+
+        __atomic_store_n(&sh->txpp.err_miss_int, 0, __ATOMIC_RELAXED);
+        __atomic_store_n(&sh->txpp.err_rearm_queue, 0, __ATOMIC_RELAXED);
+        __atomic_store_n(&sh->txpp.err_clock_queue, 0, __ATOMIC_RELAXED);
+        __atomic_store_n(&sh->txpp.err_ts_past, 0, __ATOMIC_RELAXED);
+        __atomic_store_n(&sh->txpp.err_ts_future, 0, __ATOMIC_RELAXED);
+        return 0;
+}
+
+/**
+ * Routine to retrieve names of extended device statistics
+ * for packet send scheduling. It appends the specific stats names
+ * after the parts filled by preceding modules (eth stats, etc.)
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param[out] xstats_names
+ *   Buffer to insert names into.
+ * @param n
+ *   Number of names.
+ * @param n_used
+ *   Number of names filled by preceding statistics modules.
+ *
+ * @return
+ *   Number of xstats names.
+ */
+int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
+                               struct rte_eth_xstat_name *xstats_names,
+                               unsigned int n, unsigned int n_used)
+{
+        unsigned int n_txpp = RTE_DIM(mlx5_txpp_stat_names);
+        unsigned int i;
+
+        if (n >= n_used + n_txpp && xstats_names) {
+                for (i = 0; i < n_txpp; ++i) {
+                        strncpy(xstats_names[i + n_used].name,
+                                mlx5_txpp_stat_names[i],
+                                RTE_ETH_XSTATS_NAME_SIZE);
+                        xstats_names[i + n_used].name
+                                [RTE_ETH_XSTATS_NAME_SIZE - 1] = 0;
+                }
+        }
+        return n_used + n_txpp;
+}
+
+static inline void
+mlx5_txpp_read_tsa(struct mlx5_dev_txpp *txpp,
+                   struct mlx5_txpp_ts *tsa, uint16_t idx)
+{
+        do {
+                uint64_t ts, ci;
+
+                ts = __atomic_load_n(&txpp->tsa[idx].ts, __ATOMIC_RELAXED);
+                ci = __atomic_load_n(&txpp->tsa[idx].ci_ts, __ATOMIC_RELAXED);
+                rte_compiler_barrier();
+                if ((ci ^ ts) << MLX5_CQ_INDEX_WIDTH != 0)
+                        continue;
+                if (__atomic_load_n(&txpp->tsa[idx].ts,
+                                    __ATOMIC_RELAXED) != ts)
+                        continue;
+                if (__atomic_load_n(&txpp->tsa[idx].ci_ts,
+                                    __ATOMIC_RELAXED) != ci)
+                        continue;
+                tsa->ts = ts;
+                tsa->ci_ts = ci;
+                return;
+        } while (true);
+}
+
+/*
+ * Jitter reflects the clock change between
+ * neighboring Clock Queue completions.
+ */
+static uint64_t
+mlx5_txpp_xstats_jitter(struct mlx5_dev_txpp *txpp)
+{
+        struct mlx5_txpp_ts tsa0, tsa1;
+        int64_t dts, dci;
+        uint16_t ts_p;
+
+        if (txpp->ts_n < 2) {
+                /* Not enough reports gathered yet. */
+                return 0;
+        }
+        do {
+                int ts_0, ts_1;
+
+                ts_p = txpp->ts_p;
+                rte_compiler_barrier();
+                ts_0 = ts_p - 2;
+                if (ts_0 < 0)
+                        ts_0 += MLX5_TXPP_REARM_SQ_SIZE;
+                ts_1 = ts_p - 1;
+                if (ts_1 < 0)
+                        ts_1 += MLX5_TXPP_REARM_SQ_SIZE;
+                mlx5_txpp_read_tsa(txpp, &tsa0, ts_0);
+                mlx5_txpp_read_tsa(txpp, &tsa1, ts_1);
+                rte_compiler_barrier();
+        } while (ts_p != txpp->ts_p);
+        /* We have two neighbor reports, calculate the jitter. */
+        dts = tsa1.ts - tsa0.ts;
+        dci = (tsa1.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH)) -
+              (tsa0.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH));
+        if (dci < 0)
+                dci += 1 << MLX5_CQ_INDEX_WIDTH;
+        dci *= txpp->tick;
+        return (dts > dci) ? dts - dci : dci - dts;
+}
+
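The jitter above is the absolute difference between the measured timestamp delta of the two most recent Clock Queue completions and the ideal delta, i.e. the completion-index distance multiplied by the configured tick. A tiny numeric illustration, assuming the timestamps and the tick share the same units:

    /* jitter = |dts - dci * tick| */
    int64_t  tick  = 500;  /* e.g. tx_pp=500 scheduling granularity */
    int64_t  dts   = 512;  /* measured delta of the two timestamps  */
    int64_t  dci   = 1;    /* completions are one index apart       */
    int64_t  ideal = dci * tick;
    uint64_t jitter = (dts > ideal) ? dts - ideal : ideal - dts; /* 12 */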
+/*
+ * Wander reflects the long-term clock change
+ * over the entire length of all Clock Queue completions.
+ */
+static uint64_t
+mlx5_txpp_xstats_wander(struct mlx5_dev_txpp *txpp)
+{
+        struct mlx5_txpp_ts tsa0, tsa1;
+        int64_t dts, dci;
+        uint16_t ts_p;
+
+        if (txpp->ts_n < MLX5_TXPP_REARM_SQ_SIZE) {
+                /* Not enough reports gathered yet. */
+                return 0;
+        }
+        do {
+                int ts_0, ts_1;
+
+                ts_p = txpp->ts_p;
+                rte_compiler_barrier();
+                ts_0 = ts_p - MLX5_TXPP_REARM_SQ_SIZE / 2 - 1;
+                if (ts_0 < 0)
+                        ts_0 += MLX5_TXPP_REARM_SQ_SIZE;
+                ts_1 = ts_p - 1;
+                if (ts_1 < 0)
+                        ts_1 += MLX5_TXPP_REARM_SQ_SIZE;
+                mlx5_txpp_read_tsa(txpp, &tsa0, ts_0);
+                mlx5_txpp_read_tsa(txpp, &tsa1, ts_1);
+                rte_compiler_barrier();
+        } while (ts_p != txpp->ts_p);
+        /* We have two reports half a ring apart, calculate the wander. */
+        dts = tsa1.ts - tsa0.ts;
+        dci = (tsa1.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH)) -
+              (tsa0.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH));
+        dci += 1 << MLX5_CQ_INDEX_WIDTH;
+        dci *= txpp->tick;
+        return (dts > dci) ? dts - dci : dci - dts;
+}
+
+/**
+ * Routine to retrieve extended device statistics
+ * for packet send scheduling. It appends the specific statistics
+ * after the parts filled by preceding modules (eth stats, etc.)
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param[out] stats
+ *   Pointer to rte extended stats table.
+ * @param n
+ *   The size of the stats table.
+ * @param n_used
+ *   Number of stats filled by preceding statistics modules.
+ *
+ * @return
+ *   Number of extended stats on success and stats is filled,
+ *   negative on error and rte_errno is set.
+ */
+int
+mlx5_txpp_xstats_get(struct rte_eth_dev *dev,
+                     struct rte_eth_xstat *stats,
+                     unsigned int n, unsigned int n_used)
+{
+        unsigned int n_txpp = RTE_DIM(mlx5_txpp_stat_names);
+
+        if (n >= n_used + n_txpp && stats) {
+                struct mlx5_priv *priv = dev->data->dev_private;
+                struct mlx5_dev_ctx_shared *sh = priv->sh;
+                unsigned int i;
+
+                for (i = 0; i < n_txpp; ++i)
+                        stats[n_used + i].id = n_used + i;
+                stats[n_used + 0].value =
+                        __atomic_load_n(&sh->txpp.err_miss_int,
+                                        __ATOMIC_RELAXED);
+                stats[n_used + 1].value =
+                        __atomic_load_n(&sh->txpp.err_rearm_queue,
+                                        __ATOMIC_RELAXED);
+                stats[n_used + 2].value =
+                        __atomic_load_n(&sh->txpp.err_clock_queue,
+                                        __ATOMIC_RELAXED);
+                stats[n_used + 3].value =
+                        __atomic_load_n(&sh->txpp.err_ts_past,
+                                        __ATOMIC_RELAXED);
+                stats[n_used + 4].value =
+                        __atomic_load_n(&sh->txpp.err_ts_future,
+                                        __ATOMIC_RELAXED);
+                stats[n_used + 5].value = mlx5_txpp_xstats_jitter(&sh->txpp);
+                stats[n_used + 6].value = mlx5_txpp_xstats_wander(&sh->txpp);
+                stats[n_used + 7].value = sh->txpp.sync_lost;
+        }
+        return n_used + n_txpp;
+}
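Taken together, the callbacks above expose the txpp_* counters through the standard xstats interface; a minimal retrieval sketch that filters them by name prefix (assuming an initialized port):

    #include <inttypes.h>
    #include <stdio.h>
    #include <string.h>
    #include <rte_ethdev.h>

    /* Print only the packet send scheduling counters. */
    static void
    print_txpp_xstats(uint16_t port_id)
    {
            int i, n = rte_eth_xstats_get(port_id, NULL, 0);

            if (n <= 0)
                    return;
            struct rte_eth_xstat_name names[n];
            struct rte_eth_xstat vals[n];

            if (rte_eth_xstats_get_names(port_id, names, n) != n ||
                rte_eth_xstats_get(port_id, vals, n) != n)
                    return;
            for (i = 0; i < n; i++)
                    if (strncmp(names[i].name, "txpp_", 5) == 0)
                            printf("%s: %" PRIu64 "\n",
                                   names[i].name, vals[i].value);
    }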