net/mlx5: remove redundant flag in device config
[dpdk.git] / drivers / net / mlx5 / mlx5_txpp.c
index cdb0079..2be7e71 100644
@@ -5,59 +5,55 @@
 #include <stdint.h>
 
 #include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_interrupts.h>
 #include <rte_alarm.h>
 #include <rte_malloc.h>
 #include <rte_cycles.h>
+#include <rte_eal_paging.h>
+
+#include <mlx5_malloc.h>
+#include <mlx5_common_devx.h>
 
 #include "mlx5.h"
-#include "mlx5_rxtx.h"
+#include "mlx5_rx.h"
+#include "mlx5_tx.h"
 #include "mlx5_common_os.h"
 
+static_assert(sizeof(struct mlx5_cqe_ts) == sizeof(rte_int128_t),
+               "Wrong timestamp CQE part size");
+
 static const char * const mlx5_txpp_stat_names[] = {
-       "txpp_err_miss_int", /* Missed service interrupt. */
-       "txpp_err_rearm_queue", /* Rearm Queue errors. */
-       "txpp_err_clock_queue", /* Clock Queue errors. */
-       "txpp_err_ts_past", /* Timestamp in the past. */
-       "txpp_err_ts_future", /* Timestamp in the distant future. */
-       "txpp_jitter", /* Timestamp jitter (one Clock Queue completion). */
-       "txpp_wander", /* Timestamp jitter (half of Clock Queue completions). */
-       "txpp_sync_lost", /* Scheduling synchronization lost. */
+       "tx_pp_missed_interrupt_errors", /* Missed service interrupt. */
+       "tx_pp_rearm_queue_errors", /* Rearm Queue errors. */
+       "tx_pp_clock_queue_errors", /* Clock Queue errors. */
+       "tx_pp_timestamp_past_errors", /* Timestamp in the past. */
+       "tx_pp_timestamp_future_errors", /* Timestamp in the distant future. */
+       "tx_pp_jitter", /* Timestamp jitter (one Clock Queue completion). */
+       "tx_pp_wander", /* Timestamp wander (half of Clock Queue CQEs). */
+       "tx_pp_sync_lost", /* Scheduling synchronization lost. */
 };
 
 /* Destroy Event Queue Notification Channel. */
 static void
-mlx5_txpp_destroy_eqn(struct mlx5_dev_ctx_shared *sh)
+mlx5_txpp_destroy_event_channel(struct mlx5_dev_ctx_shared *sh)
 {
        if (sh->txpp.echan) {
-               mlx5_glue->devx_destroy_event_channel(sh->txpp.echan);
+               mlx5_os_devx_destroy_event_channel(sh->txpp.echan);
                sh->txpp.echan = NULL;
        }
-       sh->txpp.eqn = 0;
 }
 
 /* Create Event Queue Notification Channel. */
 static int
-mlx5_txpp_create_eqn(struct mlx5_dev_ctx_shared *sh)
+mlx5_txpp_create_event_channel(struct mlx5_dev_ctx_shared *sh)
 {
-       uint32_t lcore;
-
        MLX5_ASSERT(!sh->txpp.echan);
-       lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
-       if (mlx5_glue->devx_query_eqn(sh->ctx, lcore, &sh->txpp.eqn)) {
-               rte_errno = errno;
-               DRV_LOG(ERR, "Failed to query EQ number %d.", rte_errno);
-               sh->txpp.eqn = 0;
-               return -rte_errno;
-       }
-       sh->txpp.echan = mlx5_glue->devx_create_event_channel(sh->ctx,
+       sh->txpp.echan = mlx5_os_devx_create_event_channel(sh->ctx,
                        MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
        if (!sh->txpp.echan) {
-               sh->txpp.eqn = 0;
                rte_errno = errno;
-               DRV_LOG(ERR, "Failed to create event channel %d.",
-                       rte_errno);
+               DRV_LOG(ERR, "Failed to create event channel %d.", rte_errno);
                return -rte_errno;
        }
        return 0;
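
Note: the channel is created with MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA, which (as the name suggests) delivers subscribed events without their payload; that is sufficient here because the Rearm Queue interrupt handler only needs the wakeup, not the event data itself.
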
@@ -66,11 +62,16 @@ mlx5_txpp_create_eqn(struct mlx5_dev_ctx_shared *sh)
 static void
 mlx5_txpp_free_pp_index(struct mlx5_dev_ctx_shared *sh)
 {
+#ifdef HAVE_MLX5DV_PP_ALLOC
        if (sh->txpp.pp) {
                mlx5_glue->dv_free_pp(sh->txpp.pp);
                sh->txpp.pp = NULL;
                sh->txpp.pp_id = 0;
        }
+#else
+       RTE_SET_USED(sh);
+       DRV_LOG(ERR, "Freeing pacing index is not supported.");
+#endif
 }
 
 /* Allocate Packet Pacing index from kernel via mlx5dv call. */
@@ -110,13 +111,13 @@ mlx5_txpp_alloc_pp_index(struct mlx5_dev_ctx_shared *sh)
                rte_errno = errno;
                return -errno;
        }
-       if (!sh->txpp.pp->index) {
+       if (!((struct mlx5dv_pp *)sh->txpp.pp)->index) {
                DRV_LOG(ERR, "Zero packet pacing index allocated.");
                mlx5_txpp_free_pp_index(sh);
                rte_errno = ENOTSUP;
                return -ENOTSUP;
        }
-       sh->txpp.pp_id = sh->txpp.pp->index;
+       sh->txpp.pp_id = ((struct mlx5dv_pp *)(sh->txpp.pp))->index;
        return 0;
 #else
        RTE_SET_USED(sh);
@@ -129,18 +130,8 @@ mlx5_txpp_alloc_pp_index(struct mlx5_dev_ctx_shared *sh)
 static void
 mlx5_txpp_destroy_send_queue(struct mlx5_txpp_wq *wq)
 {
-       if (wq->sq)
-               claim_zero(mlx5_devx_cmd_destroy(wq->sq));
-       if (wq->sq_umem)
-               claim_zero(mlx5_glue->devx_umem_dereg(wq->sq_umem));
-       if (wq->sq_buf)
-               rte_free((void *)(uintptr_t)wq->sq_buf);
-       if (wq->cq)
-               claim_zero(mlx5_devx_cmd_destroy(wq->cq));
-       if (wq->cq_umem)
-               claim_zero(mlx5_glue->devx_umem_dereg(wq->cq_umem));
-       if (wq->cq_buf)
-               rte_free((void *)(uintptr_t)wq->cq_buf);
+       mlx5_devx_sq_destroy(&wq->sq_obj);
+       mlx5_devx_cq_destroy(&wq->cq_obj);
        memset(wq, 0, sizeof(*wq));
 }
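
Note: mlx5_devx_cq_destroy() and mlx5_devx_sq_destroy() from mlx5_common_devx release the DevX object, the registered umem and the ring/doorbell buffer as one unit, replacing the six conditional destroy/dereg/free calls above and removing the possibility of a half-released queue.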
 
@@ -159,7 +150,7 @@ mlx5_txpp_destroy_clock_queue(struct mlx5_dev_ctx_shared *sh)
 
        mlx5_txpp_destroy_send_queue(wq);
        if (sh->txpp.tsa) {
-               rte_free(sh->txpp.tsa);
+               mlx5_free(sh->txpp.tsa);
                sh->txpp.tsa = NULL;
        }
 }
@@ -168,43 +159,33 @@ static void
 mlx5_txpp_doorbell_rearm_queue(struct mlx5_dev_ctx_shared *sh, uint16_t ci)
 {
        struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
+       struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->sq_obj.wqes;
        union {
                uint32_t w32[2];
                uint64_t w64;
        } cs;
+       void *reg_addr;
 
        wq->sq_ci = ci + 1;
        cs.w32[0] = rte_cpu_to_be_32(rte_be_to_cpu_32
-                  (wq->wqes[ci & (wq->sq_size - 1)].ctrl[0]) | (ci - 1) << 8);
-       cs.w32[1] = wq->wqes[ci & (wq->sq_size - 1)].ctrl[1];
+                       (wqe[ci & (wq->sq_size - 1)].ctrl[0]) | (ci - 1) << 8);
+       cs.w32[1] = wqe[ci & (wq->sq_size - 1)].ctrl[1];
        /* Update SQ doorbell record with new SQ ci. */
        rte_compiler_barrier();
-       *wq->sq_dbrec = rte_cpu_to_be_32(wq->sq_ci);
+       *wq->sq_obj.db_rec = rte_cpu_to_be_32(wq->sq_ci);
        /* Make sure the doorbell record is updated. */
        rte_wmb();
        /* Write to the doorbell register to start processing. */
-       __mlx5_uar_write64_relaxed(cs.w64, sh->tx_uar->reg_addr, NULL);
+       reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
+       __mlx5_uar_write64_relaxed(cs.w64, reg_addr, NULL);
        rte_wmb();
 }
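
For readers new to the UAR flow, the function above follows the generic doorbell sequence: publish the new producer index in the doorbell record, then write the control word to the UAR register, with barriers enforcing the order the hardware expects. A minimal standalone sketch under that assumption (ring_doorbell and its parameters are illustrative names, not driver API):

    #include <stdint.h>
    #include <rte_atomic.h>
    #include <rte_byteorder.h>

    /* Order matters: WQE contents must be visible before the doorbell
     * record, and the record before the UAR register write. A 64-bit
     * architecture is assumed so the UAR word is a single store. */
    static inline void
    ring_doorbell(volatile uint32_t *db_rec, volatile uint64_t *uar_reg,
                  uint16_t sq_ci, uint64_t ctrl)
    {
            rte_compiler_barrier();          /* WQE stores before dbrec. */
            *db_rec = rte_cpu_to_be_32(sq_ci);
            rte_wmb();                       /* dbrec before UAR write. */
            *uar_reg = ctrl;                 /* Kick the hardware. */
            rte_wmb();                       /* Flush write combining. */
    }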
 
-static void
-mlx5_txpp_fill_cqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
-{
-       struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
-       struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;
-       uint32_t i;
-
-       for (i = 0; i < MLX5_TXPP_REARM_CQ_SIZE; i++) {
-               cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
-               ++cqe;
-       }
-}
-
 static void
 mlx5_txpp_fill_wqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
 {
        struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
-       struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes;
+       struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->sq_obj.wqes;
        uint32_t i;
 
        for (i = 0; i < wq->sq_size; i += 2) {
@@ -215,7 +196,7 @@ mlx5_txpp_fill_wqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
                /* Build SEND_EN request with slave WQE index. */
                cs = &wqe[i + 0].cseg;
                cs->opcode = RTE_BE32(MLX5_OPCODE_SEND_EN | 0);
-               cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2);
+               cs->sq_ds = rte_cpu_to_be_32((wq->sq_obj.sq->id << 8) | 2);
                cs->flags = RTE_BE32(MLX5_COMP_ALWAYS <<
                                     MLX5_COMP_MODE_OFFSET);
                cs->misc = RTE_BE32(0);
@@ -223,11 +204,12 @@ mlx5_txpp_fill_wqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
                index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM) &
                        ((1 << MLX5_WQ_INDEX_WIDTH) - 1);
                qs->max_index = rte_cpu_to_be_32(index);
-               qs->qpn_cqn = rte_cpu_to_be_32(sh->txpp.clock_queue.sq->id);
+               qs->qpn_cqn =
+                          rte_cpu_to_be_32(sh->txpp.clock_queue.sq_obj.sq->id);
                /* Build WAIT request with slave CQE index. */
                cs = &wqe[i + 1].cseg;
                cs->opcode = RTE_BE32(MLX5_OPCODE_WAIT | 0);
-               cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2);
+               cs->sq_ds = rte_cpu_to_be_32((wq->sq_obj.sq->id << 8) | 2);
                cs->flags = RTE_BE32(MLX5_COMP_ONLY_ERR <<
                                     MLX5_COMP_MODE_OFFSET);
                cs->misc = RTE_BE32(0);
@@ -235,7 +217,8 @@ mlx5_txpp_fill_wqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
                index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM / 2) &
                        ((1 << MLX5_CQ_INDEX_WIDTH) - 1);
                qs->max_index = rte_cpu_to_be_32(index);
-               qs->qpn_cqn = rte_cpu_to_be_32(sh->txpp.clock_queue.cq->id);
+               qs->qpn_cqn =
+                          rte_cpu_to_be_32(sh->txpp.clock_queue.cq_obj.cq->id);
        }
 }
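
The max_index arithmetic above keeps the Rearm Queue half a window ahead of the Clock Queue: WQE pair i posts a SEND_EN that releases Clock Queue WQEs up to index i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM, and a WAIT that blocks on Clock Queue CQE i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM / 2, each wrapped to its index width, so completing one wait window always re-arms the next send window.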
 
@@ -243,115 +226,53 @@ mlx5_txpp_fill_wqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
 static int
 mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
 {
-       struct mlx5_devx_create_sq_attr sq_attr = { 0 };
+       struct mlx5_devx_create_sq_attr sq_attr = {
+               .cd_master = 1,
+               .state = MLX5_SQC_STATE_RST,
+               .tis_lst_sz = 1,
+               .tis_num = sh->tis->id,
+               .wq_attr = (struct mlx5_devx_wq_attr){
+                       .pd = sh->pdn,
+                       .uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
+               },
+               .ts_format = mlx5_ts_format_conv(sh->sq_ts_format),
+       };
        struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
-       struct mlx5_devx_cq_attr cq_attr = { 0 };
+       struct mlx5_devx_cq_attr cq_attr = {
+               .uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
+       };
        struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
-       size_t page_size = sysconf(_SC_PAGESIZE);
-       uint32_t umem_size, umem_dbrec;
        int ret;
 
-       /* Allocate memory buffer for CQEs and doorbell record. */
-       umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_REARM_CQ_SIZE;
-       umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
-       umem_size += MLX5_DBR_SIZE;
-       wq->cq_buf = rte_zmalloc_socket(__func__, umem_size,
-                                       page_size, sh->numa_node);
-       if (!wq->cq_buf) {
-               DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
-               return -ENOMEM;
-       }
-       /* Register allocated buffer in user space with DevX. */
-       wq->cq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
-                                              (void *)(uintptr_t)wq->cq_buf,
-                                              umem_size,
-                                              IBV_ACCESS_LOCAL_WRITE);
-       if (!wq->cq_umem) {
-               rte_errno = errno;
-               DRV_LOG(ERR, "Failed to register umem for Rearm Queue.");
-               goto error;
-       }
        /* Create completion queue object for Rearm Queue. */
-       cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
-                           MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
-       cq_attr.uar_page_id = sh->tx_uar->page_id;
-       cq_attr.eqn = sh->txpp.eqn;
-       cq_attr.q_umem_valid = 1;
-       cq_attr.q_umem_offset = 0;
-       cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
-       cq_attr.db_umem_valid = 1;
-       cq_attr.db_umem_offset = umem_dbrec;
-       cq_attr.db_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
-       cq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_REARM_CQ_SIZE);
-       cq_attr.log_page_size = rte_log2_u32(page_size);
-       wq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
-       if (!wq->cq) {
-               rte_errno = errno;
+       ret = mlx5_devx_cq_create(sh->ctx, &wq->cq_obj,
+                                 log2above(MLX5_TXPP_REARM_CQ_SIZE), &cq_attr,
+                                 sh->numa_node);
+       if (ret) {
                DRV_LOG(ERR, "Failed to create CQ for Rearm Queue.");
-               goto error;
+               return ret;
        }
-       wq->cq_dbrec = RTE_PTR_ADD(wq->cq_buf, umem_dbrec);
        wq->cq_ci = 0;
        wq->arm_sn = 0;
-       /* Mark all CQEs initially as invalid. */
-       mlx5_txpp_fill_cqe_rearm_queue(sh);
-       /*
-        * Allocate memory buffer for Send Queue WQEs.
-        * There should be no WQE leftovers in the cyclic queue.
-        */
        wq->sq_size = MLX5_TXPP_REARM_SQ_SIZE;
        MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
-       umem_size =  MLX5_WQE_SIZE * wq->sq_size;
-       umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
-       umem_size += MLX5_DBR_SIZE;
-       wq->sq_buf = rte_zmalloc_socket(__func__, umem_size,
-                                       page_size, sh->numa_node);
-       if (!wq->sq_buf) {
-               DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
-               rte_errno = ENOMEM;
-               goto error;
-       }
-       /* Register allocated buffer in user space with DevX. */
-       wq->sq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
-                                              (void *)(uintptr_t)wq->sq_buf,
-                                              umem_size,
-                                              IBV_ACCESS_LOCAL_WRITE);
-       if (!wq->sq_umem) {
-               rte_errno = errno;
-               DRV_LOG(ERR, "Failed to register umem for Rearm Queue.");
-               goto error;
-       }
        /* Create send queue object for Rearm Queue. */
-       sq_attr.state = MLX5_SQC_STATE_RST;
-       sq_attr.tis_lst_sz = 1;
-       sq_attr.tis_num = sh->tis->id;
-       sq_attr.cqn = wq->cq->id;
-       sq_attr.cd_master = 1;
-       sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
-       sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
-       sq_attr.wq_attr.pd = sh->pdn;
-       sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
-       sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
-       sq_attr.wq_attr.dbr_umem_valid = 1;
-       sq_attr.wq_attr.dbr_addr = umem_dbrec;
-       sq_attr.wq_attr.dbr_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
-       sq_attr.wq_attr.wq_umem_valid = 1;
-       sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
-       sq_attr.wq_attr.wq_umem_offset = 0;
-       wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
-       if (!wq->sq) {
+       sq_attr.cqn = wq->cq_obj.cq->id;
+       /* There should be no WQE leftovers in the cyclic queue. */
+       ret = mlx5_devx_sq_create(sh->ctx, &wq->sq_obj,
+                                 log2above(MLX5_TXPP_REARM_SQ_SIZE), &sq_attr,
+                                 sh->numa_node);
+       if (ret) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to create SQ for Rearm Queue.");
                goto error;
        }
-       wq->sq_dbrec = RTE_PTR_ADD(wq->sq_buf, umem_dbrec +
-                                  MLX5_SND_DBR * sizeof(uint32_t));
        /* Build the WQEs in the Send Queue before goto Ready state. */
        mlx5_txpp_fill_wqe_rearm_queue(sh);
        /* Change queue state to ready. */
        msq_attr.sq_state = MLX5_SQC_STATE_RST;
        msq_attr.state = MLX5_SQC_STATE_RDY;
-       ret = mlx5_devx_cmd_modify_sq(wq->sq, &msq_attr);
+       ret = mlx5_devx_cmd_modify_sq(wq->sq_obj.sq, &msq_attr);
        if (ret) {
                DRV_LOG(ERR, "Failed to set SQ ready state Rearm Queue.");
                goto error;
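
A note on the design choice: the removed lines hand-rolled the queue memory (rte_zmalloc of ring plus doorbell record, devx_umem_reg, and a dozen q_umem/db_umem attribute fields); mlx5_devx_cq_create() and mlx5_devx_sq_create() perform that allocation and registration internally, which is why only queue-specific attributes such as cd_master, tis_num and ts_format remain in this function.
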
@@ -368,7 +289,7 @@ static void
 mlx5_txpp_fill_wqe_clock_queue(struct mlx5_dev_ctx_shared *sh)
 {
        struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
-       struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes;
+       struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->sq_obj.wqes;
        struct mlx5_wqe_cseg *cs = &wqe->cseg;
        uint32_t wqe_size, opcode, i;
        uint8_t *dst;
@@ -386,7 +307,7 @@ mlx5_txpp_fill_wqe_clock_queue(struct mlx5_dev_ctx_shared *sh)
                opcode = MLX5_OPCODE_NOP;
        }
        cs->opcode = rte_cpu_to_be_32(opcode | 0); /* Index is ignored. */
-       cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) |
+       cs->sq_ds = rte_cpu_to_be_32((wq->sq_obj.sq->id << 8) |
                                     (wqe_size / MLX5_WSEG_SIZE));
        cs->flags = RTE_BE32(MLX5_COMP_ALWAYS << MLX5_COMP_MODE_OFFSET);
        cs->misc = RTE_BE32(0);
@@ -412,8 +333,8 @@ mlx5_txpp_fill_wqe_clock_queue(struct mlx5_dev_ctx_shared *sh)
                /* Build test packet L2 header (Ethernet). */
                dst = (uint8_t *)&es->inline_data;
                eth_hdr = (struct rte_ether_hdr *)dst;
-               rte_eth_random_addr(&eth_hdr->d_addr.addr_bytes[0]);
-               rte_eth_random_addr(&eth_hdr->s_addr.addr_bytes[0]);
+               rte_eth_random_addr(&eth_hdr->dst_addr.addr_bytes[0]);
+               rte_eth_random_addr(&eth_hdr->src_addr.addr_bytes[0]);
                eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
                /* Build test packet L3 header (IP v4). */
                dst += sizeof(struct rte_ether_hdr);
@@ -455,10 +376,11 @@ mlx5_txpp_fill_wqe_clock_queue(struct mlx5_dev_ctx_shared *sh)
        }
 wcopy:
        /* Duplicate the pattern to the next WQEs. */
-       dst = (uint8_t *)(uintptr_t)wq->sq_buf;
+       dst = (uint8_t *)(uintptr_t)wq->sq_obj.umem_buf;
        for (i = 1; i < MLX5_TXPP_CLKQ_SIZE; i++) {
                dst += wqe_size;
-               rte_memcpy(dst, (void *)(uintptr_t)wq->sq_buf, wqe_size);
+               rte_memcpy(dst, (void *)(uintptr_t)wq->sq_obj.umem_buf,
+                          wqe_size);
        }
 }
 
@@ -468,64 +390,32 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
 {
        struct mlx5_devx_create_sq_attr sq_attr = { 0 };
        struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
-       struct mlx5_devx_cq_attr cq_attr = { 0 };
+       struct mlx5_devx_cq_attr cq_attr = {
+               .use_first_only = 1,
+               .overrun_ignore = 1,
+               .uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
+       };
        struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
-       size_t page_size = sysconf(_SC_PAGESIZE);
-       uint32_t umem_size, umem_dbrec;
        int ret;
 
-       sh->txpp.tsa = rte_zmalloc_socket(__func__,
-                                          MLX5_TXPP_REARM_SQ_SIZE *
-                                          sizeof(struct mlx5_txpp_ts),
-                                          0, sh->numa_node);
+       sh->txpp.tsa = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+                                  MLX5_TXPP_REARM_SQ_SIZE *
+                                  sizeof(struct mlx5_txpp_ts),
+                                  0, sh->numa_node);
        if (!sh->txpp.tsa) {
                DRV_LOG(ERR, "Failed to allocate memory for CQ stats.");
                return -ENOMEM;
        }
        sh->txpp.ts_p = 0;
        sh->txpp.ts_n = 0;
-       /* Allocate memory buffer for CQEs and doorbell record. */
-       umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_CLKQ_SIZE;
-       umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
-       umem_size += MLX5_DBR_SIZE;
-       wq->cq_buf = rte_zmalloc_socket(__func__, umem_size,
-                                       page_size, sh->numa_node);
-       if (!wq->cq_buf) {
-               DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
-               return -ENOMEM;
-       }
-       /* Register allocated buffer in user space with DevX. */
-       wq->cq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
-                                              (void *)(uintptr_t)wq->cq_buf,
-                                              umem_size,
-                                              IBV_ACCESS_LOCAL_WRITE);
-       if (!wq->cq_umem) {
-               rte_errno = errno;
-               DRV_LOG(ERR, "Failed to register umem for Clock Queue.");
-               goto error;
-       }
        /* Create completion queue object for Clock Queue. */
-       cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
-                           MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
-       cq_attr.use_first_only = 1;
-       cq_attr.overrun_ignore = 1;
-       cq_attr.uar_page_id = sh->tx_uar->page_id;
-       cq_attr.eqn = sh->txpp.eqn;
-       cq_attr.q_umem_valid = 1;
-       cq_attr.q_umem_offset = 0;
-       cq_attr.q_umem_id = wq->cq_umem->umem_id;
-       cq_attr.db_umem_valid = 1;
-       cq_attr.db_umem_offset = umem_dbrec;
-       cq_attr.db_umem_id = wq->cq_umem->umem_id;
-       cq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_CLKQ_SIZE);
-       cq_attr.log_page_size = rte_log2_u32(page_size);
-       wq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
-       if (!wq->cq) {
-               rte_errno = errno;
+       ret = mlx5_devx_cq_create(sh->ctx, &wq->cq_obj,
+                                 log2above(MLX5_TXPP_CLKQ_SIZE), &cq_attr,
+                                 sh->numa_node);
+       if (ret) {
                DRV_LOG(ERR, "Failed to create CQ for Clock Queue.");
                goto error;
        }
-       wq->cq_dbrec = RTE_PTR_ADD(wq->cq_buf, umem_dbrec);
        wq->cq_ci = 0;
        /* Allocate memory buffer for Send Queue WQEs. */
        if (sh->txpp.test) {
@@ -540,26 +430,6 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
        }
        /* There should not be WQE leftovers in the cyclic queue. */
        MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
-       umem_size =  MLX5_WQE_SIZE * wq->sq_size;
-       umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
-       umem_size += MLX5_DBR_SIZE;
-       wq->sq_buf = rte_zmalloc_socket(__func__, umem_size,
-                                       page_size, sh->numa_node);
-       if (!wq->sq_buf) {
-               DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
-               rte_errno = ENOMEM;
-               goto error;
-       }
-       /* Register allocated buffer in user space with DevX. */
-       wq->sq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
-                                              (void *)(uintptr_t)wq->sq_buf,
-                                              umem_size,
-                                              IBV_ACCESS_LOCAL_WRITE);
-       if (!wq->sq_umem) {
-               rte_errno = errno;
-               DRV_LOG(ERR, "Failed to register umem for Clock Queue.");
-               goto error;
-       }
        /* Create send queue object for Clock Queue. */
        if (sh->txpp.test) {
                sq_attr.tis_lst_sz = 1;
@@ -570,37 +440,26 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
                sq_attr.non_wire = 1;
                sq_attr.static_sq_wq = 1;
        }
-       sq_attr.state = MLX5_SQC_STATE_RST;
-       sq_attr.cqn = wq->cq->id;
+       sq_attr.cqn = wq->cq_obj.cq->id;
        sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
        sq_attr.wq_attr.cd_slave = 1;
-       sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
-       sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
+       sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
        sq_attr.wq_attr.pd = sh->pdn;
-       sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
-       sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
-       sq_attr.wq_attr.dbr_umem_valid = 1;
-       sq_attr.wq_attr.dbr_addr = umem_dbrec;
-       sq_attr.wq_attr.dbr_umem_id = wq->sq_umem->umem_id;
-       sq_attr.wq_attr.wq_umem_valid = 1;
-       sq_attr.wq_attr.wq_umem_id = wq->sq_umem->umem_id;
-       /* umem_offset must be zero for static_sq_wq queue. */
-       sq_attr.wq_attr.wq_umem_offset = 0;
-       wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
-       if (!wq->sq) {
+       sq_attr.ts_format = mlx5_ts_format_conv(sh->sq_ts_format);
+       ret = mlx5_devx_sq_create(sh->ctx, &wq->sq_obj, log2above(wq->sq_size),
+                                 &sq_attr, sh->numa_node);
+       if (ret) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to create SQ for Clock Queue.");
                goto error;
        }
-       wq->sq_dbrec = RTE_PTR_ADD(wq->sq_buf, umem_dbrec +
-                                  MLX5_SND_DBR * sizeof(uint32_t));
        /* Build the WQEs in the Send Queue before goto Ready state. */
        mlx5_txpp_fill_wqe_clock_queue(sh);
        /* Change queue state to ready. */
        msq_attr.sq_state = MLX5_SQC_STATE_RST;
        msq_attr.state = MLX5_SQC_STATE_RDY;
        wq->sq_ci = 0;
-       ret = mlx5_devx_cmd_modify_sq(wq->sq, &msq_attr);
+       ret = mlx5_devx_cmd_modify_sq(wq->sq_obj.sq, &msq_attr);
        if (ret) {
                DRV_LOG(ERR, "Failed to set SQ ready state Clock Queue.");
                goto error;
@@ -617,14 +476,18 @@ error:
 static inline void
 mlx5_txpp_cq_arm(struct mlx5_dev_ctx_shared *sh)
 {
+       void *base_addr;
+
        struct mlx5_txpp_wq *aq = &sh->txpp.rearm_queue;
        uint32_t arm_sn = aq->arm_sn << MLX5_CQ_SQN_OFFSET;
        uint32_t db_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | aq->cq_ci;
-       uint64_t db_be = rte_cpu_to_be_64(((uint64_t)db_hi << 32) | aq->cq->id);
-       uint32_t *addr = RTE_PTR_ADD(sh->tx_uar->base_addr, MLX5_CQ_DOORBELL);
+       uint64_t db_be =
+               rte_cpu_to_be_64(((uint64_t)db_hi << 32) | aq->cq_obj.cq->id);
+       base_addr = mlx5_os_get_devx_uar_base_addr(sh->tx_uar);
+       uint32_t *addr = RTE_PTR_ADD(base_addr, MLX5_CQ_DOORBELL);
 
        rte_compiler_barrier();
-       aq->cq_dbrec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(db_hi);
+       aq->cq_obj.db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(db_hi);
        rte_wmb();
 #ifdef RTE_ARCH_64
        *(uint64_t *)addr = db_be;
@@ -636,39 +499,67 @@ mlx5_txpp_cq_arm(struct mlx5_dev_ctx_shared *sh)
        aq->arm_sn++;
 }
 
+#if defined(RTE_ARCH_X86_64)
+static inline int
+mlx5_atomic128_compare_exchange(rte_int128_t *dst,
+                               rte_int128_t *exp,
+                               const rte_int128_t *src)
+{
+       uint8_t res;
+
+       asm volatile (MPLOCKED
+                     "cmpxchg16b %[dst];"
+                     " sete %[res]"
+                     : [dst] "=m" (dst->val[0]),
+                       "=a" (exp->val[0]),
+                       "=d" (exp->val[1]),
+                       [res] "=r" (res)
+                     : "b" (src->val[0]),
+                       "c" (src->val[1]),
+                       "a" (exp->val[0]),
+                       "d" (exp->val[1]),
+                       "m" (dst->val[0])
+                     : "memory");
+
+       return res;
+}
+#endif
+
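
The helper above lets the code below perform an atomic 128-bit load: a compare-exchange against a value the live CQE is not expected to hold (all zeroes) fails, and on failure cmpxchg16b deposits the current contents of *dst into *exp, so the operation degenerates into an atomic read (and even a successful compare would just rewrite the same value).
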
 static inline void
 mlx5_atomic_read_cqe(rte_int128_t *from, rte_int128_t *ts)
 {
        /*
         * The only CQE of Clock Queue is being continuously
-        * update by hardware with soecified rate. We have to
-        * read timestump and WQE completion index atomically.
+        * updated by hardware with specified rate. We must
+        * read timestamp and WQE completion index atomically.
         */
-#if defined(RTE_ARCH_X86_64) || defined(RTE_ARCH_ARM64)
+#if defined(RTE_ARCH_X86_64)
        rte_int128_t src;
 
        memset(&src, 0, sizeof(src));
        *ts = src;
        /* if (*from == *ts) *from = *src; else *ts = *from; */
-       rte_atomic128_cmp_exchange(from, ts, &src, 0,
-                                  __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+       mlx5_atomic128_compare_exchange(from, ts, &src);
 #else
-       rte_atomic64_t *cqe = (rte_atomic64_t *)from;
+       uint64_t *cqe = (uint64_t *)from;
 
-       /* Power architecture does not support 16B compare-and-swap. */
+       /*
+        * Power architecture does not support 16B compare-and-swap.
+        * ARM implements it in software, code below is more relevant.
+        */
        for (;;) {
-               int64_t tm, op;
-               int64_t *ps;
+               uint64_t tm, op;
+               uint64_t *ps;
 
                rte_compiler_barrier();
-               tm = rte_atomic64_read(cqe + 0);
-               op = rte_atomic64_read(cqe + 1);
+               tm = __atomic_load_n(cqe + 0, __ATOMIC_RELAXED);
+               op = __atomic_load_n(cqe + 1, __ATOMIC_RELAXED);
                rte_compiler_barrier();
-               if (tm != rte_atomic64_read(cqe + 0))
+               if (tm != __atomic_load_n(cqe + 0, __ATOMIC_RELAXED))
                        continue;
-               if (op != rte_atomic64_read(cqe + 1))
+               if (op != __atomic_load_n(cqe + 1, __ATOMIC_RELAXED))
                        continue;
-               ps = (int64_t *)ts;
+               ps = (uint64_t *)ts;
                ps[0] = tm;
                ps[1] = op;
                return;
@@ -684,8 +575,8 @@ mlx5_txpp_cache_timestamp(struct mlx5_dev_ctx_shared *sh,
        ci = ci << (64 - MLX5_CQ_INDEX_WIDTH);
        ci |= (ts << MLX5_CQ_INDEX_WIDTH) >> MLX5_CQ_INDEX_WIDTH;
        rte_compiler_barrier();
-       rte_atomic64_set(&sh->txpp.ts.ts, ts);
-       rte_atomic64_set(&sh->txpp.ts.ci_ts, ci);
+       __atomic_store_n(&sh->txpp.ts.ts, ts, __ATOMIC_RELAXED);
+       __atomic_store_n(&sh->txpp.ts.ci_ts, ci, __ATOMIC_RELAXED);
        rte_wmb();
 }
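
The shifting above stores the completion index in the top MLX5_CQ_INDEX_WIDTH bits of ci_ts and the (truncated) timestamp in the rest, so one 64-bit word can be published atomically. A small sketch of the packing, with W = 16 assumed purely for illustration:

    #include <stdint.h>

    #define W 16 /* stand-in for MLX5_CQ_INDEX_WIDTH */

    static inline uint64_t
    pack_ci_ts(uint64_t ci, uint64_t ts)
    {
            /* Top W bits: completion index; low 64 - W bits: timestamp. */
            return (ci << (64 - W)) | ((ts << W) >> W);
    }

    static inline uint64_t
    unpack_ci(uint64_t ci_ts)
    {
            return ci_ts >> (64 - W);
    }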
 
@@ -694,21 +585,29 @@ static inline void
 mlx5_txpp_update_timestamp(struct mlx5_dev_ctx_shared *sh)
 {
        struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
-       struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;
+       struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cq_obj.cqes;
        union {
                rte_int128_t u128;
                struct mlx5_cqe_ts cts;
        } to;
        uint64_t ts;
        uint16_t ci;
+       uint8_t opcode;
 
-       static_assert(sizeof(struct mlx5_cqe_ts) == sizeof(rte_int128_t),
-                     "Wrong timestamp CQE part size");
        mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
-       if (to.cts.op_own >> 4) {
-               DRV_LOG(DEBUG, "Clock Queue error sync lost.");
-               rte_atomic32_inc(&sh->txpp.err_clock_queue);
-               sh->txpp.sync_lost = 1;
+       opcode = MLX5_CQE_OPCODE(to.cts.op_own);
+       if (opcode) {
+               if (opcode != MLX5_CQE_INVALID) {
+                       /*
+                        * Commit the error state if and only if
+                        * we have got at least one actual completion.
+                        */
+                       DRV_LOG(DEBUG,
+                               "Clock Queue error sync lost (%X).", opcode);
+                       __atomic_fetch_add(&sh->txpp.err_clock_queue,
+                                          1, __ATOMIC_RELAXED);
+                       sh->txpp.sync_lost = 1;
+               }
                return;
        }
        ci = rte_be_to_cpu_16(to.cts.wqe_counter);
@@ -729,15 +628,11 @@ mlx5_txpp_init_timestamp(struct mlx5_dev_ctx_shared *sh)
        sh->txpp.ts_p = 0;
        sh->txpp.ts_n = 0;
        for (wait = 0; wait < MLX5_TXPP_WAIT_INIT_TS; wait++) {
-               struct timespec onems;
-
                mlx5_txpp_update_timestamp(sh);
                if (wq->sq_ci)
                        return;
                /* Wait one millisecond and try again. */
-               onems.tv_sec = 0;
-               onems.tv_nsec = NS_PER_S / MS_PER_S;
-               nanosleep(&onems, 0);
+               rte_delay_us_sleep(US_PER_S / MS_PER_S);
        }
        DRV_LOG(ERR, "Unable to initialize timestamp.");
        sh->txpp.sync_lost = 1;
@@ -752,7 +647,10 @@ mlx5_txpp_gather_timestamp(struct mlx5_dev_ctx_shared *sh)
        if (!sh->txpp.clock_queue.sq_ci && !sh->txpp.ts_n)
                return;
        MLX5_ASSERT(sh->txpp.ts_p < MLX5_TXPP_REARM_SQ_SIZE);
-       sh->txpp.tsa[sh->txpp.ts_p] = sh->txpp.ts;
+       __atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ts,
+                        sh->txpp.ts.ts, __ATOMIC_RELAXED);
+       __atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
+                        sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
        if (++sh->txpp.ts_p >= MLX5_TXPP_REARM_SQ_SIZE)
                sh->txpp.ts_p = 0;
        if (sh->txpp.ts_n < MLX5_TXPP_REARM_SQ_SIZE)
@@ -771,7 +669,7 @@ mlx5_txpp_handle_rearm_queue(struct mlx5_dev_ctx_shared *sh)
        do {
                volatile struct mlx5_cqe *cqe;
 
-               cqe = &wq->cqes[cq_ci & (MLX5_TXPP_REARM_CQ_SIZE - 1)];
+               cqe = &wq->cq_obj.cqes[cq_ci & (MLX5_TXPP_REARM_CQ_SIZE - 1)];
                ret = check_cqe(cqe, MLX5_TXPP_REARM_CQ_SIZE, cq_ci);
                switch (ret) {
                case MLX5_CQE_STATUS_ERR:
@@ -793,7 +691,8 @@ mlx5_txpp_handle_rearm_queue(struct mlx5_dev_ctx_shared *sh)
                /* Check whether we have missed interrupts. */
                if (cq_ci - wq->cq_ci != 1) {
                        DRV_LOG(DEBUG, "Rearm Queue missed interrupt.");
-                       rte_atomic32_inc(&sh->txpp.err_miss_int);
+                       __atomic_fetch_add(&sh->txpp.err_miss_int,
+                                          1, __ATOMIC_RELAXED);
                        /* Check sync lost on wqe index. */
                        if (cq_ci - wq->cq_ci >=
                                (((1UL << MLX5_WQ_INDEX_WIDTH) /
@@ -802,13 +701,14 @@ mlx5_txpp_handle_rearm_queue(struct mlx5_dev_ctx_shared *sh)
                }
                /* Update doorbell record to notify hardware. */
                rte_compiler_barrier();
-               *wq->cq_dbrec = rte_cpu_to_be_32(cq_ci);
+               *wq->cq_obj.db_rec = rte_cpu_to_be_32(cq_ci);
                rte_wmb();
                wq->cq_ci = cq_ci;
                /* Fire new requests to Rearm Queue. */
                if (error) {
                        DRV_LOG(DEBUG, "Rearm Queue error sync lost.");
-                       rte_atomic32_inc(&sh->txpp.err_rearm_queue);
+                       __atomic_fetch_add(&sh->txpp.err_rearm_queue,
+                                          1, __ATOMIC_RELAXED);
                        sh->txpp.sync_lost = 1;
                }
        }
@@ -868,24 +768,25 @@ static int
 mlx5_txpp_start_service(struct mlx5_dev_ctx_shared *sh)
 {
        uint16_t event_nums[1] = {0};
-       int flags;
        int ret;
+       int fd;
 
-       rte_atomic32_set(&sh->txpp.err_miss_int, 0);
-       rte_atomic32_set(&sh->txpp.err_rearm_queue, 0);
-       rte_atomic32_set(&sh->txpp.err_clock_queue, 0);
-       rte_atomic32_set(&sh->txpp.err_ts_past, 0);
-       rte_atomic32_set(&sh->txpp.err_ts_future, 0);
+       sh->txpp.err_miss_int = 0;
+       sh->txpp.err_rearm_queue = 0;
+       sh->txpp.err_clock_queue = 0;
+       sh->txpp.err_ts_past = 0;
+       sh->txpp.err_ts_future = 0;
        /* Attach interrupt handler to process Rearm Queue completions. */
-       flags = fcntl(sh->txpp.echan->fd, F_GETFL);
-       ret = fcntl(sh->txpp.echan->fd, F_SETFL, flags | O_NONBLOCK);
+       fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
+       ret = mlx5_os_set_nonblock_channel_fd(fd);
        if (ret) {
                DRV_LOG(ERR, "Failed to change event channel FD.");
                rte_errno = errno;
                return -rte_errno;
        }
        memset(&sh->txpp.intr_handle, 0, sizeof(sh->txpp.intr_handle));
-       sh->txpp.intr_handle.fd = sh->txpp.echan->fd;
+       fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
+       sh->txpp.intr_handle.fd = fd;
        sh->txpp.intr_handle.type = RTE_INTR_HANDLE_EXT;
        if (rte_intr_callback_register(&sh->txpp.intr_handle,
                                       mlx5_txpp_interrupt_handler, sh)) {
@@ -894,10 +795,9 @@ mlx5_txpp_start_service(struct mlx5_dev_ctx_shared *sh)
                return -rte_errno;
        }
        /* Subscribe CQ event to the event channel controlled by the driver. */
-       ret = mlx5_glue->devx_subscribe_devx_event(sh->txpp.echan,
-                                                  sh->txpp.rearm_queue.cq->obj,
-                                                  sizeof(event_nums),
-                                                  event_nums, 0);
+       ret = mlx5_os_devx_subscribe_devx_event(sh->txpp.echan,
+                                           sh->txpp.rearm_queue.cq_obj.cq->obj,
+                                            sizeof(event_nums), event_nums, 0);
        if (ret) {
                DRV_LOG(ERR, "Failed to subscribe CQE event.");
                rte_errno = errno;
@@ -932,7 +832,7 @@ mlx5_txpp_create(struct mlx5_dev_ctx_shared *sh, struct mlx5_priv *priv)
        sh->txpp.test = !!(tx_pp < 0);
        sh->txpp.skew = priv->config.tx_skew;
        sh->txpp.freq = priv->config.hca_attr.dev_freq_khz;
-       ret = mlx5_txpp_create_eqn(sh);
+       ret = mlx5_txpp_create_event_channel(sh);
        if (ret)
                goto exit;
        ret = mlx5_txpp_alloc_pp_index(sh);
@@ -953,7 +853,7 @@ exit:
                mlx5_txpp_destroy_rearm_queue(sh);
                mlx5_txpp_destroy_clock_queue(sh);
                mlx5_txpp_free_pp_index(sh);
-               mlx5_txpp_destroy_eqn(sh);
+               mlx5_txpp_destroy_event_channel(sh);
                sh->txpp.tick = 0;
                sh->txpp.test = 0;
                sh->txpp.skew = 0;
@@ -975,7 +875,7 @@ mlx5_txpp_destroy(struct mlx5_dev_ctx_shared *sh)
        mlx5_txpp_destroy_rearm_queue(sh);
        mlx5_txpp_destroy_clock_queue(sh);
        mlx5_txpp_free_pp_index(sh);
-       mlx5_txpp_destroy_eqn(sh);
+       mlx5_txpp_destroy_event_channel(sh);
        sh->txpp.tick = 0;
        sh->txpp.test = 0;
        sh->txpp.skew = 0;
@@ -1099,7 +999,8 @@ mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp)
 
        if (sh->txpp.refcnt) {
                struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
-               struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;
+               struct mlx5_cqe *cqe =
+                               (struct mlx5_cqe *)(uintptr_t)wq->cq_obj.cqes;
                union {
                        rte_int128_t u128;
                        struct mlx5_cqe_ts cts;
@@ -1109,7 +1010,8 @@ mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp)
                mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
                if (to.cts.op_own >> 4) {
                        DRV_LOG(DEBUG, "Clock Queue error sync lost.");
-                       rte_atomic32_inc(&sh->txpp.err_clock_queue);
+                       __atomic_fetch_add(&sh->txpp.err_clock_queue,
+                                          1, __ATOMIC_RELAXED);
                        sh->txpp.sync_lost = 1;
                        return -EIO;
                }
@@ -1140,11 +1042,11 @@ int mlx5_txpp_xstats_reset(struct rte_eth_dev *dev)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
 
-       rte_atomic32_set(&sh->txpp.err_miss_int, 0);
-       rte_atomic32_set(&sh->txpp.err_rearm_queue, 0);
-       rte_atomic32_set(&sh->txpp.err_clock_queue, 0);
-       rte_atomic32_set(&sh->txpp.err_ts_past, 0);
-       rte_atomic32_set(&sh->txpp.err_ts_future, 0);
+       __atomic_store_n(&sh->txpp.err_miss_int, 0, __ATOMIC_RELAXED);
+       __atomic_store_n(&sh->txpp.err_rearm_queue, 0, __ATOMIC_RELAXED);
+       __atomic_store_n(&sh->txpp.err_clock_queue, 0, __ATOMIC_RELAXED);
+       __atomic_store_n(&sh->txpp.err_ts_past, 0, __ATOMIC_RELAXED);
+       __atomic_store_n(&sh->txpp.err_ts_future, 0, __ATOMIC_RELAXED);
        return 0;
 }
 
@@ -1189,19 +1091,21 @@ mlx5_txpp_read_tsa(struct mlx5_dev_txpp *txpp,
                   struct mlx5_txpp_ts *tsa, uint16_t idx)
 {
        do {
-               int64_t ts, ci;
+               uint64_t ts, ci;
 
-               ts = rte_atomic64_read(&txpp->tsa[idx].ts);
-               ci = rte_atomic64_read(&txpp->tsa[idx].ci_ts);
+               ts = __atomic_load_n(&txpp->tsa[idx].ts, __ATOMIC_RELAXED);
+               ci = __atomic_load_n(&txpp->tsa[idx].ci_ts, __ATOMIC_RELAXED);
                rte_compiler_barrier();
                if ((ci ^ ts) << MLX5_CQ_INDEX_WIDTH != 0)
                        continue;
-               if (rte_atomic64_read(&txpp->tsa[idx].ts) != ts)
+               if (__atomic_load_n(&txpp->tsa[idx].ts,
+                                   __ATOMIC_RELAXED) != ts)
                        continue;
-               if (rte_atomic64_read(&txpp->tsa[idx].ci_ts) != ci)
+               if (__atomic_load_n(&txpp->tsa[idx].ci_ts,
+                                   __ATOMIC_RELAXED) != ci)
                        continue;
-               rte_atomic64_set(&tsa->ts, ts);
-               rte_atomic64_set(&tsa->ci_ts, ci);
+               tsa->ts = ts;
+               tsa->ci_ts = ci;
                return;
        } while (true);
 }
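
The (ci ^ ts) << MLX5_CQ_INDEX_WIDTH test works because of the packing shown earlier: ci_ts and ts share the same truncated timestamp in their low 64 - MLX5_CQ_INDEX_WIDTH bits, so shifting the XOR left by the index width discards the legitimately different index bits, and any surviving nonzero bit means the two relaxed loads straddled an update and must be retried.
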
@@ -1237,9 +1141,9 @@ mlx5_txpp_xstats_jitter(struct mlx5_dev_txpp *txpp)
                rte_compiler_barrier();
        } while (ts_p != txpp->ts_p);
        /* We have two neighbor reports, calculate the jitter. */
-       dts = rte_atomic64_read(&tsa1.ts) - rte_atomic64_read(&tsa0.ts);
-       dci = (rte_atomic64_read(&tsa1.ci_ts) >> (64 - MLX5_CQ_INDEX_WIDTH)) -
-             (rte_atomic64_read(&tsa0.ci_ts) >> (64 - MLX5_CQ_INDEX_WIDTH));
+       dts = tsa1.ts - tsa0.ts;
+       dci = (tsa1.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH)) -
+             (tsa0.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH));
        if (dci < 0)
                dci += 1 << MLX5_CQ_INDEX_WIDTH;
        dci *= txpp->tick;
@@ -1277,9 +1181,9 @@ mlx5_txpp_xstats_wander(struct mlx5_dev_txpp *txpp)
                rte_compiler_barrier();
        } while (ts_p != txpp->ts_p);
        /* We have two reports, calculate the wander. */
-       dts = rte_atomic64_read(&tsa1.ts) - rte_atomic64_read(&tsa0.ts);
-       dci = (rte_atomic64_read(&tsa1.ci_ts) >> (64 - MLX5_CQ_INDEX_WIDTH)) -
-             (rte_atomic64_read(&tsa0.ci_ts) >> (64 - MLX5_CQ_INDEX_WIDTH));
+       dts = tsa1.ts - tsa0.ts;
+       dci = (tsa1.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH)) -
+             (tsa0.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH));
        dci += 1 << MLX5_CQ_INDEX_WIDTH;
        dci *= txpp->tick;
        return (dts > dci) ? dts - dci : dci - dts;
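
A worked example of the formula above, with illustrative numbers: at a tick of 1000 ns and reports one completion apart, dci = 1 * 1000 = 1000 ns; if the hardware timestamps differ by dts = 1012 ns, the reported jitter is |1012 - 1000| = 12 ns. The wander variant applies the same |dts - dci| deviation over a longer baseline (half of the Clock Queue completion window, per the stat descriptions at the top of the file); the unconditional dci += 1 << MLX5_CQ_INDEX_WIDTH there presumably accounts for the index wrapping over that interval.
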
@@ -1318,15 +1222,20 @@ mlx5_txpp_xstats_get(struct rte_eth_dev *dev,
                for (i = 0; i < n_txpp; ++i)
                        stats[n_used + i].id = n_used + i;
                stats[n_used + 0].value =
-                               rte_atomic32_read(&sh->txpp.err_miss_int);
+                               __atomic_load_n(&sh->txpp.err_miss_int,
+                                               __ATOMIC_RELAXED);
                stats[n_used + 1].value =
-                               rte_atomic32_read(&sh->txpp.err_rearm_queue);
+                               __atomic_load_n(&sh->txpp.err_rearm_queue,
+                                               __ATOMIC_RELAXED);
                stats[n_used + 2].value =
-                               rte_atomic32_read(&sh->txpp.err_clock_queue);
+                               __atomic_load_n(&sh->txpp.err_clock_queue,
+                                               __ATOMIC_RELAXED);
                stats[n_used + 3].value =
-                               rte_atomic32_read(&sh->txpp.err_ts_past);
+                               __atomic_load_n(&sh->txpp.err_ts_past,
+                                               __ATOMIC_RELAXED);
                stats[n_used + 4].value =
-                               rte_atomic32_read(&sh->txpp.err_ts_future);
+                               __atomic_load_n(&sh->txpp.err_ts_future,
+                                               __ATOMIC_RELAXED);
                stats[n_used + 5].value = mlx5_txpp_xstats_jitter(&sh->txpp);
                stats[n_used + 6].value = mlx5_txpp_xstats_wander(&sh->txpp);
                stats[n_used + 7].value = sh->txpp.sync_lost;