#include <stdint.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>
#include <rte_malloc.h>
#include <mlx5_common_devx.h>
#include "mlx5.h"
-#include "mlx5_rxtx.h"
+#include "mlx5_rx.h"
+#include "mlx5_tx.h"
#include "mlx5_common_os.h"
static_assert(sizeof(struct mlx5_cqe_ts) == sizeof(rte_int128_t),
mlx5_txpp_create_event_channel(struct mlx5_dev_ctx_shared *sh)
{
MLX5_ASSERT(!sh->txpp.echan);
- sh->txpp.echan = mlx5_os_devx_create_event_channel(sh->ctx,
+ sh->txpp.echan = mlx5_os_devx_create_event_channel(sh->cdev->ctx,
MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
if (!sh->txpp.echan) {
rte_errno = errno;
MLX5_SET(set_pp_rate_limit_context, &pp, rate_mode,
sh->txpp.test ? MLX5_DATA_RATE : MLX5_WQE_RATE);
sh->txpp.pp = mlx5_glue->dv_alloc_pp
- (sh->ctx, sizeof(pp), &pp,
+ (sh->cdev->ctx, sizeof(pp), &pp,
MLX5DV_PP_ALLOC_FLAGS_DEDICATED_INDEX);
if (sh->txpp.pp == NULL) {
DRV_LOG(ERR, "Failed to allocate packet pacing index.");
uint32_t w32[2];
uint64_t w64;
} cs;
- void *reg_addr;
wq->sq_ci = ci + 1;
cs.w32[0] = rte_cpu_to_be_32(rte_be_to_cpu_32
(wqe[ci & (wq->sq_size - 1)].ctrl[0]) | (ci - 1) << 8);
cs.w32[1] = wqe[ci & (wq->sq_size - 1)].ctrl[1];
/* Update SQ doorbell record with new SQ ci. */
- rte_compiler_barrier();
- *wq->sq_obj.db_rec = rte_cpu_to_be_32(wq->sq_ci);
- /* Make sure the doorbell record is updated. */
- rte_wmb();
- /* Write to doorbel register to start processing. */
- reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
- __mlx5_uar_write64_relaxed(cs.w64, reg_addr, NULL);
- rte_wmb();
+ mlx5_doorbell_ring(&sh->tx_uar.bf_db, cs.w64, wq->sq_ci,
+ wq->sq_obj.db_rec, !sh->tx_uar.dbnc);
}
static void
.cd_master = 1,
.state = MLX5_SQC_STATE_RST,
.tis_lst_sz = 1,
- .tis_num = sh->tis->id,
+ .tis_num = sh->tis[0]->id,
.wq_attr = (struct mlx5_devx_wq_attr){
- .pd = sh->pdn,
- .uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
+ .pd = sh->cdev->pdn,
+ .uar_page =
+ mlx5_os_get_devx_uar_page_id(sh->tx_uar.obj),
},
+ .ts_format = mlx5_ts_format_conv
+ (sh->cdev->config.hca_attr.sq_ts_format),
};
struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
struct mlx5_devx_cq_attr cq_attr = {
- .uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
+ .uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar.obj),
};
struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
int ret;
/* Create completion queue object for Rearm Queue. */
- ret = mlx5_devx_cq_create(sh->ctx, &wq->cq_obj,
+ ret = mlx5_devx_cq_create(sh->cdev->ctx, &wq->cq_obj,
log2above(MLX5_TXPP_REARM_CQ_SIZE), &cq_attr,
sh->numa_node);
if (ret) {
/* Create send queue object for Rearm Queue. */
sq_attr.cqn = wq->cq_obj.cq->id;
/* There should be no WQE leftovers in the cyclic queue. */
- ret = mlx5_devx_sq_create(sh->ctx, &wq->sq_obj,
+ ret = mlx5_devx_sq_create(sh->cdev->ctx, &wq->sq_obj,
log2above(MLX5_TXPP_REARM_SQ_SIZE), &sq_attr,
sh->numa_node);
if (ret) {
/* Build test packet L2 header (Ethernet). */
dst = (uint8_t *)&es->inline_data;
eth_hdr = (struct rte_ether_hdr *)dst;
- rte_eth_random_addr(&eth_hdr->d_addr.addr_bytes[0]);
- rte_eth_random_addr(&eth_hdr->s_addr.addr_bytes[0]);
+ rte_eth_random_addr(&eth_hdr->dst_addr.addr_bytes[0]);
+ rte_eth_random_addr(&eth_hdr->src_addr.addr_bytes[0]);
eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
/* Build test packet L3 header (IP v4). */
dst += sizeof(struct rte_ether_hdr);
struct mlx5_devx_cq_attr cq_attr = {
.use_first_only = 1,
.overrun_ignore = 1,
- .uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
+ .uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar.obj),
};
struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
int ret;
sh->txpp.ts_p = 0;
sh->txpp.ts_n = 0;
/* Create completion queue object for Clock Queue. */
- ret = mlx5_devx_cq_create(sh->ctx, &wq->cq_obj,
+ ret = mlx5_devx_cq_create(sh->cdev->ctx, &wq->cq_obj,
log2above(MLX5_TXPP_CLKQ_SIZE), &cq_attr,
sh->numa_node);
if (ret) {
/* Create send queue object for Clock Queue. */
if (sh->txpp.test) {
sq_attr.tis_lst_sz = 1;
- sq_attr.tis_num = sh->tis->id;
+ sq_attr.tis_num = sh->tis[0]->id;
sq_attr.non_wire = 0;
sq_attr.static_sq_wq = 1;
} else {
sq_attr.cqn = wq->cq_obj.cq->id;
sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
sq_attr.wq_attr.cd_slave = 1;
- sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
- sq_attr.wq_attr.pd = sh->pdn;
- ret = mlx5_devx_sq_create(sh->ctx, &wq->sq_obj, log2above(wq->sq_size),
+ sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar.obj);
+ sq_attr.wq_attr.pd = sh->cdev->pdn;
+ sq_attr.ts_format =
+ mlx5_ts_format_conv(sh->cdev->config.hca_attr.sq_ts_format);
+ ret = mlx5_devx_sq_create(sh->cdev->ctx, &wq->sq_obj,
+ log2above(wq->sq_size),
&sq_attr, sh->numa_node);
if (ret) {
rte_errno = errno;
static inline void
mlx5_txpp_cq_arm(struct mlx5_dev_ctx_shared *sh)
{
- void *base_addr;
-
struct mlx5_txpp_wq *aq = &sh->txpp.rearm_queue;
uint32_t arm_sn = aq->arm_sn << MLX5_CQ_SQN_OFFSET;
uint32_t db_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | aq->cq_ci;
uint64_t db_be =
rte_cpu_to_be_64(((uint64_t)db_hi << 32) | aq->cq_obj.cq->id);
- base_addr = mlx5_os_get_devx_uar_base_addr(sh->tx_uar);
- uint32_t *addr = RTE_PTR_ADD(base_addr, MLX5_CQ_DOORBELL);
- rte_compiler_barrier();
- aq->cq_obj.db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(db_hi);
- rte_wmb();
-#ifdef RTE_ARCH_64
- *(uint64_t *)addr = db_be;
-#else
- *(uint32_t *)addr = db_be;
- rte_io_wmb();
- *((uint32_t *)addr + 1) = db_be >> 32;
-#endif
+ mlx5_doorbell_ring(&sh->tx_uar.cq_db, db_be, db_hi,
+ &aq->cq_obj.db_rec[MLX5_CQ_ARM_DB], 0);
aq->arm_sn++;
}
{
/*
* The only CQE of Clock Queue is being continuously
- * update by hardware with soecified rate. We have to
- * read timestump and WQE completion index atomically.
+ * updated by hardware with specified rate. We must
+ * read timestamp and WQE completion index atomically.
*/
#if defined(RTE_ARCH_X86_64)
rte_int128_t src;
} to;
uint64_t ts;
uint16_t ci;
+ uint8_t opcode;
mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
- if (to.cts.op_own >> 4) {
- DRV_LOG(DEBUG, "Clock Queue error sync lost.");
- __atomic_fetch_add(&sh->txpp.err_clock_queue,
+ opcode = MLX5_CQE_OPCODE(to.cts.op_own);
+ if (opcode) {
+ if (opcode != MLX5_CQE_INVALID) {
+ /*
+ * Commit the error state if and only if
+ * we have got at least one actual completion.
+ */
+ DRV_LOG(DEBUG,
+ "Clock Queue error sync lost (%X).", opcode);
+ __atomic_fetch_add(&sh->txpp.err_clock_queue,
1, __ATOMIC_RELAXED);
- sh->txpp.sync_lost = 1;
+ sh->txpp.sync_lost = 1;
+ }
return;
}
ci = rte_be_to_cpu_16(to.cts.wqe_counter);
static void
mlx5_txpp_stop_service(struct mlx5_dev_ctx_shared *sh)
{
- if (!sh->txpp.intr_handle.fd)
+ if (!rte_intr_fd_get(sh->txpp.intr_handle))
return;
- mlx5_intr_callback_unregister(&sh->txpp.intr_handle,
+ mlx5_intr_callback_unregister(sh->txpp.intr_handle,
mlx5_txpp_interrupt_handler, sh);
- sh->txpp.intr_handle.fd = 0;
+ rte_intr_instance_free(sh->txpp.intr_handle);
}
/* Attach interrupt handler and fires first request to Rearm Queue. */
rte_errno = errno;
return -rte_errno;
}
- memset(&sh->txpp.intr_handle, 0, sizeof(sh->txpp.intr_handle));
+ sh->txpp.intr_handle =
+ rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
+ if (sh->txpp.intr_handle == NULL) {
+ DRV_LOG(ERR, "Fail to allocate intr_handle");
+ return -ENOMEM;
+ }
fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
- sh->txpp.intr_handle.fd = fd;
- sh->txpp.intr_handle.type = RTE_INTR_HANDLE_EXT;
- if (rte_intr_callback_register(&sh->txpp.intr_handle,
+ if (rte_intr_fd_set(sh->txpp.intr_handle, fd))
+ return -rte_errno;
+
+ if (rte_intr_type_set(sh->txpp.intr_handle, RTE_INTR_HANDLE_EXT))
+ return -rte_errno;
+
+ if (rte_intr_callback_register(sh->txpp.intr_handle,
mlx5_txpp_interrupt_handler, sh)) {
- sh->txpp.intr_handle.fd = 0;
+ rte_intr_fd_set(sh->txpp.intr_handle, 0);
DRV_LOG(ERR, "Failed to register CQE interrupt %d.", rte_errno);
return -rte_errno;
}
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
int err = 0;
- int ret;
if (!priv->config.tx_pp) {
/* Packet pacing is not requested for the device. */
return 0;
}
if (priv->config.tx_pp > 0) {
- ret = rte_mbuf_dynflag_lookup
- (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
- if (ret < 0)
+ err = rte_mbuf_dynflag_lookup
+ (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
+ /* No flag registered means no service needed. */
+ if (err < 0)
return 0;
+ err = 0;
}
- ret = pthread_mutex_lock(&sh->txpp.mutex);
- MLX5_ASSERT(!ret);
- RTE_SET_USED(ret);
+ claim_zero(pthread_mutex_lock(&sh->txpp.mutex));
if (sh->txpp.refcnt) {
priv->txpp_en = 1;
++sh->txpp.refcnt;
rte_errno = -err;
}
}
- ret = pthread_mutex_unlock(&sh->txpp.mutex);
- MLX5_ASSERT(!ret);
- RTE_SET_USED(ret);
+ claim_zero(pthread_mutex_unlock(&sh->txpp.mutex));
return err;
}
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
- int ret;
if (!priv->txpp_en) {
/* Packet pacing is already disabled for the device. */
return;
}
priv->txpp_en = 0;
- ret = pthread_mutex_lock(&sh->txpp.mutex);
- MLX5_ASSERT(!ret);
- RTE_SET_USED(ret);
+ claim_zero(pthread_mutex_lock(&sh->txpp.mutex));
MLX5_ASSERT(sh->txpp.refcnt);
- if (!sh->txpp.refcnt || --sh->txpp.refcnt)
+ if (!sh->txpp.refcnt || --sh->txpp.refcnt) {
+ claim_zero(pthread_mutex_unlock(&sh->txpp.mutex));
return;
+ }
/* No references any more, do actual destroy. */
mlx5_txpp_destroy(sh);
- ret = pthread_mutex_unlock(&sh->txpp.mutex);
- MLX5_ASSERT(!ret);
- RTE_SET_USED(ret);
+ claim_zero(pthread_mutex_unlock(&sh->txpp.mutex));
}
/*