#include "mlx5.h"
#include "mlx5_flow.h"
-/**
- * Destroy Completion Queue used for ASO access.
- *
- * @param[in] cq
- * ASO CQ to destroy.
- */
-static void
-mlx5_aso_cq_destroy(struct mlx5_aso_cq *cq)
-{
- if (cq->cq_obj.cq)
- mlx5_devx_cq_destroy(&cq->cq_obj);
- memset(cq, 0, sizeof(*cq));
-}
-
-/**
- * Create Completion Queue used for ASO access.
- *
- * @param[in] ctx
- * Context returned from mlx5 open_device() glue function.
- * @param[in/out] cq
- * Pointer to CQ to create.
- * @param[in] log_desc_n
- * Log of number of descriptors in queue.
- * @param[in] socket
- * Socket to use for allocation.
- * @param[in] uar_page_id
- * UAR page ID to use.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx5_aso_cq_create(void *ctx, struct mlx5_aso_cq *cq, uint16_t log_desc_n,
- int socket, int uar_page_id)
-{
- struct mlx5_devx_cq_attr attr = {
- .uar_page_id = uar_page_id,
- };
-
- cq->log_desc_n = log_desc_n;
- cq->cq_ci = 0;
- return mlx5_devx_cq_create(ctx, &cq->cq_obj, log_desc_n, &attr, socket);
-}
-
/**
* Free MR resources.
*
- * @param[in] sh
- * Pointer to shared device context.
+ * @param[in] cdev
+ * Pointer to the mlx5 common device.
* @param[in] mr
* MR to free.
*/
static void
-mlx5_aso_dereg_mr(struct mlx5_dev_ctx_shared *sh, struct mlx5_pmd_mr *mr)
+mlx5_aso_dereg_mr(struct mlx5_common_device *cdev, struct mlx5_pmd_mr *mr)
{
void *addr = mr->addr;
- sh->share_cache.dereg_mr_cb(mr);
+ cdev->mr_scache.dereg_mr_cb(mr);
mlx5_free(addr);
memset(mr, 0, sizeof(*mr));
}
/**
* Register Memory Region.
*
- * @param[in] sh
- * Pointer to shared device context.
+ * @param[in] cdev
+ * Pointer to the mlx5 common device.
* @param[in] length
* Size of MR buffer.
* @param[in/out] mr
* Pointer to MR to create.
- * @param[in] socket
- * Socket to use for allocation.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_aso_reg_mr(struct mlx5_dev_ctx_shared *sh, size_t length,
- struct mlx5_pmd_mr *mr, int socket)
+mlx5_aso_reg_mr(struct mlx5_common_device *cdev, size_t length,
+ struct mlx5_pmd_mr *mr)
{
-
int ret;
mr->addr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, length, 4096,
- socket);
+ SOCKET_ID_ANY);
if (!mr->addr) {
DRV_LOG(ERR, "Failed to create ASO bits mem for MR.");
return -1;
}
- ret = sh->share_cache.reg_mr_cb(sh->pd, mr->addr, length, mr);
+ ret = cdev->mr_scache.reg_mr_cb(cdev->pd, mr->addr, length, mr);
if (ret) {
DRV_LOG(ERR, "Failed to create direct Mkey.");
mlx5_free(mr->addr);
mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
{
mlx5_devx_sq_destroy(&sq->sq_obj);
- mlx5_aso_cq_destroy(&sq->cq);
+ mlx5_devx_cq_destroy(&sq->cq.cq_obj);
memset(sq, 0, sizeof(*sq));
}
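
/*
 * Illustrative sketch only, not part of the patch: the typical pairing of the
 * MR helpers above. The helper name, the 4KB length and the error handling
 * are arbitrary assumptions made for the example.
 */
static __rte_unused int
mlx5_aso_mr_usage_sketch(struct mlx5_common_device *cdev)
{
	struct mlx5_pmd_mr mr;

	if (mlx5_aso_reg_mr(cdev, 4096, &mr))
		return -1;
	/* The zeroed buffer at mr.addr can now serve as an ASO DMA target. */
	mlx5_aso_dereg_mr(cdev, &mr);
	return 0;
}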
/**
* Create Send Queue used for ASO access.
*
- * @param[in] ctx
- * Context returned from mlx5 open_device() glue function.
+ * @param[in] cdev
+ * Pointer to the mlx5 common device.
* @param[in/out] sq
* Pointer to SQ to create.
- * @param[in] socket
- * Socket to use for allocation.
* @param[in] uar
* User Access Region object.
- * @param[in] pdn
- * Protection Domain number to use.
- * @param[in] log_desc_n
- * Log of number of descriptors in queue.
- * @param[in] ts_format
- * timestamp format supported by the queue.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket, void *uar,
- uint32_t pdn, uint16_t log_desc_n, uint32_t ts_format)
+mlx5_aso_sq_create(struct mlx5_common_device *cdev, struct mlx5_aso_sq *sq,
+ void *uar)
{
- struct mlx5_devx_create_sq_attr attr = {
+ struct mlx5_devx_cq_attr cq_attr = {
+ .uar_page_id = mlx5_os_get_devx_uar_page_id(uar),
+ };
+ struct mlx5_devx_create_sq_attr sq_attr = {
.user_index = 0xFFFF,
.wq_attr = (struct mlx5_devx_wq_attr){
- .pd = pdn,
+ .pd = cdev->pdn,
.uar_page = mlx5_os_get_devx_uar_page_id(uar),
},
- .ts_format = mlx5_ts_format_conv(ts_format),
+ .ts_format =
+ mlx5_ts_format_conv(cdev->config.hca_attr.sq_ts_format),
};
struct mlx5_devx_modify_sq_attr modify_attr = {
.state = MLX5_SQC_STATE_RDY,
uint16_t log_wqbb_n;
int ret;
- if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n, socket,
- mlx5_os_get_devx_uar_page_id(uar)))
+ if (mlx5_devx_cq_create(cdev->ctx, &sq->cq.cq_obj,
+ MLX5_ASO_QUEUE_LOG_DESC, &cq_attr,
+ SOCKET_ID_ANY))
goto error;
- sq->log_desc_n = log_desc_n;
- attr.cqn = sq->cq.cq_obj.cq->id;
+ sq->cq.cq_ci = 0;
+ sq->cq.log_desc_n = MLX5_ASO_QUEUE_LOG_DESC;
+ sq->log_desc_n = MLX5_ASO_QUEUE_LOG_DESC;
+ sq_attr.cqn = sq->cq.cq_obj.cq->id;
/* An mlx5_aso_wqe is twice the size of an mlx5_wqe, hence the extra bit. */
- log_wqbb_n = log_desc_n + 1;
- ret = mlx5_devx_sq_create(ctx, &sq->sq_obj, log_wqbb_n, &attr, socket);
+ log_wqbb_n = sq->log_desc_n + 1;
+ ret = mlx5_devx_sq_create(cdev->ctx, &sq->sq_obj, log_wqbb_n, &sq_attr,
+ SOCKET_ID_ANY);
if (ret) {
DRV_LOG(ERR, "Can't create SQ object.");
rte_errno = ENOMEM;
sq->head = 0;
sq->tail = 0;
sq->sqn = sq->sq_obj.sq->id;
- sq->uar_addr = mlx5_os_get_devx_uar_reg_addr(uar);
rte_spinlock_init(&sq->sqsl);
return 0;
error:
enum mlx5_access_aso_opc_mod aso_opc_mod)
{
uint32_t sq_desc_n = 1 << MLX5_ASO_QUEUE_LOG_DESC;
+ struct mlx5_common_device *cdev = sh->cdev;
switch (aso_opc_mod) {
case ASO_OPC_MOD_FLOW_HIT:
- if (mlx5_aso_reg_mr(sh, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
- sq_desc_n, &sh->aso_age_mng->aso_sq.mr, 0))
+ if (mlx5_aso_reg_mr(cdev, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
+ sq_desc_n, &sh->aso_age_mng->aso_sq.mr))
return -1;
- if (mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,
- sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
- sh->sq_ts_format)) {
- mlx5_aso_dereg_mr(sh, &sh->aso_age_mng->aso_sq.mr);
+ if (mlx5_aso_sq_create(cdev, &sh->aso_age_mng->aso_sq,
+ sh->tx_uar.obj)) {
+ mlx5_aso_dereg_mr(cdev, &sh->aso_age_mng->aso_sq.mr);
return -1;
}
mlx5_aso_age_init_sq(&sh->aso_age_mng->aso_sq);
break;
case ASO_OPC_MOD_POLICER:
- if (mlx5_aso_sq_create(sh->ctx, &sh->mtrmng->pools_mng.sq, 0,
- sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
- sh->sq_ts_format))
+ if (mlx5_aso_sq_create(cdev, &sh->mtrmng->pools_mng.sq,
+ sh->tx_uar.obj))
return -1;
mlx5_aso_mtr_init_sq(&sh->mtrmng->pools_mng.sq);
break;
case ASO_OPC_MOD_CONNECTION_TRACKING:
/* 64B per object for query. */
- if (mlx5_aso_reg_mr(sh, 64 * sq_desc_n,
- &sh->ct_mng->aso_sq.mr, 0))
+ if (mlx5_aso_reg_mr(cdev, 64 * sq_desc_n,
+ &sh->ct_mng->aso_sq.mr))
return -1;
- if (mlx5_aso_sq_create(sh->ctx, &sh->ct_mng->aso_sq, 0,
- sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
- sh->sq_ts_format)) {
- mlx5_aso_dereg_mr(sh, &sh->ct_mng->aso_sq.mr);
+ if (mlx5_aso_sq_create(cdev, &sh->ct_mng->aso_sq,
+ sh->tx_uar.obj)) {
+ mlx5_aso_dereg_mr(cdev, &sh->ct_mng->aso_sq.mr);
return -1;
}
mlx5_aso_ct_init_sq(&sh->ct_mng->aso_sq);
switch (aso_opc_mod) {
case ASO_OPC_MOD_FLOW_HIT:
- mlx5_aso_dereg_mr(sh, &sh->aso_age_mng->aso_sq.mr);
+ mlx5_aso_dereg_mr(sh->cdev, &sh->aso_age_mng->aso_sq.mr);
sq = &sh->aso_age_mng->aso_sq;
break;
case ASO_OPC_MOD_POLICER:
sq = &sh->mtrmng->pools_mng.sq;
break;
case ASO_OPC_MOD_CONNECTION_TRACKING:
- mlx5_aso_dereg_mr(sh, &sh->ct_mng->aso_sq.mr);
+ mlx5_aso_dereg_mr(sh->cdev, &sh->ct_mng->aso_sq.mr);
sq = &sh->ct_mng->aso_sq;
break;
default:
/**
* Write a burst of WQEs to ASO SQ.
*
- * @param[in] mng
- * ASO management data, contains the SQ.
+ * @param[in] sh
+ * Pointer to shared device context.
* @param[in] n
* Index of the last valid pool.
*
* Number of WQEs in burst.
*/
static uint16_t
-mlx5_aso_sq_enqueue_burst(struct mlx5_aso_age_mng *mng, uint16_t n)
+mlx5_aso_sq_enqueue_burst(struct mlx5_dev_ctx_shared *sh, uint16_t n)
{
+ struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
volatile struct mlx5_aso_wqe *wqe;
struct mlx5_aso_sq *sq = &mng->aso_sq;
struct mlx5_aso_age_pool *pool;
wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
rte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);
/* Fill next WQE. */
- rte_spinlock_lock(&mng->resize_sl);
+ rte_rwlock_read_lock(&mng->resize_rwl);
pool = mng->pools[sq->next];
- rte_spinlock_unlock(&mng->resize_sl);
+ rte_rwlock_read_unlock(&mng->resize_rwl);
sq->elts[sq->head & mask].pool = pool;
wqe->general_cseg.misc =
rte_cpu_to_be_32(((struct mlx5_devx_obj *)
} while (max);
wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
MLX5_COMP_MODE_OFFSET);
- rte_io_wmb();
- sq->sq_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);
- rte_wmb();
- *sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH.*/
- rte_wmb();
+ mlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,
+ sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],
+ !sh->tx_uar.dbnc);
return sq->elts[start_head & mask].burst_size;
}
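
/*
 * Illustrative sketch only, not part of the patch: the explicit barrier/UAR
 * sequence that the mlx5_doorbell_ring() call above replaces (see the removed
 * lines). A 64-bit architecture is assumed and the helper name is hypothetical.
 */
static __rte_unused void
mlx5_aso_doorbell_sketch(volatile uint32_t *db_rec, volatile uint64_t *uar_reg,
			 uint16_t pi, const volatile void *last_wqe)
{
	rte_io_wmb();                   /* Order WQE stores before the DB record. */
	*db_rec = rte_cpu_to_be_32(pi); /* Publish the new producer index. */
	rte_wmb();                      /* DB record visible before the UAR write. */
	*uar_reg = *(const volatile uint64_t *)last_wqe; /* Ring the doorbell. */
	rte_wmb();
}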
uint32_t us = 100u;
uint16_t n;
- rte_spinlock_lock(&sh->aso_age_mng->resize_sl);
+ rte_rwlock_read_lock(&sh->aso_age_mng->resize_rwl);
n = sh->aso_age_mng->next;
- rte_spinlock_unlock(&sh->aso_age_mng->resize_sl);
+ rte_rwlock_read_unlock(&sh->aso_age_mng->resize_rwl);
mlx5_aso_completion_handle(sh);
if (sq->next == n) {
/* End of loop: wait 1 second. */
us = US_PER_S;
sq->next = 0;
}
- mlx5_aso_sq_enqueue_burst(sh->aso_age_mng, n);
+ mlx5_aso_sq_enqueue_burst(sh, n);
if (rte_eal_alarm_set(us, mlx5_flow_aso_alarm, sh))
DRV_LOG(ERR, "Cannot reinitialize aso alarm.");
}
}
static uint16_t
-mlx5_aso_mtr_sq_enqueue_single(struct mlx5_aso_sq *sq,
- struct mlx5_aso_mtr *aso_mtr)
+mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,
+ struct mlx5_aso_sq *sq,
+ struct mlx5_aso_mtr *aso_mtr)
{
volatile struct mlx5_aso_wqe *wqe = NULL;
struct mlx5_flow_meter_info *fm = NULL;
uint16_t res;
uint32_t dseg_idx = 0;
struct mlx5_aso_mtr_pool *pool = NULL;
+ uint32_t param_le;
rte_spinlock_lock(&sq->sqsl);
res = size - (uint16_t)(sq->head - sq->tail);
wqe->aso_dseg.mtrs[dseg_idx].ebs_eir = 0;
}
fmp = fm->profile;
- if (fmp->profile.packet_mode)
- wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm =
- RTE_BE32((1 << ASO_DSEG_VALID_OFFSET) |
- (MLX5_FLOW_COLOR_GREEN << ASO_DSEG_SC_OFFSET) |
- (MLX5_METER_MODE_PKT << ASO_DSEG_MTR_MODE));
+ param_le = (1 << ASO_DSEG_VALID_OFFSET);
+ if (fm->color_aware)
+ param_le |= (MLX5_FLOW_COLOR_UNDEFINED << ASO_DSEG_SC_OFFSET);
else
- wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm =
- RTE_BE32((1 << ASO_DSEG_VALID_OFFSET) |
- (MLX5_FLOW_COLOR_GREEN << ASO_DSEG_SC_OFFSET));
+ param_le |= (MLX5_FLOW_COLOR_GREEN << ASO_DSEG_SC_OFFSET);
+ if (fmp->profile.packet_mode)
+ param_le |= (MLX5_METER_MODE_PKT << ASO_DSEG_MTR_MODE);
+ wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm = RTE_BE32(param_le);
+ switch (fmp->profile.alg) {
+ case RTE_MTR_SRTCM_RFC2697:
+ /* Only needed for RFC2697. */
+ if (fm->profile->srtcm_prm.ebs_eir)
+ wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm |=
+ RTE_BE32(1 << ASO_DSEG_BO_OFFSET);
+ break;
+ case RTE_MTR_TRTCM_RFC2698:
+ wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm |=
+ RTE_BE32(1 << ASO_DSEG_BBOG_OFFSET);
+ break;
+ case RTE_MTR_TRTCM_RFC4115:
+ wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm |=
+ RTE_BE32(1 << ASO_DSEG_BO_OFFSET);
+ break;
+ default:
+ break;
+ }
+ /*
+ * Note:
+ * Due to software performance reasons, the token fields are not set
+ * when posting the WQE to the ASO SQ. They will be filled in by the
+ * hardware automatically.
+ */
sq->head++;
sq->pi += 2;/* Each WQE contains 2 WQEBB's. */
- rte_io_wmb();
- sq->sq_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);
- rte_wmb();
- *sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH. */
- rte_wmb();
+ mlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,
+ sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],
+ !sh->tx_uar.dbnc);
rte_spinlock_unlock(&sq->sqsl);
return 1;
}
do {
mlx5_aso_mtr_completion_handle(sq);
- if (mlx5_aso_mtr_sq_enqueue_single(sq, mtr))
+ if (mlx5_aso_mtr_sq_enqueue_single(sh, sq, mtr))
return 0;
/* Waiting for wqe resource. */
rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
/*
* Post a WQE to the ASO CT SQ to modify the context.
*
- * @param[in] mng
- * Pointer to the CT pools management structure.
+ * @param[in] sh
+ * Pointer to shared device context.
* @param[in] ct
* Pointer to the generic CT structure related to the context.
* @param[in] profile
* 1 on success (WQE number), 0 on failure.
*/
static uint16_t
-mlx5_aso_ct_sq_enqueue_single(struct mlx5_aso_ct_pools_mng *mng,
+mlx5_aso_ct_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,
struct mlx5_aso_ct_action *ct,
const struct rte_flow_action_conntrack *profile)
{
volatile struct mlx5_aso_wqe *wqe = NULL;
- struct mlx5_aso_sq *sq = &mng->aso_sq;
+ struct mlx5_aso_sq *sq = &sh->ct_mng->aso_sq;
uint16_t size = 1 << sq->log_desc_n;
uint16_t mask = size - 1;
uint16_t res;
/* Fill next WQE. */
MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_WAIT);
sq->elts[sq->head & mask].ct = ct;
+ sq->elts[sq->head & mask].query_data = NULL;
pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
/* Each WQE will have a single CT object. */
wqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +
profile->reply_dir.max_ack);
sq->head++;
sq->pi += 2; /* Each WQE contains 2 WQEBB's. */
- rte_io_wmb();
- sq->sq_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);
- rte_wmb();
- *sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH. */
- rte_wmb();
+ mlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,
+ sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],
+ !sh->tx_uar.dbnc);
rte_spinlock_unlock(&sq->sqsl);
return 1;
}
ct = sq->elts[idx].ct;
MLX5_ASSERT(ct);
MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_READY);
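+ /* If a query buffer was attached to this WQE, copy the 64B context back to it. */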
+ if (sq->elts[idx].query_data)
+ rte_memcpy(sq->elts[idx].query_data,
+ (char *)((uintptr_t)sq->mr.addr + idx * 64),
+ 64);
+ }
+}
+
+/*
+ * Post a WQE to the ASO CT SQ to query the current context.
+ *
+ * @param[in] sh
+ * Pointer to shared device context.
+ * @param[in] ct
+ * Pointer to the generic CT structure related to the context.
+ * @param[in] data
+ * Pointer to data area to be filled.
+ *
+ * @return
+ * 1 on success (WQE number), 0 when the SQ is full or the context is
+ * still in the WAIT state, -1 on failure.
+ */
+static int
+mlx5_aso_ct_sq_query_single(struct mlx5_dev_ctx_shared *sh,
+ struct mlx5_aso_ct_action *ct, char *data)
+{
+ volatile struct mlx5_aso_wqe *wqe = NULL;
+ struct mlx5_aso_sq *sq = &sh->ct_mng->aso_sq;
+ uint16_t size = 1 << sq->log_desc_n;
+ uint16_t mask = size - 1;
+ uint16_t res;
+ uint16_t wqe_idx;
+ struct mlx5_aso_ct_pool *pool;
+ enum mlx5_aso_ct_state state =
+ __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+
+ if (state == ASO_CONNTRACK_FREE) {
+ DRV_LOG(ERR, "Fail: No context to query");
+ return -1;
+ } else if (state == ASO_CONNTRACK_WAIT) {
+ return 0;
}
+ rte_spinlock_lock(&sq->sqsl);
+ res = size - (uint16_t)(sq->head - sq->tail);
+ if (unlikely(!res)) {
+ rte_spinlock_unlock(&sq->sqsl);
+ DRV_LOG(ERR, "Fail: SQ is full and no free WQE to send");
+ return 0;
+ }
+ MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_QUERY);
+ wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
+ /* Prefetch the next WQE; the prefetch address is still to be confirmed. */
+ rte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);
+ /* Fill next WQE. */
+ wqe_idx = sq->head & mask;
+ sq->elts[wqe_idx].ct = ct;
+ sq->elts[wqe_idx].query_data = data;
+ pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
+ /* Each WQE will have a single CT object. */
+ wqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +
+ ct->offset);
+ wqe->general_cseg.opcode = rte_cpu_to_be_32(MLX5_OPCODE_ACCESS_ASO |
+ (ASO_OPC_MOD_CONNECTION_TRACKING <<
+ WQE_CSEG_OPC_MOD_OFFSET) |
+ sq->pi << WQE_CSEG_WQE_INDEX_OFFSET);
+ /*
+ * No write request is required.
+ * ASO_OPER_LOGICAL_AND and ASO_OP_ALWAYS_FALSE are both 0.
+ * "BYTEWISE_64BYTE" is needed to fetch the whole context.
+ * Set to 0 directly to save an endian swap (a modify WQE should
+ * rewrite it).
+ * "data_mask" is ignored.
+ * The buffer address was already filled during initialization.
+ */
+ wqe->aso_cseg.operand_masks = rte_cpu_to_be_32(BYTEWISE_64BYTE <<
+ ASO_CSEG_DATA_MASK_MODE_OFFSET);
+ wqe->aso_cseg.data_mask = 0;
+ sq->head++;
+ /*
+ * Each WQE contains 2 WQEBB's, even though the data
+ * segment is not used in this case.
+ */
+ sq->pi += 2;
+ mlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,
+ sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],
+ !sh->tx_uar.dbnc);
+ rte_spinlock_unlock(&sq->sqsl);
+ return 1;
}
/*
struct mlx5_aso_ct_action *ct,
const struct rte_flow_action_conntrack *profile)
{
- struct mlx5_aso_ct_pools_mng *mng = sh->ct_mng;
uint32_t poll_wqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
struct mlx5_aso_ct_pool *pool;
MLX5_ASSERT(ct);
do {
- mlx5_aso_ct_completion_handle(mng);
- if (mlx5_aso_ct_sq_enqueue_single(mng, ct, profile))
+ mlx5_aso_ct_completion_handle(sh->ct_mng);
+ if (mlx5_aso_ct_sq_enqueue_single(sh, ct, profile))
return 0;
/* Waiting for wqe resource. */
rte_delay_us_sleep(10u);
ct->offset, pool->index);
return -1;
}
+
+/*
+ * Wait for the WQE completion so that the caller can continue with the queried data.
+ *
+ * @param[in] sh
+ * Pointer to mlx5_dev_ctx_shared object.
+ * @param[in] ct
+ * Pointer to connection tracking offload object.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+int
+mlx5_aso_ct_wait_ready(struct mlx5_dev_ctx_shared *sh,
+ struct mlx5_aso_ct_action *ct)
+{
+ struct mlx5_aso_ct_pools_mng *mng = sh->ct_mng;
+ uint32_t poll_cqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
+ struct mlx5_aso_ct_pool *pool;
+
+ if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
+ ASO_CONNTRACK_READY)
+ return 0;
+ do {
+ mlx5_aso_ct_completion_handle(mng);
+ if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
+ ASO_CONNTRACK_READY)
+ return 0;
+ /* Wait for the CQE to be ready; consider whether to block or sleep here. */
+ rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
+ } while (--poll_cqe_times);
+ pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
+ DRV_LOG(ERR, "Fail to poll CQE for ASO CT %d in pool %d",
+ ct->offset, pool->index);
+ return -1;
+}
+
+/*
+ * Convert the hardware conntrack data format into the profile.
+ *
+ * @param[out] profile
+ * Pointer to conntrack profile to be filled after query.
+ * @param[in] wdata
+ * Pointer to data fetched from hardware.
+ */
+static inline void
+mlx5_aso_ct_obj_analyze(struct rte_flow_action_conntrack *profile,
+ char *wdata)
+{
+ void *o_dir = MLX5_ADDR_OF(conn_track_aso, wdata, original_dir);
+ void *r_dir = MLX5_ADDR_OF(conn_track_aso, wdata, reply_dir);
+
+ /* Using MLX5_GET16 for the 16-bit fields should be taken into consideration. */
+ profile->state = (enum rte_flow_conntrack_state)
+ MLX5_GET(conn_track_aso, wdata, state);
+ profile->enable = !MLX5_GET(conn_track_aso, wdata, freeze_track);
+ profile->selective_ack = MLX5_GET(conn_track_aso, wdata,
+ sack_permitted);
+ profile->live_connection = MLX5_GET(conn_track_aso, wdata,
+ connection_assured);
+ profile->challenge_ack_passed = MLX5_GET(conn_track_aso, wdata,
+ challenged_acked);
+ profile->max_ack_window = MLX5_GET(conn_track_aso, wdata,
+ max_ack_window);
+ profile->retransmission_limit = MLX5_GET(conn_track_aso, wdata,
+ retranmission_limit);
+ profile->last_window = MLX5_GET(conn_track_aso, wdata, last_win);
+ profile->last_direction = MLX5_GET(conn_track_aso, wdata, last_dir);
+ profile->last_index = (enum rte_flow_conntrack_tcp_last_index)
+ MLX5_GET(conn_track_aso, wdata, last_index);
+ profile->last_seq = MLX5_GET(conn_track_aso, wdata, last_seq);
+ profile->last_ack = MLX5_GET(conn_track_aso, wdata, last_ack);
+ profile->last_end = MLX5_GET(conn_track_aso, wdata, last_end);
+ profile->liberal_mode = MLX5_GET(conn_track_aso, wdata,
+ reply_direction_tcp_liberal_enabled) |
+ MLX5_GET(conn_track_aso, wdata,
+ original_direction_tcp_liberal_enabled);
+ /* The RTE profile has a single liberal flag rather than one per direction. */
+ profile->reply_dir.scale = MLX5_GET(conn_track_aso, wdata,
+ reply_direction_tcp_scale);
+ profile->reply_dir.close_initiated = MLX5_GET(conn_track_aso, wdata,
+ reply_direction_tcp_close_initiated);
+ profile->reply_dir.data_unacked = MLX5_GET(conn_track_aso, wdata,
+ reply_direction_tcp_data_unacked);
+ profile->reply_dir.last_ack_seen = MLX5_GET(conn_track_aso, wdata,
+ reply_direction_tcp_max_ack);
+ profile->reply_dir.sent_end = MLX5_GET(tcp_window_params,
+ r_dir, sent_end);
+ profile->reply_dir.reply_end = MLX5_GET(tcp_window_params,
+ r_dir, reply_end);
+ profile->reply_dir.max_win = MLX5_GET(tcp_window_params,
+ r_dir, max_win);
+ profile->reply_dir.max_ack = MLX5_GET(tcp_window_params,
+ r_dir, max_ack);
+ profile->original_dir.scale = MLX5_GET(conn_track_aso, wdata,
+ original_direction_tcp_scale);
+ profile->original_dir.close_initiated = MLX5_GET(conn_track_aso, wdata,
+ original_direction_tcp_close_initiated);
+ profile->original_dir.data_unacked = MLX5_GET(conn_track_aso, wdata,
+ original_direction_tcp_data_unacked);
+ profile->original_dir.last_ack_seen = MLX5_GET(conn_track_aso, wdata,
+ original_direction_tcp_max_ack);
+ profile->original_dir.sent_end = MLX5_GET(tcp_window_params,
+ o_dir, sent_end);
+ profile->original_dir.reply_end = MLX5_GET(tcp_window_params,
+ o_dir, reply_end);
+ profile->original_dir.max_win = MLX5_GET(tcp_window_params,
+ o_dir, max_win);
+ profile->original_dir.max_ack = MLX5_GET(tcp_window_params,
+ o_dir, max_ack);
+}
+
+/*
+ * Query the connection tracking information by sending a WQE.
+ *
+ * @param[in] sh
+ * Pointer to the shared device context.
+ * @param[in] ct
+ * Pointer to connection tracking offload object.
+ * @param[out] profile
+ * Pointer to connection tracking TCP information.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+int
+mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh,
+ struct mlx5_aso_ct_action *ct,
+ struct rte_flow_action_conntrack *profile)
+{
+ uint32_t poll_wqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
+ struct mlx5_aso_ct_pool *pool;
+ char out_data[64 * 2];
+ int ret;
+
+ MLX5_ASSERT(ct);
+ do {
+ mlx5_aso_ct_completion_handle(sh->ct_mng);
+ ret = mlx5_aso_ct_sq_query_single(sh, ct, out_data);
+ if (ret < 0)
+ return ret;
+ else if (ret > 0)
+ goto data_handle;
+ /* Wait for a WQE resource or for the CT state to change. */
+ else
+ rte_delay_us_sleep(10u);
+ } while (--poll_wqe_times);
+ pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
+ DRV_LOG(ERR, "Fail to send WQE for ASO CT %d in pool %d",
+ ct->offset, pool->index);
+ return -1;
+data_handle:
+ ret = mlx5_aso_ct_wait_ready(sh, ct);
+ if (!ret)
+ mlx5_aso_ct_obj_analyze(profile, out_data);
+ return ret;
+}
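
/*
 * Illustrative usage sketch only, not part of the patch: how a caller might
 * retrieve and inspect the current conntrack state with the query routine
 * above. "sh" and "ct" are assumed to be valid objects obtained elsewhere in
 * the PMD; the helper name and the checked field are examples only.
 */
static __rte_unused int
mlx5_aso_ct_query_usage_sketch(struct mlx5_dev_ctx_shared *sh,
			       struct mlx5_aso_ct_action *ct)
{
	struct rte_flow_action_conntrack profile;

	memset(&profile, 0, sizeof(profile));
	/* Post the query WQE and wait until the 64B context is copied back. */
	if (mlx5_aso_ct_query_by_wqe(sh, ct, &profile))
		return -1;
	/* The profile now mirrors the hardware state of the connection. */
	return profile.live_connection ? 0 : -1;
}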
+
+/*
+ * Make sure the conntrack context is synchronized with hardware before
+ * creating a flow rule that uses it.
+ *
+ * @param[in] sh
+ * Pointer to shared device context.
+ * @param[in] ct
+ * Pointer to connection tracking offload object.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_aso_ct_available(struct mlx5_dev_ctx_shared *sh,
+ struct mlx5_aso_ct_action *ct)
+{
+ struct mlx5_aso_ct_pools_mng *mng = sh->ct_mng;
+ uint32_t poll_cqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
+ enum mlx5_aso_ct_state state =
+ __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+
+ if (state == ASO_CONNTRACK_FREE) {
+ rte_errno = ENXIO;
+ return -rte_errno;
+ } else if (state == ASO_CONNTRACK_READY ||
+ state == ASO_CONNTRACK_QUERY) {
+ return 0;
+ }
+ do {
+ mlx5_aso_ct_completion_handle(mng);
+ state = __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ if (state == ASO_CONNTRACK_READY ||
+ state == ASO_CONNTRACK_QUERY)
+ return 0;
+ /* Wait for the CQE to be ready; consider whether to block or sleep here. */
+ rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
+ } while (--poll_cqe_times);
+ rte_errno = EBUSY;
+ return -rte_errno;
+}
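
/*
 * Illustrative usage sketch only, not part of the patch: making sure a CT
 * context is synchronized before referencing it from a new flow rule. "sh"
 * and "ct" are assumed to come from the PMD; the helper name is hypothetical.
 */
static __rte_unused int
mlx5_aso_ct_reuse_sketch(struct mlx5_dev_ctx_shared *sh,
			 struct mlx5_aso_ct_action *ct)
{
	/* Poll (bounded by MLX5_CT_POLL_WQE_CQE_TIMES) until the HW is in sync. */
	if (mlx5_aso_ct_available(sh, ct))
		return -rte_errno; /* rte_errno is ENXIO for a free context, EBUSY on timeout. */
	/* The CT action can now be safely referenced by a flow rule. */
	return 0;
}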