git.droids-corp.org
/
dpdk.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
net/mlx5: prepare MR prototypes for DevX
[dpdk.git]
/
drivers
/
net
/
mlx5
/
mlx5_flow_age.c
diff --git
a/drivers/net/mlx5/mlx5_flow_age.c
b/drivers/net/mlx5/mlx5_flow_age.c
index
caa1931
..
cea2cf7
100644
(file)
--- a/
drivers/net/mlx5/mlx5_flow_age.c
+++ b/
drivers/net/mlx5/mlx5_flow_age.c
@@
-196,8
+196,6
@@
mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
}
if (sq->cq.cq)
mlx5_aso_cq_destroy(&sq->cq);
}
if (sq->cq.cq)
mlx5_aso_cq_destroy(&sq->cq);
- if (sq->uar_obj)
- mlx5_glue->devx_free_uar(sq->uar_obj);
mlx5_aso_devx_dereg_mr(&sq->mr);
memset(sq, 0, sizeof(*sq));
}
mlx5_aso_devx_dereg_mr(&sq->mr);
memset(sq, 0, sizeof(*sq));
}
@@
-244,6
+242,8
@@
mlx5_aso_init_sq(struct mlx5_aso_sq *sq)
* Pointer to SQ to create.
* @param[in] socket
* Socket to use for allocation.
* Pointer to SQ to create.
* @param[in] socket
* Socket to use for allocation.
+ * @param[in] uar
+ * User Access Region object.
* @param[in] pdn
* Protection Domain number to use.
* @param[in] eqn
* @param[in] pdn
* Protection Domain number to use.
* @param[in] eqn
@@
-256,7
+256,8
@@
mlx5_aso_init_sq(struct mlx5_aso_sq *sq)
*/
static int
mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
*/
static int
mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
- uint32_t pdn, uint32_t eqn, uint16_t log_desc_n)
+ struct mlx5dv_devx_uar *uar, uint32_t pdn,
+ uint32_t eqn, uint16_t log_desc_n)
{
struct mlx5_devx_create_sq_attr attr = { 0 };
struct mlx5_devx_modify_sq_attr modify_attr = { 0 };
{
struct mlx5_devx_create_sq_attr attr = { 0 };
struct mlx5_devx_modify_sq_attr modify_attr = { 0 };
@@
-269,11
+270,8
@@
mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
if (mlx5_aso_devx_reg_mr(ctx, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
sq_desc_n, &sq->mr, socket, pdn))
return -1;
if (mlx5_aso_devx_reg_mr(ctx, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
sq_desc_n, &sq->mr, socket, pdn))
return -1;
- sq->uar_obj = mlx5_glue->devx_alloc_uar(ctx, 0);
- if (!sq->uar_obj)
- goto error;
if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n, socket,
if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n, socket,
-			       mlx5_os_get_devx_uar_page_id(sq->uar_obj), eqn))
+			       mlx5_os_get_devx_uar_page_id(uar), eqn))
goto error;
sq->log_desc_n = log_desc_n;
sq->umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size +
goto error;
sq->log_desc_n = log_desc_n;
sq->umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size +
@@
-297,7
+295,7
@@
mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
attr.tis_num = 0;
attr.user_index = 0xFFFF;
attr.cqn = sq->cq.cq->id;
attr.tis_num = 0;
attr.user_index = 0xFFFF;
attr.cqn = sq->cq.cq->id;
-	wq_attr->uar_page = mlx5_os_get_devx_uar_page_id(sq->uar_obj);
+	wq_attr->uar_page = mlx5_os_get_devx_uar_page_id(uar);
wq_attr->pd = pdn;
wq_attr->wq_type = MLX5_WQ_TYPE_CYCLIC;
wq_attr->log_wq_pg_sz = rte_log2_u32(pgsize);
wq_attr->pd = pdn;
wq_attr->wq_type = MLX5_WQ_TYPE_CYCLIC;
wq_attr->log_wq_pg_sz = rte_log2_u32(pgsize);
@@
-322,12
+320,12
@@
mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
rte_errno = ENOMEM;
goto error;
}
rte_errno = ENOMEM;
goto error;
}
- sq->ci = 0;
sq->pi = 0;
sq->pi = 0;
+ sq->head = 0;
+ sq->tail = 0;
sq->sqn = sq->sq->id;
sq->db_rec = RTE_PTR_ADD(sq->umem_buf, (uintptr_t)(wq_attr->dbr_addr));
sq->sqn = sq->sq->id;
sq->db_rec = RTE_PTR_ADD(sq->umem_buf, (uintptr_t)(wq_attr->dbr_addr));
- sq->uar_addr = (volatile uint64_t *)((uint8_t *)sq->uar_obj->base_addr +
- 0x800);
+ sq->uar_addr = (volatile uint64_t *)((uint8_t *)uar->base_addr + 0x800);
mlx5_aso_init_sq(sq);
return 0;
error:
mlx5_aso_init_sq(sq);
return 0;
error:
@@
-347,8
+345,9
@@
error:
int
mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh)
{
int
mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh)
{
- return mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0, sh->pdn,
- sh->eqn, MLX5_ASO_QUEUE_LOG_DESC);
+ return mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,
+ sh->tx_uar, sh->pdn, sh->eqn,
+ MLX5_ASO_QUEUE_LOG_DESC);
}
/**
}
/**
@@
-383,20
+382,20
@@
mlx5_aso_sq_enqueue_burst(struct mlx5_aso_age_mng *mng, uint16_t n)
uint16_t size = 1 << sq->log_desc_n;
uint16_t mask = size - 1;
uint16_t max;
uint16_t size = 1 << sq->log_desc_n;
uint16_t mask = size - 1;
uint16_t max;
-	uint16_t start_pi = sq->pi;
+	uint16_t start_head = sq->head;
-	max = RTE_MIN(size - (uint16_t)(sq->pi - sq->ci), n - sq->next);
+	max = RTE_MIN(size - (uint16_t)(sq->head - sq->tail), n - sq->next);
if (unlikely(!max))
return 0;
if (unlikely(!max))
return 0;
-	sq->elts[start_pi & mask].burst_size = max;
+	sq->elts[start_head & mask].burst_size = max;
do {
do {
-		wqe = &sq->wqes[sq->pi & mask];
-		rte_prefetch0(&sq->wqes[(sq->pi + 1) & mask]);
+		wqe = &sq->wqes[sq->head & mask];
+		rte_prefetch0(&sq->wqes[(sq->head + 1) & mask]);
/* Fill next WQE. */
rte_spinlock_lock(&mng->resize_sl);
pool = mng->pools[sq->next];
rte_spinlock_unlock(&mng->resize_sl);
/* Fill next WQE. */
rte_spinlock_lock(&mng->resize_sl);
pool = mng->pools[sq->next];
rte_spinlock_unlock(&mng->resize_sl);
-		sq->elts[sq->pi & mask].pool = pool;
+		sq->elts[sq->head & mask].pool = pool;
wqe->general_cseg.misc =
rte_cpu_to_be_32(((struct mlx5_devx_obj *)
(pool->flow_hit_aso_obj))->id);
wqe->general_cseg.misc =
rte_cpu_to_be_32(((struct mlx5_devx_obj *)
(pool->flow_hit_aso_obj))->id);
@@
-404,20
+403,23
@@
mlx5_aso_sq_enqueue_burst(struct mlx5_aso_age_mng *mng, uint16_t n)
MLX5_COMP_MODE_OFFSET);
wqe->general_cseg.opcode = rte_cpu_to_be_32
(MLX5_OPCODE_ACCESS_ASO |
MLX5_COMP_MODE_OFFSET);
wqe->general_cseg.opcode = rte_cpu_to_be_32
(MLX5_OPCODE_ACCESS_ASO |
- ASO_OP_MOD_FLOW_HIT << 24 |
- sq->pi << 9);
- sq->pi++;
+ (ASO_OPC_MOD_FLOW_HIT <<
+ WQE_CSEG_OPC_MOD_OFFSET) |
+ (sq->pi <<
+ WQE_CSEG_WQE_INDEX_OFFSET));
+ sq->pi += 2; /* Each WQE contains 2 WQEBB's. */
+ sq->head++;
sq->next++;
max--;
} while (max);
wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
MLX5_COMP_MODE_OFFSET);
rte_io_wmb();
sq->next++;
max--;
} while (max);
wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
MLX5_COMP_MODE_OFFSET);
rte_io_wmb();
-	sq->db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi << 1);
+ sq->db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);
rte_wmb();
*sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH.*/
rte_wmb();
rte_wmb();
*sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH.*/
rte_wmb();
-	return sq->elts[start_pi & mask].burst_size;
+	return sq->elts[start_head & mask].burst_size;
}
/**
}
/**
@@
-484,7
+486,7
@@
mlx5_aso_age_action_update(struct mlx5_dev_ctx_shared *sh, uint16_t n)
uint16_t i;
for (i = 0; i < n; ++i) {
uint16_t i;
for (i = 0; i < n; ++i) {
-		uint16_t idx = (sq->ci + i) & mask;
+		uint16_t idx = (sq->tail + i) & mask;
struct mlx5_aso_age_pool *pool = sq->elts[idx].pool;
uint64_t diff = curr - pool->time_of_last_age_check;
uint64_t *addr = sq->mr.buf;
struct mlx5_aso_age_pool *pool = sq->elts[idx].pool;
uint64_t diff = curr - pool->time_of_last_age_check;
uint64_t *addr = sq->mr.buf;
@@
-560,7
+562,7
@@
mlx5_aso_completion_handle(struct mlx5_dev_ctx_shared *sh)
const unsigned int mask = cq_size - 1;
uint32_t idx;
uint32_t next_idx = cq->cq_ci & mask;
const unsigned int mask = cq_size - 1;
uint32_t idx;
uint32_t next_idx = cq->cq_ci & mask;
-	const uint16_t max = (uint16_t)(sq->pi - sq->ci);
+	const uint16_t max = (uint16_t)(sq->head - sq->tail);
uint16_t i = 0;
int ret;
if (unlikely(!max))
uint16_t i = 0;
int ret;
if (unlikely(!max))
@@
-581,13
+583,13
@@
mlx5_aso_completion_handle(struct mlx5_dev_ctx_shared *sh)
break;
mlx5_aso_cqe_err_handle(sq);
} else {
break;
mlx5_aso_cqe_err_handle(sq);
} else {
-			i += sq->elts[(sq->ci + i) & mask].burst_size;
+			i += sq->elts[(sq->tail + i) & mask].burst_size;
}
cq->cq_ci++;
} while (1);
if (likely(i)) {
mlx5_aso_age_action_update(sh, i);
}
cq->cq_ci++;
} while (1);
if (likely(i)) {
mlx5_aso_age_action_update(sh, i);
-		sq->ci += i;
+		sq->tail += i;
rte_io_wmb();
cq->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
}
rte_io_wmb();
cq->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
}