struct pending_queue *pend_q;
struct cpt_inst_s *inst;
uint16_t lmt_id;
+ uint64_t head;
struct cpt_inflight_req *infl_req;
uint16_t nb_allowed, count = 0;
int ret, i;
pend_q = &qp->pend_q;
- nb_allowed = qp->lf.nb_desc - pend_q->pending_count;
+ const uint64_t pq_mask = pend_q->pq_mask;
+
+ head = pend_q->head;
+ nb_allowed = pending_queue_free_cnt(head, pend_q->tail, pq_mask);
nb_ops = RTE_MIN(nb_ops, nb_allowed);
if (unlikely(nb_ops == 0))
	return 0;
again:
for (i = 0; i < RTE_MIN(PKTS_PER_LOOP, nb_ops); i++) {
- infl_req = &pend_q->req_queue[pend_q->enq_tail];
+ infl_req = &pend_q->req_queue[head];
infl_req->op_flags = 0;
ret = cn10k_cpt_fill_inst(qp, ops + i, &inst[2 * i], infl_req);
if (unlikely(ret != 1)) {
plt_dp_err("Could not process op: %p", ops + i);
if (i == 0)
- goto update_pending;
+ goto pend_q_commit;
break;
}
- MOD_INC(pend_q->enq_tail, qp->lf.nb_desc);
+ pending_queue_advance(&head, pq_mask);
}
if (nb_ops - i > 0 && i == PKTS_PER_LOOP) {
	nb_ops -= i;
	ops += i;
	count += i;
	goto again;
}
-update_pending:
- pend_q->pending_count += count + i;
+pend_q_commit:
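+ /* Make the inflight request writes visible before publishing the new head; pairs with the acquire fence on the dequeue side */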
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ pend_q->head = head;
pend_q->time_out = rte_get_timer_cycles() +
		   DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();

return count + i;
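/*
 * For reference, a plausible sketch of the mask-based ring helpers the new
 * code relies on, assuming nb_desc is rounded up to a power of two so that
 * pq_mask == nb_desc - 1. The names match the calls above; the bodies here
 * are illustrative, not the driver's exact definitions.
 */
static __rte_always_inline uint64_t
pending_queue_infl_cnt(uint64_t head, uint64_t tail, const uint64_t mask)
{
	/* Entries enqueued at head but not yet consumed at tail, modulo ring size */
	return (head - tail) & mask;
}

static __rte_always_inline uint64_t
pending_queue_free_cnt(uint64_t head, uint64_t tail, const uint64_t mask)
{
	/* One slot stays unused so a full ring is distinguishable from an empty one */
	return mask - pending_queue_infl_cnt(head, tail, mask);
}

static __rte_always_inline void
pending_queue_advance(uint64_t *index, const uint64_t mask)
{
	*index = (*index + 1) & mask;
}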
switch (uc_compcode) {
case ROC_IE_OT_UCC_SUCCESS:
if (sa->ip_csum_enable)
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
break;
case ROC_IE_OT_UCC_SUCCESS_PKT_IP_BADCSUM:
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
break;
case ROC_IE_OT_UCC_SUCCESS_PKT_L4_GOODCSUM:
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (sa->ip_csum_enable)
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
break;
case ROC_IE_OT_UCC_SUCCESS_PKT_L4_BADCSUM:
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
if (sa->ip_csum_enable)
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
break;
default:
	break;
}
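/*
 * With the mbuf flag rename in DPDK 21.11, applications test these bits
 * under the RTE_MBUF_F_* names. A minimal consumer-side check might look
 * like this (illustrative; not part of the patch):
 */
if ((mbuf->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) == RTE_MBUF_F_RX_IP_CKSUM_BAD ||
    (mbuf->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) == RTE_MBUF_F_RX_L4_CKSUM_BAD)
	rte_pktmbuf_free(mbuf); /* drop packets that failed inner checksum verification */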
struct cnxk_cpt_qp *qp = qptr;
struct pending_queue *pend_q;
struct cpt_cn10k_res_s *res;
+ uint64_t infl_cnt, pq_tail;
struct cpt_inflight_req *infl_req;
struct rte_crypto_op *cop;
- int i, nb_pending;
+ int i;
pend_q = &qp->pend_q;
- nb_pending = pend_q->pending_count;
+ const uint64_t pq_mask = pend_q->pq_mask;
+
+ pq_tail = pend_q->tail;
+ infl_cnt = pending_queue_infl_cnt(pend_q->head, pq_tail, pq_mask);
+ nb_ops = RTE_MIN(nb_ops, infl_cnt);
- if (nb_ops > nb_pending)
- nb_ops = nb_pending;
+ /* Pairs with the release fence on the enqueue side: requests counted in infl_cnt are fully written before we read them */
+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
for (i = 0; i < nb_ops; i++) {
- infl_req = &pend_q->req_queue[pend_q->deq_head];
+ infl_req = &pend_q->req_queue[pq_tail];
res = (struct cpt_cn10k_res_s *)&infl_req->res;
if (unlikely(res->compcode == CPT_COMP_NOT_DONE))
	break;
- MOD_INC(pend_q->deq_head, qp->lf.nb_desc);
+ pending_queue_advance(&pq_tail, pq_mask);
cop = infl_req->cop;
if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
	rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
}
- pend_q->pending_count -= i;
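+ /* Hand the consumed slots back to the enqueue side */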
+ pend_q->tail = pq_tail;
return i;
}
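/*
 * These burst hooks sit behind the standard cryptodev API; a typical caller
 * drives them roughly as follows (sketch; dev_id, qp_id, BURST and the ops
 * and deq_ops arrays are placeholders):
 */
uint16_t nb_enq, nb_deq = 0;

nb_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, BURST);
while (nb_deq < nb_enq)
	nb_deq += rte_cryptodev_dequeue_burst(dev_id, qp_id,
					      deq_ops + nb_deq, nb_enq - nb_deq);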