switch (ev->sched_type) {
case SSO_SYNC_ORDERED:
ssows_swtag_norm(ws, ev->event, SSO_SYNC_ATOMIC);
- rte_cio_wmb();
+ rte_io_wmb();
ssows_swtag_wait(ws);
break;
case SSO_SYNC_UNTAGGED:
ssows_swtag_full(ws, ev->u64, ev->event, SSO_SYNC_ATOMIC,
ev->queue_id);
- rte_cio_wmb();
+ rte_io_wmb();
ssows_swtag_wait(ws);
break;
case SSO_SYNC_ATOMIC:
- rte_cio_wmb();
+ rte_io_wmb();
break;
}
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
+/*
+ * Crypto adapter enqueue hook for an SSO worker port.
+ *
+ * Ignores nb_events and forwards only ev[0].event_ptr to the OTX
+ * crypto adapter. NOTE(review): presumably this path is invoked with
+ * a single event per call (burst size 1) — confirm against the
+ * eventdev crypto-adapter caller before relying on that.
+ *
+ * @param port       SSO worker port handle (opaque, passed through).
+ * @param ev         Event array; only ev[0].event_ptr is consumed.
+ * @param nb_events  Unused (marked via RTE_SET_USED).
+ * @return number of events enqueued, as reported by
+ *         otx_crypto_adapter_enqueue().
+ */
+static uint16_t __rte_hot
+ssow_crypto_adapter_enqueue(void *port, struct rte_event ev[],
+ uint16_t nb_events)
+{
+ RTE_SET_USED(nb_events);
+
+ return otx_crypto_adapter_enqueue(port, ev->event_ptr);
+}
+
void
ssovf_fastpath_fns_set(struct rte_eventdev *dev)
{
dev->enqueue_new_burst = ssows_enq_new_burst;
dev->enqueue_forward_burst = ssows_enq_fwd_burst;
+ dev->ca_enqueue = ssow_crypto_adapter_enqueue;
+
const event_tx_adapter_enqueue ssow_txa_enqueue[2][2][2][2] = {
#define T(name, f3, f2, f1, f0, sz, flags) \
[f3][f2][f1][f0] = sso_event_tx_adapter_enqueue_ ##name,
case OCCTX_ERRLEV_LC:
if (errcode == OCCTX_EC_IP4_CSUM) {
val |= PKT_RX_IP_CKSUM_BAD;
- val |= PKT_RX_EIP_CKSUM_BAD;
+ val |= PKT_RX_OUTER_IP_CKSUM_BAD;
} else {
val |= PKT_RX_IP_CKSUM_GOOD;
}