X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fevent%2Focteontx2%2Fotx2_evdev.c;h=cdadbb2b21fddfc5b0f6e985261dc2bc61c0f416;hb=1f411e31a826a9b080c2834a0944b028213e9ed6;hp=a6bf861fb86f4388f3b7df707a553133db26adc9;hpb=67b5f4686459ccf6fe7a430ee42018214a19a260;p=dpdk.git

diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
index a6bf861fb8..cdadbb2b21 100644
--- a/drivers/event/octeontx2/otx2_evdev.c
+++ b/drivers/event/octeontx2/otx2_evdev.c
@@ -7,12 +7,477 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
 
+#include "otx2_evdev_stats.h"
 #include "otx2_evdev.h"
+#include "otx2_irq.h"
+#include "otx2_tim_evdev.h"
+
+static inline int
+sso_get_msix_offsets(const struct rte_eventdev *event_dev)
+{
+	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+	uint8_t nb_ports = dev->nb_event_ports * (dev->dual_ws ? 2 : 1);
+	struct otx2_mbox *mbox = dev->mbox;
+	struct msix_offset_rsp *msix_rsp;
+	int i, rc;
+
+	/* Get SSO and SSOW MSIX vector offsets */
+	otx2_mbox_alloc_msg_msix_offset(mbox);
+	rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
+
+	for (i = 0; i < nb_ports; i++)
+		dev->ssow_msixoff[i] = msix_rsp->ssow_msixoff[i];
+
+	for (i = 0; i < dev->nb_event_queues; i++)
+		dev->sso_msixoff[i] = msix_rsp->sso_msixoff[i];
+
+	return rc;
+}
+
+void
+sso_fastpath_fns_set(struct rte_eventdev *event_dev)
+{
+	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+	/* Single WS modes */
+	const event_dequeue_t ssogws_deq[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+	[f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_burst_t ssogws_deq_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+	[f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_t ssogws_deq_timeout[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+	[f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_timeout_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_burst_t
+		ssogws_deq_timeout_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+	[f6][f5][f4][f3][f2][f1][f0] = \
+		otx2_ssogws_deq_timeout_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_t ssogws_deq_seg[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+	[f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_burst_t
+		ssogws_deq_seg_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+	[f6][f5][f4][f3][f2][f1][f0] = \
+		otx2_ssogws_deq_seg_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_t ssogws_deq_seg_timeout[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+	[f6][f5][f4][f3][f2][f1][f0] = \
+		otx2_ssogws_deq_seg_timeout_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_burst_t
+		ssogws_deq_seg_timeout_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+	[f6][f5][f4][f3][f2][f1][f0] = \
+		otx2_ssogws_deq_seg_timeout_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+
+	/* Dual WS modes */
+	const event_dequeue_t ssogws_dual_deq[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+	[f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_burst_t
+		ssogws_dual_deq_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+	[f6][f5][f4][f3][f2][f1][f0] = \
+		otx2_ssogws_dual_deq_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_t ssogws_dual_deq_timeout[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+	[f6][f5][f4][f3][f2][f1][f0] = \
+		otx2_ssogws_dual_deq_timeout_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_burst_t
+		ssogws_dual_deq_timeout_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+	[f6][f5][f4][f3][f2][f1][f0] = \
+		otx2_ssogws_dual_deq_timeout_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_t ssogws_dual_deq_seg[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+	[f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_seg_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_burst_t
+		ssogws_dual_deq_seg_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+	[f6][f5][f4][f3][f2][f1][f0] = \
+		otx2_ssogws_dual_deq_seg_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_t
+		ssogws_dual_deq_seg_timeout[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+	[f6][f5][f4][f3][f2][f1][f0] = \
+		otx2_ssogws_dual_deq_seg_timeout_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_burst_t
+		ssogws_dual_deq_seg_timeout_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+	[f6][f5][f4][f3][f2][f1][f0] = \
+		otx2_ssogws_dual_deq_seg_timeout_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	/* Tx modes */
+	const event_tx_adapter_enqueue
+		ssogws_tx_adptr_enq[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
+	[f6][f5][f4][f3][f2][f1][f0] = \
+		otx2_ssogws_tx_adptr_enq_ ## name,
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+	};
+
+	const event_tx_adapter_enqueue
+		ssogws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
+	[f6][f5][f4][f3][f2][f1][f0] = \
+		otx2_ssogws_tx_adptr_enq_seg_ ## name,
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+	};
+
+	const event_tx_adapter_enqueue
+		ssogws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
+	[f6][f5][f4][f3][f2][f1][f0] = \
+		otx2_ssogws_dual_tx_adptr_enq_ ## name,
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+	};
+
+	const event_tx_adapter_enqueue
+		ssogws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
+	[f6][f5][f4][f3][f2][f1][f0] = \
+		otx2_ssogws_dual_tx_adptr_enq_seg_ ## name,
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+	};
+
+	event_dev->enqueue = otx2_ssogws_enq;
+	event_dev->enqueue_burst = otx2_ssogws_enq_burst;
+	event_dev->enqueue_new_burst = otx2_ssogws_enq_new_burst;
+	event_dev->enqueue_forward_burst = otx2_ssogws_enq_fwd_burst;
+	if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
+		event_dev->dequeue = ssogws_deq_seg
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		event_dev->dequeue_burst = ssogws_deq_seg_burst
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		if (dev->is_timeout_deq) {
+			event_dev->dequeue = ssogws_deq_seg_timeout
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+			event_dev->dequeue_burst =
+				ssogws_deq_seg_timeout_burst
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		}
+	} else {
+		event_dev->dequeue = ssogws_deq
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		event_dev->dequeue_burst = ssogws_deq_burst
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		if (dev->is_timeout_deq) {
+			event_dev->dequeue = ssogws_deq_timeout
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+			event_dev->dequeue_burst =
+				ssogws_deq_timeout_burst
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		}
+	}
+
+	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
+		/* [SEC] [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */
+		event_dev->txa_enqueue = ssogws_tx_adptr_enq_seg
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
+	} else {
+		event_dev->txa_enqueue = ssogws_tx_adptr_enq
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
+	}
+
+	if (dev->dual_ws) {
+		event_dev->enqueue = otx2_ssogws_dual_enq;
+		event_dev->enqueue_burst = otx2_ssogws_dual_enq_burst;
+		event_dev->enqueue_new_burst =
+			otx2_ssogws_dual_enq_new_burst;
+		event_dev->enqueue_forward_burst =
+			otx2_ssogws_dual_enq_fwd_burst;
+
+		if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
+			event_dev->dequeue = ssogws_dual_deq_seg
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_SECURITY_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_TSTAMP_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+			event_dev->dequeue_burst = ssogws_dual_deq_seg_burst
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_SECURITY_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+			if (dev->is_timeout_deq) {
+				event_dev->dequeue =
+					ssogws_dual_deq_seg_timeout
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_SECURITY_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_TSTAMP_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_CHECKSUM_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_PTYPE_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_RSS_F)];
+				event_dev->dequeue_burst =
+					ssogws_dual_deq_seg_timeout_burst
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_SECURITY_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_TSTAMP_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_CHECKSUM_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_PTYPE_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_RSS_F)];
+			}
+		} else {
+			event_dev->dequeue = ssogws_dual_deq
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_SECURITY_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_TSTAMP_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+			event_dev->dequeue_burst = ssogws_dual_deq_burst
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_SECURITY_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_TSTAMP_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+			if (dev->is_timeout_deq) {
+				event_dev->dequeue =
+					ssogws_dual_deq_timeout
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_SECURITY_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_TSTAMP_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_CHECKSUM_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_PTYPE_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_RSS_F)];
+				event_dev->dequeue_burst =
+					ssogws_dual_deq_timeout_burst
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_SECURITY_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_TSTAMP_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_CHECKSUM_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_PTYPE_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_RSS_F)];
+			}
+		}
+
+		if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
+			/* [SEC] [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */
+			event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq_seg
+				[!!(dev->tx_offloads &
+						NIX_TX_OFFLOAD_SECURITY_F)]
+				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
+				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
+				[!!(dev->tx_offloads &
+						NIX_TX_OFFLOAD_MBUF_NOFF_F)]
+				[!!(dev->tx_offloads &
+						NIX_TX_OFFLOAD_VLAN_QINQ_F)]
+				[!!(dev->tx_offloads &
+						NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
+				[!!(dev->tx_offloads &
+						NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
+		} else {
+			event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq
+				[!!(dev->tx_offloads &
+						NIX_TX_OFFLOAD_SECURITY_F)]
+				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
+				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
+				[!!(dev->tx_offloads &
+						NIX_TX_OFFLOAD_MBUF_NOFF_F)]
+				[!!(dev->tx_offloads &
+						NIX_TX_OFFLOAD_VLAN_QINQ_F)]
+				[!!(dev->tx_offloads &
+						NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
+				[!!(dev->tx_offloads &
+						NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
+		}
+	}
+
+	event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
+	rte_mb();
+}
 
 static void
 otx2_sso_info_get(struct rte_eventdev *event_dev,
@@ -36,7 +501,84 @@ otx2_sso_info_get(struct rte_eventdev *event_dev,
 					RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
 					RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
 					RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
-					RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+					RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+					RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
+}
+
+static void
+sso_port_link_modify(struct otx2_ssogws *ws, uint8_t queue, uint8_t enable)
+{
+	uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
+	uint64_t val;
+
+	val = queue;
+	val |= 0ULL << 12; /* SET 0 */
+	val |= 0x8000800080000000; /* Dont modify rest of the masks */
+	val |= (uint64_t)enable << 14; /* Enable/Disable Membership. */
+	otx2_write64(val, base + SSOW_LF_GWS_GRPMSK_CHG);
+}
+
+static int
+otx2_sso_port_link(struct rte_eventdev *event_dev, void *port,
+		   const uint8_t queues[], const uint8_t priorities[],
+		   uint16_t nb_links)
+{
+	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+	uint8_t port_id = 0;
+	uint16_t link;
+
+	RTE_SET_USED(priorities);
+	for (link = 0; link < nb_links; link++) {
+		if (dev->dual_ws) {
+			struct otx2_ssogws_dual *ws = port;
+
+			port_id = ws->port;
+			sso_port_link_modify((struct otx2_ssogws *)
+					&ws->ws_state[0], queues[link], true);
+			sso_port_link_modify((struct otx2_ssogws *)
+					&ws->ws_state[1], queues[link], true);
+		} else {
+			struct otx2_ssogws *ws = port;
+
+			port_id = ws->port;
+			sso_port_link_modify(ws, queues[link], true);
+		}
+	}
+	sso_func_trace("Port=%d nb_links=%d", port_id, nb_links);
+
+	return (int)nb_links;
+}
+
+static int
+otx2_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
+		     uint8_t queues[], uint16_t nb_unlinks)
+{
+	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+	uint8_t port_id = 0;
+	uint16_t unlink;
+
+	for (unlink = 0; unlink < nb_unlinks; unlink++) {
+		if (dev->dual_ws) {
+			struct otx2_ssogws_dual *ws = port;
+
+			port_id = ws->port;
+			sso_port_link_modify((struct otx2_ssogws *)
+					&ws->ws_state[0], queues[unlink],
+					false);
+			sso_port_link_modify((struct otx2_ssogws *)
+					&ws->ws_state[1], queues[unlink],
+					false);
+		} else {
+			struct otx2_ssogws *ws = port;
+
+			port_id = ws->port;
+			sso_port_link_modify(ws, queues[unlink], false);
+		}
+	}
+	sso_func_trace("Port=%d nb_unlinks=%d", port_id, nb_unlinks);
+
+	return (int)nb_unlinks;
 }
 
 static int
@@ -147,7 +689,36 @@ sso_lf_cfg(struct otx2_sso_evdev *dev, struct otx2_mbox *mbox,
 static void
 otx2_sso_port_release(void *port)
 {
-	rte_free(port);
+	struct otx2_ssogws_cookie *gws_cookie = ssogws_get_cookie(port);
+	struct otx2_sso_evdev *dev;
+	int i;
+
+	if (!gws_cookie->configured)
+		goto free;
+
+	dev = sso_pmd_priv(gws_cookie->event_dev);
+	if (dev->dual_ws) {
+		struct otx2_ssogws_dual *ws = port;
+
+		for (i = 0; i < dev->nb_event_queues; i++) {
+			sso_port_link_modify((struct otx2_ssogws *)
+					&ws->ws_state[0], i, false);
+			sso_port_link_modify((struct otx2_ssogws *)
+					&ws->ws_state[1], i, false);
+		}
+		memset(ws, 0, sizeof(*ws));
+	} else {
+		struct otx2_ssogws *ws = port;
+
+		for (i = 0; i < dev->nb_event_queues; i++)
+			sso_port_link_modify(ws, i, false);
+		memset(ws, 0, sizeof(*ws));
+	}
+
+	memset(gws_cookie, 0, sizeof(*gws_cookie));
+
+free:
+	rte_free(gws_cookie);
 }
 
 static void
@@ -157,17 +728,134 @@ otx2_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
 	RTE_SET_USED(queue_id);
 }
 
+static void
+sso_restore_links(const struct rte_eventdev *event_dev)
+{
+	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+	uint16_t *links_map;
+	int i, j;
+
+	for (i = 0; i < dev->nb_event_ports; i++) {
+		links_map = event_dev->data->links_map;
+		/* Point links_map to this port specific area */
+		links_map += (i * RTE_EVENT_MAX_QUEUES_PER_DEV);
+		if (dev->dual_ws) {
+			struct otx2_ssogws_dual *ws;
+
+			ws = event_dev->data->ports[i];
+			for (j = 0; j < dev->nb_event_queues; j++) {
+				if (links_map[j] == 0xdead)
+					continue;
+				sso_port_link_modify((struct otx2_ssogws *)
+						&ws->ws_state[0], j, true);
+				sso_port_link_modify((struct otx2_ssogws *)
+						&ws->ws_state[1], j, true);
+				sso_func_trace("Restoring port %d queue %d "
+					       "link", i, j);
+			}
+		} else {
+			struct otx2_ssogws *ws;
+
+			ws = event_dev->data->ports[i];
+			for (j = 0; j < dev->nb_event_queues; j++) {
+				if (links_map[j] == 0xdead)
+					continue;
+				sso_port_link_modify(ws, j, true);
+				sso_func_trace("Restoring port %d queue %d "
+					       "link", i, j);
+			}
+		}
+	}
+}
+
 static void
 sso_set_port_ops(struct otx2_ssogws *ws, uintptr_t base)
 {
 	ws->tag_op		= base + SSOW_LF_GWS_TAG;
 	ws->wqp_op		= base + SSOW_LF_GWS_WQP;
 	ws->getwrk_op		= base + SSOW_LF_GWS_OP_GET_WORK;
-	ws->swtp_op		= base + SSOW_LF_GWS_SWTP;
+	ws->swtag_flush_op	= base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
 	ws->swtag_norm_op	= base + SSOW_LF_GWS_OP_SWTAG_NORM;
 	ws->swtag_desched_op	= base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
 }
 
+static int
+sso_configure_dual_ports(const struct rte_eventdev *event_dev)
+{
+	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+	struct otx2_mbox *mbox = dev->mbox;
+	uint8_t vws = 0;
+	uint8_t nb_lf;
+	int i, rc;
+
+	otx2_sso_dbg("Configuring event ports %d", dev->nb_event_ports);
+
+	nb_lf = dev->nb_event_ports * 2;
+	/* Ask AF to attach required LFs. */
+	rc = sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, true);
+	if (rc < 0) {
+		otx2_err("Failed to attach SSO GWS LF");
+		return -ENODEV;
+	}
+
+	if (sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, true) < 0) {
+		sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
+		otx2_err("Failed to init SSO GWS LF");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < dev->nb_event_ports; i++) {
+		struct otx2_ssogws_cookie *gws_cookie;
+		struct otx2_ssogws_dual *ws;
+		uintptr_t base;
+
+		if (event_dev->data->ports[i] != NULL) {
+			ws = event_dev->data->ports[i];
+		} else {
+			/* Allocate event port memory */
+			ws = rte_zmalloc_socket("otx2_sso_ws",
+						sizeof(struct otx2_ssogws_dual) +
+						RTE_CACHE_LINE_SIZE,
+						RTE_CACHE_LINE_SIZE,
+						event_dev->data->socket_id);
+			if (ws == NULL) {
+				otx2_err("Failed to alloc memory for port=%d",
+					 i);
+				rc = -ENOMEM;
+				break;
+			}
+
+			/* First cache line is reserved for cookie */
+			ws = (struct otx2_ssogws_dual *)
+				((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
+		}
+
+		ws->port = i;
+		base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | vws << 12);
+		sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[0], base);
+		ws->base[0] = base;
+		vws++;
+
+		base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | vws << 12);
+		sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[1], base);
+		ws->base[1] = base;
+		vws++;
+
+		gws_cookie = ssogws_get_cookie(ws);
+		gws_cookie->event_dev = event_dev;
+		gws_cookie->configured = 1;
+
+		event_dev->data->ports[i] = ws;
+	}
+
+	if (rc < 0) {
+		sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, false);
+		sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
+	}
+
+	return rc;
+}
+
 static int
 sso_configure_ports(const struct rte_eventdev *event_dev)
 {
@@ -193,30 +881,39 @@ sso_configure_ports(const struct rte_eventdev *event_dev)
 	}
 
 	for (i = 0; i < nb_lf; i++) {
+		struct otx2_ssogws_cookie *gws_cookie;
 		struct otx2_ssogws *ws;
 		uintptr_t base;
 
-		/* Free memory prior to re-allocation if needed */
 		if (event_dev->data->ports[i] != NULL) {
 			ws = event_dev->data->ports[i];
-			rte_free(ws);
-			ws = NULL;
-		}
+		} else {
+			/* Allocate event port memory */
+			ws = rte_zmalloc_socket("otx2_sso_ws",
+						sizeof(struct otx2_ssogws) +
+						RTE_CACHE_LINE_SIZE,
+						RTE_CACHE_LINE_SIZE,
+						event_dev->data->socket_id);
+			if (ws == NULL) {
+				otx2_err("Failed to alloc memory for port=%d",
+					 i);
+				rc = -ENOMEM;
+				break;
+			}
 
-		/* Allocate event port memory */
-		ws = rte_zmalloc_socket("otx2_sso_ws",
-					sizeof(struct otx2_ssogws),
-					RTE_CACHE_LINE_SIZE,
-					event_dev->data->socket_id);
-		if (ws == NULL) {
-			otx2_err("Failed to alloc memory for port=%d", i);
-			rc = -ENOMEM;
-			break;
+			/* First cache line is reserved for cookie */
+			ws = (struct otx2_ssogws *)
+				((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
 		}
 
 		ws->port = i;
 		base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | i << 12);
 		sso_set_port_ops(ws, base);
+		ws->base = base;
+
+		gws_cookie = ssogws_get_cookie(ws);
+		gws_cookie->event_dev = event_dev;
+		gws_cookie->configured = 1;
 
 		event_dev->data->ports[i] = ws;
 	}
@@ -287,7 +984,7 @@ sso_xaq_allocate(struct otx2_sso_evdev *dev)
 
 	dev->fc_iova = mz->iova;
 	dev->fc_mem = mz->addr;
-
+	*dev->fc_mem = 0;
 	aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem + OTX2_ALIGN);
 	memset(aura, 0, sizeof(struct npa_aura_s));
 
@@ -299,6 +996,9 @@ sso_xaq_allocate(struct otx2_sso_evdev *dev)
 	xaq_cnt = dev->nb_event_queues * OTX2_SSO_XAQ_CACHE_CNT;
 	if (dev->xae_cnt)
 		xaq_cnt += dev->xae_cnt / dev->xae_waes;
+	else if (dev->adptr_xae_cnt)
+		xaq_cnt += (dev->adptr_xae_cnt / dev->xae_waes) +
+			(OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
 	else
 		xaq_cnt += (dev->iue / dev->xae_waes) +
 			(OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
@@ -360,6 +1060,19 @@ sso_ggrp_alloc_xaq(struct otx2_sso_evdev *dev)
 	return otx2_mbox_process(mbox);
 }
 
+static int
+sso_ggrp_free_xaq(struct otx2_sso_evdev *dev)
+{
+	struct otx2_mbox *mbox = dev->mbox;
+	struct sso_release_xaq *req;
+
+	otx2_sso_dbg("Freeing XAQ for GGRPs");
+	req = otx2_mbox_alloc_msg_sso_hw_release_xaq_aura(mbox);
+	req->hwgrps = dev->nb_event_queues;
+
+	return otx2_mbox_process(mbox);
+}
+
 static void
 sso_lf_teardown(struct otx2_sso_evdev *dev,
 		enum otx2_sso_lf_type lf_type)
@@ -372,6 +1085,7 @@ sso_lf_teardown(struct otx2_sso_evdev *dev,
 		break;
 	case SSO_LF_GWS:
 		nb_lf = dev->nb_event_ports;
+		nb_lf *= dev->dual_ws ? 2 : 1;
 		break;
 	default:
 		return;
@@ -422,6 +1136,9 @@ otx2_sso_configure(const struct rte_eventdev *event_dev)
 		return -EINVAL;
 	}
 
+	if (dev->configured)
+		sso_unregister_irqs(event_dev);
+
 	if (dev->nb_event_queues) {
 		/* Finit any previous queues. */
 		sso_lf_teardown(dev, SSO_LF_GGRP);
@@ -434,7 +1151,12 @@ otx2_sso_configure(const struct rte_eventdev *event_dev)
 	dev->nb_event_queues = conf->nb_event_queues;
 	dev->nb_event_ports = conf->nb_event_ports;
 
-	if (sso_configure_ports(event_dev)) {
+	if (dev->dual_ws)
+		rc = sso_configure_dual_ports(event_dev);
+	else
+		rc = sso_configure_ports(event_dev);
+
+	if (rc < 0) {
 		otx2_err("Failed to configure event ports");
 		return -ENODEV;
 	}
@@ -450,12 +1172,26 @@ otx2_sso_configure(const struct rte_eventdev *event_dev)
 		goto teardown_hwggrp;
 	}
 
+	/* Restore any prior port-queue mapping. */
+	sso_restore_links(event_dev);
 	rc = sso_ggrp_alloc_xaq(dev);
 	if (rc < 0) {
 		otx2_err("Failed to alloc xaq to ggrp %d", rc);
 		goto teardown_hwggrp;
 	}
 
+	rc = sso_get_msix_offsets(event_dev);
+	if (rc < 0) {
+		otx2_err("Failed to get msix offsets %d", rc);
+		goto teardown_hwggrp;
+	}
+
+	rc = sso_register_irqs(event_dev);
+	if (rc < 0) {
+		otx2_err("Failed to register irq %d", rc);
+		goto teardown_hwggrp;
+	}
+
+	dev->configured = 1;
 	rte_mb();
@@ -550,16 +1286,308 @@ otx2_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
 
 	/* Set get_work timeout for HWS */
 	val = NSEC2USEC(dev->deq_tmo_ns) - 1;
 
-	struct otx2_ssogws *ws = event_dev->data->ports[port_id];
+	if (dev->dual_ws) {
+		struct otx2_ssogws_dual *ws = event_dev->data->ports[port_id];
+
+		rte_memcpy(ws->grps_base, grps_base,
+			   sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
+		ws->fc_mem = dev->fc_mem;
+		ws->xaq_lmt = dev->xaq_lmt;
+		ws->tstamp = dev->tstamp;
+		otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR(
+			     ws->ws_state[0].getwrk_op) + SSOW_LF_GWS_NW_TIM);
+		otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR(
+			     ws->ws_state[1].getwrk_op) + SSOW_LF_GWS_NW_TIM);
+	} else {
+		struct otx2_ssogws *ws = event_dev->data->ports[port_id];
+		uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
+
+		rte_memcpy(ws->grps_base, grps_base,
+			   sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
+		ws->fc_mem = dev->fc_mem;
+		ws->xaq_lmt = dev->xaq_lmt;
+		ws->tstamp = dev->tstamp;
+		otx2_write64(val, base + SSOW_LF_GWS_NW_TIM);
+	}
+
+	otx2_sso_dbg("Port=%d ws=%p", port_id, event_dev->data->ports[port_id]);
+
+	return 0;
+}
+
+static int
+otx2_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
+		       uint64_t *tmo_ticks)
+{
+	RTE_SET_USED(event_dev);
+	*tmo_ticks = NSEC2TICK(ns, rte_get_timer_hz());
+
+	return 0;
+}
+
+static void
+ssogws_dump(struct otx2_ssogws *ws, FILE *f)
+{
 	uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
 
-	rte_memcpy(ws->grps_base, grps_base,
-		   sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
-	ws->fc_mem = dev->fc_mem;
-	ws->xaq_lmt = dev->xaq_lmt;
-	otx2_write64(val, base + SSOW_LF_GWS_NW_TIM);
+	fprintf(f, "SSOW_LF_GWS Base addr 0x%" PRIx64 "\n", (uint64_t)base);
+	fprintf(f, "SSOW_LF_GWS_LINKS 0x%" PRIx64 "\n",
+		otx2_read64(base + SSOW_LF_GWS_LINKS));
+	fprintf(f, "SSOW_LF_GWS_PENDWQP 0x%" PRIx64 "\n",
+		otx2_read64(base + SSOW_LF_GWS_PENDWQP));
+	fprintf(f, "SSOW_LF_GWS_PENDSTATE 0x%" PRIx64 "\n",
+		otx2_read64(base + SSOW_LF_GWS_PENDSTATE));
+	fprintf(f, "SSOW_LF_GWS_NW_TIM 0x%" PRIx64 "\n",
+		otx2_read64(base + SSOW_LF_GWS_NW_TIM));
+	fprintf(f, "SSOW_LF_GWS_TAG 0x%" PRIx64 "\n",
+		otx2_read64(base + SSOW_LF_GWS_TAG));
+	fprintf(f, "SSOW_LF_GWS_WQP 0x%" PRIx64 "\n",
+		otx2_read64(base + SSOW_LF_GWS_TAG));
+	fprintf(f, "SSOW_LF_GWS_SWTP 0x%" PRIx64 "\n",
+		otx2_read64(base + SSOW_LF_GWS_SWTP));
+	fprintf(f, "SSOW_LF_GWS_PENDTAG 0x%" PRIx64 "\n",
+		otx2_read64(base + SSOW_LF_GWS_PENDTAG));
+}
 
-	otx2_sso_dbg("Port=%d ws=%p", port_id, event_dev->data->ports[port_id]);
+static void
+ssoggrp_dump(uintptr_t base, FILE *f)
+{
+	fprintf(f, "SSO_LF_GGRP Base addr 0x%" PRIx64 "\n", (uint64_t)base);
+	fprintf(f, "SSO_LF_GGRP_QCTL 0x%" PRIx64 "\n",
+		otx2_read64(base + SSO_LF_GGRP_QCTL));
+	fprintf(f, "SSO_LF_GGRP_XAQ_CNT 0x%" PRIx64 "\n",
+		otx2_read64(base + SSO_LF_GGRP_XAQ_CNT));
+	fprintf(f, "SSO_LF_GGRP_INT_THR 0x%" PRIx64 "\n",
+		otx2_read64(base + SSO_LF_GGRP_INT_THR));
+	fprintf(f, "SSO_LF_GGRP_INT_CNT 0x%" PRIX64 "\n",
+		otx2_read64(base + SSO_LF_GGRP_INT_CNT));
+	fprintf(f, "SSO_LF_GGRP_AQ_CNT 0x%" PRIX64 "\n",
+		otx2_read64(base + SSO_LF_GGRP_AQ_CNT));
+	fprintf(f, "SSO_LF_GGRP_AQ_THR 0x%" PRIX64 "\n",
+		otx2_read64(base + SSO_LF_GGRP_AQ_THR));
+	fprintf(f, "SSO_LF_GGRP_MISC_CNT 0x%" PRIx64 "\n",
+		otx2_read64(base + SSO_LF_GGRP_MISC_CNT));
+}
+
+static void
+otx2_sso_dump(struct rte_eventdev *event_dev, FILE *f)
+{
+	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+	uint8_t queue;
+	uint8_t port;
+
+	fprintf(f, "[%s] SSO running in [%s] mode\n", __func__, dev->dual_ws ?
+		"dual_ws" : "single_ws");
+	/* Dump SSOW registers */
+	for (port = 0; port < dev->nb_event_ports; port++) {
+		if (dev->dual_ws) {
+			struct otx2_ssogws_dual *ws =
+				event_dev->data->ports[port];
+
+			fprintf(f, "[%s] SSO dual workslot[%d] vws[%d] dump\n",
+				__func__, port, 0);
+			ssogws_dump((struct otx2_ssogws *)&ws->ws_state[0], f);
+			fprintf(f, "[%s]SSO dual workslot[%d] vws[%d] dump\n",
+				__func__, port, 1);
+			ssogws_dump((struct otx2_ssogws *)&ws->ws_state[1], f);
+		} else {
+			fprintf(f, "[%s]SSO single workslot[%d] dump\n",
+				__func__, port);
+			ssogws_dump(event_dev->data->ports[port], f);
+		}
+	}
+
+	/* Dump SSO registers */
+	for (queue = 0; queue < dev->nb_event_queues; queue++) {
+		fprintf(f, "[%s]SSO group[%d] dump\n", __func__, queue);
+		if (dev->dual_ws) {
+			struct otx2_ssogws_dual *ws = event_dev->data->ports[0];
+			ssoggrp_dump(ws->grps_base[queue], f);
+		} else {
+			struct otx2_ssogws *ws = event_dev->data->ports[0];
+			ssoggrp_dump(ws->grps_base[queue], f);
+		}
+	}
+}
+
+static void
+otx2_handle_event(void *arg, struct rte_event event)
+{
+	struct rte_eventdev *event_dev = arg;
+
+	if (event_dev->dev_ops->dev_stop_flush != NULL)
+		event_dev->dev_ops->dev_stop_flush(event_dev->data->dev_id,
+				event, event_dev->data->dev_stop_flush_arg);
+}
+
+static void
+sso_qos_cfg(struct rte_eventdev *event_dev)
+{
+	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+	struct sso_grp_qos_cfg *req;
+	uint16_t i;
+
+	for (i = 0; i < dev->qos_queue_cnt; i++) {
+		uint8_t xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt;
+		uint8_t iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt;
+		uint8_t taq_prcnt = dev->qos_parse_data[i].taq_prcnt;
+
+		if (dev->qos_parse_data[i].queue >= dev->nb_event_queues)
+			continue;
+
+		req = otx2_mbox_alloc_msg_sso_grp_qos_config(dev->mbox);
+		req->xaq_limit = (dev->nb_xaq_cfg *
+				  (xaq_prcnt ? xaq_prcnt : 100)) / 100;
+		req->taq_thr = (SSO_HWGRP_IAQ_MAX_THR_MASK *
+				(iaq_prcnt ? iaq_prcnt : 100)) / 100;
+		req->iaq_thr = (SSO_HWGRP_TAQ_MAX_THR_MASK *
+				(taq_prcnt ? taq_prcnt : 100)) / 100;
+	}
+
+	if (dev->qos_queue_cnt)
+		otx2_mbox_process(dev->mbox);
+}
+
+static void
+sso_cleanup(struct rte_eventdev *event_dev, uint8_t enable)
+{
+	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+	uint16_t i;
+
+	for (i = 0; i < dev->nb_event_ports; i++) {
+		if (dev->dual_ws) {
+			struct otx2_ssogws_dual *ws;
+
+			ws = event_dev->data->ports[i];
+			ssogws_reset((struct otx2_ssogws *)&ws->ws_state[0]);
+			ssogws_reset((struct otx2_ssogws *)&ws->ws_state[1]);
+			ws->swtag_req = 0;
+			ws->vws = 0;
+			ws->fc_mem = dev->fc_mem;
+			ws->xaq_lmt = dev->xaq_lmt;
+		} else {
+			struct otx2_ssogws *ws;
+
+			ws = event_dev->data->ports[i];
+			ssogws_reset(ws);
+			ws->swtag_req = 0;
+			ws->fc_mem = dev->fc_mem;
+			ws->xaq_lmt = dev->xaq_lmt;
+		}
+	}
+
+	rte_mb();
+	if (dev->dual_ws) {
+		struct otx2_ssogws_dual *ws = event_dev->data->ports[0];
+		struct otx2_ssogws temp_ws;
+
+		memcpy(&temp_ws, &ws->ws_state[0],
+		       sizeof(struct otx2_ssogws_state));
+		for (i = 0; i < dev->nb_event_queues; i++) {
+			/* Consume all the events through HWS0 */
+			ssogws_flush_events(&temp_ws, i, ws->grps_base[i],
+					    otx2_handle_event, event_dev);
+			/* Enable/Disable SSO GGRP */
+			otx2_write64(enable, ws->grps_base[i] +
+				     SSO_LF_GGRP_QCTL);
+		}
+	} else {
+		struct otx2_ssogws *ws = event_dev->data->ports[0];
+
+		for (i = 0; i < dev->nb_event_queues; i++) {
+			/* Consume all the events through HWS0 */
+			ssogws_flush_events(ws, i, ws->grps_base[i],
+					    otx2_handle_event, event_dev);
+			/* Enable/Disable SSO GGRP */
+			otx2_write64(enable, ws->grps_base[i] +
+				     SSO_LF_GGRP_QCTL);
+		}
+	}
+
+	/* reset SSO GWS cache */
+	otx2_mbox_alloc_msg_sso_ws_cache_inv(dev->mbox);
+	otx2_mbox_process(dev->mbox);
+}
+
+int
+sso_xae_reconfigure(struct rte_eventdev *event_dev)
+{
+	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+	int rc = 0;
+
+	if (event_dev->data->dev_started)
+		sso_cleanup(event_dev, 0);
+
+	rc = sso_ggrp_free_xaq(dev);
+	if (rc < 0) {
+		otx2_err("Failed to free XAQ\n");
+		return rc;
+	}
+
+	rte_mempool_free(dev->xaq_pool);
+	dev->xaq_pool = NULL;
+	rc = sso_xaq_allocate(dev);
+	if (rc < 0) {
+		otx2_err("Failed to alloc xaq pool %d", rc);
+		return rc;
+	}
+	rc = sso_ggrp_alloc_xaq(dev);
+	if (rc < 0) {
+		otx2_err("Failed to alloc xaq to ggrp %d", rc);
+		return rc;
+	}
+
+	rte_mb();
+	if (event_dev->data->dev_started)
+		sso_cleanup(event_dev, 1);
+
+	return 0;
+}
+
+static int
+otx2_sso_start(struct rte_eventdev *event_dev)
+{
+	sso_func_trace();
+	sso_qos_cfg(event_dev);
+	sso_cleanup(event_dev, 1);
+	sso_fastpath_fns_set(event_dev);
+
+	return 0;
+}
+
+static void
+otx2_sso_stop(struct rte_eventdev *event_dev)
+{
+	sso_func_trace();
+	sso_cleanup(event_dev, 0);
+	rte_mb();
+}
+
+static int
+otx2_sso_close(struct rte_eventdev *event_dev)
+{
+	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
+	uint16_t i;
+
+	if (!dev->configured)
+		return 0;
+
+	sso_unregister_irqs(event_dev);
+
+	for (i = 0; i < dev->nb_event_queues; i++)
+		all_queues[i] = i;
+
+	for (i = 0; i < dev->nb_event_ports; i++)
+		otx2_sso_port_unlink(event_dev, event_dev->data->ports[i],
+				     all_queues, dev->nb_event_queues);
+
+	sso_lf_teardown(dev, SSO_LF_GGRP);
+	sso_lf_teardown(dev, SSO_LF_GWS);
+	dev->nb_event_ports = 0;
+	dev->nb_event_queues = 0;
+	rte_mempool_free(dev->xaq_pool);
+	rte_memzone_free(rte_memzone_lookup(OTX2_SSO_FC_NAME));
 
 	return 0;
 }
@@ -574,14 +1602,122 @@ static struct rte_eventdev_ops otx2_sso_ops = {
 	.port_def_conf = otx2_sso_port_def_conf,
 	.port_setup = otx2_sso_port_setup,
 	.port_release = otx2_sso_port_release,
+	.port_link = otx2_sso_port_link,
+	.port_unlink = otx2_sso_port_unlink,
+	.timeout_ticks = otx2_sso_timeout_ticks,
+
+	.eth_rx_adapter_caps_get = otx2_sso_rx_adapter_caps_get,
+	.eth_rx_adapter_queue_add = otx2_sso_rx_adapter_queue_add,
+	.eth_rx_adapter_queue_del = otx2_sso_rx_adapter_queue_del,
+	.eth_rx_adapter_start = otx2_sso_rx_adapter_start,
+	.eth_rx_adapter_stop = otx2_sso_rx_adapter_stop,
+
+	.eth_tx_adapter_caps_get = otx2_sso_tx_adapter_caps_get,
+	.eth_tx_adapter_queue_add = otx2_sso_tx_adapter_queue_add,
+	.eth_tx_adapter_queue_del = otx2_sso_tx_adapter_queue_del,
+
+	.timer_adapter_caps_get = otx2_tim_caps_get,
+
+	.crypto_adapter_caps_get = otx2_ca_caps_get,
+	.crypto_adapter_queue_pair_add = otx2_ca_qp_add,
+	.crypto_adapter_queue_pair_del = otx2_ca_qp_del,
+
+	.xstats_get = otx2_sso_xstats_get,
+	.xstats_reset = otx2_sso_xstats_reset,
+	.xstats_get_names = otx2_sso_xstats_get_names,
+
+	.dump = otx2_sso_dump,
+	.dev_start = otx2_sso_start,
+	.dev_stop = otx2_sso_stop,
+	.dev_close = otx2_sso_close,
+	.dev_selftest = otx2_sso_selftest,
 };
 
 #define OTX2_SSO_XAE_CNT	"xae_cnt"
+#define OTX2_SSO_SINGLE_WS	"single_ws"
+#define OTX2_SSO_GGRP_QOS	"qos"
+
+static void
+parse_queue_param(char *value, void *opaque)
+{
+	struct otx2_sso_qos queue_qos = {0};
+	uint8_t *val = (uint8_t *)&queue_qos;
+	struct otx2_sso_evdev *dev = opaque;
+	char *tok = strtok(value, "-");
+	struct otx2_sso_qos *old_ptr;
+
+	if (!strlen(value))
+		return;
+
+	while (tok != NULL) {
+		*val = atoi(tok);
+		tok = strtok(NULL, "-");
+		val++;
+	}
+
+	if (val != (&queue_qos.iaq_prcnt + 1)) {
+		otx2_err("Invalid QoS parameter expected [Qx-XAQ-TAQ-IAQ]");
+		return;
+	}
+
+	dev->qos_queue_cnt++;
+	old_ptr = dev->qos_parse_data;
+	dev->qos_parse_data = rte_realloc(dev->qos_parse_data,
+					  sizeof(struct otx2_sso_qos) *
+					  dev->qos_queue_cnt, 0);
+	if (dev->qos_parse_data == NULL) {
+		dev->qos_parse_data = old_ptr;
+		dev->qos_queue_cnt--;
+		return;
+	}
+	dev->qos_parse_data[dev->qos_queue_cnt - 1] = queue_qos;
+}
+
+static void
+parse_qos_list(const char *value, void *opaque)
+{
+	char *s = strdup(value);
+	char *start = NULL;
+	char *end = NULL;
+	char *f = s;
+
+	while (*s) {
+		if (*s == '[')
+			start = s;
+		else if (*s == ']')
+			end = s;
+
+		if (start && start < end) {
+			*end = 0;
+			parse_queue_param(start + 1, opaque);
+			s = end;
+			start = end;
+		}
+		s++;
+	}
+
+	free(f);
+}
+
+static int
+parse_sso_kvargs_dict(const char *key, const char *value, void *opaque)
+{
+	RTE_SET_USED(key);
+
+	/* Dict format [Qx-XAQ-TAQ-IAQ][Qz-XAQ-TAQ-IAQ] use '-' cause ','
+	 * isn't allowed. Everything is expressed in percentages, 0 represents
+	 * default.
+ */ + parse_qos_list(value, opaque); + + return 0; +} static void sso_parse_devargs(struct otx2_sso_evdev *dev, struct rte_devargs *devargs) { struct rte_kvargs *kvlist; + uint8_t single_ws = 0; if (devargs == NULL) return; @@ -591,7 +1727,12 @@ sso_parse_devargs(struct otx2_sso_evdev *dev, struct rte_devargs *devargs) rte_kvargs_process(kvlist, OTX2_SSO_XAE_CNT, &parse_kvargs_value, &dev->xae_cnt); - + rte_kvargs_process(kvlist, OTX2_SSO_SINGLE_WS, &parse_kvargs_flag, + &single_ws); + rte_kvargs_process(kvlist, OTX2_SSO_GGRP_QOS, &parse_sso_kvargs_dict, + dev); + otx2_parse_common_devargs(kvlist); + dev->dual_ws = !single_ws; rte_kvargs_free(kvlist); } @@ -621,7 +1762,7 @@ static const struct rte_pci_id pci_sso_map[] = { static struct rte_pci_driver pci_sso = { .id_table = pci_sso_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA, .probe = otx2_sso_probe, .remove = otx2_sso_remove, }; @@ -636,8 +1777,10 @@ otx2_sso_init(struct rte_eventdev *event_dev) event_dev->dev_ops = &otx2_sso_ops; /* For secondary processes, the primary has done all the work */ - if (rte_eal_process_type() != RTE_PROC_PRIMARY) + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + sso_fastpath_fns_set(event_dev); return 0; + } dev = sso_pmd_priv(event_dev); @@ -684,13 +1827,21 @@ otx2_sso_init(struct rte_eventdev *event_dev) goto otx2_npa_lf_uninit; } + dev->dual_ws = 1; sso_parse_devargs(dev, pci_dev->device.devargs); + if (dev->dual_ws) { + otx2_sso_dbg("Using dual workslot mode"); + dev->max_event_ports = dev->max_event_ports / 2; + } else { + otx2_sso_dbg("Using single workslot mode"); + } otx2_sso_pf_func_set(dev->pf_func); otx2_sso_dbg("Initializing %s max_queues=%d max_ports=%d", event_dev->data->name, dev->max_event_queues, dev->max_event_ports); + otx2_tim_init(pci_dev, (struct otx2_dev *)dev); return 0; @@ -726,6 +1877,7 @@ dev_fini: return -EAGAIN; } + otx2_tim_fini(); otx2_dev_fini(pci_dev, dev); return 0; @@ -734,4 +1886,7 @@ dev_fini: RTE_PMD_REGISTER_PCI(event_octeontx2, pci_sso); RTE_PMD_REGISTER_PCI_TABLE(event_octeontx2, pci_sso_map); RTE_PMD_REGISTER_KMOD_DEP(event_octeontx2, "vfio-pci"); -RTE_PMD_REGISTER_PARAM_STRING(event_octeontx2, OTX2_SSO_XAE_CNT "="); +RTE_PMD_REGISTER_PARAM_STRING(event_octeontx2, OTX2_SSO_XAE_CNT "=" + OTX2_SSO_SINGLE_WS "=1" + OTX2_SSO_GGRP_QOS "=" + OTX2_NPA_LOCK_MASK "=<1-65535>");