X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fevent%2Focteontx2%2Fotx2_evdev.c;h=d20213d78b2e9a5b1627769c5086e6c7ca9eac70;hb=a2c6d3f34f9065a75fbbe70699610388e711ac6c;hp=d6ddee1cd000f59a8874e68a228394f29ffa68d8;hpb=5d6c50ffec1b2ae34b03a2a6bff72e1be3e2fbe7;p=dpdk.git diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c index d6ddee1cd0..d20213d78b 100644 --- a/drivers/event/octeontx2/otx2_evdev.c +++ b/drivers/event/octeontx2/otx2_evdev.c @@ -15,6 +15,7 @@ #include "otx2_evdev_stats.h" #include "otx2_evdev.h" #include "otx2_irq.h" +#include "otx2_tim_evdev.h" static inline int sso_get_msix_offsets(const struct rte_eventdev *event_dev) @@ -42,17 +43,273 @@ void sso_fastpath_fns_set(struct rte_eventdev *event_dev) { struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev); + /* Single WS modes */ + const event_dequeue_t ssogws_deq[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_ ##name, +SSO_RX_ADPTR_ENQ_FASTPATH_FUNC +#undef R + }; + + const event_dequeue_burst_t ssogws_deq_burst[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_burst_ ##name, +SSO_RX_ADPTR_ENQ_FASTPATH_FUNC +#undef R + }; + + const event_dequeue_t ssogws_deq_timeout[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_timeout_ ##name, +SSO_RX_ADPTR_ENQ_FASTPATH_FUNC +#undef R + }; + + const event_dequeue_burst_t + ssogws_deq_timeout_burst[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = \ + otx2_ssogws_deq_timeout_burst_ ##name, +SSO_RX_ADPTR_ENQ_FASTPATH_FUNC +#undef R + }; + + const event_dequeue_t ssogws_deq_seg[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_ ##name, +SSO_RX_ADPTR_ENQ_FASTPATH_FUNC +#undef R + }; + + const event_dequeue_burst_t + ssogws_deq_seg_burst[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = \ + otx2_ssogws_deq_seg_burst_ ##name, +SSO_RX_ADPTR_ENQ_FASTPATH_FUNC +#undef R + }; + + const event_dequeue_t ssogws_deq_seg_timeout[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = \ + otx2_ssogws_deq_seg_timeout_ ##name, +SSO_RX_ADPTR_ENQ_FASTPATH_FUNC +#undef R + }; + + const event_dequeue_burst_t + ssogws_deq_seg_timeout_burst[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = \ + otx2_ssogws_deq_seg_timeout_burst_ ##name, +SSO_RX_ADPTR_ENQ_FASTPATH_FUNC +#undef R + }; + + + /* Dual WS modes */ + const event_dequeue_t ssogws_dual_deq[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_ ##name, +SSO_RX_ADPTR_ENQ_FASTPATH_FUNC +#undef R + }; + + const event_dequeue_burst_t + ssogws_dual_deq_burst[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = \ + otx2_ssogws_dual_deq_burst_ ##name, +SSO_RX_ADPTR_ENQ_FASTPATH_FUNC +#undef R + }; + + const event_dequeue_t ssogws_dual_deq_timeout[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = \ + otx2_ssogws_dual_deq_timeout_ ##name, 
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC +#undef R + }; + + const event_dequeue_burst_t + ssogws_dual_deq_timeout_burst[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = \ + otx2_ssogws_dual_deq_timeout_burst_ ##name, +SSO_RX_ADPTR_ENQ_FASTPATH_FUNC +#undef R + }; + + const event_dequeue_t ssogws_dual_deq_seg[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_seg_ ##name, +SSO_RX_ADPTR_ENQ_FASTPATH_FUNC +#undef R + }; + + const event_dequeue_burst_t + ssogws_dual_deq_seg_burst[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = \ + otx2_ssogws_dual_deq_seg_burst_ ##name, +SSO_RX_ADPTR_ENQ_FASTPATH_FUNC +#undef R + }; + + const event_dequeue_t + ssogws_dual_deq_seg_timeout[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = \ + otx2_ssogws_dual_deq_seg_timeout_ ##name, +SSO_RX_ADPTR_ENQ_FASTPATH_FUNC +#undef R + }; + + const event_dequeue_burst_t + ssogws_dual_deq_seg_timeout_burst[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = \ + otx2_ssogws_dual_deq_seg_timeout_burst_ ##name, +SSO_RX_ADPTR_ENQ_FASTPATH_FUNC +#undef R + }; + + /* Tx modes */ + const event_tx_adapter_enqueue + ssogws_tx_adptr_enq[2][2][2][2][2][2][2] = { +#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = \ + otx2_ssogws_tx_adptr_enq_ ## name, +SSO_TX_ADPTR_ENQ_FASTPATH_FUNC +#undef T + }; + + const event_tx_adapter_enqueue + ssogws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = { +#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = \ + otx2_ssogws_tx_adptr_enq_seg_ ## name, +SSO_TX_ADPTR_ENQ_FASTPATH_FUNC +#undef T + }; + + const event_tx_adapter_enqueue + ssogws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = { +#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = \ + otx2_ssogws_dual_tx_adptr_enq_ ## name, +SSO_TX_ADPTR_ENQ_FASTPATH_FUNC +#undef T + }; + + const event_tx_adapter_enqueue + ssogws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = { +#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = \ + otx2_ssogws_dual_tx_adptr_enq_seg_ ## name, +SSO_TX_ADPTR_ENQ_FASTPATH_FUNC +#undef T + }; event_dev->enqueue = otx2_ssogws_enq; event_dev->enqueue_burst = otx2_ssogws_enq_burst; event_dev->enqueue_new_burst = otx2_ssogws_enq_new_burst; event_dev->enqueue_forward_burst = otx2_ssogws_enq_fwd_burst; + if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) { + event_dev->dequeue = ssogws_deq_seg + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)]; + event_dev->dequeue_burst = ssogws_deq_seg_burst + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)]; + if (dev->is_timeout_deq) { 
+ event_dev->dequeue = ssogws_deq_seg_timeout + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)]; + event_dev->dequeue_burst = + ssogws_deq_seg_timeout_burst + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)]; + } + } else { + event_dev->dequeue = ssogws_deq + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)]; + event_dev->dequeue_burst = ssogws_deq_burst + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)]; + if (dev->is_timeout_deq) { + event_dev->dequeue = ssogws_deq_timeout + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)]; + event_dev->dequeue_burst = + ssogws_deq_timeout_burst + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)]; + } + } - event_dev->dequeue = otx2_ssogws_deq; - event_dev->dequeue_burst = otx2_ssogws_deq_burst; - if (dev->is_timeout_deq) { - event_dev->dequeue = otx2_ssogws_deq_timeout; - event_dev->dequeue_burst = otx2_ssogws_deq_timeout_burst; + if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) { + /* [SEC] [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */ + event_dev->txa_enqueue = ssogws_tx_adptr_enq_seg + [!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)] + [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)] + [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)] + [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)] + [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)] + [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)] + [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)]; + } else { + event_dev->txa_enqueue = ssogws_tx_adptr_enq + [!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)] + [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)] + [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)] + [!!(dev->tx_offloads & 
NIX_TX_OFFLOAD_MBUF_NOFF_F)] + [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)] + [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)] + [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)]; } if (dev->dual_ws) { @@ -62,14 +319,163 @@ sso_fastpath_fns_set(struct rte_eventdev *event_dev) otx2_ssogws_dual_enq_new_burst; event_dev->enqueue_forward_burst = otx2_ssogws_dual_enq_fwd_burst; - event_dev->dequeue = otx2_ssogws_dual_deq; - event_dev->dequeue_burst = otx2_ssogws_dual_deq_burst; - if (dev->is_timeout_deq) { - event_dev->dequeue = otx2_ssogws_dual_deq_timeout; - event_dev->dequeue_burst = - otx2_ssogws_dual_deq_timeout_burst; + + if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) { + event_dev->dequeue = ssogws_dual_deq_seg + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_SECURITY_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_TSTAMP_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_MARK_UPDATE_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_VLAN_STRIP_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_CHECKSUM_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)]; + event_dev->dequeue_burst = ssogws_dual_deq_seg_burst + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_SECURITY_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_MARK_UPDATE_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_VLAN_STRIP_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_CHECKSUM_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)]; + if (dev->is_timeout_deq) { + event_dev->dequeue = + ssogws_dual_deq_seg_timeout + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_SECURITY_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_TSTAMP_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_MARK_UPDATE_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_VLAN_STRIP_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_CHECKSUM_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_PTYPE_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_RSS_F)]; + event_dev->dequeue_burst = + ssogws_dual_deq_seg_timeout_burst + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_SECURITY_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_TSTAMP_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_MARK_UPDATE_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_VLAN_STRIP_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_CHECKSUM_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_PTYPE_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_RSS_F)]; + } + } else { + event_dev->dequeue = ssogws_dual_deq + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_SECURITY_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_TSTAMP_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_MARK_UPDATE_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_VLAN_STRIP_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_CHECKSUM_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)]; + event_dev->dequeue_burst = ssogws_dual_deq_burst + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_SECURITY_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_TSTAMP_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_MARK_UPDATE_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_VLAN_STRIP_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_CHECKSUM_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)] + [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)]; + if (dev->is_timeout_deq) { + event_dev->dequeue = + ssogws_dual_deq_timeout + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_SECURITY_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_TSTAMP_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_MARK_UPDATE_F)] 
+ [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_VLAN_STRIP_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_CHECKSUM_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_PTYPE_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_RSS_F)]; + event_dev->dequeue_burst = + ssogws_dual_deq_timeout_burst + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_SECURITY_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_TSTAMP_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_MARK_UPDATE_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_VLAN_STRIP_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_CHECKSUM_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_PTYPE_F)] + [!!(dev->rx_offloads & + NIX_RX_OFFLOAD_RSS_F)]; + } + } + + if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) { + /* [SEC] [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */ + event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq_seg + [!!(dev->tx_offloads & + NIX_TX_OFFLOAD_SECURITY_F)] + [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)] + [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)] + [!!(dev->tx_offloads & + NIX_TX_OFFLOAD_MBUF_NOFF_F)] + [!!(dev->tx_offloads & + NIX_TX_OFFLOAD_VLAN_QINQ_F)] + [!!(dev->tx_offloads & + NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)] + [!!(dev->tx_offloads & + NIX_TX_OFFLOAD_L3_L4_CSUM_F)]; + } else { + event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq + [!!(dev->tx_offloads & + NIX_TX_OFFLOAD_SECURITY_F)] + [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)] + [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)] + [!!(dev->tx_offloads & + NIX_TX_OFFLOAD_MBUF_NOFF_F)] + [!!(dev->tx_offloads & + NIX_TX_OFFLOAD_VLAN_QINQ_F)] + [!!(dev->tx_offloads & + NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)] + [!!(dev->tx_offloads & + NIX_TX_OFFLOAD_L3_L4_CSUM_F)]; } } + + event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue; rte_mb(); } @@ -528,6 +934,9 @@ sso_xaq_allocate(struct otx2_sso_evdev *dev) xaq_cnt = dev->nb_event_queues * OTX2_SSO_XAQ_CACHE_CNT; if (dev->xae_cnt) xaq_cnt += dev->xae_cnt / dev->xae_waes; + else if (dev->adptr_xae_cnt) + xaq_cnt += (dev->adptr_xae_cnt / dev->xae_waes) + + (OTX2_SSO_XAQ_SLACK * dev->nb_event_queues); else xaq_cnt += (dev->iue / dev->xae_waes) + (OTX2_SSO_XAQ_SLACK * dev->nb_event_queues); @@ -809,6 +1218,7 @@ otx2_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id, sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP); ws->fc_mem = dev->fc_mem; ws->xaq_lmt = dev->xaq_lmt; + ws->tstamp = dev->tstamp; otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR( ws->ws_state[0].getwrk_op) + SSOW_LF_GWS_NW_TIM); otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR( @@ -821,6 +1231,7 @@ otx2_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id, sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP); ws->fc_mem = dev->fc_mem; ws->xaq_lmt = dev->xaq_lmt; + ws->tstamp = dev->tstamp; otx2_write64(val, base + SSOW_LF_GWS_NW_TIM); } @@ -934,6 +1345,34 @@ otx2_handle_event(void *arg, struct rte_event event) event, event_dev->data->dev_stop_flush_arg); } +static void +sso_qos_cfg(struct rte_eventdev *event_dev) +{ + struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev); + struct sso_grp_qos_cfg *req; + uint16_t i; + + for (i = 0; i < dev->qos_queue_cnt; i++) { + uint8_t xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt; + uint8_t iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt; + uint8_t taq_prcnt = dev->qos_parse_data[i].taq_prcnt; + + if (dev->qos_parse_data[i].queue >= dev->nb_event_queues) + continue; + + req = otx2_mbox_alloc_msg_sso_grp_qos_config(dev->mbox); + req->xaq_limit = (dev->nb_xaq_cfg * + (xaq_prcnt ? xaq_prcnt : 100)) / 100; + req->taq_thr = (SSO_HWGRP_IAQ_MAX_THR_MASK * + (iaq_prcnt ? 
iaq_prcnt : 100)) / 100; + req->iaq_thr = (SSO_HWGRP_TAQ_MAX_THR_MASK * + (taq_prcnt ? taq_prcnt : 100)) / 100; + } + + if (dev->qos_queue_cnt) + otx2_mbox_process(dev->mbox); +} + static void sso_cleanup(struct rte_eventdev *event_dev, uint8_t enable) { @@ -1001,16 +1440,87 @@ sso_cleanup(struct rte_eventdev *event_dev, uint8_t enable) otx2_mbox_process(dev->mbox); } +int +sso_xae_reconfigure(struct rte_eventdev *event_dev) +{ + struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev); + struct rte_mempool *prev_xaq_pool; + int rc = 0; + + if (event_dev->data->dev_started) + sso_cleanup(event_dev, 0); + + prev_xaq_pool = dev->xaq_pool; + dev->xaq_pool = NULL; + rc = sso_xaq_allocate(dev); + if (rc < 0) { + otx2_err("Failed to alloc xaq pool %d", rc); + rte_mempool_free(prev_xaq_pool); + return rc; + } + rc = sso_ggrp_alloc_xaq(dev); + if (rc < 0) { + otx2_err("Failed to alloc xaq to ggrp %d", rc); + rte_mempool_free(prev_xaq_pool); + return rc; + } + + rte_mempool_free(prev_xaq_pool); + rte_mb(); + if (event_dev->data->dev_started) + sso_cleanup(event_dev, 1); + + return 0; +} + static int otx2_sso_start(struct rte_eventdev *event_dev) { sso_func_trace(); + sso_qos_cfg(event_dev); sso_cleanup(event_dev, 1); sso_fastpath_fns_set(event_dev); return 0; } +static void +otx2_sso_stop(struct rte_eventdev *event_dev) +{ + sso_func_trace(); + sso_cleanup(event_dev, 0); + rte_mb(); +} + +static int +otx2_sso_close(struct rte_eventdev *event_dev) +{ + struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev); + uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV]; + uint16_t i; + + if (!dev->configured) + return 0; + + sso_unregister_irqs(event_dev); + + for (i = 0; i < dev->nb_event_queues; i++) + all_queues[i] = i; + + for (i = 0; i < dev->nb_event_ports; i++) + otx2_sso_port_unlink(event_dev, event_dev->data->ports[i], + all_queues, dev->nb_event_queues); + + sso_lf_teardown(dev, SSO_LF_GGRP); + sso_lf_teardown(dev, SSO_LF_GWS); + dev->nb_event_ports = 0; + dev->nb_event_queues = 0; + rte_mempool_free(dev->xaq_pool); + rte_memzone_free(rte_memzone_lookup(OTX2_SSO_FC_NAME)); + + return 0; +} + /* Initialize and register event driver with DPDK Application */ static struct rte_eventdev_ops otx2_sso_ops = { .dev_infos_get = otx2_sso_info_get, @@ -1025,16 +1535,109 @@ static struct rte_eventdev_ops otx2_sso_ops = { .port_unlink = otx2_sso_port_unlink, .timeout_ticks = otx2_sso_timeout_ticks, + .eth_rx_adapter_caps_get = otx2_sso_rx_adapter_caps_get, + .eth_rx_adapter_queue_add = otx2_sso_rx_adapter_queue_add, + .eth_rx_adapter_queue_del = otx2_sso_rx_adapter_queue_del, + .eth_rx_adapter_start = otx2_sso_rx_adapter_start, + .eth_rx_adapter_stop = otx2_sso_rx_adapter_stop, + + .eth_tx_adapter_caps_get = otx2_sso_tx_adapter_caps_get, + .eth_tx_adapter_queue_add = otx2_sso_tx_adapter_queue_add, + .eth_tx_adapter_queue_del = otx2_sso_tx_adapter_queue_del, + + .timer_adapter_caps_get = otx2_tim_caps_get, + .xstats_get = otx2_sso_xstats_get, .xstats_reset = otx2_sso_xstats_reset, .xstats_get_names = otx2_sso_xstats_get_names, .dump = otx2_sso_dump, .dev_start = otx2_sso_start, + .dev_stop = otx2_sso_stop, + .dev_close = otx2_sso_close, + .dev_selftest = otx2_sso_selftest, }; #define OTX2_SSO_XAE_CNT "xae_cnt" #define OTX2_SSO_SINGLE_WS "single_ws" +#define OTX2_SSO_GGRP_QOS "qos" +#define OTX2_SSO_SELFTEST "selftest" + +static void +parse_queue_param(char *value, void *opaque) +{ + struct otx2_sso_qos queue_qos = {0}; + uint8_t *val = (uint8_t *)&queue_qos; + struct otx2_sso_evdev *dev = opaque; + char *tok = 
strtok(value, "-"); + struct otx2_sso_qos *old_ptr; + + if (!strlen(value)) + return; + + while (tok != NULL) { + *val = atoi(tok); + tok = strtok(NULL, "-"); + val++; + } + + if (val != (&queue_qos.iaq_prcnt + 1)) { + otx2_err("Invalid QoS parameter expected [Qx-XAQ-TAQ-IAQ]"); + return; + } + + dev->qos_queue_cnt++; + old_ptr = dev->qos_parse_data; + dev->qos_parse_data = rte_realloc(dev->qos_parse_data, + sizeof(struct otx2_sso_qos) * + dev->qos_queue_cnt, 0); + if (dev->qos_parse_data == NULL) { + dev->qos_parse_data = old_ptr; + dev->qos_queue_cnt--; + return; + } + dev->qos_parse_data[dev->qos_queue_cnt - 1] = queue_qos; +} + +static void +parse_qos_list(const char *value, void *opaque) +{ + char *s = strdup(value); + char *start = NULL; + char *end = NULL; + char *f = s; + + while (*s) { + if (*s == '[') + start = s; + else if (*s == ']') + end = s; + + if (start && start < end) { + *end = 0; + parse_queue_param(start + 1, opaque); + s = end; + start = end; + } + s++; + } + + free(f); +} + +static int +parse_sso_kvargs_dict(const char *key, const char *value, void *opaque) +{ + RTE_SET_USED(key); + + /* Dict format [Qx-XAQ-TAQ-IAQ][Qz-XAQ-TAQ-IAQ] use '-' cause ',' + * isn't allowed. Everything is expressed in percentages, 0 represents + * default. + */ + parse_qos_list(value, opaque); + + return 0; +} static void sso_parse_devargs(struct otx2_sso_evdev *dev, struct rte_devargs *devargs) @@ -1048,10 +1651,14 @@ sso_parse_devargs(struct otx2_sso_evdev *dev, struct rte_devargs *devargs) if (kvlist == NULL) return; + rte_kvargs_process(kvlist, OTX2_SSO_SELFTEST, &parse_kvargs_flag, + &dev->selftest); rte_kvargs_process(kvlist, OTX2_SSO_XAE_CNT, &parse_kvargs_value, &dev->xae_cnt); rte_kvargs_process(kvlist, OTX2_SSO_SINGLE_WS, &parse_kvargs_flag, &single_ws); + rte_kvargs_process(kvlist, OTX2_SSO_GGRP_QOS, &parse_sso_kvargs_dict, + dev); dev->dual_ws = !single_ws; rte_kvargs_free(kvlist); @@ -1083,7 +1690,7 @@ static const struct rte_pci_id pci_sso_map[] = { static struct rte_pci_driver pci_sso = { .id_table = pci_sso_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA, .probe = otx2_sso_probe, .remove = otx2_sso_remove, }; @@ -1161,7 +1768,12 @@ otx2_sso_init(struct rte_eventdev *event_dev) otx2_sso_dbg("Initializing %s max_queues=%d max_ports=%d", event_dev->data->name, dev->max_event_queues, dev->max_event_ports); + if (dev->selftest) { + event_dev->dev->driver = &pci_sso.driver; + event_dev->dev_ops->dev_selftest(); + } + otx2_tim_init(pci_dev, (struct otx2_dev *)dev); return 0; @@ -1197,6 +1809,7 @@ dev_fini: return -EAGAIN; } + otx2_tim_fini(); otx2_dev_fini(pci_dev, dev); return 0; @@ -1206,4 +1819,6 @@ RTE_PMD_REGISTER_PCI(event_octeontx2, pci_sso); RTE_PMD_REGISTER_PCI_TABLE(event_octeontx2, pci_sso_map); RTE_PMD_REGISTER_KMOD_DEP(event_octeontx2, "vfio-pci"); RTE_PMD_REGISTER_PARAM_STRING(event_octeontx2, OTX2_SSO_XAE_CNT "=" - OTX2_SSO_SINGLE_WS "=1"); + OTX2_SSO_SINGLE_WS "=1" + OTX2_SSO_GGRP_QOS "=" + OTX2_SSO_SELFTEST "=1");
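
The dequeue/Tx-adapter tables added above pick a specialized fastpath handler by indexing with one boolean per offload flag: every dimension of the [2][2][2][2][2][2][2] arrays maps to one NIX_RX_OFFLOAD_*/NIX_TX_OFFLOAD_* bit, and !!(offloads & FLAG) collapses that bit into a 0/1 index, so the per-offload branching is paid once in sso_fastpath_fns_set() rather than on every event. Below is a minimal standalone sketch of the same idea, using two hypothetical offload bits and made-up handler names rather than the driver's symbols:

/*
 * Minimal sketch (not driver code): flag-indexed dispatch with two
 * hypothetical offload bits instead of the seven used by the driver.
 */
#include <stdint.h>
#include <stdio.h>

#define RX_OFFLOAD_RSS_F      (1 << 0)
#define RX_OFFLOAD_CHECKSUM_F (1 << 1)

typedef void (*deq_fn_t)(void);

static void deq_plain(void)    { puts("no offloads"); }
static void deq_rss(void)      { puts("rss"); }
static void deq_csum(void)     { puts("checksum"); }
static void deq_csum_rss(void) { puts("checksum + rss"); }

static deq_fn_t
pick_dequeue(uint64_t rx_offloads)
{
	/* [CHECKSUM][RSS]: each dimension is 0 or 1, derived with !! */
	static const deq_fn_t deq[2][2] = {
		[0][0] = deq_plain,
		[0][1] = deq_rss,
		[1][0] = deq_csum,
		[1][1] = deq_csum_rss,
	};

	return deq[!!(rx_offloads & RX_OFFLOAD_CHECKSUM_F)]
		  [!!(rx_offloads & RX_OFFLOAD_RSS_F)];
}

int
main(void)
{
	pick_dequeue(RX_OFFLOAD_RSS_F | RX_OFFLOAD_CHECKSUM_F)();
	return 0;
}

In the patch itself each table has seven dimensions (Rx: SECURITY, TSTAMP, MARK_UPDATE, VLAN_STRIP, CHECKSUM, PTYPE, RSS; Tx: SECURITY, TSO, TSTAMP, MBUF_NOFF, VLAN_QINQ, OL3_OL4_CSUM, L3_L4_CSUM), i.e. 128 specialized variants per table.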
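
The tables are populated by re-expanding the SSO_RX_ADPTR_ENQ_FASTPATH_FUNC / SSO_TX_ADPTR_ENQ_FASTPATH_FUNC X-macro lists: each list invokes R(...)/T(...) once per generated variant, and locally redefining R/T as a designated initializer drops every otx2_ssogws_*_<name> symbol into its slot before the macro is #undef'd again. A self-contained sketch of that pattern with hypothetical names:

/* Minimal X-macro sketch (hypothetical names): one list, many tables. */
#include <stdio.h>

/* Each entry: variant name and its two flag bits. */
#define MY_FASTPATH_LIST \
	X(plain,    0, 0) \
	X(rss,      0, 1) \
	X(csum,     1, 0) \
	X(csum_rss, 1, 1)

/* First expansion: generate one stub handler per variant. */
#define X(name, f1, f0) \
	static void deq_##name(void) { puts("deq_" #name); }
MY_FASTPATH_LIST
#undef X

typedef void (*deq_fn_t)(void);

/* Second expansion: the same list as designated initializers. */
static const deq_fn_t deq_tbl[2][2] = {
#define X(name, f1, f0) [f1][f0] = deq_##name,
MY_FASTPATH_LIST
#undef X
};

int
main(void)
{
	deq_tbl[1][1]();   /* prints "deq_csum_rss" */
	return 0;
}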
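
parse_sso_kvargs_dict()/parse_qos_list() above accept the new "qos" devarg as bracketed groups of the form [Qx-XAQ-TAQ-IAQ][Qz-XAQ-TAQ-IAQ]: '-' separates the values because ',' is reserved by the devargs parser, each value is a percentage, and 0 means keep the default. A rough standalone sketch of the per-group token walk follows; the struct layout here is hypothetical and chosen only so the byte-wise walk is well defined, it is not the driver's otx2_sso_qos definition:

/* Sketch of parsing one "Qx-XAQ-TAQ-IAQ" group (illustration only). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct qos_params {            /* hypothetical layout for illustration */
	uint8_t queue;
	uint8_t xaq_prcnt;
	uint8_t taq_prcnt;
	uint8_t iaq_prcnt;
};

static int
parse_qos_group(const char *grp, struct qos_params *out)
{
	uint8_t *val = (uint8_t *)out;
	char buf[32];
	char *tok;

	strncpy(buf, grp, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';

	/* Walk '-'-separated tokens into consecutive percentage fields. */
	for (tok = strtok(buf, "-");
	     tok != NULL && val < (uint8_t *)(out + 1);
	     tok = strtok(NULL, "-"))
		*val++ = (uint8_t)atoi(tok);

	/* Exactly four values expected: Qx-XAQ-TAQ-IAQ. */
	return (tok == NULL && val == (uint8_t *)(out + 1)) ? 0 : -1;
}

int
main(void)
{
	struct qos_params p;

	if (parse_qos_group("1-50-50-50", &p) == 0)
		printf("queue %d: xaq %d%% taq %d%% iaq %d%%\n",
		       p.queue, p.xaq_prcnt, p.taq_prcnt, p.iaq_prcnt);
	return 0;
}

So a devargs string containing something like qos=[1-50-50-50] would request 50% XAQ/TAQ/IAQ thresholds for event queue 1, provided that queue index is below nb_event_queues (groups naming out-of-range queues are skipped by sso_qos_cfg()).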
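
sso_xaq_allocate() now also sizes the XAQ pool from adptr_xae_cnt, the entry demand typically accumulated by the Rx/Tx/timer adapters, when no explicit xae_cnt devarg was given. A small worked example of that arithmetic with made-up values; the real OTX2_SSO_XAQ_CACHE_CNT, OTX2_SSO_XAQ_SLACK and xae_waes values come from the driver and hardware and are not reproduced here:

/* Sketch of the XAQ pool sizing above, with hypothetical example values. */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* All of these are example values, not the driver's constants. */
	const uint32_t nb_event_queues = 2;
	const uint32_t xaq_cache_cnt = 8;    /* stand-in for OTX2_SSO_XAQ_CACHE_CNT */
	const uint32_t xaq_slack = 16;       /* stand-in for OTX2_SSO_XAQ_SLACK */
	const uint32_t xae_waes = 16;        /* work-add entries per XAQ buffer */
	const uint32_t adptr_xae_cnt = 4096; /* demand registered by adapters */

	uint32_t xaq_cnt = nb_event_queues * xaq_cache_cnt;

	/* No explicit xae_cnt devarg, but adapters registered a demand. */
	xaq_cnt += adptr_xae_cnt / xae_waes + xaq_slack * nb_event_queues;

	printf("xaq_cnt = %u\n", xaq_cnt); /* 16 + 256 + 32 = 304 */
	return 0;
}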