X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fevent%2Focteontx2%2Fotx2_evdev.c;h=2daeba42c76dbd8434d2cefd3750acf9509314af;hb=c3e33304a7f6;hp=16d5e7dfa0e19ac7c5ec3395bccaa7b9be40f117;hpb=aa62547f7ef8795595c5e77e2e4409426870840a;p=dpdk.git

diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
index 16d5e7dfa0..2daeba42c7 100644
--- a/drivers/event/octeontx2/otx2_evdev.c
+++ b/drivers/event/octeontx2/otx2_evdev.c
@@ -15,6 +15,7 @@
 #include "otx2_evdev_stats.h"
 #include "otx2_evdev.h"
 #include "otx2_irq.h"
+#include "otx2_tim_evdev.h"
 
 static inline int
 sso_get_msix_offsets(const struct rte_eventdev *event_dev)
@@ -38,6 +39,406 @@ sso_get_msix_offsets(const struct rte_eventdev *event_dev)
 	return rc;
 }
 
+void
+sso_fastpath_fns_set(struct rte_eventdev *event_dev)
+{
+	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+	/* Single WS modes */
+	const event_dequeue_t ssogws_deq[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
+	[f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_burst_t ssogws_deq_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
+	[f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_t ssogws_deq_timeout[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
+	[f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_timeout_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_burst_t
+		ssogws_deq_timeout_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
+	[f5][f4][f3][f2][f1][f0] =					\
+		otx2_ssogws_deq_timeout_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_t ssogws_deq_seg[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
+	[f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_burst_t ssogws_deq_seg_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
+	[f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_t ssogws_deq_seg_timeout[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
+	[f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_timeout_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_burst_t
+		ssogws_deq_seg_timeout_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
+	[f5][f4][f3][f2][f1][f0] =					\
+		otx2_ssogws_deq_seg_timeout_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+
+	/* Dual WS modes */
+	const event_dequeue_t ssogws_dual_deq[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
+	[f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_burst_t ssogws_dual_deq_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
+	[f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_t ssogws_dual_deq_timeout[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
+	[f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_timeout_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_burst_t
+		ssogws_dual_deq_timeout_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
+	[f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_timeout_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_t ssogws_dual_deq_seg[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
+	[f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_seg_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_burst_t
+		ssogws_dual_deq_seg_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
+	[f5][f4][f3][f2][f1][f0] =					\
+		otx2_ssogws_dual_deq_seg_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_t ssogws_dual_deq_seg_timeout[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
+	[f5][f4][f3][f2][f1][f0] =					\
+		otx2_ssogws_dual_deq_seg_timeout_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	const event_dequeue_burst_t
+		ssogws_dual_deq_seg_timeout_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags)				\
+	[f5][f4][f3][f2][f1][f0] =					\
+		otx2_ssogws_dual_deq_seg_timeout_burst_ ##name,
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	/* Tx modes */
+	const event_tx_adapter_enqueue ssogws_tx_adptr_enq[2][2][2][2][2][2] = {
+#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			\
+	[f5][f4][f3][f2][f1][f0] = otx2_ssogws_tx_adptr_enq_ ## name,
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+	};
+
+	const event_tx_adapter_enqueue
+		ssogws_tx_adptr_enq_seg[2][2][2][2][2][2] = {
+#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			\
+	[f5][f4][f3][f2][f1][f0] =					\
+		otx2_ssogws_tx_adptr_enq_seg_ ## name,
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+	};
+
+	const event_tx_adapter_enqueue
+		ssogws_dual_tx_adptr_enq[2][2][2][2][2][2] = {
+#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			\
+	[f5][f4][f3][f2][f1][f0] =					\
+		otx2_ssogws_dual_tx_adptr_enq_ ## name,
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+	};
+
+	const event_tx_adapter_enqueue
+		ssogws_dual_tx_adptr_enq_seg[2][2][2][2][2][2] = {
+#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			\
+	[f5][f4][f3][f2][f1][f0] =					\
+		otx2_ssogws_dual_tx_adptr_enq_seg_ ## name,
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+	};
+
+	event_dev->enqueue = otx2_ssogws_enq;
+	event_dev->enqueue_burst = otx2_ssogws_enq_burst;
+	event_dev->enqueue_new_burst = otx2_ssogws_enq_new_burst;
+	event_dev->enqueue_forward_burst = otx2_ssogws_enq_fwd_burst;
+	if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
+		event_dev->dequeue = ssogws_deq_seg
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		event_dev->dequeue_burst = ssogws_deq_seg_burst
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		if (dev->is_timeout_deq) {
+			event_dev->dequeue = ssogws_deq_seg_timeout
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+			event_dev->dequeue_burst =
+				ssogws_deq_seg_timeout_burst
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		}
+	} else {
+		event_dev->dequeue = ssogws_deq
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		event_dev->dequeue_burst = ssogws_deq_burst
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		if (dev->is_timeout_deq) {
+			event_dev->dequeue = ssogws_deq_timeout
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+			event_dev->dequeue_burst =
+				ssogws_deq_timeout_burst
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		}
+	}
+
+	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
+		/* [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */
+		event_dev->txa_enqueue = ssogws_tx_adptr_enq_seg
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
+	} else {
+		event_dev->txa_enqueue = ssogws_tx_adptr_enq
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
+	}
+
+	if (dev->dual_ws) {
+		event_dev->enqueue = otx2_ssogws_dual_enq;
+		event_dev->enqueue_burst = otx2_ssogws_dual_enq_burst;
+		event_dev->enqueue_new_burst =
+			otx2_ssogws_dual_enq_new_burst;
+		event_dev->enqueue_forward_burst =
+			otx2_ssogws_dual_enq_fwd_burst;
+
+		if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
+			event_dev->dequeue = ssogws_dual_deq_seg
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_TSTAMP_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+			event_dev->dequeue_burst = ssogws_dual_deq_seg_burst
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+			if (dev->is_timeout_deq) {
+				event_dev->dequeue =
+					ssogws_dual_deq_seg_timeout
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_TSTAMP_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_CHECKSUM_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_PTYPE_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_RSS_F)];
+				event_dev->dequeue_burst =
+					ssogws_dual_deq_seg_timeout_burst
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_TSTAMP_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_CHECKSUM_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_PTYPE_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_RSS_F)];
+			}
+		} else {
+			event_dev->dequeue = ssogws_dual_deq
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_TSTAMP_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+			event_dev->dequeue_burst = ssogws_dual_deq_burst
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_TSTAMP_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+				[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+			if (dev->is_timeout_deq) {
+				event_dev->dequeue =
+					ssogws_dual_deq_timeout
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_TSTAMP_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_CHECKSUM_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_PTYPE_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_RSS_F)];
+				event_dev->dequeue_burst =
+					ssogws_dual_deq_timeout_burst
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_TSTAMP_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_CHECKSUM_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_PTYPE_F)]
+					[!!(dev->rx_offloads &
+						NIX_RX_OFFLOAD_RSS_F)];
+			}
+		}
+
+		if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
+			/* [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */
+			event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq_seg
+				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
+				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
+				[!!(dev->tx_offloads &
+						NIX_TX_OFFLOAD_MBUF_NOFF_F)]
+				[!!(dev->tx_offloads &
+						NIX_TX_OFFLOAD_VLAN_QINQ_F)]
+				[!!(dev->tx_offloads &
+						NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
+				[!!(dev->tx_offloads &
+						NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
+		} else {
+			event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq
+				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
+				[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
+				[!!(dev->tx_offloads &
+						NIX_TX_OFFLOAD_MBUF_NOFF_F)]
+				[!!(dev->tx_offloads &
+						NIX_TX_OFFLOAD_VLAN_QINQ_F)]
+				[!!(dev->tx_offloads &
+						NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
+				[!!(dev->tx_offloads &
+						NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
+		}
+	}
+
+	event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
+	rte_mb();
+}
+
 static void
 otx2_sso_info_get(struct rte_eventdev *event_dev,
 		  struct rte_event_dev_info *dev_info)
@@ -493,6 +894,9 @@ sso_xaq_allocate(struct otx2_sso_evdev *dev)
 	xaq_cnt = dev->nb_event_queues * OTX2_SSO_XAQ_CACHE_CNT;
 	if (dev->xae_cnt)
 		xaq_cnt += dev->xae_cnt / dev->xae_waes;
+	else if (dev->adptr_xae_cnt)
+		xaq_cnt += (dev->adptr_xae_cnt / dev->xae_waes) +
+			   (OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
 	else
 		xaq_cnt += (dev->iue / dev->xae_waes) +
 			   (OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
@@ -774,6 +1178,7 @@ otx2_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
 		       sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
 		ws->fc_mem = dev->fc_mem;
 		ws->xaq_lmt = dev->xaq_lmt;
+		ws->tstamp = dev->tstamp;
 		otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR(
 			     ws->ws_state[0].getwrk_op) + SSOW_LF_GWS_NW_TIM);
 		otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR(
@@ -786,6 +1191,7 @@ otx2_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
 		       sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
 		ws->fc_mem = dev->fc_mem;
 		ws->xaq_lmt = dev->xaq_lmt;
+		ws->tstamp = dev->tstamp;
 		otx2_write64(val, base + SSOW_LF_GWS_NW_TIM);
 	}
 
@@ -889,6 +1295,192 @@ otx2_sso_dump(struct rte_eventdev *event_dev, FILE *f)
 	}
 }
 
+static void
+otx2_handle_event(void *arg, struct rte_event event)
+{
+	struct rte_eventdev *event_dev = arg;
+
+	if (event_dev->dev_ops->dev_stop_flush != NULL)
+		event_dev->dev_ops->dev_stop_flush(event_dev->data->dev_id,
+				event, event_dev->data->dev_stop_flush_arg);
+}
+
+static void
+sso_qos_cfg(struct rte_eventdev *event_dev)
+{
+	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+	struct sso_grp_qos_cfg *req;
+	uint16_t i;
+
+	for (i = 0; i < dev->qos_queue_cnt; i++) {
+		uint8_t xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt;
+		uint8_t iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt;
+		uint8_t taq_prcnt = dev->qos_parse_data[i].taq_prcnt;
+
+		if (dev->qos_parse_data[i].queue >= dev->nb_event_queues)
+			continue;
+
+		req = otx2_mbox_alloc_msg_sso_grp_qos_config(dev->mbox);
+		req->xaq_limit = (dev->nb_xaq_cfg *
+				  (xaq_prcnt ? xaq_prcnt : 100)) / 100;
+		req->taq_thr = (SSO_HWGRP_IAQ_MAX_THR_MASK *
+				(iaq_prcnt ? iaq_prcnt : 100)) / 100;
+		req->iaq_thr = (SSO_HWGRP_TAQ_MAX_THR_MASK *
+				(taq_prcnt ? taq_prcnt : 100)) / 100;
+	}
+
+	if (dev->qos_queue_cnt)
+		otx2_mbox_process(dev->mbox);
+}
+
+static void
+sso_cleanup(struct rte_eventdev *event_dev, uint8_t enable)
+{
+	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+	uint16_t i;
+
+	for (i = 0; i < dev->nb_event_ports; i++) {
+		if (dev->dual_ws) {
+			struct otx2_ssogws_dual *ws;
+
+			ws = event_dev->data->ports[i];
+			ssogws_reset((struct otx2_ssogws *)&ws->ws_state[0]);
+			ssogws_reset((struct otx2_ssogws *)&ws->ws_state[1]);
+			ws->swtag_req = 0;
+			ws->vws = 0;
+			ws->ws_state[0].cur_grp = 0;
+			ws->ws_state[0].cur_tt = SSO_SYNC_EMPTY;
+			ws->ws_state[1].cur_grp = 0;
+			ws->ws_state[1].cur_tt = SSO_SYNC_EMPTY;
+		} else {
+			struct otx2_ssogws *ws;
+
+			ws = event_dev->data->ports[i];
+			ssogws_reset(ws);
+			ws->swtag_req = 0;
+			ws->cur_grp = 0;
+			ws->cur_tt = SSO_SYNC_EMPTY;
+		}
+	}
+
+	rte_mb();
+	if (dev->dual_ws) {
+		struct otx2_ssogws_dual *ws = event_dev->data->ports[0];
+		struct otx2_ssogws temp_ws;
+
+		memcpy(&temp_ws, &ws->ws_state[0],
+		       sizeof(struct otx2_ssogws_state));
+		for (i = 0; i < dev->nb_event_queues; i++) {
+			/* Consume all the events through HWS0 */
+			ssogws_flush_events(&temp_ws, i, ws->grps_base[i],
+					    otx2_handle_event, event_dev);
+			/* Enable/Disable SSO GGRP */
+			otx2_write64(enable, ws->grps_base[i] +
+				     SSO_LF_GGRP_QCTL);
+		}
+		ws->ws_state[0].cur_grp = 0;
+		ws->ws_state[0].cur_tt = SSO_SYNC_EMPTY;
+	} else {
+		struct otx2_ssogws *ws = event_dev->data->ports[0];
+
+		for (i = 0; i < dev->nb_event_queues; i++) {
+			/* Consume all the events through HWS0 */
+			ssogws_flush_events(ws, i, ws->grps_base[i],
+					    otx2_handle_event, event_dev);
+			/* Enable/Disable SSO GGRP */
+			otx2_write64(enable, ws->grps_base[i] +
+				     SSO_LF_GGRP_QCTL);
+		}
+		ws->cur_grp = 0;
+		ws->cur_tt = SSO_SYNC_EMPTY;
+	}
+
+	/* reset SSO GWS cache */
+	otx2_mbox_alloc_msg_sso_ws_cache_inv(dev->mbox);
+	otx2_mbox_process(dev->mbox);
+}
+
+int
+sso_xae_reconfigure(struct rte_eventdev *event_dev)
+{
+	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+	struct rte_mempool *prev_xaq_pool;
+	int rc = 0;
+
+	if (event_dev->data->dev_started)
+		sso_cleanup(event_dev, 0);
+
+	prev_xaq_pool = dev->xaq_pool;
+	dev->xaq_pool = NULL;
+	rc = sso_xaq_allocate(dev);
+	if (rc < 0) {
+		otx2_err("Failed to alloc xaq pool %d", rc);
+		rte_mempool_free(prev_xaq_pool);
+		return rc;
+	}
+	rc = sso_ggrp_alloc_xaq(dev);
+	if (rc < 0) {
+		otx2_err("Failed to alloc xaq to ggrp %d", rc);
+		rte_mempool_free(prev_xaq_pool);
+		return rc;
+	}
+
+	rte_mempool_free(prev_xaq_pool);
+	rte_mb();
+	if (event_dev->data->dev_started)
+		sso_cleanup(event_dev, 1);
+
+	return 0;
+}
+
+static int
+otx2_sso_start(struct rte_eventdev *event_dev)
+{
+	sso_func_trace();
+	sso_qos_cfg(event_dev);
+	sso_cleanup(event_dev, 1);
+	sso_fastpath_fns_set(event_dev);
+
+	return 0;
+}
+
+static void
+otx2_sso_stop(struct rte_eventdev *event_dev)
+{
+	sso_func_trace();
+	sso_cleanup(event_dev, 0);
+	rte_mb();
+}
+
+static int
+otx2_sso_close(struct rte_eventdev *event_dev)
+{
+	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
+	uint16_t i;
+
+	if (!dev->configured)
+		return 0;
+
+	sso_unregister_irqs(event_dev);
+
+	for (i = 0; i < dev->nb_event_queues; i++)
+		all_queues[i] = i;
+
+	for (i = 0; i < dev->nb_event_ports; i++)
+		otx2_sso_port_unlink(event_dev, event_dev->data->ports[i],
+				     all_queues, dev->nb_event_queues);
+
+	sso_lf_teardown(dev, SSO_LF_GGRP);
+	sso_lf_teardown(dev, SSO_LF_GWS);
+	dev->nb_event_ports = 0;
+	dev->nb_event_queues = 0;
+	rte_mempool_free(dev->xaq_pool);
+	rte_memzone_free(rte_memzone_lookup(OTX2_SSO_FC_NAME));
+
+	return 0;
+}
+
 /* Initialize and register event driver with DPDK Application */
 static struct rte_eventdev_ops otx2_sso_ops = {
 	.dev_infos_get = otx2_sso_info_get,
@@ -903,19 +1495,115 @@ static struct rte_eventdev_ops otx2_sso_ops = {
 	.port_unlink = otx2_sso_port_unlink,
 	.timeout_ticks = otx2_sso_timeout_ticks,
 
+	.eth_rx_adapter_caps_get = otx2_sso_rx_adapter_caps_get,
+	.eth_rx_adapter_queue_add = otx2_sso_rx_adapter_queue_add,
+	.eth_rx_adapter_queue_del = otx2_sso_rx_adapter_queue_del,
+	.eth_rx_adapter_start = otx2_sso_rx_adapter_start,
+	.eth_rx_adapter_stop = otx2_sso_rx_adapter_stop,
+
+	.eth_tx_adapter_caps_get = otx2_sso_tx_adapter_caps_get,
+	.eth_tx_adapter_queue_add = otx2_sso_tx_adapter_queue_add,
+	.eth_tx_adapter_queue_del = otx2_sso_tx_adapter_queue_del,
+
+	.timer_adapter_caps_get = otx2_tim_caps_get,
+
 	.xstats_get = otx2_sso_xstats_get,
 	.xstats_reset = otx2_sso_xstats_reset,
 	.xstats_get_names = otx2_sso_xstats_get_names,
 
 	.dump = otx2_sso_dump,
+	.dev_start = otx2_sso_start,
+	.dev_stop = otx2_sso_stop,
+	.dev_close = otx2_sso_close,
+	.dev_selftest = otx2_sso_selftest,
 };
 
 #define OTX2_SSO_XAE_CNT "xae_cnt"
+#define OTX2_SSO_SINGLE_WS "single_ws"
+#define OTX2_SSO_GGRP_QOS "qos"
+#define OTX2_SSO_SELFTEST "selftest"
+
+static void
+parse_queue_param(char *value, void *opaque)
+{
+	struct otx2_sso_qos queue_qos = {0};
+	uint8_t *val = (uint8_t *)&queue_qos;
+	struct otx2_sso_evdev *dev = opaque;
+	char *tok = strtok(value, "-");
+	struct otx2_sso_qos *old_ptr;
+
+	if (!strlen(value))
+		return;
+
+	while (tok != NULL) {
+		*val = atoi(tok);
+		tok = strtok(NULL, "-");
+		val++;
+	}
+
+	if (val != (&queue_qos.iaq_prcnt + 1)) {
+		otx2_err("Invalid QoS parameter expected [Qx-XAQ-TAQ-IAQ]");
+		return;
+	}
+
+	dev->qos_queue_cnt++;
+	old_ptr = dev->qos_parse_data;
+	dev->qos_parse_data = rte_realloc(dev->qos_parse_data,
+					  sizeof(struct otx2_sso_qos) *
+					  dev->qos_queue_cnt, 0);
+	if (dev->qos_parse_data == NULL) {
+		dev->qos_parse_data = old_ptr;
+		dev->qos_queue_cnt--;
+		return;
+	}
+	dev->qos_parse_data[dev->qos_queue_cnt - 1] = queue_qos;
+}
+
+static void
+parse_qos_list(const char *value, void *opaque)
+{
+	char *s = strdup(value);
+	char *start = NULL;
+	char *end = NULL;
+	char *f = s;
+
+	while (*s) {
+		if (*s == '[')
+			start = s;
+		else if (*s == ']')
+			end = s;
+
+		if (start && start < end) {
+			*end = 0;
+			parse_queue_param(start + 1, opaque);
+			s = end;
+			start = end;
+		}
+		s++;
+	}
+
+	free(f);
+}
+
+static int
+parse_sso_kvargs_dict(const char *key, const char *value, void *opaque)
+{
+	RTE_SET_USED(key);
+
+	/* Dict format [Qx-XAQ-TAQ-IAQ][Qz-XAQ-TAQ-IAQ] use '-' cause ','
+	 * isn't allowed. Everything is expressed in percentages, 0 represents
+	 * default.
+	 */
+	parse_qos_list(value, opaque);
+
+	return 0;
+}
 
 static void
 sso_parse_devargs(struct otx2_sso_evdev *dev, struct rte_devargs *devargs)
 {
 	struct rte_kvargs *kvlist;
+	uint8_t single_ws = 0;
 
 	if (devargs == NULL)
 		return;
@@ -923,9 +1611,16 @@ sso_parse_devargs(struct otx2_sso_evdev *dev, struct rte_devargs *devargs)
 	if (kvlist == NULL)
 		return;
 
+	rte_kvargs_process(kvlist, OTX2_SSO_SELFTEST, &parse_kvargs_flag,
+			   &dev->selftest);
 	rte_kvargs_process(kvlist, OTX2_SSO_XAE_CNT, &parse_kvargs_value,
 			   &dev->xae_cnt);
+	rte_kvargs_process(kvlist, OTX2_SSO_SINGLE_WS, &parse_kvargs_flag,
+			   &single_ws);
+	rte_kvargs_process(kvlist, OTX2_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
+			   dev);
+	dev->dual_ws = !single_ws;
 
 	rte_kvargs_free(kvlist);
 }
@@ -955,7 +1650,7 @@ static const struct rte_pci_id pci_sso_map[] = {
 
 static struct rte_pci_driver pci_sso = {
 	.id_table = pci_sso_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = otx2_sso_probe,
 	.remove = otx2_sso_remove,
 };
@@ -970,8 +1665,10 @@ otx2_sso_init(struct rte_eventdev *event_dev)
 	event_dev->dev_ops = &otx2_sso_ops;
 
 	/* For secondary processes, the primary has done all the work */
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		sso_fastpath_fns_set(event_dev);
 		return 0;
+	}
 
 	dev = sso_pmd_priv(event_dev);
 
@@ -1031,7 +1728,12 @@ otx2_sso_init(struct rte_eventdev *event_dev)
 	otx2_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
 		     event_dev->data->name, dev->max_event_queues,
 		     dev->max_event_ports);
 
+	if (dev->selftest) {
+		event_dev->dev->driver = &pci_sso.driver;
+		event_dev->dev_ops->dev_selftest();
+	}
+
 	otx2_tim_init(pci_dev, (struct otx2_dev *)dev);
 
 	return 0;
@@ -1067,6 +1769,7 @@ dev_fini:
 		return -EAGAIN;
 	}
 
+	otx2_tim_fini();
 	otx2_dev_fini(pci_dev, dev);
 
 	return 0;
@@ -1075,4 +1778,7 @@ dev_fini:
 RTE_PMD_REGISTER_PCI(event_octeontx2, pci_sso);
 RTE_PMD_REGISTER_PCI_TABLE(event_octeontx2, pci_sso_map);
 RTE_PMD_REGISTER_KMOD_DEP(event_octeontx2, "vfio-pci");
-RTE_PMD_REGISTER_PARAM_STRING(event_octeontx2, OTX2_SSO_XAE_CNT "=");
+RTE_PMD_REGISTER_PARAM_STRING(event_octeontx2, OTX2_SSO_XAE_CNT "="
+			      OTX2_SSO_SINGLE_WS "=1"
+			      OTX2_SSO_GGRP_QOS "="
+			      OTX2_SSO_SELFTEST "=1");