net/octeontx: add framework for Rx/Tx offloads
author		Harman Kalra <hkalra@marvell.com>
		Mon, 16 Mar 2020 09:33:38 +0000 (15:03 +0530)
committer	Ferruh Yigit <ferruh.yigit@intel.com>
		Tue, 21 Apr 2020 11:57:06 +0000 (13:57 +0200)
Add a macro-based framework that hooks the Rx/Tx burst function
pointers to the appropriate functions based on the enabled Rx/Tx
offloads.
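
As a rough standalone sketch of the pattern (it compiles on its own;
the demo_* names and simplified types are illustrative stand-ins, not
driver code), a single T() list is expanded once to generate the
per-mode burst functions and a second time to build the table that
selects among them:

/*
 * Standalone sketch of the macro-table dispatch pattern.
 * demo_* names are illustrative stand-ins, not part of the driver.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_TX_OFFLOAD_NONE	0x0
#define DEMO_TX_MULTI_SEG_F	0x1

/* [NOFF] [MULTI_SEG]: name, table index, cmd words, flags */
#define DEMO_TX_FASTPATH_MODES					\
T(no_offload,	0,	4,	DEMO_TX_OFFLOAD_NONE)		\
T(mseg,		1,	14,	DEMO_TX_MULTI_SEG_F)

typedef uint16_t (*demo_tx_burst_t)(void *txq, void **pkts, uint16_t nb);

/* Stand-in for the common transmit loop: pretend all packets were sent. */
static uint16_t
demo_xmit(void *txq, void **pkts, uint16_t nb, uint64_t *cmd, uint16_t flags)
{
	(void)txq; (void)pkts; (void)cmd; (void)flags;
	return nb;
}

/* First expansion: emit one specialized burst function per mode. */
#define T(name, idx, sz, flags)					\
static uint16_t							\
demo_xmit_pkts_##name(void *txq, void **pkts, uint16_t nb)	\
{								\
	uint64_t cmd[(sz)];					\
								\
	return demo_xmit(txq, pkts, nb, cmd, (flags));		\
}
DEMO_TX_FASTPATH_MODES
#undef T

int main(void)
{
	/* Second expansion: build the selection table. */
	static const demo_tx_burst_t tx_burst_func[2] = {
#define T(name, idx, sz, flags) [idx] = demo_xmit_pkts_##name,
DEMO_TX_FASTPATH_MODES
#undef T
	};
	uint16_t tx_offload_flags = DEMO_TX_MULTI_SEG_F;
	demo_tx_burst_t burst =
		tx_burst_func[!!(tx_offload_flags & DEMO_TX_MULTI_SEG_F)];

	printf("sent %u pkts\n", (unsigned int)burst(NULL, NULL, 8));
	return 0;
}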

Signed-off-by: Harman Kalra <hkalra@marvell.com>
drivers/event/octeontx/ssovf_worker.c
drivers/net/octeontx/octeontx_ethdev.c
drivers/net/octeontx/octeontx_ethdev.h
drivers/net/octeontx/octeontx_rxtx.c
drivers/net/octeontx/octeontx_rxtx.h

diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index 208b7e7..ab34233 100644
@@ -272,7 +272,7 @@ sso_event_tx_adapter_enqueue(void *port,
        struct rte_eth_dev *ethdev;
        struct ssows *ws = port;
        struct octeontx_txq *txq;
-       octeontx_dq_t *dq;
+       uint64_t cmd[4];
 
        RTE_SET_USED(nb_events);
        switch (ev->sched_type) {
@@ -297,11 +297,6 @@ sso_event_tx_adapter_enqueue(void *port,
        queue_id = rte_event_eth_tx_adapter_txq_get(m);
        ethdev = &rte_eth_devices[port_id];
        txq = ethdev->data->tx_queues[queue_id];
-       dq = &txq->dq;
 
-       if (__octeontx_xmit_pkts(dq->lmtline_va, dq->ioreg_va, dq->fc_status_va,
-                               m, OCCTX_TX_OFFLOAD_NONE) < 0)
-               return 0;
-
-       return 1;
+       return __octeontx_xmit_pkts(txq, &m, 1, cmd, OCCTX_TX_OFFLOAD_NONE);
 }
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 24c4e83..d6adbbc 100644
@@ -436,27 +436,20 @@ octeontx_recheck_rx_offloads(struct octeontx_rxq *rxq)
        return 0;
 }
 
-static void
-octeontx_set_tx_function(struct rte_eth_dev *dev)
-{
-       struct octeontx_nic *nic = octeontx_pmd_priv(dev);
-
-       const eth_tx_burst_t tx_burst_func[2] = {
-               [0] = octeontx_xmit_pkts,
-               [1] = octeontx_xmit_pkts_mseg,
-       };
-
-       dev->tx_pkt_burst =
-               tx_burst_func[!!(nic->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)];
-}
-
 static int
 octeontx_dev_start(struct rte_eth_dev *dev)
 {
        struct octeontx_nic *nic = octeontx_pmd_priv(dev);
-       int ret;
+       struct octeontx_rxq *rxq;
+       int ret = 0, i;
 
-       ret = 0;
+       /* Rechecking if any new offload set to update
+        * rx/tx burst function pointer accordingly.
+        */
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+               octeontx_recheck_rx_offloads(rxq);
+       }
 
        PMD_INIT_FUNC_TRACE();
        /*
@@ -1159,7 +1152,7 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
 
                eth_dev->dev_ops = &octeontx_dev_ops;
                eth_dev->device = &dev->device;
-               eth_dev->tx_pkt_burst = octeontx_xmit_pkts;
+               octeontx_set_tx_function(eth_dev);
                eth_dev->rx_pkt_burst = octeontx_recv_pkts;
                rte_eth_dev_probing_finish(eth_dev);
                return 0;
diff --git a/drivers/net/octeontx/octeontx_ethdev.h b/drivers/net/octeontx/octeontx_ethdev.h
index 10da6a2..06223e6 100644
 #define OCTEONTX_MAX_BGX_PORTS                 4
 #define OCTEONTX_MAX_LMAC_PER_BGX              4
 
-#define OCTEONTX_RX_OFFLOADS                   (DEV_RX_OFFLOAD_CHECKSUM     | \
-                                                DEV_RX_OFFLOAD_SCATTER      | \
-                                                DEV_RX_OFFLOAD_JUMBO_FRAME)
+#define OCTEONTX_RX_OFFLOADS           (DEV_RX_OFFLOAD_CHECKSUM     | \
+                                        DEV_RX_OFFLOAD_SCATTER      | \
+                                        DEV_RX_OFFLOAD_JUMBO_FRAME)
 
-#define OCTEONTX_TX_OFFLOADS                   (DEV_TX_OFFLOAD_MT_LOCKFREE  | \
-                                                DEV_TX_OFFLOAD_MULTI_SEGS)
+#define OCTEONTX_TX_OFFLOADS           (DEV_TX_OFFLOAD_MT_LOCKFREE    |  \
+                                        DEV_TX_OFFLOAD_MULTI_SEGS)
 
 static inline struct octeontx_nic *
 octeontx_pmd_priv(struct rte_eth_dev *dev)
@@ -99,4 +99,6 @@ struct octeontx_rxq {
        struct rte_mempool *pool;
 } __rte_cache_aligned;
 
+void
+octeontx_set_tx_function(struct rte_eth_dev *dev);
 #endif /* __OCTEONTX_ETHDEV_H__ */
diff --git a/drivers/net/octeontx/octeontx_rxtx.c b/drivers/net/octeontx/octeontx_rxtx.c
index 5451cab..2258ed4 100644
 #include "octeontx_rxtx.h"
 #include "octeontx_logs.h"
 
-uint16_t __rte_hot
-octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
-       int count;
-       struct octeontx_txq *txq = tx_queue;
-       octeontx_dq_t *dq = &txq->dq;
-       int res;
-
-       count = 0;
-
-       rte_cio_wmb();
-       while (count < nb_pkts) {
-               res = __octeontx_xmit_pkts(dq->lmtline_va, dq->ioreg_va,
-                                          dq->fc_status_va, tx_pkts[count],
-                                          OCCTX_TX_OFFLOAD_NONE);
-               if (res < 0)
-                       break;
-
-               count++;
-       }
-
-       return count; /* return number of pkts transmitted */
-}
-
-uint16_t __rte_hot
-octeontx_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
-                       uint16_t nb_pkts)
-{
-       int count;
-       struct octeontx_txq *txq = tx_queue;
-       octeontx_dq_t *dq = &txq->dq;
-       int res;
-
-       count = 0;
-
-       rte_cio_wmb();
-       while (count < nb_pkts) {
-               res = __octeontx_xmit_pkts(dq->lmtline_va, dq->ioreg_va,
-                                          dq->fc_status_va, tx_pkts[count],
-                                          OCCTX_TX_OFFLOAD_NONE |
-                                          OCCTX_TX_MULTI_SEG_F);
-               if (res < 0)
-                       break;
-
-               count++;
-       }
-
-       return count; /* return number of pkts transmitted */
-}
-
 uint16_t __rte_hot
 octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
@@ -90,3 +40,34 @@ octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
        return count; /* return number of pkts received */
 }
+
+#define T(name, f1, sz, flags)                                 \
+static uint16_t __rte_noinline __rte_hot                               \
+octeontx_xmit_pkts_ ##name(void *tx_queue,                             \
+                       struct rte_mbuf **tx_pkts, uint16_t pkts)       \
+{                                                                      \
+       uint64_t cmd[(sz)];                                             \
+                                                                       \
+       return __octeontx_xmit_pkts(tx_queue, tx_pkts, pkts, cmd,       \
+                                   flags);                             \
+}
+
+OCCTX_TX_FASTPATH_MODES
+#undef T
+
+void __rte_hot
+octeontx_set_tx_function(struct rte_eth_dev *dev)
+{
+       struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+       const eth_tx_burst_t tx_burst_func[2] = {
+#define T(name, f0, sz, flags)                 \
+       [f0] =  octeontx_xmit_pkts_ ##name,
+
+OCCTX_TX_FASTPATH_MODES
+#undef T
+       };
+
+       dev->tx_pkt_burst = tx_burst_func
+               [!!(nic->tx_offload_flags & OCCTX_TX_MULTI_SEG_F)];
+}
diff --git a/drivers/net/octeontx/octeontx_rxtx.h b/drivers/net/octeontx/octeontx_rxtx.h
index 83057d5..3cad5ef 100644
@@ -109,90 +109,119 @@ ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
 
 };
 
-static __rte_always_inline int
-__octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
-                       struct rte_mbuf *tx_pkt, const uint16_t flag)
+
+static __rte_always_inline uint16_t
+__octeontx_xmit_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
+                       const uint16_t flag __rte_unused)
+{
+       uint16_t gaura_id, nb_desc = 0;
+
+       /* Setup PKO_SEND_HDR_S */
+       cmd_buf[nb_desc++] = tx_pkt->data_len & 0xffff;
+       cmd_buf[nb_desc++] = 0x0;
+
+       /* Mark mempool object as "put" since it is freed by PKO */
+       if (!(cmd_buf[0] & (1ULL << 58)))
+               __mempool_check_cookies(tx_pkt->pool, (void **)&tx_pkt,
+                                       1, 0);
+       /* Get the gaura Id */
+       gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
+                                             tx_pkt->pool->pool_id);
+
+       /* Setup PKO_SEND_BUFLINK_S */
+       cmd_buf[nb_desc++] = PKO_SEND_BUFLINK_SUBDC |
+               PKO_SEND_BUFLINK_LDTYPE(0x1ull) |
+               PKO_SEND_BUFLINK_GAUAR((long)gaura_id) |
+               tx_pkt->data_len;
+       cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);
+
+       return nb_desc;
+}
+
+static __rte_always_inline uint16_t
+__octeontx_xmit_mseg_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
+                       const uint16_t flag __rte_unused)
 {
-       uint8_t sz = (4 + (!!(flag & OCCTX_TX_MULTI_SEG_F) * 10));
-       /* Max size of PKO SEND desc is 112 bytes*/
-       uint64_t cmd_buf[sz] __rte_cache_aligned;
-       uint8_t nb_segs, nb_desc = 0;
+       uint16_t nb_segs, nb_desc = 0;
        uint16_t gaura_id, len = 0;
        struct rte_mbuf *m_next = NULL;
 
-       if (unlikely(*((volatile int64_t *)fc_status_va) < 0))
-               return -ENOSPC;
-
-
-       if (flag & OCCTX_TX_MULTI_SEG_F) {
-               nb_segs = tx_pkt->nb_segs;
-               /* Setup PKO_SEND_HDR_S */
-               cmd_buf[nb_desc++] = tx_pkt->pkt_len & 0xffff;
-               cmd_buf[nb_desc++] = 0x0;
-
-               do {
-                       m_next = tx_pkt->next;
-                       /* To handle case where mbufs belong to diff pools, like
-                        * fragmentation
-                        */
-                       gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
-                                                       tx_pkt->pool->pool_id);
-
-                       /* Setup PKO_SEND_GATHER_S */
-                       cmd_buf[nb_desc] = PKO_SEND_GATHER_SUBDC           |
-                                            PKO_SEND_GATHER_LDTYPE(0x1ull)  |
-                                            PKO_SEND_GATHER_GAUAR((long)
-                                                                  gaura_id) |
-                                            tx_pkt->data_len;
-                       /* Mark mempool object as "put" since it is freed by
-                        * PKO.
-                        */
-                       if (!(cmd_buf[nb_desc] & (1ULL << 57))) {
-                               tx_pkt->next = NULL;
-                               __mempool_check_cookies(tx_pkt->pool,
-                                                       (void **)&tx_pkt, 1, 0);
-                       }
-                       nb_desc++;
-
-                       cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);
-
-                       nb_segs--;
-                       len += tx_pkt->data_len;
-                       tx_pkt = m_next;
-               } while (nb_segs);
-       } else {
-               /* Setup PKO_SEND_HDR_S */
-               cmd_buf[nb_desc++] = tx_pkt->data_len & 0xffff;
-               cmd_buf[nb_desc++] = 0x0;
-
-               /* Mark mempool object as "put" since it is freed by PKO */
-               if (!(cmd_buf[0] & (1ULL << 58)))
-                       __mempool_check_cookies(tx_pkt->pool, (void **)&tx_pkt,
-                                               1, 0);
-               /* Get the gaura Id */
+       nb_segs = tx_pkt->nb_segs;
+       /* Setup PKO_SEND_HDR_S */
+       cmd_buf[nb_desc++] = tx_pkt->pkt_len & 0xffff;
+       cmd_buf[nb_desc++] = 0x0;
+
+       do {
+               m_next = tx_pkt->next;
+               /* To handle case where mbufs belong to diff pools, like
+                * fragmentation
+                */
                gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
                                                      tx_pkt->pool->pool_id);
 
-               /* Setup PKO_SEND_BUFLINK_S */
-               cmd_buf[nb_desc++] = PKO_SEND_BUFLINK_SUBDC |
-                                    PKO_SEND_BUFLINK_LDTYPE(0x1ull) |
-                                    PKO_SEND_BUFLINK_GAUAR((long)gaura_id) |
-                                    tx_pkt->data_len;
+               /* Setup PKO_SEND_GATHER_S */
+               cmd_buf[nb_desc] = PKO_SEND_GATHER_SUBDC                 |
+                                  PKO_SEND_GATHER_LDTYPE(0x1ull)        |
+                                  PKO_SEND_GATHER_GAUAR((long)gaura_id) |
+                                  tx_pkt->data_len;
+
+               /* Mark mempool object as "put" since it is freed by
+                * PKO.
+                */
+               if (!(cmd_buf[nb_desc] & (1ULL << 57))) {
+                       tx_pkt->next = NULL;
+                       __mempool_check_cookies(tx_pkt->pool,
+                                               (void **)&tx_pkt, 1, 0);
+               }
+               nb_desc++;
+
                cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);
-       }
-       octeontx_reg_lmtst(lmtline_va, ioreg_va, cmd_buf, nb_desc);
 
-       return 0;
-}
+               nb_segs--;
+               len += tx_pkt->data_len;
+               tx_pkt = m_next;
+       } while (nb_segs);
 
-uint16_t
-octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+       return nb_desc;
+}
 
-uint16_t
-octeontx_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
-                       uint16_t nb_pkts);
+static __rte_always_inline uint16_t
+__octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+                    uint16_t nb_pkts, uint64_t *cmd_buf,
+                    const uint16_t flags)
+{
+       struct octeontx_txq *txq = tx_queue;
+       octeontx_dq_t *dq = &txq->dq;
+       uint16_t count = 0, nb_desc;
+       rte_cio_wmb();
+
+       while (count < nb_pkts) {
+               if (unlikely(*((volatile int64_t *)dq->fc_status_va) < 0))
+                       break;
+
+               if (flags & OCCTX_TX_MULTI_SEG_F) {
+                       nb_desc = __octeontx_xmit_mseg_prepare(tx_pkts[count],
+                                                              cmd_buf, flags);
+               } else {
+                       nb_desc = __octeontx_xmit_prepare(tx_pkts[count],
+                                                         cmd_buf, flags);
+               }
+
+               octeontx_reg_lmtst(dq->lmtline_va, dq->ioreg_va, cmd_buf,
+                                  nb_desc);
+
+               count++;
+       }
+       return count;
+}
 
 uint16_t
 octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 
-#endif /* __OCTEONTX_RXTX_H__ */
+#define MULT_F       OCCTX_TX_MULTI_SEG_F
+/* [NOFF] [MULTI_SEG] */
+#define OCCTX_TX_FASTPATH_MODES                                                      \
+T(no_offload,                          0,      4,   OCCTX_TX_OFFLOAD_NONE)   \
+T(mseg,                                        1,      14,  MULT_F)                  \
+
+#endif /* __OCTEONTX_RXTX_H__ */
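
For reference, the two T() entries in the table above hand-expand in
drivers/net/octeontx/octeontx_rxtx.c to roughly the following (an
illustrative expansion of this patch's macros, not extra code carried
by the commit):

/* Illustrative hand-expansion of OCCTX_TX_FASTPATH_MODES (not in the patch). */
static uint16_t __rte_noinline __rte_hot
octeontx_xmit_pkts_no_offload(void *tx_queue,
			      struct rte_mbuf **tx_pkts, uint16_t pkts)
{
	uint64_t cmd[4];	/* single-seg command: HDR + BUFLINK words */

	return __octeontx_xmit_pkts(tx_queue, tx_pkts, pkts, cmd,
				    OCCTX_TX_OFFLOAD_NONE);
}

static uint16_t __rte_noinline __rte_hot
octeontx_xmit_pkts_mseg(void *tx_queue,
			struct rte_mbuf **tx_pkts, uint16_t pkts)
{
	uint64_t cmd[14];	/* extra room for per-segment gather words */

	return __octeontx_xmit_pkts(tx_queue, tx_pkts, pkts, cmd,
				    OCCTX_TX_MULTI_SEG_F);
}

/* ...and octeontx_set_tx_function() effectively builds: */
const eth_tx_burst_t tx_burst_func[2] = {
	[0] = octeontx_xmit_pkts_no_offload,
	[1] = octeontx_xmit_pkts_mseg,
};

The per-mode functions differ only in the scratch command-buffer size
and in the compile-time flags passed to the common
__octeontx_xmit_pkts() loop, so the branch on OCCTX_TX_MULTI_SEG_F is
resolved at compile time within each specialization.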