net/octeontx_ep: add Rx path
author    Nalla Pradeep <pnalla@marvell.com>
Fri, 29 Jan 2021 12:45:09 +0000 (04:45 -0800)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Fri, 29 Jan 2021 17:16:12 +0000 (18:16 +0100)
Add a function that delivers packets from the DROQ to the application.
The Rx path also refills the DROQ with receive buffers in a timely
manner so that the device can keep filling them with incoming packets.
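
For reference, a minimal polling sketch (not part of this patch; the
port id, queue id and burst size below are illustrative) shows how the
burst handler installed here is reached through the standard ethdev Rx
API:

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    #define BURST_SIZE 32

    static void
    poll_rx(uint16_t port_id, uint16_t queue_id)
    {
            struct rte_mbuf *pkts[BURST_SIZE];
            uint16_t nb_rx, i;

            /* Dispatches to otx_ep_recv_pkts() on octeontx_ep ports. */
            nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SIZE);

            /* Consume and release the received mbufs. */
            for (i = 0; i < nb_rx; i++)
                    rte_pktmbuf_free(pkts[i]);
    }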

Signed-off-by: Nalla Pradeep <pnalla@marvell.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
drivers/net/octeontx_ep/otx_ep_common.h
drivers/net/octeontx_ep/otx_ep_ethdev.c
drivers/net/octeontx_ep/otx_ep_rxtx.c
drivers/net/octeontx_ep/otx_ep_rxtx.h

diff --git a/drivers/net/octeontx_ep/otx_ep_common.h b/drivers/net/octeontx_ep/otx_ep_common.h
index cb42e3d..b9184c1 100644
@@ -21,6 +21,7 @@
 #define SDP_OTX2_PKIND 57
 #define OTX_EP_BUSY_LOOP_COUNT      (10000)
 #define OTX_EP_MAX_IOQS_PER_VF 8
+#define OTX_CUST_DATA_LEN 0
 
 #define otx_ep_info(fmt, args...)                              \
        rte_log(RTE_LOG_INFO, otx_net_ep_logtype,               \
diff --git a/drivers/net/octeontx_ep/otx_ep_ethdev.c b/drivers/net/octeontx_ep/otx_ep_ethdev.c
index 89056f5..a1efa40 100644
@@ -136,6 +136,7 @@ otx_epdev_init(struct otx_ep_device *otx_epvf)
 
        otx_epvf->fn_list.setup_device_regs(otx_epvf);
 
+       otx_epvf->eth_dev->rx_pkt_burst = &otx_ep_recv_pkts;
        ethdev_queues = (uint32_t)(otx_epvf->sriov_info.rings_per_vf);
        otx_epvf->max_rx_queues = ethdev_queues;
        otx_epvf->max_tx_queues = ethdev_queues;
@@ -382,6 +383,8 @@ otx_ep_eth_dev_uninit(struct rte_eth_dev *eth_dev)
        otx_epdev_exit(eth_dev);
 
        eth_dev->dev_ops = NULL;
+       eth_dev->rx_pkt_burst = NULL;
+       eth_dev->tx_pkt_burst = NULL;
 
        return 0;
 }
diff --git a/drivers/net/octeontx_ep/otx_ep_rxtx.c b/drivers/net/octeontx_ep/otx_ep_rxtx.c
index 1c72202..981147e 100644
@@ -7,6 +7,8 @@
 #include <rte_eal.h>
 #include <rte_mempool.h>
 #include <rte_mbuf.h>
+#include <rte_io.h>
+#include <rte_net.h>
 #include <ethdev_pci.h>
 
 #include "otx_ep_common.h"
 #include "otx2_ep_vf.h"
 #include "otx_ep_rxtx.h"
 
+/* SDP_LENGTH_S specifies packet length and is of 8-byte size */
+#define INFO_SIZE 8
+#define DROQ_REFILL_THRESHOLD 16
+
 static void
 otx_ep_dmazone_free(const struct rte_memzone *mz)
 {
@@ -353,3 +359,283 @@ delete_OQ:
        otx_ep_delete_oqs(otx_ep, oq_no);
        return -ENOMEM;
 }
+
+static uint32_t
+otx_ep_droq_refill(struct otx_ep_droq *droq)
+{
+       struct otx_ep_droq_desc *desc_ring;
+       struct otx_ep_droq_info *info;
+       struct rte_mbuf *buf = NULL;
+       uint32_t desc_refilled = 0;
+
+       desc_ring = droq->desc_ring;
+
+       while (droq->refill_count && (desc_refilled < droq->nb_desc)) {
+               /* If a valid buffer exists (happens if there is no dispatch),
+                * reuse the buffer, else allocate.
+                */
+               if (droq->recv_buf_list[droq->refill_idx] != NULL)
+                       break;
+
+               buf = rte_pktmbuf_alloc(droq->mpool);
+               /* If a buffer could not be allocated, no point in
+                * continuing
+                */
+               if (buf == NULL) {
+                       droq->stats.rx_alloc_failure++;
+                       break;
+               }
+               info = rte_pktmbuf_mtod(buf, struct otx_ep_droq_info *);
+               memset(info, 0, sizeof(*info));
+
+               droq->recv_buf_list[droq->refill_idx] = buf;
+               desc_ring[droq->refill_idx].buffer_ptr =
+                                       rte_mbuf_data_iova_default(buf);
+
+
+               droq->refill_idx = otx_ep_incr_index(droq->refill_idx, 1,
+                               droq->nb_desc);
+
+               desc_refilled++;
+               droq->refill_count--;
+       }
+
+       return desc_refilled;
+}
+
+static struct rte_mbuf *
+otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
+                       struct otx_ep_droq *droq, int next_fetch)
+{
+       volatile struct otx_ep_droq_info *info;
+       struct rte_mbuf *droq_pkt2 = NULL;
+       struct rte_mbuf *droq_pkt = NULL;
+       struct rte_net_hdr_lens hdr_lens;
+       struct otx_ep_droq_info *info2;
+       uint64_t total_pkt_len;
+       uint32_t pkt_len = 0;
+       int next_idx;
+
+       droq_pkt  = droq->recv_buf_list[droq->read_idx];
+       droq_pkt2  = droq->recv_buf_list[droq->read_idx];
+       info = rte_pktmbuf_mtod(droq_pkt, struct otx_ep_droq_info *);
+       /* make sure info is available */
+       rte_rmb();
+       if (unlikely(!info->length)) {
+               int retry = OTX_EP_MAX_DELAYED_PKT_RETRIES;
+               /* otx_ep_dbg("OCTEON DROQ[%d]: read_idx: %d; Data not ready "
+                * "yet, Retry; pending=%lu\n", droq->q_no, droq->read_idx,
+                * droq->pkts_pending);
+                */
+               droq->stats.pkts_delayed_data++;
+               while (retry && !info->length)
+                       retry--;
+               if (!retry && !info->length) {
+                       otx_ep_err("OCTEON DROQ[%d]: read_idx: %d; Retry failed !!\n",
+                                  droq->q_no, droq->read_idx);
+                       /* May be zero length packet; drop it */
+                       rte_pktmbuf_free(droq_pkt);
+                       droq->recv_buf_list[droq->read_idx] = NULL;
+                       droq->read_idx = otx_ep_incr_index(droq->read_idx, 1,
+                                                          droq->nb_desc);
+                       droq->stats.dropped_zlp++;
+                       droq->refill_count++;
+                       goto oq_read_fail;
+               }
+       }
+       if (next_fetch) {
+               next_idx = otx_ep_incr_index(droq->read_idx, 1, droq->nb_desc);
+               droq_pkt2  = droq->recv_buf_list[next_idx];
+               info2 = rte_pktmbuf_mtod(droq_pkt2, struct otx_ep_droq_info *);
+               rte_prefetch_non_temporal((const void *)info2);
+       }
+
+       info->length = rte_bswap64(info->length);
+       /* Deduce the actual data size */
+       total_pkt_len = info->length + INFO_SIZE;
+       if (total_pkt_len <= droq->buffer_size) {
+               info->length -=  OTX_EP_RH_SIZE;
+               droq_pkt  = droq->recv_buf_list[droq->read_idx];
+               if (likely(droq_pkt != NULL)) {
+                       droq_pkt->data_off += OTX_EP_DROQ_INFO_SIZE;
+                       /* otx_ep_dbg("OQ: pkt_len[%ld], buffer_size %d\n",
+                        * (long)info->length, droq->buffer_size);
+                        */
+                       pkt_len = (uint32_t)info->length;
+                       droq_pkt->pkt_len  = pkt_len;
+                       droq_pkt->data_len  = pkt_len;
+                       droq_pkt->port = otx_ep->port_id;
+                       droq->recv_buf_list[droq->read_idx] = NULL;
+                       droq->read_idx = otx_ep_incr_index(droq->read_idx, 1,
+                                                          droq->nb_desc);
+                       droq->refill_count++;
+               }
+       } else {
+               struct rte_mbuf *first_buf = NULL;
+               struct rte_mbuf *last_buf = NULL;
+
+               while (pkt_len < total_pkt_len) {
+                       int cpy_len = 0;
+
+                       cpy_len = ((pkt_len + droq->buffer_size) >
+                                       total_pkt_len)
+                                       ? ((uint32_t)total_pkt_len -
+                                               pkt_len)
+                                       : droq->buffer_size;
+
+                       droq_pkt = droq->recv_buf_list[droq->read_idx];
+                       droq->recv_buf_list[droq->read_idx] = NULL;
+
+                       if (likely(droq_pkt != NULL)) {
+                               /* Note the first seg */
+                               if (!pkt_len)
+                                       first_buf = droq_pkt;
+
+                               droq_pkt->port = otx_ep->port_id;
+                               if (!pkt_len) {
+                                       droq_pkt->data_off +=
+                                               OTX_EP_DROQ_INFO_SIZE;
+                                       droq_pkt->pkt_len =
+                                               cpy_len - OTX_EP_DROQ_INFO_SIZE;
+                                       droq_pkt->data_len =
+                                               cpy_len - OTX_EP_DROQ_INFO_SIZE;
+                               } else {
+                                       droq_pkt->pkt_len = cpy_len;
+                                       droq_pkt->data_len = cpy_len;
+                               }
+
+                               if (pkt_len) {
+                                       first_buf->nb_segs++;
+                                       first_buf->pkt_len += droq_pkt->pkt_len;
+                               }
+
+                               if (last_buf)
+                                       last_buf->next = droq_pkt;
+
+                               last_buf = droq_pkt;
+                       } else {
+                               otx_ep_err("no buf\n");
+                       }
+
+                       pkt_len += cpy_len;
+                       droq->read_idx = otx_ep_incr_index(droq->read_idx, 1,
+                                                          droq->nb_desc);
+                       droq->refill_count++;
+               }
+               droq_pkt = first_buf;
+       }
+       droq_pkt->packet_type = rte_net_get_ptype(droq_pkt, &hdr_lens,
+                                       RTE_PTYPE_ALL_MASK);
+       droq_pkt->l2_len = hdr_lens.l2_len;
+       droq_pkt->l3_len = hdr_lens.l3_len;
+       droq_pkt->l4_len = hdr_lens.l4_len;
+
+       if ((droq_pkt->pkt_len > (RTE_ETHER_MAX_LEN + OTX_CUST_DATA_LEN)) &&
+           !(otx_ep->rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)) {
+               rte_pktmbuf_free(droq_pkt);
+               goto oq_read_fail;
+       }
+
+       if (droq_pkt->nb_segs > 1 &&
+           !(otx_ep->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
+               rte_pktmbuf_free(droq_pkt);
+               goto oq_read_fail;
+       }
+
+       return droq_pkt;
+
+oq_read_fail:
+       return NULL;
+}
+
+static inline uint32_t
+otx_ep_check_droq_pkts(struct otx_ep_droq *droq)
+{
+       volatile uint64_t pkt_count;
+       uint32_t new_pkts;
+
+       /* Latest available OQ packets */
+       pkt_count = rte_read32(droq->pkts_sent_reg);
+       rte_write32(pkt_count, droq->pkts_sent_reg);
+       new_pkts = pkt_count;
+       droq->pkts_pending += new_pkts;
+       return new_pkts;
+}
+
+/* Check for response arrival from OCTEON TX2
+ * returns number of requests completed
+ */
+uint16_t
+otx_ep_recv_pkts(void *rx_queue,
+                 struct rte_mbuf **rx_pkts,
+                 uint16_t budget)
+{
+       struct otx_ep_droq *droq = rx_queue;
+       struct otx_ep_device *otx_ep;
+       struct rte_mbuf *oq_pkt;
+
+       uint32_t pkts = 0;
+       uint32_t new_pkts = 0;
+       int next_fetch;
+
+       otx_ep = droq->otx_ep_dev;
+
+       if (droq->pkts_pending > budget) {
+               new_pkts = budget;
+       } else {
+               new_pkts = droq->pkts_pending;
+               new_pkts += otx_ep_check_droq_pkts(droq);
+               if (new_pkts > budget)
+                       new_pkts = budget;
+       }
+
+       if (!new_pkts)
+               goto update_credit; /* No pkts at this moment */
+
+       for (pkts = 0; pkts < new_pkts; pkts++) {
+               /* Push the received pkt to application */
+               next_fetch = (pkts == new_pkts - 1) ? 0 : 1;
+               oq_pkt = otx_ep_droq_read_packet(otx_ep, droq, next_fetch);
+               if (!oq_pkt) {
+                       RTE_LOG_DP(ERR, PMD,
+                                  "DROQ read pkt failed pending %" PRIu64
+                                   "last_pkt_count %" PRIu64 "new_pkts %d.\n",
+                                  droq->pkts_pending, droq->last_pkt_count,
+                                  new_pkts);
+                       droq->pkts_pending -= pkts;
+                       droq->stats.rx_err++;
+                       goto finish;
+               }
+               rx_pkts[pkts] = oq_pkt;
+               /* Stats */
+               droq->stats.pkts_received++;
+               droq->stats.bytes_received += oq_pkt->pkt_len;
+       }
+       droq->pkts_pending -= pkts;
+
+       /* Refill DROQ buffers */
+update_credit:
+       if (droq->refill_count >= DROQ_REFILL_THRESHOLD) {
+               int desc_refilled = otx_ep_droq_refill(droq);
+
+               /* Flush the droq descriptor data to memory to be sure
+                * that when we update the credits the data in memory is
+                * accurate.
+                */
+               rte_wmb();
+               rte_write32(desc_refilled, droq->pkts_credit_reg);
+       } else {
+               /*
+                * SDP output goes into DROP state when output doorbell count
+                * goes below drop count. When door bell count is written with
+                * a value greater than drop count SDP output should come out
+                * of DROP state. Due to a race condition this is not happening.
+                * Writing doorbell register with 0 again may make SDP output
+                * come out of this state.
+                */
+
+               rte_write32(0, droq->pkts_credit_reg);
+       }
+finish:
+       return pkts;
+}
diff --git a/drivers/net/octeontx_ep/otx_ep_rxtx.h b/drivers/net/octeontx_ep/otx_ep_rxtx.h
index 9779e96..d8b4114 100644
@@ -7,4 +7,14 @@
 
 #define OTX_EP_RXD_ALIGN 1
 #define OTX_EP_TXD_ALIGN 1
-#endif
+#define OTX_EP_MAX_DELAYED_PKT_RETRIES 10000
+static inline uint32_t
+otx_ep_incr_index(uint32_t index, uint32_t count, uint32_t max)
+{
+       return ((index + count) & (max - 1));
+}
+uint16_t
+otx_ep_recv_pkts(void *rx_queue,
+                 struct rte_mbuf **rx_pkts,
+                 uint16_t budget);
+#endif /* _OTX_EP_RXTX_H_ */