net/pfe: add queue setup and release
author Gagandeep Singh <g.singh@nxp.com>
Thu, 10 Oct 2019 06:32:28 +0000 (12:02 +0530)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Wed, 23 Oct 2019 14:43:08 +0000 (16:43 +0200)
This patch adds Rx/Tx queue setup and release operations
and advertises the supported checksum offloads.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Acked-by: Nipun Gupta <nipun.gupta@nxp.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
doc/guides/nics/features/pfe.ini
doc/guides/nics/pfe.rst
drivers/net/pfe/pfe_ethdev.c
drivers/net/pfe/pfe_hif.c
drivers/net/pfe/pfe_hif.h
drivers/net/pfe/pfe_hif_lib.c
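
Usage illustration (not part of the patch): a minimal sketch of driving the new queue ops and offload capabilities through the standard ethdev API. The helper name pfe_port_bringup, the port id, the descriptor count and the single queue pair are all hypothetical; note that this PMD ignores nb_rx_desc and rx_conf, and the mempool passed to the first Rx queue setup backs the whole HIF.

    #include <string.h>
    #include <rte_ethdev.h>
    #include <rte_lcore.h>
    #include <rte_mempool.h>

    static int
    pfe_port_bringup(uint16_t port_id, struct rte_mempool *pool)
    {
    	struct rte_eth_dev_info info;
    	struct rte_eth_conf conf;
    	int ret;

    	memset(&conf, 0, sizeof(conf));
    	rte_eth_dev_info_get(port_id, &info);

    	/* Request everything the PMD advertises; with this patch that
    	 * is the IPv4/UDP/TCP checksum offloads.
    	 */
    	conf.rxmode.offloads = info.rx_offload_capa;
    	conf.txmode.offloads = info.tx_offload_capa;

    	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
    	if (ret < 0)
    		return ret;

    	/* nb_rx_desc and rx_conf are ignored by this PMD; the mempool
    	 * given on this first call is used for the whole HIF.
    	 */
    	ret = rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(),
    				     NULL, pool);
    	if (ret < 0)
    		return ret;

    	ret = rte_eth_tx_queue_setup(port_id, 0, 128, rte_socket_id(),
    				     NULL);
    	if (ret < 0)
    		return ret;

    	return rte_eth_dev_start(port_id);
    }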

diff --git a/doc/guides/nics/features/pfe.ini b/doc/guides/nics/features/pfe.ini
index dc78e95..3b96a1a 100644
@@ -4,6 +4,8 @@
 ; Refer to default.ini for the full list of available PMD features.
 ;
 [Features]
+L3 checksum offload  = Y
+L4 checksum offload  = Y
 Linux VFIO           = Y
 ARMv8                = Y
 Usage doc            = Y
diff --git a/doc/guides/nics/pfe.rst b/doc/guides/nics/pfe.rst
index 2e713c3..23aa15b 100644
@@ -93,6 +93,7 @@ the kernel layer for link status.
 PFE Features
 ~~~~~~~~~~~~
 
+- L3/L4 checksum offload
 - ARMv8
 
 Supported PFE SoCs
diff --git a/drivers/net/pfe/pfe_ethdev.c b/drivers/net/pfe/pfe_ethdev.c
index 08f3716..d59ccde 100644
@@ -17,6 +17,17 @@ struct pfe_vdev_init_params {
        int8_t  gem_id;
 };
 static struct pfe *g_pfe;
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+               DEV_RX_OFFLOAD_IPV4_CKSUM |
+               DEV_RX_OFFLOAD_UDP_CKSUM |
+               DEV_RX_OFFLOAD_TCP_CKSUM;
+
+/* Supported Tx offloads */
+static uint64_t dev_tx_offloads_sup =
+               DEV_TX_OFFLOAD_IPV4_CKSUM |
+               DEV_TX_OFFLOAD_UDP_CKSUM |
+               DEV_TX_OFFLOAD_TCP_CKSUM;
 
 /* TODO: make pfe_svr a runtime option.
  * Driver should be able to get the SVR
@@ -284,6 +295,8 @@ pfe_eth_info(struct rte_eth_dev *dev,
        dev_info->max_rx_queues = dev->data->nb_rx_queues;
        dev_info->max_tx_queues = dev->data->nb_tx_queues;
        dev_info->min_rx_bufsize = HIF_RX_PKT_MIN_SIZE;
+       dev_info->rx_offload_capa = dev_rx_offloads_sup;
+       dev_info->tx_offload_capa = dev_tx_offloads_sup;
        if (pfe_svr == SVR_LS1012A_REV1)
                dev_info->max_rx_pktlen = MAX_MTU_ON_REV1 + PFE_ETH_OVERHEAD;
        else
@@ -292,12 +305,92 @@ pfe_eth_info(struct rte_eth_dev *dev,
        return 0;
 }
 
+/* Only the mb_pool supplied on the first call of this API is used
+ * for the whole system; nb_rx_desc and rx_conf are unused parameters.
+ */
+static int
+pfe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+               __rte_unused uint16_t nb_rx_desc,
+               __rte_unused unsigned int socket_id,
+               __rte_unused const struct rte_eth_rxconf *rx_conf,
+               struct rte_mempool *mb_pool)
+{
+       int rc = 0;
+       struct pfe *pfe;
+       struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+       pfe = priv->pfe;
+
+       if (queue_idx >= EMAC_RXQ_CNT) {
+               PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
+                               queue_idx, EMAC_RXQ_CNT);
+               return -1;
+       }
+
+       if (!pfe->hif.setuped) {
+               rc = pfe_hif_shm_init(pfe->hif.shm, mb_pool);
+               if (rc) {
+                       PFE_PMD_ERR("Could not allocate buffer descriptors");
+                       return -1;
+               }
+
+               pfe->hif.shm->pool = mb_pool;
+               if (pfe_hif_init_buffers(&pfe->hif)) {
+                       PFE_PMD_ERR("Could not initialize buffer descriptors");
+                       return -1;
+               }
+               hif_init();
+               hif_rx_enable();
+               hif_tx_enable();
+               pfe->hif.setuped = 1;
+       }
+       dev->data->rx_queues[queue_idx] = &priv->client.rx_q[queue_idx];
+       priv->client.rx_q[queue_idx].queue_id = queue_idx;
+
+       return 0;
+}
+
+static void
+pfe_rx_queue_release(void *q __rte_unused)
+{
+       PMD_INIT_FUNC_TRACE();
+}
+
+static void
+pfe_tx_queue_release(void *q __rte_unused)
+{
+       PMD_INIT_FUNC_TRACE();
+}
+
+static int
+pfe_tx_queue_setup(struct rte_eth_dev *dev,
+                  uint16_t queue_idx,
+                  __rte_unused uint16_t nb_desc,
+                  __rte_unused unsigned int socket_id,
+                  __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+       struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+       if (queue_idx >= emac_txq_cnt) {
+               PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
+                               queue_idx, emac_txq_cnt);
+               return -1;
+       }
+       dev->data->tx_queues[queue_idx] = &priv->client.tx_q[queue_idx];
+       priv->client.tx_q[queue_idx].queue_id = queue_idx;
+       return 0;
+}
+
 static const struct eth_dev_ops ops = {
        .dev_start = pfe_eth_open,
        .dev_stop = pfe_eth_stop,
        .dev_close = pfe_eth_close,
        .dev_configure = pfe_eth_configure,
        .dev_infos_get = pfe_eth_info,
+       .rx_queue_setup = pfe_rx_queue_setup,
+       .rx_queue_release  = pfe_rx_queue_release,
+       .tx_queue_setup = pfe_tx_queue_setup,
+       .tx_queue_release  = pfe_tx_queue_release,
 };
 
 static int
diff --git a/drivers/net/pfe/pfe_hif.c b/drivers/net/pfe/pfe_hif.c
index 39a6ec8..bd5bf7a 100644
@@ -43,6 +43,121 @@ pfe_hif_free_descr(struct pfe_hif *hif)
        rte_free(hif->descr_baseaddr_v);
 }
 
+/*
+ * pfe_hif_init_buffers
+ * This function initializes the HIF Rx/Tx ring descriptors and
+ * fills the Rx ring with buffers.
+ */
+int
+pfe_hif_init_buffers(struct pfe_hif *hif)
+{
+       struct hif_desc *desc, *first_desc_p;
+       uint32_t i = 0;
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* Check that enough Rx buffers are available in the shared memory */
+       if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
+               return -ENOMEM;
+
+       hif->rx_base = hif->descr_baseaddr_v;
+       memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));
+
+       /* Initialize Rx descriptors */
+       desc = hif->rx_base;
+       first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;
+
+       for (i = 0; i < hif->rx_ring_size; i++) {
+               /* Initialize Rx buffers from the shared memory */
+               struct rte_mbuf *mbuf =
+                       (struct rte_mbuf *)hif->shm->rx_buf_pool[i];
+
+               /* The PFE mbuf layout is as follows:
+                * ----------------------------------------------------------+
+                * | mbuf  | priv | headroom (annotation + PFE data) | data  |
+                * ----------------------------------------------------------+
+                *
+                * Along with the data, the PFE block returns additional
+                * information such as parse results, eth id and queue id,
+                * so extra memory must be provided with each packet on the
+                * HIF Rx rings for the PFE block to write its headers.
+                * Therefore the pointer handed to the HIF rings is
+                * calculated as:
+                * mbuf data pointer - required header size
+                *
+                * The HEADROOM area is used to receive the PFE block
+                * headers. On packet reception, the HIF driver uses the
+                * PFE header information to decide the client and to fill
+                * in the parse results; after that, the application can
+                * use/overwrite the HEADROOM area.
+                */
+               hif->rx_buf_vaddr[i] =
+                       (void *)((size_t)mbuf->buf_addr + mbuf->data_off -
+                                       PFE_PKT_HEADER_SZ);
+               hif->rx_buf_addr[i] =
+                       (void *)(size_t)(rte_pktmbuf_iova(mbuf) -
+                                       PFE_PKT_HEADER_SZ);
+               hif->rx_buf_len[i] =  mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
+
+               hif->shm->rx_buf_pool[i] = NULL;
+
+               writel(DDR_PHYS_TO_PFE(hif->rx_buf_addr[i]),
+                                       &desc->data);
+               writel(0, &desc->status);
+
+               /*
+                * Ensure everything else is written to DDR before
+                * writing bd->ctrl
+                */
+               rte_wmb();
+
+               writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
+                       | BD_CTRL_DIR | BD_CTRL_DESC_EN
+                       | BD_BUF_LEN(hif->rx_buf_len[i])), &desc->ctrl);
+
+               /* Chain descriptors */
+               writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
+               desc++;
+       }
+
+       /* Overwrite last descriptor to chain it to first one */
+       desc--;
+       writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
+
+       hif->rxtoclean_index = 0;
+
+       /* Initialize Rx buffer descriptor ring base address */
+       writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);
+
+       hif->tx_base = hif->rx_base + hif->rx_ring_size;
+       first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
+                               hif->rx_ring_size;
+       memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));
+
+       /* Initialize Tx descriptors */
+       desc = hif->tx_base;
+
+       for (i = 0; i < hif->tx_ring_size; i++) {
+               /* Chain descriptors */
+               writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
+               writel(0, &desc->ctrl);
+               desc++;
+       }
+
+       /* Overwrite last descriptor to chain it to first one */
+       desc--;
+       writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
+       hif->txavail = hif->tx_ring_size;
+       hif->txtosend = 0;
+       hif->txtoclean = 0;
+       hif->txtoflush = 0;
+
+       /* Initialize Tx buffer descriptor ring base address */
+       writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);
+
+       return 0;
+}
+
 /*
  * pfe_hif_client_register
  *
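
Aside (not part of the patch): in pfe_hif_init_buffers() above, the chaining is done in two steps: each descriptor is linked to the next, then the last link is rewritten to point back to the first, forming a circular ring. A minimal equivalent sketch in modulo form, reusing names from the hunk (ring_size stands for hif->rx_ring_size or hif->tx_ring_size):

    /* Sketch only: descriptor i points at descriptor (i + 1) % ring_size,
     * so the HIF hardware wraps from the last descriptor back to the
     * first without software intervention.
     */
    for (i = 0; i < ring_size; i++)
    	writel((u32)DDR_PHYS_TO_PFE(first_desc_p + ((i + 1) % ring_size)),
    	       &desc[i].next);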
diff --git a/drivers/net/pfe/pfe_hif.h b/drivers/net/pfe/pfe_hif.h
index 9c4e577..d8bb26b 100644
@@ -143,5 +143,6 @@ void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int
 int pfe_hif_init(struct pfe *pfe);
 void pfe_hif_exit(struct pfe *pfe);
 void pfe_hif_rx_idle(struct pfe_hif *hif);
+int pfe_hif_init_buffers(struct pfe_hif *hif);
 
 #endif /* _PFE_HIF_H_ */
diff --git a/drivers/net/pfe/pfe_hif_lib.c b/drivers/net/pfe/pfe_hif_lib.c
index 2012d89..f5e290f 100644
@@ -15,6 +15,56 @@ unsigned int emac_txq_cnt;
 /*HIF shared memory Global variable */
 struct hif_shm ghif_shm;
 
+/* Clean up the HIF shared memory and release the HIF rx_buffer_pool.
+ * This function should be called after pfe_hif_exit().
+ *
+ * @param[in] hif_shm          Shared memory address location in DDR
+ */
+void
+pfe_hif_shm_clean(struct hif_shm *hif_shm)
+{
+       unsigned int i;
+       void *pkt;
+
+       for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
+               pkt = hif_shm->rx_buf_pool[i];
+               if (pkt)
+                       rte_pktmbuf_free((struct rte_mbuf *)pkt);
+       }
+}
+
+/* Initialize the shared memory used between the HIF driver and clients,
+ * and allocate the rx_buffer_pool required for the HIF Rx descriptors.
+ * This function should be called before initializing the HIF driver.
+ *
+ * @param[in] hif_shm          Shared memory address location in DDR
+ * @param[in] mb_pool          Mempool from which Rx buffers are allocated
+ * @return                     0 on success, <0 on failure to initialize
+ */
+int
+pfe_hif_shm_init(struct hif_shm *hif_shm, struct rte_mempool *mb_pool)
+{
+       unsigned int i;
+       struct rte_mbuf *mbuf;
+
+       memset(hif_shm, 0, sizeof(struct hif_shm));
+       hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;
+
+       for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
+               mbuf = rte_pktmbuf_alloc(mb_pool);
+               if (mbuf)
+                       hif_shm->rx_buf_pool[i] = mbuf;
+               else
+                       goto err0;
+       }
+
+       return 0;
+
+err0:
+       PFE_PMD_ERR("Low memory");
+       pfe_hif_shm_clean(hif_shm);
+       return -ENOMEM;
+}
+
 /*This function sends indication to HIF driver
  *
  * @param[in] hif      hif context
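
Aside (not part of the patch): combined with pfe_rx_queue_setup() earlier in this commit, the intended lifecycle of these helpers is roughly the following sketch (error handling abbreviated):

    /* Bring-up, done once from the first pfe_rx_queue_setup() call */
    rc = pfe_hif_shm_init(pfe->hif.shm, mb_pool); /* pre-allocate ring mbufs */
    if (rc == 0) {
    	pfe->hif.shm->pool = mb_pool;
    	/* Hand the mbufs to the HIF Rx ring; consumed pool entries
    	 * are set to NULL so they are not freed twice.
    	 */
    	rc = pfe_hif_init_buffers(&pfe->hif);
    }

    /* Teardown: stop the HIF first, then free any mbufs still pooled */
    pfe_hif_exit(pfe);
    pfe_hif_shm_clean(pfe->hif.shm);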