ethdev: add namespace
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index eae830f..aa2c27e 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -10,6 +10,7 @@
 #include <rte_time.h>
 #include <rte_mempool.h>
 #include <rte_mbuf.h>
+#include <rte_vect.h>
 
 static void
 axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue)
@@ -30,9 +31,9 @@ axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue)
        }
 }
 
-void axgbe_dev_rx_queue_release(void *rxq)
+void axgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-       axgbe_rx_queue_release(rxq);
+       axgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
 }
 
 int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
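
Note: since DPDK 21.11 the queue-release callbacks in struct eth_dev_ops take
the device and a queue index instead of a raw queue pointer, which is what the
hunk above adapts to. A minimal sketch of wiring the new signature, assuming
the driver's ops table keeps its upstream name axgbe_eth_dev_ops:

    static const struct eth_dev_ops axgbe_eth_dev_ops = {
            /* ...other callbacks elided... */
            .rx_queue_release = axgbe_dev_rx_queue_release,
            .tx_queue_release = axgbe_dev_tx_queue_release,
    };
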
@@ -74,8 +75,8 @@ int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                (DMA_CH_INC * rxq->queue_id));
        rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
                                                  DMA_CH_RDTR_LO);
-       if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
-               rxq->crc_len = ETHER_CRC_LEN;
+       if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
+               rxq->crc_len = RTE_ETHER_CRC_LEN;
        else
                rxq->crc_len = 0;
 
@@ -95,7 +96,7 @@ int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                axgbe_rx_queue_release(rxq);
                return -ENOMEM;
        }
-       rxq->ring_phys_addr = (uint64_t)dma->phys_addr;
+       rxq->ring_phys_addr = (uint64_t)dma->iova;
        rxq->desc = (volatile union axgbe_rx_desc *)dma->addr;
        memset((void *)rxq->desc, 0, size);
        /* Allocate software ring */
@@ -208,9 +209,10 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        volatile union axgbe_rx_desc *desc;
        uint64_t old_dirty = rxq->dirty;
        struct rte_mbuf *mbuf, *tmbuf;
-       unsigned int err;
+       unsigned int err, etlt;
        uint32_t error_status;
        uint16_t idx, pidx, pkt_len;
+       uint64_t offloads;
 
        idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
        while (nb_rx < nb_pkts) {
@@ -229,6 +231,7 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                                    (unsigned int)rxq->queue_id);
                        rte_eth_devices[
                                rxq->port_id].data->rx_mbuf_alloc_failed++;
+                       rxq->rx_mbuf_alloc_failed++;
                        break;
                }
                pidx = idx + 1;
@@ -274,6 +277,30 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                /* Get the RSS hash */
                if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
                        mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
+               etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
+                               RX_NORMAL_DESC3, ETLT);
+               offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
+               if (!err || !etlt) {
+                       if (etlt == RX_CVLAN_TAG_PRESENT) {
+                               mbuf->ol_flags |= PKT_RX_VLAN;
+                               mbuf->vlan_tci =
+                                       AXGMAC_GET_BITS_LE(desc->write.desc0,
+                                                       RX_NORMAL_DESC0, OVT);
+                               if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+                                       mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
+                               else
+                                       mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
+                       } else {
+                               mbuf->ol_flags &=
+                                       ~(PKT_RX_VLAN
+                                               | PKT_RX_VLAN_STRIPPED);
+                               mbuf->vlan_tci = 0;
+                       }
+               }
+               /* Indicate if a Context Descriptor is next */
+               if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, CDA))
+                       mbuf->ol_flags |= PKT_RX_IEEE1588_PTP
+                                       | PKT_RX_IEEE1588_TMST;
                pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3,
                                             PL) - rxq->crc_len;
                /* Mbuf populate */
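
With the ETLT handling added above, a received C-VLAN tag lands in
mbuf->vlan_tci and is advertised through ol_flags (this revision predates the
RTE_MBUF_F_ rename, hence the PKT_RX_ names). A consumer-side sketch with a
hypothetical process_vlan() helper:

    if (m->ol_flags & PKT_RX_VLAN) {
            /* PKT_RX_VLAN_STRIPPED additionally means the tag bytes
             * were already removed from the packet data by hardware.
             */
            process_vlan(m->vlan_tci);
    }
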
@@ -306,6 +333,171 @@ err_set:
        return nb_rx;
 }
 
+
+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
+               struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+       PMD_INIT_FUNC_TRACE();
+       uint16_t nb_rx = 0;
+       struct axgbe_rx_queue *rxq = rx_queue;
+       volatile union axgbe_rx_desc *desc;
+
+       uint64_t old_dirty = rxq->dirty;
+       struct rte_mbuf *first_seg = NULL;
+       struct rte_mbuf *mbuf, *tmbuf;
+       unsigned int err, etlt;
+       uint32_t error_status;
+       uint16_t idx, pidx, data_len = 0, pkt_len = 0;
+       uint64_t offloads;
+
+       idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
+       while (nb_rx < nb_pkts) {
+               bool eop = 0;
+next_desc:
+               if (unlikely(idx == rxq->nb_desc))
+                       idx = 0;
+
+               desc = &rxq->desc[idx];
+
+               if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
+                       break;
+
+               tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+               if (unlikely(!tmbuf)) {
+                       PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
+                                   " queue_id = %u\n",
+                                   (unsigned int)rxq->port_id,
+                                   (unsigned int)rxq->queue_id);
+                       rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+                       break;
+               }
+
+               pidx = idx + 1;
+               if (unlikely(pidx == rxq->nb_desc))
+                       pidx = 0;
+
+               rte_prefetch0(rxq->sw_ring[pidx]);
+               if ((pidx & 0x3) == 0) {
+                       rte_prefetch0(&rxq->desc[pidx]);
+                       rte_prefetch0(&rxq->sw_ring[pidx]);
+               }
+
+               mbuf = rxq->sw_ring[idx];
+               /* Check for any errors and free mbuf*/
+               err = AXGMAC_GET_BITS_LE(desc->write.desc3,
+                                        RX_NORMAL_DESC3, ES);
+               error_status = 0;
+               if (unlikely(err)) {
+                       error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
+                       if ((error_status != AXGBE_L3_CSUM_ERR)
+                                       && (error_status != AXGBE_L4_CSUM_ERR)) {
+                               rxq->errors++;
+                               rte_pktmbuf_free(mbuf);
+                               goto err_set;
+                       }
+               }
+               rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
+
+               if (!AXGMAC_GET_BITS_LE(desc->write.desc3,
+                                       RX_NORMAL_DESC3, LD)) {
+                       eop = 0;
+                       pkt_len = rxq->buf_size;
+                       data_len = pkt_len;
+               } else {
+                       eop = 1;
+                       pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3,
+                                       RX_NORMAL_DESC3, PL);
+                       data_len = pkt_len - rxq->crc_len;
+               }
+
+               if (first_seg != NULL) {
+                       if (rte_pktmbuf_chain(first_seg, mbuf) != 0)
+                               rte_mempool_put(rxq->mb_pool,
+                                               first_seg);
+               } else {
+                       first_seg = mbuf;
+               }
+
+               /* Get the RSS hash */
+               if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
+                       mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
+               etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
+                               RX_NORMAL_DESC3, ETLT);
+               offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
+               if (!err || !etlt) {
+                       if (etlt == RX_CVLAN_TAG_PRESENT) {
+                               mbuf->ol_flags |= PKT_RX_VLAN;
+                               mbuf->vlan_tci =
+                                       AXGMAC_GET_BITS_LE(desc->write.desc0,
+                                                       RX_NORMAL_DESC0, OVT);
+                               if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+                                       mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
+                               else
+                                       mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
+                       } else {
+                               mbuf->ol_flags &=
+                                       ~(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+                               mbuf->vlan_tci = 0;
+                       }
+               }
+               /* Mbuf populate */
+               mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+               mbuf->data_len = data_len;
+
+err_set:
+               rxq->cur++;
+               rxq->sw_ring[idx++] = tmbuf;
+               desc->read.baddr =
+                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
+               memset((void *)(&desc->read.desc2), 0, 8);
+               AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
+               rxq->dirty++;
+
+               if (!eop) {
+                       rte_pktmbuf_free(mbuf);
+                       goto next_desc;
+               }
+
+               first_seg->pkt_len = pkt_len;
+               rxq->bytes += pkt_len;
+               mbuf->next = NULL;
+
+               first_seg->port = rxq->port_id;
+               if (rxq->pdata->rx_csum_enable) {
+                       mbuf->ol_flags = 0;
+                       mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+                       mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+                       if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
+                               mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
+                               mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+                               mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
+                               mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+                       } else if (unlikely(error_status
+                                               == AXGBE_L4_CSUM_ERR)) {
+                               mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
+                               mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+                       }
+               }
+
+               rx_pkts[nb_rx++] = first_seg;
+
+               /* Set up receive context for a new packet. */
+               first_seg = NULL;
+       }
+
+       /* Save receive context. */
+       rxq->pkts += nb_rx;
+
+       if (rxq->dirty != old_dirty) {
+               rte_wmb();
+               idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
+               AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
+                                  low32_value(rxq->ring_phys_addr +
+                                  (idx * sizeof(union axgbe_rx_desc))));
+       }
+       return nb_rx;
+}
+
 /* Tx Apis */
 static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
 {
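
eth_axgbe_recv_scattered_pkts(), added in the hunk above, rebuilds a
multi-descriptor frame by chaining per-segment mbufs with rte_pktmbuf_chain()
until the LD (last descriptor) bit is seen; only then is first_seg handed to
the application. A sketch of walking such a chain on the receive side, where
pkt is one mbuf returned by the burst:

    uint32_t bytes = 0;
    struct rte_mbuf *seg;

    for (seg = pkt; seg != NULL; seg = seg->next)
            bytes += seg->data_len;     /* payload held by this segment */
    /* for a well-formed chain, bytes == pkt->pkt_len */
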
@@ -325,9 +517,9 @@ static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
        }
 }
 
-void axgbe_dev_tx_queue_release(void *txq)
+void axgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-       axgbe_tx_queue_release(txq);
+       axgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
 }
 
 int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
@@ -340,9 +532,10 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        struct axgbe_tx_queue *txq;
        unsigned int tsize;
        const struct rte_memzone *tz;
+       uint64_t offloads;
 
        tx_desc = nb_desc;
-       pdata = (struct axgbe_port *)dev->data->dev_private;
+       pdata = dev->data->dev_private;
 
        /*
         * validate tx descriptors count
@@ -359,7 +552,8 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        if (!txq)
                return -ENOMEM;
        txq->pdata = pdata;
-
+       offloads = tx_conf->offloads |
+               txq->pdata->eth_dev->data->dev_conf.txmode.offloads;
        txq->nb_desc = tx_desc;
        txq->free_thresh = tx_conf->tx_free_thresh ?
                tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH;
@@ -371,10 +565,8 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        if (txq->nb_desc % txq->free_thresh != 0)
                txq->vector_disable = 1;
 
-       if ((tx_conf->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOOFFLOADS) !=
-           ETH_TXQ_FLAGS_NOOFFLOADS) {
+       if (offloads != 0)
                txq->vector_disable = 1;
-       }
 
        /* Allocate TX ring hardware descriptors */
        tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc);
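
The deleted ETH_TXQ_FLAGS_NOOFFLOADS test belongs to the retired txq_flags
API; the effective offload set is now the union of per-queue and per-port
requests, and any non-zero set disables the vector Tx path. An
application-side sketch of a per-queue request, with hypothetical port and
descriptor counts:

    struct rte_eth_dev_info dev_info;
    struct rte_eth_txconf txconf;

    rte_eth_dev_info_get(port_id, &dev_info);
    txconf = dev_info.default_txconf;
    txconf.offloads = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
    rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &txconf);
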
@@ -385,7 +577,7 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                return -ENOMEM;
        }
        memset(tz->addr, 0, tsize);
-       txq->ring_phys_addr = (uint64_t)tz->phys_addr;
+       txq->ring_phys_addr = (uint64_t)tz->iova;
        txq->desc = tz->addr;
        txq->queue_id = queue_idx;
        txq->port_id = dev->data->port_id;
@@ -408,7 +600,8 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        if (!pdata->tx_queues)
                pdata->tx_queues = dev->data->tx_queues;
 
-       if (txq->vector_disable)
+       if (txq->vector_disable ||
+                       rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
                dev->tx_pkt_burst = &axgbe_xmit_pkts;
        else
 #ifdef RTE_ARCH_X86
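
rte_vect_get_max_simd_bitwidth(), the reason for the new rte_vect.h include at
the top of this diff, reports the SIMD width EAL allows at run time; anything
below RTE_VECT_SIMD_128 now forces the scalar Tx burst. The same gate in
isolation, as a sketch:

    #include <rte_vect.h>

    bool use_vector_tx =
            rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128;

The limit can also be lowered from the EAL command line, e.g.
--force-max-simd-bitwidth=64 to pin every driver to its scalar path.
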
@@ -420,6 +613,30 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        return 0;
 }
 
+int axgbe_dev_fw_version_get(struct rte_eth_dev *eth_dev,
+               char *fw_version, size_t fw_size)
+{
+       struct axgbe_port *pdata;
+       struct axgbe_hw_features *hw_feat;
+       int ret;
+
+       pdata = (struct axgbe_port *)eth_dev->data->dev_private;
+       hw_feat = &pdata->hw_feat;
+
+       ret = snprintf(fw_version, fw_size, "%d.%d.%d",
+                       AXGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
+                       AXGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
+                       AXGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
+       if (ret < 0)
+               return -EINVAL;
+
+       ret += 1; /* add the size of '\0' */
+       if (fw_size < (size_t)ret)
+               return ret;
+       else
+               return 0;
+}
+
 static void axgbe_txq_prepare_tx_stop(struct axgbe_port *pdata,
                                      unsigned int queue)
 {
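
axgbe_dev_fw_version_get(), added in the hunk above, follows the
rte_eth_dev_fw_version_get() contract: 0 on success, or, when fw_size is too
small, the number of bytes (including the terminating NUL) the caller should
provide. Caller-side sketch with a hypothetical port_id:

    char fw[16];
    int ret = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));

    if (ret == 0)
            printf("fw: %s\n", fw);
    else if (ret > 0)
            printf("buffer too small, need %d bytes\n", ret);
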
@@ -577,6 +794,10 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
        /* Total msg length to transmit */
        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
                           mbuf->pkt_len);
+       /* Timestamp enablement check */
+       if (mbuf->ol_flags & PKT_TX_IEEE1588_TMST)
+               AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
+       rte_wmb();
        /* Mark it as First and Last Descriptor */
        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
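
The TTSE bit set above asks the MAC to latch a Tx timestamp for this frame. On
the application side, the request and read-back would look roughly like the
sketch below, assuming timesync was first enabled with
rte_eth_timesync_enable() and port_id/m are hypothetical:

    struct timespec ts;

    m->ol_flags |= PKT_TX_IEEE1588_TMST;    /* request HW Tx timestamp */
    rte_eth_tx_burst(port_id, 0, &m, 1);
    while (rte_eth_timesync_read_tx_timestamp(port_id, &ts) < 0)
            rte_delay_us(10);               /* wait for the latch */
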
@@ -590,10 +811,28 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
        rte_wmb();
 
+       if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+               /* Mark it as a CONTEXT descriptor */
+               AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+                                 CTXT, 1);
+               /* Set the VLAN tag */
+               AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+                                 VT, mbuf->vlan_tci);
+               /* Indicate this descriptor contains the VLAN tag */
+               AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+                                         VLTV, 1);
+               AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
+                               TX_NORMAL_DESC2_VLAN_INSERT);
+       } else {
+               AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
+       }
+       rte_wmb();
+
        /* Set OWN bit */
        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
        rte_wmb();
 
+
        /* Save mbuf */
        txq->sw_ring[idx] = mbuf;
        /* Update current index*/
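
Tx VLAN insertion mirrors the Rx side: the application stores the TCI in the
mbuf and sets the offload flag, and the context descriptor written above
carries the tag to the MAC. Sketch (pre-rename flag name, hypothetical tag
value):

    m->vlan_tci = 100;                  /* tag to insert */
    m->ol_flags |= PKT_TX_VLAN_PKT;     /* request HW insertion */
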
@@ -674,3 +913,49 @@ void axgbe_dev_clear_queues(struct rte_eth_dev *dev)
                }
        }
 }
+
+int
+axgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+       struct axgbe_rx_queue *rxq = rx_queue;
+       volatile union axgbe_rx_desc *desc;
+       uint16_t idx;
+
+
+       if (unlikely(offset >= rxq->nb_desc))
+               return -EINVAL;
+
+       if (offset >= rxq->nb_desc - rxq->dirty)
+               return RTE_ETH_RX_DESC_UNAVAIL;
+
+       idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
+       desc = &rxq->desc[idx + offset];
+
+       if (!AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
+               return RTE_ETH_RX_DESC_DONE;
+
+       return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+axgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+       struct axgbe_tx_queue *txq = tx_queue;
+       volatile struct axgbe_tx_desc *desc;
+       uint16_t idx;
+
+
+       if (unlikely(offset >= txq->nb_desc))
+               return -EINVAL;
+
+       if (offset >= txq->nb_desc - txq->dirty)
+               return RTE_ETH_TX_DESC_UNAVAIL;
+
+       idx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt - 1);
+       desc = &txq->desc[idx + offset];
+
+       if (!AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
+               return RTE_ETH_TX_DESC_DONE;
+
+       return RTE_ETH_TX_DESC_FULL;
+}
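
Both helpers back the public rte_eth_rx_descriptor_status() and
rte_eth_tx_descriptor_status() calls, which let an application sample ring
occupancy without touching packets. Sketch with hypothetical port and queue
ids:

    int st = rte_eth_rx_descriptor_status(port_id, queue_id, 0);

    if (st == RTE_ETH_RX_DESC_DONE)
            ;       /* a packet is ready at the head of the ring */
    else if (st == RTE_ETH_RX_DESC_AVAIL)
            ;       /* descriptor armed, still owned by hardware */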