net/txgbe: add L2 tunnel filter init and uninit
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index 5fadbb1..82dde50 100644
 #include "txgbe_ethdev.h"
 #include "txgbe_rxtx.h"
 
+#ifdef RTE_LIBRTE_IEEE1588
+#define TXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#else
+#define TXGBE_TX_IEEE1588_TMST 0
+#endif
+
 /* Bit Mask to indicate what bits required for building TX context */
 static const u64 TXGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM |
                PKT_TX_OUTER_IPV6 |
@@ -50,7 +56,8 @@ static const u64 TXGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM |
                PKT_TX_L4_MASK |
                PKT_TX_TCP_SEG |
                PKT_TX_TUNNEL_MASK |
-               PKT_TX_OUTER_IP_CKSUM);
+               PKT_TX_OUTER_IP_CKSUM |
+               TXGBE_TX_IEEE1588_TMST);
 
 #define TXGBE_TX_OFFLOAD_NOTSUP_MASK \
                (PKT_TX_OFFLOAD_MASK ^ TXGBE_TX_OFFLOAD_MASK)
@@ -851,6 +858,11 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                 */
                cmd_type_len = TXGBE_TXD_FCS;
 
+#ifdef RTE_LIBRTE_IEEE1588
+               if (ol_flags & PKT_TX_IEEE1588_TMST)
+                       cmd_type_len |= TXGBE_TXD_1588;
+#endif
+
                olinfo_status = 0;
                if (tx_ol_req) {
                        if (ol_flags & PKT_TX_TCP_SEG) {
@@ -1028,8 +1040,20 @@ txgbe_rxd_pkt_info_to_pkt_flags(uint32_t pkt_info)
                PKT_RX_RSS_HASH, 0, 0, 0,
                0, 0, 0,  PKT_RX_FDIR,
        };
-
+#ifdef RTE_LIBRTE_IEEE1588
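+       /* RX flags per EtherType filter index: slot 3 marks 1588/PTP packets */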
+       static uint64_t ip_pkt_etqf_map[8] = {
+               0, 0, 0, PKT_RX_IEEE1588_PTP,
+               0, 0, 0, 0,
+       };
+       int etfid = txgbe_etflt_id(TXGBE_RXD_PTID(pkt_info));
+
+       if (likely(etfid != -1))
+               return ip_pkt_etqf_map[etfid] |
+                      ip_rss_types_map[TXGBE_RXD_RSSTYPE(pkt_info)];
+
+       return ip_rss_types_map[TXGBE_RXD_RSSTYPE(pkt_info)];
+#else
        return ip_rss_types_map[TXGBE_RXD_RSSTYPE(pkt_info)];
+#endif
 }
 
 static inline uint64_t
@@ -1046,6 +1070,10 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
                     vlan_flags & PKT_RX_VLAN_STRIPPED)
                    ? vlan_flags : 0;
 
+#ifdef RTE_LIBRTE_IEEE1588
+       if (rx_status & TXGBE_RXD_STAT_1588)
+               pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
+#endif
        return pkt_flags;
 }
 
@@ -1117,7 +1145,7 @@ txgbe_rx_scan_hw_ring(struct txgbe_rx_queue *rxq)
                for (j = 0; j < LOOK_AHEAD; j++)
                        s[j] = rte_le_to_cpu_32(rxdp[j].qw1.lo.status);
 
-               rte_smp_rmb();
+               rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
 
                /* Compute how many status bits were set */
                for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
@@ -1916,6 +1944,95 @@ txgbe_tx_queue_release_mbufs(struct txgbe_tx_queue *txq)
        }
 }
 
+static int
+txgbe_tx_done_cleanup_full(struct txgbe_tx_queue *txq, uint32_t free_cnt)
+{
+       struct txgbe_tx_entry *swr_ring = txq->sw_ring;
+       uint16_t i, tx_last, tx_id;
+       uint16_t nb_tx_free_last;
+       uint16_t nb_tx_to_clean;
+       uint32_t pkt_cnt;
+
+       /* Start freeing mbufs from the descriptor following tx_tail */
+       tx_last = txq->tx_tail;
+       tx_id  = swr_ring[tx_last].next_id;
+
+       if (txq->nb_tx_free == 0 && txgbe_xmit_cleanup(txq))
+               return 0;
+
+       nb_tx_to_clean = txq->nb_tx_free;
+       nb_tx_free_last = txq->nb_tx_free;
+       if (!free_cnt)
+               free_cnt = txq->nb_tx_desc;
+
+       /* Loop through swr_ring to count the number of
+        * freeable mbufs and packets.
+        */
+       for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+               for (i = 0; i < nb_tx_to_clean &&
+                       pkt_cnt < free_cnt &&
+                       tx_id != tx_last; i++) {
+                       if (swr_ring[tx_id].mbuf != NULL) {
+                               rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
+                               swr_ring[tx_id].mbuf = NULL;
+
+                               /*
+                                * last segment in the packet,
+                                * increment packet count
+                                */
+                               pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
+                       }
+
+                       tx_id = swr_ring[tx_id].next_id;
+               }
+
+               if (pkt_cnt < free_cnt) {
+                       if (txgbe_xmit_cleanup(txq))
+                               break;
+
+                       nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
+                       nb_tx_free_last = txq->nb_tx_free;
+               }
+       }
+
+       return (int)pkt_cnt;
+}
+
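+/* Burst cleanup path: let txgbe_tx_free_bufs() release mbufs in groups of
+ * tx_free_thresh descriptors until free_cnt is reached or no more
+ * transmitted descriptors are available.
+ */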
+static int
+txgbe_tx_done_cleanup_simple(struct txgbe_tx_queue *txq,
+                       uint32_t free_cnt)
+{
+       int i, n, cnt;
+
+       if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
+               free_cnt = txq->nb_tx_desc;
+
+       cnt = free_cnt - free_cnt % txq->tx_free_thresh;
+
+       for (i = 0; i < cnt; i += n) {
+               if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_free_thresh)
+                       break;
+
+               n = txgbe_tx_free_bufs(txq);
+
+               if (n == 0)
+                       break;
+       }
+
+       return i;
+}
+
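+/* Use the burst cleanup path only for queues without offloads and with a
+ * tx_free_thresh large enough for the simple transmit path; otherwise fall
+ * back to the full sw_ring scan.
+ */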
+int
+txgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
+{
+       struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
+       if (txq->offloads == 0 &&
+               txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST)
+               return txgbe_tx_done_cleanup_simple(txq, free_cnt);
+
+       return txgbe_tx_done_cleanup_full(txq, free_cnt);
+}
+
 static void __rte_cold
 txgbe_tx_free_swring(struct txgbe_tx_queue *txq)
 {
@@ -2501,6 +2618,79 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
        return 0;
 }
 
+uint32_t
+txgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#define TXGBE_RXQ_SCAN_INTERVAL 4
+       volatile struct txgbe_rx_desc *rxdp;
+       struct txgbe_rx_queue *rxq;
+       uint32_t desc = 0;
+
+       rxq = dev->data->rx_queues[rx_queue_id];
+       rxdp = &rxq->rx_ring[rxq->rx_tail];
+
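+       /* Walk the ring from rx_tail in steps of TXGBE_RXQ_SCAN_INTERVAL,
+        * counting descriptors whose DD bit the hardware has set, wrapping
+        * around at the end of the ring.
+        */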
+       while ((desc < rxq->nb_rx_desc) &&
+               (rxdp->qw1.lo.status &
+                       rte_cpu_to_le_32(TXGBE_RXD_STAT_DD))) {
+               desc += TXGBE_RXQ_SCAN_INTERVAL;
+               rxdp += TXGBE_RXQ_SCAN_INTERVAL;
+               if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+                       rxdp = &(rxq->rx_ring[rxq->rx_tail +
+                               desc - rxq->nb_rx_desc]);
+       }
+
+       return desc;
+}
+
+int
+txgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+       struct txgbe_rx_queue *rxq = rx_queue;
+       volatile uint32_t *status;
+       uint32_t nb_hold, desc;
+
+       if (unlikely(offset >= rxq->nb_rx_desc))
+               return -EINVAL;
+
+       nb_hold = rxq->nb_rx_hold;
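+       /* Descriptors still held by the PMD have not been handed back to
+        * the hardware, so they cannot have been written back yet.
+        */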
+       if (offset >= rxq->nb_rx_desc - nb_hold)
+               return RTE_ETH_RX_DESC_UNAVAIL;
+
+       desc = rxq->rx_tail + offset;
+       if (desc >= rxq->nb_rx_desc)
+               desc -= rxq->nb_rx_desc;
+
+       status = &rxq->rx_ring[desc].qw1.lo.status;
+       if (*status & rte_cpu_to_le_32(TXGBE_RXD_STAT_DD))
+               return RTE_ETH_RX_DESC_DONE;
+
+       return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+txgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+       struct txgbe_tx_queue *txq = tx_queue;
+       volatile uint32_t *status;
+       uint32_t desc;
+
+       if (unlikely(offset >= txq->nb_tx_desc))
+               return -EINVAL;
+
+       desc = txq->tx_tail + offset;
+       if (desc >= txq->nb_tx_desc) {
+               desc -= txq->nb_tx_desc;
+               if (desc >= txq->nb_tx_desc)
+                       desc -= txq->nb_tx_desc;
+       }
+
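+       /* The DD bit in dw3 is set by the hardware once it has finished
+        * processing the descriptor.
+        */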
+       status = &txq->tx_ring[desc].dw3;
+       if (*status & rte_cpu_to_le_32(TXGBE_TXD_DD))
+               return RTE_ETH_TX_DESC_DONE;
+
+       return RTE_ETH_TX_DESC_FULL;
+}
+
 void __rte_cold
 txgbe_dev_clear_queues(struct rte_eth_dev *dev)
 {
@@ -3125,7 +3315,7 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
                        struct txgbe_dcb_config *dcb_config)
 {
        int     ret = 0;
-       uint8_t i, nb_tcs;
+       uint8_t i, pfc_en, nb_tcs;
        uint16_t pbsize, rx_buffer_size;
        uint8_t config_dcb_rx = 0;
        uint8_t config_dcb_tx = 0;
@@ -3299,6 +3489,26 @@ txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
        /* Configure queue statistics registers */
        txgbe_dcb_config_tc_stats_raptor(hw, dcb_config);
 
+       /* Check if the PFC is supported */
+       if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+               pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
+               for (i = 0; i < nb_tcs; i++) {
+                       /* With a TC count of 8, the default high_water
+                        * works out to 48 and the default low_water to 16.
+                        */
+                       hw->fc.high_water[i] = (pbsize * 3) / 4;
+                       hw->fc.low_water[i] = pbsize / 4;
+                       /* Enable pfc for this TC */
+                       tc = &dcb_config->tc_config[i];
+                       tc->pfc = txgbe_dcb_pfc_enabled;
+               }
+               txgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
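+               /* In 4-TC mode only the low four PFC enable bits are valid */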
+               if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+                       pfc_en &= 0x0F;
+               ret = txgbe_dcb_config_pfc(hw, pfc_en, map);
+       }
+
        return ret;
 }