net/mlx5: check VLAN push/pop support
[dpdk.git] / drivers / net / txgbe / txgbe_rxtx.c
index 7117dbb..1a26128 100644 (file)
@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2020
+ * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd.
+ * Copyright(c) 2010-2017 Intel Corporation
  */
 
 #include <sys/queue.h>
@@ -58,6 +59,7 @@ static const u64 TXGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM |
                PKT_TX_TCP_SEG |
                PKT_TX_TUNNEL_MASK |
                PKT_TX_OUTER_IP_CKSUM |
+               PKT_TX_OUTER_UDP_CKSUM |
 #ifdef RTE_LIB_SECURITY
                PKT_TX_SEC_OFFLOAD |
 #endif
@@ -389,6 +391,7 @@ txgbe_set_xmit_ctx(struct txgbe_tx_queue *txq,
                        /* for non UDP / GRE tunneling, set to 0b */
                        break;
                case PKT_TX_TUNNEL_VXLAN:
+               case PKT_TX_TUNNEL_VXLAN_GPE:
                case PKT_TX_TUNNEL_GENEVE:
                        tunnel_seed |= TXGBE_TXD_ETYPE_UDP;
                        break;
@@ -580,7 +583,6 @@ tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)
                ptype |= RTE_PTYPE_L2_ETHER |
                         RTE_PTYPE_L3_IPV4 |
                         RTE_PTYPE_TUNNEL_VXLAN_GPE;
-               ptype |= RTE_PTYPE_INNER_L2_ETHER;
                break;
        case PKT_TX_TUNNEL_IPIP:
        case PKT_TX_TUNNEL_IP:
@@ -1134,7 +1136,7 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
 
        if (rx_status & TXGBE_RXD_STAT_EIPCS &&
            rx_status & TXGBE_RXD_ERR_EIPCS) {
-               pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
+               pkt_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
        }
 
 #ifdef RTE_LIB_SECURITY
@@ -2220,7 +2222,8 @@ txgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 
        tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
 
-       tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+       tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+                          DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
 #ifdef RTE_LIB_SECURITY
        if (dev->security_ctx)
@@ -4938,6 +4941,63 @@ txgbevf_dev_tx_init(struct rte_eth_dev *dev)
        }
 }
 
+/*
+ * [VF] Start Transmit and Receive Units: program Tx thresholds, enable every Tx and Rx ring, then arm the Rx write pointers.
+ */
+void __rte_cold
+txgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw     *hw;
+       struct txgbe_tx_queue *txq;
+       struct txgbe_rx_queue *rxq;
+       uint32_t txdctl; /* TXCFG readback while polling for enable */
+       uint32_t rxdctl; /* RXCFG readback while polling for enable */
+       uint16_t i; /* queue index */
+       int poll_ms; /* remaining 1 ms polling iterations */
+
+       PMD_INIT_FUNC_TRACE();
+       hw = TXGBE_DEV_HW(dev);
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) { /* pass 1: thresholds only */
+               txq = dev->data->tx_queues[i];
+               /* Program host/writeback thresholds before the ring is enabled */
+               wr32m(hw, TXGBE_TXCFG(txq->reg_idx),
+                     TXGBE_TXCFG_HTHRESH_MASK |
+                     TXGBE_TXCFG_WTHRESH_MASK,
+                     TXGBE_TXCFG_HTHRESH(txq->hthresh) |
+                     TXGBE_TXCFG_WTHRESH(txq->wthresh));
+       }
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) { /* pass 2: enable each Tx ring */
+               wr32m(hw, TXGBE_TXCFG(i), TXGBE_TXCFG_ENA, TXGBE_TXCFG_ENA);
+
+               poll_ms = 10; /* up to 10 x 1 ms for TXCFG_ENA to latch */
+               /* Poll until the hardware reports the Tx queue enabled */
+               do {
+                       rte_delay_ms(1);
+                       txdctl = rd32(hw, TXGBE_TXCFG(i));
+               } while (--poll_ms && !(txdctl & TXGBE_TXCFG_ENA));
+               if (!poll_ms)
+                       PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
+       }
+       for (i = 0; i < dev->data->nb_rx_queues; i++) { /* enable Rx rings, then post descriptors */
+               rxq = dev->data->rx_queues[i];
+
+               wr32m(hw, TXGBE_RXCFG(i), TXGBE_RXCFG_ENA, TXGBE_RXCFG_ENA);
+
+               /* Poll until the hardware reports the Rx queue enabled */
+               poll_ms = 10; /* up to 10 x 1 ms for RXCFG_ENA to latch */
+               do {
+                       rte_delay_ms(1);
+                       rxdctl = rd32(hw, TXGBE_RXCFG(i));
+               } while (--poll_ms && !(rxdctl & TXGBE_RXCFG_ENA));
+               if (!poll_ms)
+                       PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
+               rte_wmb(); /* order descriptor/ring setup before the tail update below */
+               wr32(hw, TXGBE_RXWP(i), rxq->nb_rx_desc - 1); /* presumably RXWP is the Rx tail: exposes all nb_rx_desc-1 slots — confirm vs datasheet */
+       }
+}
+
 int
 txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out,
                    const struct rte_flow_action_rss *in)