diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 0af1719..6faaa8f 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -12,8 +12,8 @@
 #include <rte_log.h>
 #include <rte_debug.h>
 #include <rte_pci.h>
-#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_vxlan.h>
+#include <rte_ethdev_driver.h>
 #include <rte_malloc.h>
 
 #include "ixgbe_logs.h"
 #define IXGBE_FDIRCMD_CMD_INTERVAL_US   10
 
 #define IXGBE_FDIR_FLOW_TYPES ( \
-       (1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
-       (1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
-       (1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
-       (1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
-       (1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
-       (1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
-       (1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
-       (1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))
+       (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
+       (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
+       (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
+       (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
+       (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
+       (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
+       (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
+       (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))
 
 #define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
        uint8_t ipv6_addr[16]; \
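
A side note on the 1ULL change above: the flow_types_mask entries this constant eventually feeds (see ixgbe_fdir_info_get() further down) are 64-bit, so the shifts are done on an unsigned long long constant to match that width. A minimal standalone sketch of the pattern, using placeholder flow-type values rather than the real RTE_ETH_FLOW_* ones:

/* Build a 64-bit flow-type bitmap with 1ULL shifts, mirroring the
 * IXGBE_FDIR_FLOW_TYPES definition above. Values are placeholders. */
#include <stdint.h>
#include <stdio.h>

enum { FLOW_A = 31, FLOW_B = 32 };      /* placeholder flow-type numbers */

int main(void)
{
	uint64_t mask = (1ULL << FLOW_A) |  /* with a plain int constant,   */
			(1ULL << FLOW_B);   /* 1 << 32 would not be valid   */

	printf("mask = 0x%016llx\n", (unsigned long long)mask);
	return 0;
}
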
@@ -77,7 +77,6 @@
        rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
 } while (0)
 
-#define DEFAULT_VXLAN_PORT 4789
 #define IXGBE_FDIRIP6M_INNER_MAC_SHIFT 4
 
 static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
@@ -110,10 +109,6 @@ static int ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
                              bool del,
                              bool update);
 static int ixgbe_fdir_flush(struct rte_eth_dev *dev);
-static void ixgbe_fdir_info_get(struct rte_eth_dev *dev,
-                       struct rte_eth_fdir_info *fdir_info);
-static void ixgbe_fdir_stats_get(struct rte_eth_dev *dev,
-                       struct rte_eth_fdir_stats *fdir_stats);
 
 /**
  * This function is based on ixgbe_fdir_enable_82599() in base/ixgbe_82599.c.
@@ -366,7 +361,7 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
 
        /* set the default UDP port for VxLAN */
        if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
-               IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, DEFAULT_VXLAN_PORT);
+               IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, RTE_VXLAN_DEFAULT_PORT);
 
        /* some bits must be set for mac vlan or tunnel mode */
        fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;
@@ -394,9 +389,12 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
                                IXGBE_FDIRIP6M_TNI_VNI;
 
        if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
-               mac_mask = info->mask.mac_addr_byte_mask;
-               fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
-                               & IXGBE_FDIRIP6M_INNER_MAC;
+               fdiripv6m |= IXGBE_FDIRIP6M_INNER_MAC;
+               mac_mask = info->mask.mac_addr_byte_mask &
+                       (IXGBE_FDIRIP6M_INNER_MAC >>
+                       IXGBE_FDIRIP6M_INNER_MAC_SHIFT);
+               fdiripv6m &= ~((mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT) &
+                               IXGBE_FDIRIP6M_INNER_MAC);
 
                switch (info->mask.tunnel_type_mask) {
                case 0:
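
For reference, the reworked inner-MAC masking above amounts to "mask every inner MAC byte, then clear the mask bits for the bytes the application asked to have compared". A minimal standalone sketch of that bit manipulation, assuming a set FDIRIP6M bit means the byte is ignored and using an assumed value for the inner-MAC field:

/* Sketch of the set-then-clear inner MAC masking above. The field
 * value below is an assumption for illustration, not taken from
 * ixgbe_type.h. */
#include <stdint.h>
#include <stdio.h>

#define FDIRIP6M_INNER_MAC_SHIFT 4
#define FDIRIP6M_INNER_MAC       0x03F0   /* assumed 6-bit field location */

int main(void)
{
	uint32_t fdiripv6m = 0;
	uint32_t mac_mask = 0x3F;            /* application: compare all 6 bytes */

	fdiripv6m |= FDIRIP6M_INNER_MAC;     /* start with every byte ignored */
	mac_mask &= FDIRIP6M_INNER_MAC >> FDIRIP6M_INNER_MAC_SHIFT;
	fdiripv6m &= ~((mac_mask << FDIRIP6M_INNER_MAC_SHIFT) &
		       FDIRIP6M_INNER_MAC);  /* re-enable the requested bytes */

	printf("inner MAC mask bits: 0x%x\n", fdiripv6m);
	return 0;
}
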
@@ -771,10 +769,19 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
                        input->formatted.inner_mac,
                        fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes,
                        sizeof(input->formatted.inner_mac));
-               input->formatted.tunnel_type =
-                       fdir_filter->input.flow.tunnel_flow.tunnel_type;
+               if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
+                               RTE_FDIR_TUNNEL_TYPE_VXLAN)
+                       input->formatted.tunnel_type =
+                                       IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
+               else if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
+                               RTE_FDIR_TUNNEL_TYPE_NVGRE)
+                       input->formatted.tunnel_type =
+                                       IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
+               else
+                       PMD_DRV_LOG(ERR, " invalid tunnel type arguments.");
+
                input->formatted.tni_vni =
-                       fdir_filter->input.flow.tunnel_flow.tunnel_id;
+                       fdir_filter->input.flow.tunnel_flow.tunnel_id >> 8;
        }
 
        return 0;
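
The tni_vni assignment above now programs tunnel_id >> 8, i.e. only the upper 24 bits of the 32-bit tunnel_id supplied by the application, which matches the 24-bit width of a VXLAN VNI or NVGRE TNI (the exact byte layout of tunnel_id is an assumption here). A tiny arithmetic sketch with an illustrative value:

/* Illustrative only: show what tunnel_id >> 8 leaves in tni_vni. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tunnel_id = 0x12345600;   /* assumed: 24-bit VNI in the upper bytes */
	uint32_t tni_vni = tunnel_id >> 8; /* value the driver now programs */

	printf("tunnel_id=0x%08x -> tni_vni=0x%06x\n", tunnel_id, tni_vni);
	return 0;
}
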
@@ -1001,8 +1008,7 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
                        IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
                } else {
                        /* tunnel mode */
-                       if (input->formatted.tunnel_type !=
-                               RTE_FDIR_TUNNEL_TYPE_NVGRE)
+                       if (input->formatted.tunnel_type)
                                tunnel_type = 0x80000000;
                        tunnel_type |= addr_high;
                        IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
@@ -1010,6 +1016,9 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
                        IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
                                        input->formatted.tni_vni);
                }
+               IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, 0);
+               IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, 0);
+               IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, 0);
        }
 
        /* record vlan (little-endian) and flex_bytes(big-endian) */
@@ -1248,7 +1257,8 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
             IXGBE_ATR_FLOW_TYPE_IPV6) &&
            (info->mask.src_port_mask != 0 ||
             info->mask.dst_port_mask != 0) &&
-            rule->mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+           (rule->mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
+            rule->mode != RTE_FDIR_MODE_PERFECT_TUNNEL)) {
                PMD_DRV_LOG(ERR, "By this device,"
                            " IPv4 is not supported without"
                            " L4 protocol and ports masked!");
@@ -1400,13 +1410,13 @@ ixgbe_fdir_flush(struct rte_eth_dev *dev)
 }
 
 #define FDIRENTRIES_NUM_SHIFT 10
-static void
+void
 ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_hw_fdir_info *info =
                        IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
-       uint32_t fdirctrl, max_num;
+       uint32_t fdirctrl, max_num, i;
        uint8_t offset;
 
        fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
@@ -1438,9 +1448,11 @@ ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info
 
        if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN ||
            fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
-               fdir_info->flow_types_mask[0] = 0;
+               fdir_info->flow_types_mask[0] = 0ULL;
        else
                fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
+       for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
+               fdir_info->flow_types_mask[i] = 0ULL;
 
        fdir_info->flex_payload_unit = sizeof(uint16_t);
        fdir_info->max_flex_payload_segment_num = 1;
@@ -1457,7 +1469,7 @@ ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info
                        (uint8_t)((info->mask.flex_bytes_mask & 0xFF00) >> 8);
 }
 
-static void
+void
 ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_stats)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);