diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index acf7a0f..fcf150e 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -17,8 +17,8 @@
 #include <rte_pci.h>
 #include <rte_bus_pci.h>
 #include <rte_ether.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
 #include <rte_memzone.h>
 #include <rte_malloc.h>
 #include <rte_memcpy.h>
@@ -45,7 +45,6 @@
 #define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list"
 #define ETH_I40E_SUPPORT_MULTI_DRIVER  "support-multi-driver"
 #define ETH_I40E_QUEUE_NUM_PER_VF_ARG  "queue-num-per-vf"
-#define ETH_I40E_USE_LATEST_VEC        "use-latest-supported-vec"
 #define ETH_I40E_VF_MSG_CFG            "vf_msg_cfg"
 
 #define I40E_CLEAR_PXE_WAIT_MS     200
 #define I40E_TRANSLATE_INSET 0
 #define I40E_TRANSLATE_REG   1
 
-#define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
-#define I40E_INSET_IPv4_TTL_MASK        0x000D00FFUL
-#define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
-#define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
-#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
-#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL
+#define I40E_INSET_IPV4_TOS_MASK        0x0000FF00UL
+#define I40E_INSET_IPV4_TTL_MASK        0x000000FFUL
+#define I40E_INSET_IPV4_PROTO_MASK      0x0000FF00UL
+#define I40E_INSET_IPV6_TC_MASK         0x0000F00FUL
+#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x0000FF00UL
+#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000000FFUL
 
 /* PCI offset for querying capability */
 #define PCI_DEV_CAP_REG            0xA4
 /* Bit mask of Extended Tag enable/disable */
 #define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
 
+#define I40E_GLQF_PIT_IPV4_START       2
+#define I40E_GLQF_PIT_IPV4_COUNT       2
+#define I40E_GLQF_PIT_IPV6_START       4
+#define I40E_GLQF_PIT_IPV6_COUNT       2
+
+#define I40E_GLQF_PIT_SOURCE_OFF_GET(a)        \
+                               (((a) & I40E_GLQF_PIT_SOURCE_OFF_MASK) >> \
+                                I40E_GLQF_PIT_SOURCE_OFF_SHIFT)
+
+#define I40E_GLQF_PIT_DEST_OFF_GET(a) \
+                               (((a) & I40E_GLQF_PIT_DEST_OFF_MASK) >> \
+                                I40E_GLQF_PIT_DEST_OFF_SHIFT)
+
+#define I40E_GLQF_PIT_FSIZE_GET(a)     (((a) & I40E_GLQF_PIT_FSIZE_MASK) >> \
+                                        I40E_GLQF_PIT_FSIZE_SHIFT)
+
+#define I40E_GLQF_PIT_BUILD(off, mask) (((off) << 16) | (mask))
+#define I40E_FDIR_FIELD_OFFSET(a)      ((a) >> 1)
+
 static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
 static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
 static int i40e_dev_configure(struct rte_eth_dev *dev);
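The GLQF_PIT helpers added above are plain mask-and-shift accessors over one 32-bit GLQF_PIT register (source word offset, destination word offset and field size packed into a single word), and I40E_GLQF_PIT_BUILD packs a destination offset together with a 16-bit field mask into the value written to the input-set mask registers. A minimal standalone sketch of the same pattern follows; the shift and mask values are invented for illustration and are not the real I40E_GLQF_PIT_* layout from the shared base code.

/* Illustrative only: field positions are assumed, not the actual
 * I40E_GLQF_PIT_* register layout.
 */
#include <stdint.h>
#include <stdio.h>

#define PIT_SOURCE_OFF_SHIFT  0
#define PIT_SOURCE_OFF_MASK   (0x3FUL << PIT_SOURCE_OFF_SHIFT)
#define PIT_FSIZE_SHIFT       6
#define PIT_FSIZE_MASK        (0x1FUL << PIT_FSIZE_SHIFT)
#define PIT_DEST_OFF_SHIFT    11
#define PIT_DEST_OFF_MASK     (0x3FUL << PIT_DEST_OFF_SHIFT)

#define PIT_GET(reg, field) \
	(((reg) & PIT_##field##_MASK) >> PIT_##field##_SHIFT)

int main(void)
{
	/* hypothetical register value: source word 4, 2 words, dest word 13 */
	uint32_t reg = (13u << PIT_DEST_OFF_SHIFT) |
		       (2u << PIT_FSIZE_SHIFT) |
		       (4u << PIT_SOURCE_OFF_SHIFT);

	printf("src=%lu fsize=%lu dst=%lu\n",
	       PIT_GET(reg, SOURCE_OFF), PIT_GET(reg, FSIZE),
	       PIT_GET(reg, DEST_OFF));

	/* mask-register entry: destination offset in the high 16 bits,
	 * per-field bit mask in the low 16 bits, as I40E_GLQF_PIT_BUILD does
	 */
	uint32_t mask_entry = ((uint32_t)PIT_GET(reg, DEST_OFF) << 16) | 0x00FF;
	printf("mask entry = 0x%08x\n", mask_entry);
	return 0;
}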
@@ -320,10 +338,8 @@ static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                                        struct rte_eth_udp_tunnel *udp_tunnel);
 static void i40e_filter_input_set_init(struct i40e_pf *pf);
-static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
-                               enum rte_filter_type filter_type,
-                               enum rte_filter_op filter_op,
-                               void *arg);
+static int i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
+                                const struct rte_flow_ops **ops);
 static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
                                  struct rte_eth_dcb_info *dcb_info);
 static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
@@ -403,7 +419,6 @@ static const char *const valid_keys[] = {
        ETH_I40E_FLOATING_VEB_LIST_ARG,
        ETH_I40E_SUPPORT_MULTI_DRIVER,
        ETH_I40E_QUEUE_NUM_PER_VF_ARG,
-       ETH_I40E_USE_LATEST_VEC,
        ETH_I40E_VF_MSG_CFG,
        NULL};
 
@@ -486,7 +501,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
        .rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
        .udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
-       .filter_ctrl                  = i40e_dev_filter_ctrl,
+       .flow_ops_get                 = i40e_dev_flow_ops_get,
        .rxq_info_get                 = i40e_rxq_info_get,
        .txq_info_get                 = i40e_txq_info_get,
        .rx_burst_mode_get            = i40e_rx_burst_mode_get,
@@ -641,6 +656,13 @@ eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                        return retval;
        }
 
+       if (eth_da.nb_representor_ports > 0 &&
+           eth_da.type != RTE_ETH_REPRESENTOR_VF) {
+               PMD_DRV_LOG(ERR, "unsupported representor type: %s\n",
+                           pci_dev->device.devargs->args);
+               return -ENOTSUP;
+       }
+
        retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                sizeof(struct i40e_adapter),
                eth_dev_pci_specific_init, pci_dev,
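The new devargs guard only matters when a representor list was requested: i40e creates VF representors only, so any other representor type is refused up front with -ENOTSUP. A hedged sketch of the same check in isolation (the parsing that fills struct rte_eth_devargs from the devargs string is elided, and the helper name is made up):

#include <errno.h>
#include <ethdev_driver.h>

/* Hypothetical helper mirroring the guard in eth_i40e_pci_probe():
 * accept "representor=vf[...]" style devargs, refuse everything else.
 */
static int i40e_check_representor_args(const struct rte_eth_devargs *eth_da)
{
	if (eth_da->nb_representor_ports > 0 &&
	    eth_da->type != RTE_ETH_REPRESENTOR_VF)
		return -ENOTSUP;
	return 0;
}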
@@ -1317,62 +1339,6 @@ i40e_aq_debug_write_global_register(struct i40e_hw *hw,
        return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
 }
 
-static int
-i40e_parse_latest_vec_handler(__rte_unused const char *key,
-                               const char *value,
-                               void *opaque)
-{
-       struct i40e_adapter *ad = opaque;
-       int use_latest_vec;
-
-       use_latest_vec = atoi(value);
-
-       if (use_latest_vec != 0 && use_latest_vec != 1)
-               PMD_DRV_LOG(WARNING, "Value should be 0 or 1, set it as 1!");
-
-       ad->use_latest_vec = (uint8_t)use_latest_vec;
-
-       return 0;
-}
-
-static int
-i40e_use_latest_vec(struct rte_eth_dev *dev)
-{
-       struct i40e_adapter *ad =
-               I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-       struct rte_kvargs *kvlist;
-       int kvargs_count;
-
-       ad->use_latest_vec = false;
-
-       if (!dev->device->devargs)
-               return 0;
-
-       kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
-       if (!kvlist)
-               return -EINVAL;
-
-       kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_USE_LATEST_VEC);
-       if (!kvargs_count) {
-               rte_kvargs_free(kvlist);
-               return 0;
-       }
-
-       if (kvargs_count > 1)
-               PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
-                           "the first invalid or last valid one is used !",
-                           ETH_I40E_USE_LATEST_VEC);
-
-       if (rte_kvargs_process(kvlist, ETH_I40E_USE_LATEST_VEC,
-                               i40e_parse_latest_vec_handler, ad) < 0) {
-               rte_kvargs_free(kvlist);
-               return -EINVAL;
-       }
-
-       rte_kvargs_free(kvlist);
-       return 0;
-}
-
 static int
 read_vf_msg_config(__rte_unused const char *key,
                               const char *value,
@@ -1523,8 +1489,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
        i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg);
        /* Check if need to support multi-driver */
        i40e_support_multi_driver(dev);
-       /* Check if users want the latest supported vec path */
-       i40e_use_latest_vec(dev);
 
        /* Make sure all is clean before doing PF reset */
        i40e_clear_hw(hw);
@@ -9477,49 +9441,116 @@ i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
        return val;
 }
 
+static int
+i40e_get_inset_field_offset(struct i40e_hw *hw, uint32_t pit_reg_start,
+                           uint32_t pit_reg_count, uint32_t hdr_off)
+{
+       const uint32_t pit_reg_end = pit_reg_start + pit_reg_count;
+       uint32_t field_off = I40E_FDIR_FIELD_OFFSET(hdr_off);
+       uint32_t i, reg_val, src_off, count;
+
+       for (i = pit_reg_start; i < pit_reg_end; i++) {
+               reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_PIT(i));
+
+               src_off = I40E_GLQF_PIT_SOURCE_OFF_GET(reg_val);
+               count = I40E_GLQF_PIT_FSIZE_GET(reg_val);
+
+               if (src_off <= field_off && (src_off + count) > field_off)
+                       break;
+       }
+
+       if (i >= pit_reg_end) {
+               PMD_DRV_LOG(ERR,
+                           "Hardware GLQF_PIT configuration does not support this field mask");
+               return -1;
+       }
+
+       return I40E_GLQF_PIT_DEST_OFF_GET(reg_val) + field_off - src_off;
+}
+
 int
-i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
+i40e_generate_inset_mask_reg(struct i40e_hw *hw, uint64_t inset,
+                            uint32_t *mask, uint8_t nb_elem)
 {
-       uint8_t i, idx = 0;
-       uint64_t inset_need_mask = inset;
+       static const uint64_t mask_inset[] = {
+               I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL,
+               I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT };
 
        static const struct {
                uint64_t inset;
                uint32_t mask;
-       } inset_mask_map[] = {
-               {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
-               {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
-               {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
-               {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
-               {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
-               {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
-               {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
-               {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
+               uint32_t offset;
+       } inset_mask_offset_map[] = {
+               { I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK,
+                 offsetof(struct rte_ipv4_hdr, type_of_service) },
+
+               { I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK,
+                 offsetof(struct rte_ipv4_hdr, next_proto_id) },
+
+               { I40E_INSET_IPV4_TTL, I40E_INSET_IPV4_TTL_MASK,
+                 offsetof(struct rte_ipv4_hdr, time_to_live) },
+
+               { I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK,
+                 offsetof(struct rte_ipv6_hdr, vtc_flow) },
+
+               { I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK,
+                 offsetof(struct rte_ipv6_hdr, proto) },
+
+               { I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK,
+                 offsetof(struct rte_ipv6_hdr, hop_limits) },
        };
 
-       if (!inset || !mask || !nb_elem)
+       uint32_t i;
+       int idx = 0;
+
+       assert(mask);
+       if (!inset)
                return 0;
 
-       for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
+       for (i = 0; i < RTE_DIM(mask_inset); i++) {
                /* Clear the inset bit, if no MASK is required,
                 * for example proto + ttl
                 */
-               if ((inset & inset_mask_map[i].inset) ==
-                    inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
-                       inset_need_mask &= ~inset_mask_map[i].inset;
-               if (!inset_need_mask)
-                       return 0;
+               if ((mask_inset[i] & inset) == mask_inset[i]) {
+                       inset &= ~mask_inset[i];
+                       if (!inset)
+                               return 0;
+               }
        }
-       for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
-               if ((inset_need_mask & inset_mask_map[i].inset) ==
-                   inset_mask_map[i].inset) {
-                       if (idx >= nb_elem) {
-                               PMD_DRV_LOG(ERR, "exceed maximal number of bitmasks");
-                               return -EINVAL;
-                       }
-                       mask[idx] = inset_mask_map[i].mask;
-                       idx++;
+
+       for (i = 0; i < RTE_DIM(inset_mask_offset_map); i++) {
+               uint32_t pit_start, pit_count;
+               int offset;
+
+               if (!(inset_mask_offset_map[i].inset & inset))
+                       continue;
+
+               if (inset_mask_offset_map[i].inset &
+                   (I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
+                    I40E_INSET_IPV4_TTL)) {
+                       pit_start = I40E_GLQF_PIT_IPV4_START;
+                       pit_count = I40E_GLQF_PIT_IPV4_COUNT;
+               } else {
+                       pit_start = I40E_GLQF_PIT_IPV6_START;
+                       pit_count = I40E_GLQF_PIT_IPV6_COUNT;
+               }
+
+               offset = i40e_get_inset_field_offset(hw, pit_start, pit_count,
+                               inset_mask_offset_map[i].offset);
+
+               if (offset < 0)
+                       return -EINVAL;
+
+               if (idx >= nb_elem) {
+                       PMD_DRV_LOG(ERR,
+                                   "Configuration of inset mask out of range %u",
+                                   nb_elem);
+                       return -ERANGE;
                }
+
+               mask[idx] = I40E_GLQF_PIT_BUILD((uint32_t)offset,
+                                               inset_mask_offset_map[i].mask);
+               idx++;
        }
 
        return idx;
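Instead of hard-coding field-vector offsets into the mask constants, the rewritten code looks up the GLQF_PIT windows to learn where a header field was extracted to, then emits (destination offset << 16) | field mask per entry. A small host-side sketch of that lookup idea, using invented window values in place of the i40e_read_rx_ctl() register reads:

#include <stdint.h>
#include <stdio.h>

struct pit_window {
	uint32_t src_off;   /* first source word covered */
	uint32_t dst_off;   /* first destination word */
	uint32_t fsize;     /* number of words covered */
};

static int field_vector_offset(const struct pit_window *win, unsigned int n,
			       uint32_t field_off)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (win[i].src_off <= field_off &&
		    field_off < win[i].src_off + win[i].fsize)
			return win[i].dst_off + field_off - win[i].src_off;
	return -1; /* field not extracted by this PIT configuration */
}

int main(void)
{
	/* hypothetical IPv4 extraction windows */
	const struct pit_window ipv4[] = { { 4, 13, 2 }, { 6, 15, 2 } };

	/* TTL sits at IPv4 header byte offset 8, i.e. 16-bit word 8 >> 1 == 4,
	 * matching I40E_FDIR_FIELD_OFFSET() in the driver
	 */
	printf("dest word = %d\n", field_vector_offset(ipv4, 2, 8 >> 1));
	return 0;
}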
@@ -9573,7 +9604,7 @@ i40e_filter_input_set_init(struct i40e_pf *pf)
 
                input_set = i40e_get_default_input_set(pctype);
 
-               num = i40e_generate_inset_mask_reg(input_set, mask_reg,
+               num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
                                                   I40E_INSET_MASK_NUM_REG);
                if (num < 0)
                        return;
@@ -9653,7 +9684,7 @@ i40e_set_hash_inset(struct i40e_hw *hw, uint64_t input_set,
                inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
                input_set |= pf->hash_input_set[pctype];
        }
-       num = i40e_generate_inset_mask_reg(input_set, mask_reg,
+       num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
                                           I40E_INSET_MASK_NUM_REG);
        if (num < 0)
                return -EINVAL;
@@ -9845,30 +9876,14 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
 }
 
 static int
-i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
-                    enum rte_filter_type filter_type,
-                    enum rte_filter_op filter_op,
-                    void *arg)
+i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
+                     const struct rte_flow_ops **ops)
 {
-       int ret = 0;
-
        if (dev == NULL)
                return -EINVAL;
 
-       switch (filter_type) {
-       case RTE_ETH_FILTER_GENERIC:
-               if (filter_op != RTE_ETH_FILTER_GET)
-                       return -EINVAL;
-               *(const void **)arg = &i40e_flow_ops;
-               break;
-       default:
-               PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
-                                                       filter_type);
-               ret = -EINVAL;
-               break;
-       }
-
-       return ret;
+       *ops = &i40e_flow_ops;
+       return 0;
 }
 
 /*
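Replacing .filter_ctrl with .flow_ops_get removes the last use of the generic filter_ctrl callback in this driver; the ethdev layer now asks for the rte_flow_ops pointer directly, and applications are unaffected because they already go through the public rte_flow calls. A hedged application-side sketch (the rule itself is a placeholder and may well be rejected by the PMD's flow parser):

#include <rte_flow.h>

/* Hypothetical helper: create a drop-all ingress flow on a port backed by
 * this PMD. rte_flow_validate()/rte_flow_create() reach the driver's
 * rte_flow_ops, which i40e now exposes through i40e_dev_flow_ops_get().
 */
static struct rte_flow *create_drop_flow(uint16_t port_id,
					 struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_validate(port_id, &attr, pattern, actions, err) != 0)
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}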
@@ -11747,7 +11762,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
                return -EBUSY;
        }
 
-       if (frame_size > RTE_ETHER_MAX_LEN)
+       if (frame_size > I40E_ETH_MAX_LEN)
                dev_data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
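The jumbo-frame decision compares the full frame size derived from the MTU, so the bound has to cover everything the driver adds on top of the L3 MTU; RTE_ETHER_MAX_LEN (1518) does not account for the extra VLAN tags, which is why the driver-specific I40E_ETH_MAX_LEN is used instead. A sketch of the arithmetic, with the overhead written out as an assumption about what I40E_ETH_OVERHEAD covers:

#include <stdbool.h>
#include <stdint.h>

/* Assumed overhead: Ethernet header (14) + CRC (4) + two VLAN tags (8).
 * This is how the driver derives its frame size from the MTU; the exact
 * constant in the PMD is I40E_ETH_OVERHEAD.
 */
#define FRAME_OVERHEAD (14 + 4 + 2 * 4)
#define MAX_STD_FRAME  (1500 + FRAME_OVERHEAD) /* stands in for I40E_ETH_MAX_LEN */

static bool needs_jumbo_offload(uint16_t mtu)
{
	uint32_t frame_size = (uint32_t)mtu + FRAME_OVERHEAD;

	return frame_size > MAX_STD_FRAME; /* e.g. mtu 1500 -> no, mtu 9000 -> yes */
}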
@@ -12447,5 +12462,4 @@ RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
                              ETH_I40E_FLOATING_VEB_ARG "=1"
                              ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
                              ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
-                             ETH_I40E_SUPPORT_MULTI_DRIVER "=1"
-                             ETH_I40E_USE_LATEST_VEC "=0|1");
+                             ETH_I40E_SUPPORT_MULTI_DRIVER "=1");