net/i40e: i40e_ethdev.c updates (flow ops get conversion, GLQF_PIT-based input-set masks, log registration macros)
[dpdk.git] / drivers / net / i40e / i40e_ethdev.c
index 9b86bcd..dd61258 100644 (file)
@@ -27,6 +27,7 @@
 #include <rte_tailq.h>
 #include <rte_hash_crc.h>
 #include <rte_bitmap.h>
+#include <rte_os_shim.h>
 
 #include "i40e_logs.h"
 #include "base/i40e_prototype.h"
 #define I40E_TRANSLATE_INSET 0
 #define I40E_TRANSLATE_REG   1
 
-#define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
-#define I40E_INSET_IPv4_TTL_MASK        0x000D00FFUL
-#define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
-#define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
-#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
-#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL
+/* Pure 16-bit field masks: the destination word offset that used to be
+ * hard-coded in the upper bits of these constants is now read from the
+ * GLQF_PIT registers at run time and combined via I40E_GLQF_PIT_BUILD().
+ * (This also fixes the lowercase-"v" typo in the old IPv4 TTL mask name.)
+ */
+#define I40E_INSET_IPV4_TOS_MASK        0x0000FF00UL
+#define I40E_INSET_IPV4_TTL_MASK        0x000000FFUL
+#define I40E_INSET_IPV4_PROTO_MASK      0x0000FF00UL
+#define I40E_INSET_IPV6_TC_MASK         0x0000F00FUL
+#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x0000FF00UL
+#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000000FFUL
 
 /* PCI offset for querying capability */
 #define PCI_DEV_CAP_REG            0xA4
 /* Bit mask of Extended Tag enable/disable */
 #define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
 
+/* GLQF_PIT index ranges that describe the parsed IPv4 and IPv6 L3 headers
+ * — presumably fixed by the default firmware parser config; confirm against
+ * the datasheet if the parser PIT layout is reprogrammed. */
+#define I40E_GLQF_PIT_IPV4_START       2
+#define I40E_GLQF_PIT_IPV4_COUNT       2
+#define I40E_GLQF_PIT_IPV6_START       4
+#define I40E_GLQF_PIT_IPV6_COUNT       2
+
+/* Extract the source offset, destination offset and field size fields
+ * from a GLQF_PIT register value. */
+#define I40E_GLQF_PIT_SOURCE_OFF_GET(a)        \
+                               (((a) & I40E_GLQF_PIT_SOURCE_OFF_MASK) >> \
+                                I40E_GLQF_PIT_SOURCE_OFF_SHIFT)
+
+#define I40E_GLQF_PIT_DEST_OFF_GET(a) \
+                               (((a) & I40E_GLQF_PIT_DEST_OFF_MASK) >> \
+                                I40E_GLQF_PIT_DEST_OFF_SHIFT)
+
+#define I40E_GLQF_PIT_FSIZE_GET(a)     (((a) & I40E_GLQF_PIT_FSIZE_MASK) >> \
+                                        I40E_GLQF_PIT_FSIZE_SHIFT)
+
+/* Inset mask register value: destination offset in bits 31:16, 16-bit
+ * field mask in bits 15:0. */
+#define I40E_GLQF_PIT_BUILD(off, mask) (((off) << 16) | (mask))
+/* Convert a header byte offset to the 16-bit word units used by GLQF_PIT. */
+#define I40E_FDIR_FIELD_OFFSET(a)      ((a) >> 1)
+
 static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
 static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
 static int i40e_dev_configure(struct rte_eth_dev *dev);
@@ -319,10 +339,8 @@ static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                                        struct rte_eth_udp_tunnel *udp_tunnel);
 static void i40e_filter_input_set_init(struct i40e_pf *pf);
-static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
-                               enum rte_filter_type filter_type,
-                               enum rte_filter_op filter_op,
-                               void *arg);
+static int i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
+                                const struct rte_flow_ops **ops);
 static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
                                  struct rte_eth_dcb_info *dcb_info);
 static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
@@ -484,7 +502,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
        .rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
        .udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
-       .filter_ctrl                  = i40e_dev_filter_ctrl,
+       .flow_ops_get                 = i40e_dev_flow_ops_get,
        .rxq_info_get                 = i40e_rxq_info_get,
        .txq_info_get                 = i40e_txq_info_get,
        .rx_burst_mode_get            = i40e_rx_burst_mode_get,
@@ -836,6 +854,8 @@ floating_veb_list_handler(__rte_unused const char *key,
                idx = strtoul(floating_veb_value, &end, 10);
                if (errno || end == NULL)
                        return -1;
+               if (idx < 0)
+                       return -1;
                while (isblank(*end))
                        end++;
                if (*end == '-') {
@@ -2289,7 +2309,8 @@ i40e_phy_conf_link(struct i40e_hw *hw,
        phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
        phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
                I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
-               I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
+               I40E_AQ_PHY_TYPE_EXT_25G_LR | I40E_AQ_PHY_TYPE_EXT_25G_AOC |
+               I40E_AQ_PHY_TYPE_EXT_25G_ACC) : 0;
        phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
        phy_conf.eee_capability = phy_ab.eee_capability;
        phy_conf.eeer = phy_ab.eeer_val;
@@ -3668,9 +3689,11 @@ i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
                 ((hw->nvm.version >> 4) & 0xff),
                 (hw->nvm.version & 0xf), hw->nvm.eetrack,
                 ver, build, patch);
+       if (ret < 0)
+               return -EINVAL;
 
        ret += 1; /* add the size of '\0' */
-       if (fw_size < (u32)ret)
+       if (fw_size < (size_t)ret)
                return ret;
        else
                return 0;
@@ -9424,49 +9447,116 @@ i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
        return val;
 }
 
+/*
+ * Find where a protocol header field lands in the field vector.
+ *
+ * Scans GLQF_PIT[pit_reg_start .. pit_reg_start + pit_reg_count - 1] for an
+ * entry whose source window covers @hdr_off (a byte offset into the header,
+ * converted to 16-bit words via I40E_FDIR_FIELD_OFFSET).
+ *
+ * Return: the field's destination word offset on success, or -1 when no
+ * GLQF_PIT entry in the range covers the field.
+ */
+static int
+i40e_get_inset_field_offset(struct i40e_hw *hw, uint32_t pit_reg_start,
+                           uint32_t pit_reg_count, uint32_t hdr_off)
+{
+       const uint32_t pit_reg_end = pit_reg_start + pit_reg_count;
+       uint32_t field_off = I40E_FDIR_FIELD_OFFSET(hdr_off);
+       uint32_t i, reg_val, src_off, count;
+
+       for (i = pit_reg_start; i < pit_reg_end; i++) {
+               reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_PIT(i));
+
+               src_off = I40E_GLQF_PIT_SOURCE_OFF_GET(reg_val);
+               count = I40E_GLQF_PIT_FSIZE_GET(reg_val);
+
+               /* Does this PIT entry's source window cover the field? */
+               if (src_off <= field_off && (src_off + count) > field_off)
+                       break;
+       }
+
+       if (i >= pit_reg_end) {
+               PMD_DRV_LOG(ERR,
+                           "Hardware GLQF_PIT configuration does not support this field mask");
+               return -1;
+       }
+
+       /* Translate the source-relative position to a destination offset. */
+       return I40E_GLQF_PIT_DEST_OFF_GET(reg_val) + field_off - src_off;
+}
+
 int
-i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
+i40e_generate_inset_mask_reg(struct i40e_hw *hw, uint64_t inset,
+                            uint32_t *mask, uint8_t nb_elem)
 {
-       uint8_t i, idx = 0;
-       uint64_t inset_need_mask = inset;
+       /* Field pairs that need no mask register at all: selecting both
+        * fields of a word covers the whole word, e.g. proto + ttl.
+        */
+       static const uint64_t mask_inset[] = {
+               I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL,
+               I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT };
 
        static const struct {
                uint64_t inset;
                uint32_t mask;
-       } inset_mask_map[] = {
-               {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
-               {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
-               {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
-               {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
-               {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
-               {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
-               {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
-               {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
+               uint32_t offset;
+       } inset_mask_offset_map[] = {
+               { I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK,
+                 offsetof(struct rte_ipv4_hdr, type_of_service) },
+
+               { I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK,
+                 offsetof(struct rte_ipv4_hdr, next_proto_id) },
+
+               { I40E_INSET_IPV4_TTL, I40E_INSET_IPV4_TTL_MASK,
+                 offsetof(struct rte_ipv4_hdr, time_to_live) },
+
+               { I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK,
+                 offsetof(struct rte_ipv6_hdr, vtc_flow) },
+
+               { I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK,
+                 offsetof(struct rte_ipv6_hdr, proto) },
+
+               { I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK,
+                 offsetof(struct rte_ipv6_hdr, hop_limits) },
        };
 
-       if (!inset || !mask || !nb_elem)
+       uint32_t i;
+       int idx = 0;
+
+       /* Caller must supply a mask array; an empty inset needs no masks. */
+       assert(mask);
+       if (!inset)
                return 0;
 
-       for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
+       for (i = 0; i < RTE_DIM(mask_inset); i++) {
                /* Clear the inset bit, if no MASK is required,
                 * for example proto + ttl
                 */
-               if ((inset & inset_mask_map[i].inset) ==
-                    inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
-                       inset_need_mask &= ~inset_mask_map[i].inset;
-               if (!inset_need_mask)
-                       return 0;
+               if ((mask_inset[i] & inset) == mask_inset[i]) {
+                       inset &= ~mask_inset[i];
+                       if (!inset)
+                               return 0;
+               }
        }
-       for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
-               if ((inset_need_mask & inset_mask_map[i].inset) ==
-                   inset_mask_map[i].inset) {
-                       if (idx >= nb_elem) {
-                               PMD_DRV_LOG(ERR, "exceed maximal number of bitmasks");
-                               return -EINVAL;
-                       }
-                       mask[idx] = inset_mask_map[i].mask;
-                       idx++;
+
+       /* For each remaining field, look up its destination offset in the
+        * IPv4 or IPv6 GLQF_PIT range and build the mask register value.
+        */
+       for (i = 0; i < RTE_DIM(inset_mask_offset_map); i++) {
+               uint32_t pit_start, pit_count;
+               int offset;
+
+               if (!(inset_mask_offset_map[i].inset & inset))
+                       continue;
+
+               if (inset_mask_offset_map[i].inset &
+                   (I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
+                    I40E_INSET_IPV4_TTL)) {
+                       pit_start = I40E_GLQF_PIT_IPV4_START;
+                       pit_count = I40E_GLQF_PIT_IPV4_COUNT;
+               } else {
+                       pit_start = I40E_GLQF_PIT_IPV6_START;
+                       pit_count = I40E_GLQF_PIT_IPV6_COUNT;
                }
+
+               offset = i40e_get_inset_field_offset(hw, pit_start, pit_count,
+                               inset_mask_offset_map[i].offset);
+
+               if (offset < 0)
+                       return -EINVAL;
+
+               if (idx >= nb_elem) {
+                       PMD_DRV_LOG(ERR,
+                                   "Configuration of inset mask out of range %u",
+                                   nb_elem);
+                       return -ERANGE;
+               }
+
+               mask[idx] = I40E_GLQF_PIT_BUILD((uint32_t)offset,
+                                               inset_mask_offset_map[i].mask);
+               idx++;
        }
 
        return idx;
@@ -9520,7 +9610,7 @@ i40e_filter_input_set_init(struct i40e_pf *pf)
 
                input_set = i40e_get_default_input_set(pctype);
 
-               num = i40e_generate_inset_mask_reg(input_set, mask_reg,
+               num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
                                                   I40E_INSET_MASK_NUM_REG);
                if (num < 0)
                        return;
@@ -9600,7 +9690,7 @@ i40e_set_hash_inset(struct i40e_hw *hw, uint64_t input_set,
                inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
                input_set |= pf->hash_input_set[pctype];
        }
-       num = i40e_generate_inset_mask_reg(input_set, mask_reg,
+       num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
                                           I40E_INSET_MASK_NUM_REG);
        if (num < 0)
                return -EINVAL;
@@ -9792,30 +9882,14 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
 }
 
+/*
+ * .flow_ops_get ethdev callback (successor of the legacy .filter_ctrl):
+ * hand back the driver's rte_flow operations table.
+ */
 static int
-i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
-                    enum rte_filter_type filter_type,
-                    enum rte_filter_op filter_op,
-                    void *arg)
+i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
+                     const struct rte_flow_ops **ops)
 {
-       int ret = 0;
-
        if (dev == NULL)
                return -EINVAL;
 
-       switch (filter_type) {
-       case RTE_ETH_FILTER_GENERIC:
-               if (filter_op != RTE_ETH_FILTER_GET)
-                       return -EINVAL;
-               *(const void **)arg = &i40e_flow_ops;
-               break;
-       default:
-               PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
-                                                       filter_type);
-               ret = -EINVAL;
-               break;
-       }
-
-       return ret;
+       *ops = &i40e_flow_ops;
+       return 0;
 }
 
 /*
@@ -11591,9 +11665,6 @@ static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
        uint32_t value = 0;
        uint32_t i;
 
-       if (!info || !info->length || !info->data)
-               return -EINVAL;
-
        if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
                is_sfp = true;
 
@@ -12378,16 +12449,13 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
        return ret;
 }
 
-RTE_LOG_REGISTER(i40e_logtype_init, pmd.net.i40e.init, NOTICE);
-RTE_LOG_REGISTER(i40e_logtype_driver, pmd.net.i40e.driver, NOTICE);
-#ifdef RTE_LIBRTE_I40E_DEBUG_RX
-RTE_LOG_REGISTER(i40e_logtype_rx, pmd.net.i40e.rx, DEBUG);
-#endif
-#ifdef RTE_LIBRTE_I40E_DEBUG_TX
-RTE_LOG_REGISTER(i40e_logtype_tx, pmd.net.i40e.tx, DEBUG);
+RTE_LOG_REGISTER_SUFFIX(i40e_logtype_init, init, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(i40e_logtype_driver, driver, NOTICE);
+#ifdef RTE_ETHDEV_DEBUG_RX
+RTE_LOG_REGISTER_SUFFIX(i40e_logtype_rx, rx, DEBUG);
 #endif
-#ifdef RTE_LIBRTE_I40E_DEBUG_TX_FREE
-RTE_LOG_REGISTER(i40e_logtype_tx_free, pmd.net.i40e.tx_free, DEBUG);
+#ifdef RTE_ETHDEV_DEBUG_TX
+RTE_LOG_REGISTER_SUFFIX(i40e_logtype_tx, tx, DEBUG);
 #endif
 
 RTE_PMD_REGISTER_PARAM_STRING(net_i40e,