app/testpmd: print extended flow director info
[dpdk.git] / lib / librte_pmd_i40e / i40e_ethdev.c
index 279ef52..dacf2db 100644
 /* Maximum number of VSIs */
 #define I40E_MAX_NUM_VSIS          (384UL)
 
-/* Bit shift and mask */
-#define I40E_16_BIT_SHIFT 16
-#define I40E_16_BIT_MASK  0xFFFF
-#define I40E_32_BIT_SHIFT 32
-#define I40E_32_BIT_MASK  0xFFFFFFFF
-#define I40E_48_BIT_SHIFT 48
-#define I40E_48_BIT_MASK  0xFFFFFFFFFFFFULL
-
 /* Default queue interrupt throttling time in microseconds*/
 #define I40E_ITR_INDEX_DEFAULT          0
 #define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
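
The 16/32/48-bit SHIFT/MASK constants removed above are superseded by generic
bit-width helpers used throughout the rest of this patch (I40E_4_BIT_WIDTH,
I40E_4_BIT_MASK, I40E_8_BIT_MASK, I40E_32_BIT_WIDTH, I40E_48_BIT_WIDTH and so
on). They are not defined in this file; a plausible sketch of the definitions
this patch assumes in i40e_ethdev.h, built on CHAR_BIT and the RTE_LEN2MASK
helper from rte_common.h, would be:

        #include <limits.h>        /* CHAR_BIT */
        #include <rte_common.h>    /* RTE_LEN2MASK */

        #define I40E_4_BIT_WIDTH  (CHAR_BIT / 2)
        #define I40E_4_BIT_MASK   RTE_LEN2MASK(I40E_4_BIT_WIDTH, uint8_t)
        #define I40E_8_BIT_WIDTH  CHAR_BIT
        #define I40E_8_BIT_MASK   UINT8_MAX
        #define I40E_16_BIT_WIDTH (CHAR_BIT * 2)
        #define I40E_16_BIT_MASK  UINT16_MAX
        #define I40E_32_BIT_WIDTH (CHAR_BIT * 4)
        #define I40E_32_BIT_MASK  UINT32_MAX
        #define I40E_48_BIT_WIDTH (CHAR_BIT * 6)
        #define I40E_48_BIT_MASK  RTE_LEN2MASK(I40E_48_BIT_WIDTH, uint64_t)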
@@ -144,9 +136,11 @@ static void i40e_macaddr_add(struct rte_eth_dev *dev,
                          uint32_t pool);
 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
-                                   struct rte_eth_rss_reta *reta_conf);
+                                   struct rte_eth_rss_reta_entry64 *reta_conf,
+                                   uint16_t reta_size);
 static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
-                                  struct rte_eth_rss_reta *reta_conf);
+                                  struct rte_eth_rss_reta_entry64 *reta_conf,
+                                  uint16_t reta_size);
 
 static int i40e_get_cap(struct i40e_hw *hw);
 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
@@ -322,6 +316,35 @@ static struct rte_driver rte_i40e_driver = {
 
 PMD_REGISTER_DRIVER(rte_i40e_driver);
 
+/*
+ * Initialize registers for flexible payload, which should be set by NVM.
+ * This should be removed from the code once it is fixed in NVM.
+ */
+#ifndef I40E_GLQF_ORT
+#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
+#endif
+#ifndef I40E_GLQF_PIT
+#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
+#endif
+
+static inline void i40e_flex_payload_reg_init(struct i40e_hw *hw)
+{
+       I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
+       I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
+       I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
+       I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B);
+       I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
+       I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
+       I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
+       I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
+       I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
+       I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);
+
+       /* GLQF_PIT Registers */
+       I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
+       I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);
+}
+
 static int
 eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
                   struct rte_eth_dev *dev)
@@ -385,6 +408,13 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
                return ret;
        }
 
+       /*
+        * To work around the NVM issue, initialize the registers
+        * for flexible payload in software.
+        * This should be removed once the issue is fixed in NVM.
+        */
+       i40e_flex_payload_reg_init(hw);
+
        /* Initialize the parameters for adminq */
        i40e_init_adminq_parameter(hw);
        ret = i40e_init_adminq(hw);
@@ -510,6 +540,7 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
 err_mac_alloc:
        i40e_vsi_release(pf->main_vsi);
 err_setup_pf_switch:
+       i40e_fdir_teardown(pf);
 err_get_mac_addr:
 err_configure_lan_hmc:
        (void)i40e_shutdown_lan_hmc(hw);
@@ -800,6 +831,18 @@ i40e_dev_start(struct rte_eth_dev *dev)
                i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
        }
 
+       ret = i40e_fdir_configure(dev);
+       if (ret < 0) {
+               PMD_DRV_LOG(ERR, "failed to configure fdir.");
+               goto err_up;
+       }
+
+       /* enable FDIR MSIX interrupt */
+       if (pf->flags & I40E_FLAG_FDIR) {
+               i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
+               i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
+       }
+
        /* Enable all queues which have been configured */
        ret = i40e_dev_switch_queues(pf, TRUE);
        if (ret != I40E_SUCCESS) {
@@ -854,11 +897,16 @@ i40e_dev_stop(struct rte_eth_dev *dev)
                i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
        }
 
+       if (pf->flags & I40E_FLAG_FDIR) {
+               i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
+               i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
+       }
        /* Clear all queues and release memory */
        i40e_dev_clear_queues(dev);
 
        /* Set link down */
        i40e_dev_set_link_down(dev);
 }
 
 static void
@@ -880,6 +928,7 @@ i40e_dev_close(struct rte_eth_dev *dev)
        i40e_shutdown_lan_hmc(hw);
 
        /* release all the existing VSIs and VEBs */
+       i40e_fdir_teardown(pf);
        i40e_vsi_release(pf->main_vsi);
 
        /* shutdown the adminq */
@@ -1294,6 +1343,9 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
                            I40E_GLPRT_PTC9522L(hw->port),
                            pf->offset_loaded, &os->tx_size_big,
                            &ns->tx_size_big);
+       i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
+                          pf->offset_loaded,
+                          &os->fd_sb_match, &ns->fd_sb_match);
        /* GLPRT_MSPDC not supported */
        /* GLPRT_XEC not supported */
 
@@ -1310,6 +1362,7 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        stats->obytes   = ns->eth.tx_bytes;
        stats->oerrors  = ns->eth.tx_errors;
        stats->imcasts  = ns->eth.rx_multicast;
+       stats->fdirmatch = ns->fd_sb_match;
 
        /* Rx Errors */
        stats->ibadcrc  = ns->crc_errors;
@@ -1385,6 +1438,7 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
                        ns->mac_short_packet_dropped);
        PMD_DRV_LOG(DEBUG, "checksum_error:           %lu",
                    ns->checksum_error);
+       PMD_DRV_LOG(DEBUG, "fdir_match:               %lu", ns->fd_sb_match);
        PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
 }
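
With the new fd_sb_match counter wired into rte_eth_stats above, an
application can read the flow director match count through the generic stats
API. A minimal, hypothetical usage sketch (port 0 assumed configured and
started):

        #include <inttypes.h>
        #include <stdio.h>
        #include <rte_ethdev.h>

        struct rte_eth_stats stats;

        /* rte_eth_stats_get() fills the PMD-reported counters for the port */
        rte_eth_stats_get(0 /* port_id */, &stats);
        printf("flow director matches: %" PRIu64 "\n", stats.fdirmatch);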
 
@@ -1777,32 +1831,41 @@ i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
 
 static int
 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
-                        struct rte_eth_rss_reta *reta_conf)
+                        struct rte_eth_rss_reta_entry64 *reta_conf,
+                        uint16_t reta_size)
 {
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t lut, l;
-       uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;
-
-       for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
-               if (i < max)
-                       mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
-               else
-                       mask = (uint8_t)((reta_conf->mask_hi >>
-                                               (i - max)) & 0xF);
+       uint16_t i, j, lut_size = pf->hash_lut_size;
+       uint16_t idx, shift;
+       uint8_t mask;
+
+       if (reta_size != lut_size ||
+               reta_size > ETH_RSS_RETA_SIZE_512) {
+               PMD_DRV_LOG(ERR, "The size of the configured hash lookup "
+                       "table (%d) doesn't match the number of entries "
+                       "the hardware supports (%d)", reta_size, lut_size);
+               return -EINVAL;
+       }
 
+       for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
+               idx = i / RTE_RETA_GROUP_SIZE;
+               shift = i % RTE_RETA_GROUP_SIZE;
+               mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+                                               I40E_4_BIT_MASK);
                if (!mask)
                        continue;
-
-               if (mask == 0xF)
+               if (mask == I40E_4_BIT_MASK)
                        l = 0;
                else
                        l = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
-
-               for (j = 0, lut = 0; j < 4; j++) {
+               for (j = 0, lut = 0; j < I40E_4_BIT_WIDTH; j++) {
                        if (mask & (0x1 << j))
-                               lut |= reta_conf->reta[i + j] << (8 * j);
+                               lut |= reta_conf[idx].reta[shift + j] <<
+                                                       (CHAR_BIT * j);
                        else
-                               lut |= l & (0xFF << (8 * j));
+                               lut |= l & (I40E_8_BIT_MASK << (CHAR_BIT * j));
                }
                I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
        }
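
The update path now takes an array of rte_eth_rss_reta_entry64 groups, each
covering RTE_RETA_GROUP_SIZE (64) entries selected by a per-group mask. A
hypothetical caller-side sketch (port_id and nb_rxq are assumed to be defined
elsewhere), distributing a 512-entry table round-robin across the rx queues:

        struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
                                                  RTE_RETA_GROUP_SIZE];
        uint16_t i, idx, shift;
        int ret;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < ETH_RSS_RETA_SIZE_512; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                reta_conf[idx].mask |= 1ULL << shift;     /* touch this entry */
                reta_conf[idx].reta[shift] = i % nb_rxq;  /* target rx queue */
        }
        ret = rte_eth_dev_rss_reta_update(port_id, reta_conf,
                                          ETH_RSS_RETA_SIZE_512);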
@@ -1812,27 +1875,37 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
 
 static int
 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
-                       struct rte_eth_rss_reta *reta_conf)
+                       struct rte_eth_rss_reta_entry64 *reta_conf,
+                       uint16_t reta_size)
 {
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t lut;
-       uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;
-
-       for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
-               if (i < max)
-                       mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
-               else
-                       mask = (uint8_t)((reta_conf->mask_hi >>
-                                               (i - max)) & 0xF);
+       uint16_t i, j, lut_size = pf->hash_lut_size;
+       uint16_t idx, shift;
+       uint8_t mask;
+
+       if (reta_size != lut_size ||
+               reta_size > ETH_RSS_RETA_SIZE_512) {
+               PMD_DRV_LOG(ERR, "The size of the configured hash lookup "
+                       "table (%d) doesn't match the number of entries "
+                       "the hardware supports (%d)", reta_size, lut_size);
+               return -EINVAL;
+       }
 
+       for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
+               idx = i / RTE_RETA_GROUP_SIZE;
+               shift = i % RTE_RETA_GROUP_SIZE;
+               mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+                                               I40E_4_BIT_MASK);
                if (!mask)
                        continue;
 
                lut = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
-               for (j = 0; j < 4; j++) {
+               for (j = 0; j < I40E_4_BIT_WIDTH; j++) {
                        if (mask & (0x1 << j))
-                               reta_conf->reta[i + j] =
-                                       (uint8_t)((lut >> (8 * j)) & 0xFF);
+                               reta_conf[idx].reta[shift + j] = ((lut >>
+                                       (CHAR_BIT * j)) & I40E_8_BIT_MASK);
                }
        }
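
The query path is symmetric: reta_size must still match the full table size,
while the per-group mask selects which entries are actually read back.
Continuing the hypothetical sketch above, fetching only the first 64-entry
group:

        memset(reta_conf, 0, sizeof(reta_conf));
        reta_conf[0].mask = UINT64_MAX;  /* read back entries 0..63 only */
        ret = rte_eth_dev_rss_reta_query(port_id, reta_conf,
                                         ETH_RSS_RETA_SIZE_512);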
 
@@ -2809,16 +2882,30 @@ i40e_vsi_setup(struct i40e_pf *pf,
        case I40E_VSI_VMDQ2:
                vsi->nb_qps = pf->vmdq_nb_qps;
                break;
+       case I40E_VSI_FDIR:
+               vsi->nb_qps = pf->fdir_nb_qps;
+               break;
        default:
                goto fail_mem;
        }
-       ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
-       if (ret < 0) {
-               PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
-                               vsi->seid, ret);
-               goto fail_mem;
-       }
-       vsi->base_queue = ret;
+       /*
+        * The filter status descriptor is reported on rx queue 0,
+        * while the tx queue for fdir filter programming has no
+        * such constraint and can be any non-zero queue.
+        * To keep it simple, the FDIR vsi uses queue pair 0.
+        * To make sure queue pair 0 is available, the queue
+        * allocation must be done before this function is called.
+        */
+       if (type != I40E_VSI_FDIR) {
+               ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
+               if (ret < 0) {
+                       PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
+                                       vsi->seid, ret);
+                       goto fail_mem;
+               }
+               vsi->base_queue = ret;
+       } else
+               vsi->base_queue = I40E_FDIR_QUEUE_ID;
 
        /* VF has MSIX interrupt in VF range, don't allocate here */
        if (type != I40E_VSI_SRIOV) {
@@ -2987,6 +3074,23 @@ i40e_vsi_setup(struct i40e_pf *pf,
                ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
                ctxt.info.valid_sections |=
                        rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
+       } else if (type == I40E_VSI_FDIR) {
+               vsi->uplink_seid = uplink_vsi->uplink_seid;
+               ctxt.pf_num = hw->pf_id;
+               ctxt.vf_num = 0;
+               ctxt.uplink_seid = vsi->uplink_seid;
+               ctxt.connection_type = 0x1;     /* regular data port */
+               ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+               ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
+                                               I40E_DEFAULT_TCMAP);
+               if (ret != I40E_SUCCESS) {
+                       PMD_DRV_LOG(ERR, "Failed to configure "
+                                       "TC queue mapping.");
+                       goto fail_msix_alloc;
+               }
+               ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
+               ctxt.info.valid_sections |=
+                       rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
        } else {
                PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
                goto fail_msix_alloc;
@@ -3175,8 +3279,16 @@ i40e_pf_setup(struct i40e_pf *pf)
                PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
                return ret;
        }
-
-       /* VSI setup */
+       if (pf->flags & I40E_FLAG_FDIR) {
+               /* allocate queues first, so that FDIR can use queue pair 0 */
+               ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
+               if (ret != I40E_FDIR_QUEUE_ID) {
+                       PMD_DRV_LOG(ERR, "queue allocation failed for FDIR:"
+                                   " ret=%d", ret);
+                       pf->flags &= ~I40E_FLAG_FDIR;
+               }
+       }
+       /* main VSI setup */
        vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Setup of main vsi failed");
@@ -3184,6 +3296,15 @@ i40e_pf_setup(struct i40e_pf *pf)
        }
        pf->main_vsi = vsi;
 
+       /* setup FDIR after the main VSI is created */
+       if (pf->flags & I40E_FLAG_FDIR) {
+               ret = i40e_fdir_setup(pf);
+               if (ret != I40E_SUCCESS) {
+                       PMD_DRV_LOG(ERR, "Failed to setup flow director.");
+                       pf->flags &= ~I40E_FLAG_FDIR;
+               }
+       }
+
        /* Configure filter control */
        memset(&settings, 0, sizeof(settings));
        if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
@@ -3584,7 +3705,7 @@ i40e_stat_update_32(struct i40e_hw *hw,
                *stat = (uint64_t)(new_data - *offset);
        else
                *stat = (uint64_t)((new_data +
-                       ((uint64_t)1 << I40E_32_BIT_SHIFT)) - *offset);
+                       ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
 }
 
 static void
@@ -3599,7 +3720,7 @@ i40e_stat_update_48(struct i40e_hw *hw,
 
        new_data = (uint64_t)I40E_READ_REG(hw, loreg);
        new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
-                       I40E_16_BIT_MASK)) << I40E_32_BIT_SHIFT;
+                       I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
 
        if (!offset_loaded)
                *offset = new_data;
@@ -3608,7 +3729,7 @@ i40e_stat_update_48(struct i40e_hw *hw,
                *stat = new_data - *offset;
        else
                *stat = (uint64_t)((new_data +
-                       ((uint64_t)1 << I40E_48_BIT_SHIFT)) - *offset);
+                       ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
 
        *stat &= I40E_48_BIT_MASK;
 }
@@ -5011,6 +5132,9 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
        case RTE_ETH_FILTER_TUNNEL:
                ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
                break;
+       case RTE_ETH_FILTER_FDIR:
+               ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
+               break;
        default:
                PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
                                                        filter_type);
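
With RTE_ETH_FILTER_FDIR dispatched to i40e_fdir_ctrl_func(), applications
reach the flow director through the generic filter control API. A
hypothetical sketch adding a perfect IPv4/UDP filter (port_id assumed; field
names follow the rte_eth_fdir_filter layout this series relies on):

        #include <rte_ethdev.h>
        #include <rte_ip.h>         /* IPv4() */
        #include <rte_byteorder.h>  /* rte_cpu_to_be_*() */

        struct rte_eth_fdir_filter entry;
        int ret;

        memset(&entry, 0, sizeof(entry));
        entry.input.flow_type = RTE_ETH_FLOW_TYPE_UDPV4;
        entry.input.flow.udp4_flow.ip.src_ip = rte_cpu_to_be_32(IPv4(2, 2, 2, 3));
        entry.input.flow.udp4_flow.ip.dst_ip = rte_cpu_to_be_32(IPv4(2, 2, 2, 5));
        entry.input.flow.udp4_flow.src_port = rte_cpu_to_be_16(32);
        entry.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(33);
        entry.action.rx_queue = 1;                       /* queue for matches */
        entry.action.behavior = RTE_ETH_FDIR_ACCEPT;
        entry.action.report_status = RTE_ETH_FDIR_REPORT_ID;
        ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
                                      RTE_ETH_FILTER_ADD, &entry);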
@@ -5020,3 +5144,49 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
 
        return ret;
 }
+
+enum i40e_filter_pctype
+i40e_flowtype_to_pctype(enum rte_eth_flow_type flow_type)
+{
+       static const enum i40e_filter_pctype pctype_table[] = {
+               [RTE_ETH_FLOW_TYPE_UDPV4] = I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
+               [RTE_ETH_FLOW_TYPE_TCPV4] = I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+               [RTE_ETH_FLOW_TYPE_SCTPV4] = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
+               [RTE_ETH_FLOW_TYPE_IPV4_OTHER] =
+                                       I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
+               [RTE_ETH_FLOW_TYPE_FRAG_IPV4] =
+                                       I40E_FILTER_PCTYPE_FRAG_IPV4,
+               [RTE_ETH_FLOW_TYPE_UDPV6] = I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
+               [RTE_ETH_FLOW_TYPE_TCPV6] = I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
+               [RTE_ETH_FLOW_TYPE_SCTPV6] = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
+               [RTE_ETH_FLOW_TYPE_IPV6_OTHER] =
+                                       I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
+               [RTE_ETH_FLOW_TYPE_FRAG_IPV6] =
+                                       I40E_FILTER_PCTYPE_FRAG_IPV6,
+       };
+
+       return pctype_table[flow_type];
+}
+
+enum rte_eth_flow_type
+i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
+{
+       static const enum rte_eth_flow_type flowtype_table[] = {
+               [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = RTE_ETH_FLOW_TYPE_UDPV4,
+               [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = RTE_ETH_FLOW_TYPE_TCPV4,
+               [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = RTE_ETH_FLOW_TYPE_SCTPV4,
+               [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
+                                       RTE_ETH_FLOW_TYPE_IPV4_OTHER,
+               [I40E_FILTER_PCTYPE_FRAG_IPV4] =
+                                       RTE_ETH_FLOW_TYPE_FRAG_IPV4,
+               [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = RTE_ETH_FLOW_TYPE_UDPV6,
+               [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = RTE_ETH_FLOW_TYPE_TCPV6,
+               [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = RTE_ETH_FLOW_TYPE_SCTPV6,
+               [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
+                                       RTE_ETH_FLOW_TYPE_IPV6_OTHER,
+               [I40E_FILTER_PCTYPE_FRAG_IPV6] =
+                                       RTE_ETH_FLOW_TYPE_FRAG_IPV6,
+       };
+
+       return flowtype_table[pctype];
+}
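
Both lookup tables rely on designated initializers, so any enum value not
listed falls through to the zero entry (RTE_ETH_FLOW_TYPE_NONE on one side, a
zero pctype, which the hardware does not use, on the other); callers are
expected to pass validated values. A small, hypothetical sanity check of the
round trip:

        #include <assert.h>

        assert(i40e_pctype_to_flowtype(
                i40e_flowtype_to_pctype(RTE_ETH_FLOW_TYPE_TCPV4)) ==
                RTE_ETH_FLOW_TYPE_TCPV4);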