net/ixgbe: allocate TC bandwidth
author    Bernard Iremonger <bernard.iremonger@intel.com>
          Sat, 1 Apr 2017 01:18:18 +0000 (09:18 +0800)
committer Ferruh Yigit <ferruh.yigit@intel.com>
          Tue, 4 Apr 2017 17:03:03 +0000 (19:03 +0200)
Ixgbe supports setting the relative bandwidth for the TCs.
It is a global setting for the PF and all the VFs of a
physical port.
This patch provides the API to set the bandwidth.
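
A minimal usage sketch, assuming the port has already been configured
for DCB with 4 TCs through rte_eth_dev_configure(); port_id and the
weight values below are illustrative only:

    #include <stdio.h>
    #include <rte_pmd_ixgbe.h>

    uint8_t port_id = 0;                     /* illustrative port id */
    uint8_t bw_weight[4] = {10, 20, 30, 40}; /* one weight per TC, sum = 100 */
    int ret;

    ret = rte_pmd_ixgbe_set_tc_bw_alloc(port_id, 4, bw_weight);
    if (ret != 0)
        printf("setting TC bandwidth failed: %d\n", ret);

The call only records the weights in the per-port DCB configuration;
ixgbe_dcb_hw_configure() applies them to the hardware when the device
is started.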

Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
drivers/net/ixgbe/ixgbe_ethdev.c
drivers/net/ixgbe/ixgbe_ethdev.h
drivers/net/ixgbe/ixgbe_rxtx.c
drivers/net/ixgbe/rte_pmd_ixgbe.h
drivers/net/ixgbe/rte_pmd_ixgbe_version.map

index 0c4d308..0ad59c2 100644 (file)
@@ -1135,6 +1135,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
                IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+       struct ixgbe_bw_conf *bw_conf =
+               IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
        uint32_t ctrl_ext;
        uint16_t csum;
        int diag, i;
@@ -1346,6 +1348,9 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
        TAILQ_INIT(&filter_l2_tunnel_list);
        TAILQ_INIT(&ixgbe_flow_list);
 
+       /* initialize bandwidth configuration info */
+       memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
+
        return 0;
 }
 
@@ -8700,6 +8705,79 @@ ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
        return 0;
 }
 
+int
+rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port,
+                             uint8_t tc_num,
+                             uint8_t *bw_weight)
+{
+       struct rte_eth_dev *dev;
+       struct ixgbe_dcb_config *dcb_config;
+       struct ixgbe_dcb_tc_config *tc;
+       struct rte_eth_conf *eth_conf;
+       struct ixgbe_bw_conf *bw_conf;
+       uint8_t i;
+       uint8_t nb_tcs;
+       uint16_t sum;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+
+       if (!is_device_supported(dev, &rte_ixgbe_pmd))
+               return -ENOTSUP;
+
+       if (tc_num > IXGBE_DCB_MAX_TRAFFIC_CLASS) {
+               PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
+                           IXGBE_DCB_MAX_TRAFFIC_CLASS);
+               return -EINVAL;
+       }
+
+       dcb_config = IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+       bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
+       eth_conf = &dev->data->dev_conf;
+
+       if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+               nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
+       } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+               if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+                   ETH_32_POOLS)
+                       nb_tcs = ETH_4_TCS;
+               else
+                       nb_tcs = ETH_8_TCS;
+       } else {
+               nb_tcs = 1;
+       }
+
+       if (nb_tcs != tc_num) {
+               PMD_DRV_LOG(ERR,
+                           "Weight should be set for all %d enabled TCs.",
+                           nb_tcs);
+               return -EINVAL;
+       }
+
+       sum = 0;
+       for (i = 0; i < nb_tcs; i++)
+               sum += bw_weight[i];
+       if (sum != 100) {
+               PMD_DRV_LOG(ERR,
+                           "The sum of the TC weights should be 100.");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < nb_tcs; i++) {
+               tc = &dcb_config->tc_config[i];
+               tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bw_weight[i];
+       }
+       for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+               tc = &dcb_config->tc_config[i];
+               tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
+       }
+
+       bw_conf->tc_num = nb_tcs;
+
+       return 0;
+}
+
 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio");
index f4ff6ad..a32ba4d 100644 (file)
@@ -428,6 +428,11 @@ struct ixgbe_macsec_stats {
        uint64_t in_pkts_notusingsa;
 };
 
+/* The configuration of bandwidth */
+struct ixgbe_bw_conf {
+       uint8_t tc_num; /* Number of TCs. */
+};
+
 /*
  * Structure to store private data for each driver instance (for each port).
  */
@@ -449,6 +454,7 @@ struct ixgbe_adapter {
 #endif /* RTE_NIC_BYPASS */
        struct ixgbe_filter_info    filter;
        struct ixgbe_l2_tn_info     l2_tn;
+       struct ixgbe_bw_conf        bw_conf;
 
        bool rx_bulk_alloc_allowed;
        bool rx_vec_allowed;
@@ -502,6 +508,9 @@ struct ixgbe_adapter {
 #define IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(adapter) \
        (&((struct ixgbe_adapter *)adapter)->l2_tn)
 
+#define IXGBE_DEV_PRIVATE_TO_BW_CONF(adapter) \
+       (&((struct ixgbe_adapter *)adapter)->bw_conf)
+
 /*
  * RX/TX function prototypes
  */
index 4398719..eb7960b 100644 (file)
@@ -3762,6 +3762,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
        uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
        struct ixgbe_hw *hw =
                        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_bw_conf *bw_conf =
+               IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
 
        switch (dev->data->dev_conf.rxmode.mq_mode) {
        case ETH_MQ_RX_VMDQ_DCB:
@@ -3833,8 +3835,9 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
                /* Re-configure 4 TCs BW */
                for (i = 0; i < nb_tcs; i++) {
                        tc = &dcb_config->tc_config[i];
-                       tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
-                                               (uint8_t)(100 / nb_tcs);
+                       if (bw_conf->tc_num != nb_tcs)
+                               tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+                                       (uint8_t)(100 / nb_tcs);
                        tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
                                                (uint8_t)(100 / nb_tcs);
                }
@@ -3847,8 +3850,9 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
                /* Re-configure 8 TCs BW */
                for (i = 0; i < nb_tcs; i++) {
                        tc = &dcb_config->tc_config[i];
-                       tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
-                               (uint8_t)(100 / nb_tcs + (i & 1));
+                       if (bw_conf->tc_num != nb_tcs)
+                               tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+                                       (uint8_t)(100 / nb_tcs + (i & 1));
                        tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
                                (uint8_t)(100 / nb_tcs + (i & 1));
                }
index cdb747e..1f2b1bd 100644 (file)
@@ -404,6 +404,29 @@ rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan, uint64_t vf_mask,
  */
 int rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf, uint16_t tx_rate, uint64_t q_msk);
 
+/**
+ * Set all the TCs' bandwidth weight.
+ *
+ * Each bw_weight is the percentage of the bandwidth occupied by the TC.
+ * It can be taken as a relative minimum bandwidth setting.
+ *
+ * @param port
+ *    The port identifier of the Ethernet device.
+ * @param tc_num
+ *    Number of TCs.
+ * @param bw_weight
+ *    An array of relative bandwidth weights, one per TC.
+ *    The sum of the weights should be 100.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-EINVAL) if bad parameter.
+ *   - (-ENOTSUP) if the port is not an ixgbe port.
+ */
+int rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port,
+                                 uint8_t tc_num,
+                                 uint8_t *bw_weight);
+
 /**
  * Response sent back to ixgbe driver from user app after callback
  */
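
For context, a rough sketch of how an application could enable DCB so
that the tc_num passed to this API matches the number of enabled TCs
(string.h, rte_ethdev.h and rte_pmd_ixgbe.h are assumed to be included;
the 4-TC choice, queue counts and weights are illustrative):

    struct rte_eth_conf conf;
    uint8_t port_id = 0;                  /* illustrative port id */

    memset(&conf, 0, sizeof(conf));
    conf.rxmode.mq_mode = ETH_MQ_RX_DCB;
    conf.txmode.mq_mode = ETH_MQ_TX_DCB;
    conf.rx_adv_conf.dcb_rx_conf.nb_tcs = ETH_4_TCS;
    conf.tx_adv_conf.dcb_tx_conf.nb_tcs = ETH_4_TCS;
    /* a complete DCB setup also fills the dcb_tc[] priority-to-TC maps */

    if (rte_eth_dev_configure(port_id, 4, 4, &conf) == 0) {
        uint8_t bw_weight[4] = {25, 25, 25, 25};

        rte_pmd_ixgbe_set_tc_bw_alloc(port_id, 4, bw_weight);
    }
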
index 2c7512d..45a57e3 100644 (file)
@@ -36,4 +36,5 @@ DPDK_17.05 {
        global:
 
        rte_pmd_ixgbe_ping_vf;
+       rte_pmd_ixgbe_set_tc_bw_alloc;
 } DPDK_17.02;