This patch adds a new API to retrieve DCB-related information from an
Ethernet device:
rte_eth_dev_get_dcb_info
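
For example, an application can query the DCB configuration of a port
as follows (a minimal sketch; error handling beyond the return code is
elided, and port_id is assumed to be a valid, configured port):

    struct rte_eth_dcb_info dcb_info;
    uint8_t up;

    if (rte_eth_dev_get_dcb_info(port_id, &dcb_info) == 0) {
        printf("number of TCs: %d\n", dcb_info.nb_tcs);
        for (up = 0; up < ETH_DCB_NUM_USER_PRIORITIES; up++)
            printf("priority %d -> TC %d\n",
                up, dcb_info.prio_tc[up]);
    }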
Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Acked-by: Jijiang Liu <jijiang.liu@intel.com>
Acked-by: Helin Zhang <helin.zhang@intel.com>
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg);
+static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
+ struct rte_eth_dcb_info *dcb_info);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct i40e_hw *hw);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
.timesync_disable = i40e_timesync_disable,
.timesync_read_rx_timestamp = i40e_timesync_read_rx_timestamp,
.timesync_read_tx_timestamp = i40e_timesync_read_tx_timestamp,
+ .get_dcb_info = i40e_dev_get_dcb_info,
};
static struct eth_driver rte_i40e_pmd = {
}
return 0;
}
+
+static int
+i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
+ struct rte_eth_dcb_info *dcb_info)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
+ uint16_t bsf, tc_mapping;
+ int i;
+
+ if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+ dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
+ else
+ dcb_info->nb_tcs = 1;
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
+ for (i = 0; i < dcb_info->nb_tcs; i++)
+ dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & (1 << i)) {
+ tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
+ /* Only the main VSI supports multiple TCs. */
+ dcb_info->tc_queue.tc_rxq[0][i].base =
+ (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
+ dcb_info->tc_queue.tc_txq[0][i].base =
+ dcb_info->tc_queue.tc_rxq[0][i].base;
+ bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
+ dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 1 << bsf;
+ dcb_info->tc_queue.tc_txq[0][i].nb_queue =
+ dcb_info->tc_queue.tc_rxq[0][i].nb_queue;
+ }
+ }
+ return 0;
+}
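
For reference, each tc_mapping word above packs the TC's first queue index
and log2 of its queue count. Below is a standalone sketch of the decode;
the bit positions used here (queue offset in the low 9 bits, log2 of the
queue count starting at bit 9) are assumptions for illustration, and the
I40E_AQ_VSI_TC_QUE_* macros used in the driver are authoritative:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t tc_mapping = 0x0604;              /* example value */
        uint16_t base = tc_mapping & 0x01FF;       /* first queue: 4 */
        uint16_t bsf = (tc_mapping & 0x0E00) >> 9; /* log2(count): 3 */
        uint16_t nb_queue = 1 << bsf;              /* 8 queues */

        printf("queues %d..%d\n", base, base + nb_queue - 1);
        return 0;
    }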
static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
struct ether_addr *mc_addr_set,
uint32_t nb_mc_addr);
+static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
+ struct rte_eth_dcb_info *dcb_info);
static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
static int ixgbe_get_regs(struct rte_eth_dev *dev,
.get_eeprom_length = ixgbe_get_eeprom_length,
.get_eeprom = ixgbe_get_eeprom,
.set_eeprom = ixgbe_set_eeprom,
+ .get_dcb_info = ixgbe_dev_get_dcb_info,
};
/*
}
}
+static int
+ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
+ struct rte_eth_dcb_info *dcb_info)
+{
+ struct ixgbe_dcb_config *dcb_config =
+ IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+ struct ixgbe_dcb_tc_config *tc;
+ uint8_t i, j;
+
+ if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+ dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
+ else
+ dcb_info->nb_tcs = 1;
+
+ if (dcb_config->vt_mode) { /* VT is enabled */
+ struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
+ for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+ for (j = 0; j < dcb_info->nb_tcs; j++) {
+ dcb_info->tc_queue.tc_rxq[i][j].base =
+ i * dcb_info->nb_tcs + j;
+ dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
+ dcb_info->tc_queue.tc_txq[i][j].base =
+ i * dcb_info->nb_tcs + j;
+ dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
+ }
+ }
+ } else { /* VT is disabled */
+ struct rte_eth_dcb_rx_conf *rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
+ if (dcb_info->nb_tcs == ETH_4_TCS) {
+ for (i = 0; i < dcb_info->nb_tcs; i++) {
+ dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
+ dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+ }
+ dcb_info->tc_queue.tc_txq[0][0].base = 0;
+ dcb_info->tc_queue.tc_txq[0][1].base = 64;
+ dcb_info->tc_queue.tc_txq[0][2].base = 96;
+ dcb_info->tc_queue.tc_txq[0][3].base = 112;
+ dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
+ dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+ dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+ dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+ } else if (dcb_info->nb_tcs == ETH_8_TCS) {
+ for (i = 0; i < dcb_info->nb_tcs; i++) {
+ dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
+ dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+ }
+ dcb_info->tc_queue.tc_txq[0][0].base = 0;
+ dcb_info->tc_queue.tc_txq[0][1].base = 32;
+ dcb_info->tc_queue.tc_txq[0][2].base = 64;
+ dcb_info->tc_queue.tc_txq[0][3].base = 80;
+ dcb_info->tc_queue.tc_txq[0][4].base = 96;
+ dcb_info->tc_queue.tc_txq[0][5].base = 104;
+ dcb_info->tc_queue.tc_txq[0][6].base = 112;
+ dcb_info->tc_queue.tc_txq[0][7].base = 120;
+ dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
+ dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+ dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+ dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+ dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
+ dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
+ dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
+ dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
+ }
+ }
+ for (i = 0; i < dcb_info->nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
+ }
+ return 0;
+}
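
The hardcoded TX bases and sizes above reflect the 82599 fixed DCB queue
allocation. As a quick standalone check (not part of the patch) that each
layout tiles the 128 TX queues contiguously:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* TX queues per TC as hardcoded above: 4-TC and 8-TC layouts */
        const uint8_t base4[4] = {0, 64, 96, 112};
        const uint8_t nb4[4] = {64, 32, 16, 16};
        const uint8_t base8[8] = {0, 32, 64, 80, 96, 104, 112, 120};
        const uint8_t nb8[8] = {32, 32, 16, 16, 8, 8, 8, 8};
        unsigned int i, total;

        for (i = 0, total = 0; i < 4; total += nb4[i], i++)
            assert(base4[i] == total); /* contiguous, no overlap */
        assert(total == 128);          /* covers all TX queues */
        for (i = 0, total = 0; i < 8; total += nb8[i], i++)
            assert(base8[i] == total);
        assert(total == 128);
        return 0;
    }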
static struct rte_driver rte_ixgbe_driver = {
.type = PMD_PDEV,
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
return (*dev->dev_ops->set_eeprom)(dev, info);
}
+
+int
+rte_eth_dev_get_dcb_info(uint8_t port_id,
+ struct rte_eth_dcb_info *dcb_info)
+{
+ struct rte_eth_dev *dev;
+
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
+ return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
+}
uint64_t value;
};
+#define ETH_DCB_NUM_TCS 8
+#define ETH_MAX_VMDQ_POOL 64
+
+/**
+ * A structure used to get the queue to TC mapping
+ * on both the TX and RX paths.
+ */
+struct rte_eth_dcb_tc_queue_mapping {
+ /** rx queues assigned to tc per Pool */
+ struct {
+ uint8_t base;
+ uint8_t nb_queue;
+ } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
+ /** tx queues assigned to tc per pool */
+ struct {
+ uint8_t base;
+ uint8_t nb_queue;
+ } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
+};
+
+/**
+ * A structure used to retrieve the DCB information of an Ethernet device.
+ * It includes the user priority to TC mapping and the queue to TC mapping.
+ */
+struct rte_eth_dcb_info {
+ uint8_t nb_tcs; /**< number of TCs */
+ uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Priority to TC mapping */
+ uint8_t tc_bws[ETH_DCB_NUM_TCS]; /**< TX BW percentage for each TC */
+ /** RX and TX queues assigned to each TC */
+ struct rte_eth_dcb_tc_queue_mapping tc_queue;
+};
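
As a usage sketch (a hypothetical helper, not part of this patch), an
application could walk the per-pool mapping as follows. Here nb_pools is
assumed to come from the application's own VMDq+DCB configuration, and
unused slots can be skipped via nb_queue == 0 because the library zeroes
the structure before calling into the driver:

    #include <stdint.h>
    #include <stdio.h>
    #include <rte_ethdev.h> /* struct rte_eth_dcb_info */

    static void
    print_dcb_queue_map(const struct rte_eth_dcb_info *info, uint8_t nb_pools)
    {
        uint8_t p, t;

        for (p = 0; p < nb_pools; p++) {
            for (t = 0; t < info->nb_tcs; t++) {
                uint8_t base = info->tc_queue.tc_rxq[p][t].base;
                uint8_t nb = info->tc_queue.tc_rxq[p][t].nb_queue;

                if (nb == 0)
                    continue; /* TC not mapped in this pool */
                printf("pool %d, TC %d: rxq %d..%d\n",
                    p, t, base, base + nb - 1);
            }
        }
    }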
+
struct rte_eth_dev;
struct rte_eth_dev_callback;
void *arg);
/**< @internal Take operations to assigned filter type on an Ethernet device */
+typedef int (*eth_get_dcb_info)(struct rte_eth_dev *dev,
+ struct rte_eth_dcb_info *dcb_info);
+/**< @internal Get DCB information on an Ethernet device */
+
/**
* @internal A structure containing the functions exported by an Ethernet driver.
*/
eth_timesync_read_rx_timestamp_t timesync_read_rx_timestamp;
/** Read the IEEE1588/802.1AS TX timestamp. */
eth_timesync_read_tx_timestamp_t timesync_read_tx_timestamp;
+
+ /** Get DCB information */
+ eth_get_dcb_info get_dcb_info;
};
/**
int rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
enum rte_filter_op filter_op, void *arg);
+/**
+ * Get DCB information on an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param dcb_info
+ * A pointer to a structure of type *rte_eth_dcb_info* to be filled with
+ * the DCB information of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if port identifier is invalid.
+ * - (-ENOTSUP) if the hardware does not support this function.
+ */
+int rte_eth_dev_get_dcb_info(uint8_t port_id,
+ struct rte_eth_dcb_info *dcb_info);
+
/**
* Add a callback to be called on packet RX on a given port and queue.
*
rte_eth_timesync_read_tx_timestamp;
} DPDK_2.0;
+
+DPDK_2.2 {
+ global:
+
+ rte_eth_dev_get_dcb_info;
+
+} DPDK_2.1;