	struct ixgbe_dcb_config *dcb_config =
			IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
	struct ixgbe_dcb_tc_config *tc;
+	struct rte_eth_dcb_tc_queue_mapping *tc_queue;
+	uint8_t nb_tcs;
	uint8_t i, j;

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
	else
		dcb_info->nb_tcs = 1;
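+	/* Shorthands for the mapping filled in below: tc_queue aliases
+	 * dcb_info->tc_queue and nb_tcs caches dcb_info->nb_tcs.
+	 */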
+	tc_queue = &dcb_info->tc_queue;
+	nb_tcs = dcb_info->nb_tcs;
+
	if (dcb_config->vt_mode) { /* vt is enabled*/
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
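+		/* Report the user-priority-to-TC mapping taken from the
+		 * VMDq+DCB Rx configuration.
+		 */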
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
-		for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
-			for (j = 0; j < dcb_info->nb_tcs; j++) {
-				dcb_info->tc_queue.tc_rxq[i][j].base =
-						i * dcb_info->nb_tcs + j;
-				dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
-				dcb_info->tc_queue.tc_txq[i][j].base =
-						i * dcb_info->nb_tcs + j;
-				dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
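+		/* When SR-IOV is active only one pool's queues are usable
+		 * by this port, so the mapping covers a single pool with
+		 * queue ids starting at 0.
+		 */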
+		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
+			for (j = 0; j < nb_tcs; j++) {
+				tc_queue->tc_rxq[0][j].base = j;
+				tc_queue->tc_rxq[0][j].nb_queue = 1;
+				tc_queue->tc_txq[0][j].base = j;
+				tc_queue->tc_txq[0][j].nb_queue = 1;
+			}
+		} else {
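+			/* No SR-IOV: map every VMDq pool; pool i owns queues
+			 * [i * nb_tcs, (i + 1) * nb_tcs), one queue per TC.
+			 */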
+			for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+				for (j = 0; j < nb_tcs; j++) {
+					tc_queue->tc_rxq[i][j].base =
+							i * nb_tcs + j;
+					tc_queue->tc_rxq[i][j].nb_queue = 1;
+					tc_queue->tc_txq[i][j].base =
+							i * nb_tcs + j;
+					tc_queue->tc_txq[i][j].nb_queue = 1;
+				}
			}
		}
	} else { /* vt is disabled*/