vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
}
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
- vmdq_rx_conf.dcb_queue[i] = i;
- vmdq_tx_conf.dcb_queue[i] = i;
+ vmdq_rx_conf.dcb_tc[i] = i;
+ vmdq_tx_conf.dcb_tc[i] = i;
}
/* Set DCB mode of RX and TX of multiple queues */
tx_conf.nb_tcs = dcb_conf->num_tcs;
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
- rx_conf.dcb_queue[i] = i;
- tx_conf.dcb_queue[i] = i;
+ rx_conf.dcb_tc[i] = i;
+ tx_conf.dcb_tc[i] = i;
}
eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
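For context, the same port-level setup can be sketched standalone. This is a minimal sketch, not the application's code: the helper name setup_dcb is hypothetical, while the field paths (rx_adv_conf.dcb_rx_conf, tx_adv_conf.dcb_tx_conf) match the rte_eth_conf layout of this DPDK era.

    #include <rte_ethdev.h>

    /* Hypothetical helper: map the eight user priorities round-robin onto
     * the configured TCs via the renamed dcb_tc[] arrays, then enable DCB
     * multi-queue mode in both directions. */
    static void
    setup_dcb(struct rte_eth_conf *eth_conf, enum rte_eth_nb_tcs num_tcs)
    {
        struct rte_eth_dcb_rx_conf *rx_conf =
            &eth_conf->rx_adv_conf.dcb_rx_conf;
        struct rte_eth_dcb_tx_conf *tx_conf =
            &eth_conf->tx_adv_conf.dcb_tx_conf;
        uint8_t i;

        rx_conf->nb_tcs = num_tcs;
        tx_conf->nb_tcs = num_tcs;
        for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
            rx_conf->dcb_tc[i] = i % num_tcs; /* identity map when num_tcs == 8 */
            tx_conf->dcb_tc[i] = i % num_tcs;
        }
        eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
        eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
    }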
* The deprecated flow director API is removed.
It was replaced by rte_eth_dev_filter_ctrl().
+* The dcb_queue field is renamed to dcb_tc in the following DCB
+  configuration structures: rte_eth_dcb_rx_conf, rte_eth_dcb_tx_conf,
+  rte_eth_vmdq_dcb_conf, rte_eth_vmdq_dcb_tx_conf (a migration sketch
+  follows this list).
+
* The function rte_eal_pci_close_one() is removed.
It was replaced by rte_eal_pci_detach().
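Since the rename is purely mechanical (same array size, same UP-to-TC semantics), migrating application code is a one-line substitution per site. A minimal sketch, assuming the common identity mapping; fill_up_to_tc_map is a hypothetical name:

    #include <rte_ethdev.h>

    /* The identity UP->TC map that older code wrote into dcb_queue[]
     * is now written into dcb_tc[]; nothing else changes. */
    static void
    fill_up_to_tc_map(struct rte_eth_dcb_rx_conf *rx_conf)
    {
        uint8_t i;

        for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
            rx_conf->dcb_tc[i] = i; /* was: rx_conf->dcb_queue[i] = i; */
    }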
* mapping is done with 3 bits per priority,
* so shift by i*3 each time
*/
- queue_mapping |= ((cfg->dcb_queue[i] & 0x07) << (i * 3));
+ queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
}
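The register packing above can be checked in isolation: eight user priorities at 3 bits each occupy the low 24 bits of the 32-bit RTRUP2TC value. A standalone sketch with an illustrative dcb_tc map:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* Example map: UPs 0-7 spread across 4 TCs, two UPs per TC. */
        uint8_t dcb_tc[8] = { 0, 0, 1, 1, 2, 2, 3, 3 };
        uint32_t queue_mapping = 0;
        int i;

        /* UP i occupies bits [3*i+2 : 3*i], masked to 3 bits. */
        for (i = 0; i < 8; i++)
            queue_mapping |= (uint32_t)(dcb_tc[i] & 0x07) << (i * 3);

        printf("RTRUP2TC = 0x%08x\n", queue_mapping);
        return 0;
    }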
/* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
- j = vmdq_rx_conf->dcb_queue[i];
+ j = vmdq_rx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
(uint8_t)(1 << j);
/* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
- j = vmdq_tx_conf->dcb_queue[i];
+ j = vmdq_tx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
(uint8_t)(1 << j);
/* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
- j = rx_conf->dcb_queue[i];
+ j = rx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
(uint8_t)(1 << j);
/* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
- j = tx_conf->dcb_queue[i];
+ j = tx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
(uint8_t)(1 << j);
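The bookkeeping these four loops share can be modeled standalone: dcb_tc[i] names the TC that user priority i lands in, and each TC records its member UPs in a bitmap. A minimal sketch of that intended mapping; note it ORs in 1 << i per user priority so a TC serving several UPs keeps all of their bits, whereas the driver hunks above store a single-bit 1 << j value:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* Example map: UPs 0-7 spread across 4 TCs, two UPs per TC. */
        uint8_t dcb_tc[8] = { 0, 0, 1, 1, 2, 2, 3, 3 };
        uint8_t up_to_tc_bitmap[8] = { 0 };
        int i;

        /* Each TC accumulates one bit per user priority mapped to it. */
        for (i = 0; i < 8; i++)
            up_to_tc_bitmap[dcb_tc[i]] |= (uint8_t)(1 << i);

        for (i = 0; i < 4; i++)
            printf("TC %d serves UP bitmap 0x%02x\n",
                   i, (unsigned int)up_to_tc_bitmap[i]);
        return 0;
    }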
.default_pool = 0,
.nb_pool_maps = 0,
.pool_map = {{0, 0},},
- .dcb_queue = {0},
+ .dcb_tc = {0},
},
},
};
conf.pool_map[i].pools = 1 << (i % num_pools);
}
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
- conf.dcb_queue[i] = (uint8_t)(i % (NUM_QUEUES/num_pools));
+ conf.dcb_tc[i] = (uint8_t)(i % (NUM_QUEUES/num_pools));
}
(void)(rte_memcpy(eth_conf, &vmdq_dcb_conf_default, sizeof(*eth_conf)));
(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &conf,
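The round-robin above decides which queue inside each pool a priority lands on. Assuming an ixgbe-style layout where each pool owns a contiguous block of NUM_QUEUES/num_pools queues (so queue = pool * queues_per_pool + tc; the constants here are illustrative, not taken from the patch), the resulting map can be printed standalone:

    #include <stdio.h>

    #define NUM_QUEUES 128 /* assumption: ixgbe-style queue count */

    int
    main(void)
    {
        unsigned int num_pools = 32; /* 32 pools x 4 TCs */
        unsigned int queues_per_pool = NUM_QUEUES / num_pools;
        unsigned int pool = 5;       /* illustrative pool index */
        unsigned int up;

        for (up = 0; up < 8; up++) {
            unsigned int tc = up % queues_per_pool;
            printf("UP %u -> TC %u -> queue %u\n",
                   up, tc, pool * queues_per_pool + tc);
        }
        return 0;
    }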
/* This structure may be extended in future. */
struct rte_eth_dcb_rx_conf {
enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs */
- uint8_t dcb_queue[ETH_DCB_NUM_USER_PRIORITIES];
- /**< Possible DCB queue,4 or 8. */
+ /** Traffic class each UP mapped to. */
+ uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};
struct rte_eth_vmdq_dcb_tx_conf {
enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools. */
- uint8_t dcb_queue[ETH_DCB_NUM_USER_PRIORITIES];
- /**< Possible DCB queue,4 or 8. */
+ /** Traffic class each UP mapped to. */
+ uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};
struct rte_eth_dcb_tx_conf {
enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs. */
- uint8_t dcb_queue[ETH_DCB_NUM_USER_PRIORITIES];
- /**< Possible DCB queue,4 or 8. */
+ /** Traffic class each UP mapped to. */
+ uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};
struct rte_eth_vmdq_tx_conf {
uint16_t vlan_id; /**< The vlan id of the received frame */
uint64_t pools; /**< Bitmask of pools for packet rx */
} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
- uint8_t dcb_queue[ETH_DCB_NUM_USER_PRIORITIES];
- /**< Selects a queue in a pool */
+ /** Traffic class each UP mapped to. */
+ uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};
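Putting the renamed fields together, a hedged sketch of filling rte_eth_vmdq_dcb_conf for a 16-pool/8-TC split, mirroring the example application above (fill_vmdq_dcb is a hypothetical name):

    #include <string.h>
    #include <rte_ethdev.h>

    static void
    fill_vmdq_dcb(struct rte_eth_vmdq_dcb_conf *conf)
    {
        uint8_t i;

        memset(conf, 0, sizeof(*conf));
        conf->nb_queue_pools = ETH_16_POOLS; /* 16 pools x 8 TCs */
        for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
            conf->dcb_tc[i] = i; /* identity UP->TC map */
    }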