struct rte_eth_dev_info *dev_info);
static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+
struct fm10k_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
unsigned offset;
int ret;
struct rte_eth_dev_info dev_info;
uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
+ uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
PMD_INIT_FUNC_TRACE();
rx_offloads, dev_info.rx_offload_capa);
return -ENOTSUP;
}
+ if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
+ PMD_DRV_LOG(ERR, "Some Tx offloads are not supported, "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+ tx_offloads, dev_info.tx_offload_capa);
+ return -ENOTSUP;
+ }
+
/* multiple queue mode checking */
ret = fm10k_check_mq_mode(dev);
if (ret != 0) {
dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev);
dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) |
dev_info->rx_queue_offload_capa;
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->tx_queue_offload_capa = fm10k_get_tx_queue_offloads_capa(dev);
+ dev_info->tx_offload_capa = fm10k_get_tx_port_offloads_capa(dev) |
+ dev_info->tx_queue_offload_capa;
dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
dev_info->reta_size = FM10K_MAX_RSS_INDICES;
.tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
.tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
.txq_flags = FM10K_SIMPLE_TX_FLAG,
+ .offloads = 0,
};
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
return 0;
}
+static uint64_t
+fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
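+ /* fm10k exposes no Tx offloads that can be enabled per queue only */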
+ return 0;
+}
+
+static uint64_t
+fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
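+ /* every supported Tx offload applies port-wide */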
+ return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO);
+}
+
+static int
+fm10k_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
+ uint64_t queue_supported = fm10k_get_tx_queue_offloads_capa(dev);
+ uint64_t port_supported = fm10k_get_tx_port_offloads_capa(dev);
+
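+ /* reject offloads supported neither per queue nor per port */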
+ if ((requested & (queue_supported | port_supported)) != requested)
+ return 0;
+
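+ /* a port-level offload must match the port-wide configuration;
+  * it cannot be toggled for an individual queue
+  */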
+ if ((port_offloads ^ requested) & port_supported)
+ return 0;
+
+ return 1;
+}
+
static int
fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
uint16_t nb_desc, unsigned int socket_id,
PMD_INIT_FUNC_TRACE();
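+ /* validate the requested per-queue Tx offloads before setting up the queue */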
+ if (!fm10k_check_tx_queue_offloads(dev, conf->offloads)) {
+ PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported port offloads 0x%" PRIx64
+ " or supported queue offloads 0x%" PRIx64,
+ (void *)dev, conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ fm10k_get_tx_port_offloads_capa(dev),
+ fm10k_get_tx_queue_offloads_capa(dev));
+ return -ENOTSUP;
+ }
+
/* make sure a valid number of descriptors have been requested */
if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
FM10K_MULT_TX_DESC, nb_desc)) {
q->port_id = dev->data->port_id;
q->queue_id = queue_id;
q->txq_flags = conf->txq_flags;
+ q->offloads = conf->offloads;
q->ops = &def_txq_ops;
q->tail_ptr = (volatile uint32_t *)
&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];