int eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset);
int eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset);
+uint64_t igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+
int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
int eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset);
int eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset);
+uint64_t em_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+
int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct rte_eth_dev_info dev_info;
uint64_t rx_offloads;
+ uint64_t tx_offloads;
PMD_INIT_FUNC_TRACE();
intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
rx_offloads, dev_info.rx_offload_capa);
return -ENOTSUP;
}
+ tx_offloads = dev->data->dev_conf.txmode.offloads;
+ if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
+ PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+ tx_offloads, dev_info.tx_offload_capa);
+ return -ENOTSUP;
+ }
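/*
 * Illustrative sketch, not part of this patch: the configure-time check
 * above expects applications to request Tx offloads through
 * dev_conf.txmode.offloads before rte_eth_dev_configure(); anything not in
 * the advertised dev_info.tx_offload_capa is rejected with -ENOTSUP. The
 * port id, queue counts and flag choice below are assumptions made only
 * for this example.
 */
#include <string.h>
#include <rte_ethdev.h>

static int
example_configure_port_tx_offloads(uint16_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	/* Request checksum insertion at port level. */
	conf.txmode.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
			       DEV_TX_OFFLOAD_TCP_CKSUM;

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}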
PMD_INIT_FUNC_TRACE();
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = em_get_max_pktlen(dev);
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
/*
* Starting with 631xESB hw supports 2 TX/RX queues per port.
dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev);
dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) |
dev_info->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = em_get_tx_queue_offloads_capa(dev);
+ dev_info->tx_offload_capa = em_get_tx_port_offloads_capa(dev) |
+ dev_info->tx_queue_offload_capa;
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
.nb_max = E1000_MAX_RING_DESC,
uint8_t wthresh; /**< Write-back threshold register. */
struct em_ctx_info ctx_cache;
/**< Hardware context history.*/
+ uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
};
#if 1
memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache));
}
+uint64_t
+em_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t tx_offload_capa;
+
+ RTE_SET_USED(dev);
+ tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ return tx_offload_capa;
+}
+
+uint64_t
+em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t tx_queue_offload_capa;
+
+ /*
+ * As only one Tx queue can be used, report the per-queue offload
+ * capabilities as identical to the per-port capabilities for
+ * convenience.
+ */
+ tx_queue_offload_capa = em_get_tx_port_offloads_capa(dev);
+
+ return tx_queue_offload_capa;
+}
+
+static int
+em_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
+ uint64_t queue_supported = em_get_tx_queue_offloads_capa(dev);
+ uint64_t port_supported = em_get_tx_port_offloads_capa(dev);
+
+ if ((requested & (queue_supported | port_supported)) != requested)
+ return 0;
+
+ if ((port_offloads ^ requested) & port_supported)
+ return 0;
+
+ return 1;
+}
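/*
 * Illustrative sketch, not part of this patch: em_check_tx_queue_offloads()
 * accepts a queue-level request only if every requested flag is a port or
 * queue capability and the port-level flags in the request match what was
 * enabled in dev_conf.txmode.offloads. Since this driver reports identical
 * port and queue capabilities, the simplest valid setup re-states the
 * port-level offloads per queue. The helper name and parameters below are
 * assumptions made only for this example.
 */
#include <rte_ethdev.h>

static int
example_setup_tx_queue(uint16_t port_id, uint16_t queue_id,
		       uint16_t nb_desc, uint64_t port_tx_offloads)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;

	rte_eth_dev_info_get(port_id, &dev_info);
	txconf = dev_info.default_txconf;
	/* Requesting a port-level offload here that differs from
	 * txmode.offloads makes eth_em_tx_queue_setup() fail with -ENOTSUP. */
	txconf.offloads = port_tx_offloads;

	return rte_eth_tx_queue_setup(port_id, queue_id, nb_desc,
				      rte_eth_dev_socket_id(port_id),
				      &txconf);
}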
+
int
eth_em_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!em_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+ PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported port offloads 0x%" PRIx64
+ " or supported queue offloads 0x%" PRIx64,
+ (void *)dev,
+ tx_conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ em_get_tx_port_offloads_capa(dev),
+ em_get_tx_queue_offloads_capa(dev));
+ return -ENOTSUP;
+ }
+
/*
* Validate number of transmit descriptors.
* It must not exceed hardware maximum, and must be multiple
em_reset_tx_queue(txq);
dev->data->tx_queues[queue_idx] = txq;
+ txq->offloads = tx_conf->offloads;
return 0;
}
qinfo->conf.tx_thresh.wthresh = txq->wthresh;
qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+ qinfo->conf.offloads = txq->offloads;
}
dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
dev_info->rx_queue_offload_capa;
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
+ dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
+ dev_info->tx_queue_offload_capa;
switch (hw->mac.type) {
case e1000_82575:
.wthresh = IGB_DEFAULT_TX_WTHRESH,
},
.txq_flags = 0,
+ .offloads = 0,
};
dev_info->rx_desc_lim = rx_desc_lim;
dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
dev_info->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
+ dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
+ dev_info->tx_queue_offload_capa;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
.wthresh = IGB_DEFAULT_TX_WTHRESH,
},
.txq_flags = 0,
+ .offloads = 0,
};
dev_info->rx_desc_lim = rx_desc_lim;
/**< Start context position for transmit queue. */
struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
/**< Hardware context history.*/
+ uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
};
#if 1
igb_reset_tx_queue_stat(txq);
}
+uint64_t
+igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t tx_offload_capa;
+
+ RTE_SET_USED(dev);
+ tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+
+ return tx_offload_capa;
+}
+
+uint64_t
+igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t tx_queue_offload_capa;
+
+ tx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev);
+
+ return tx_queue_offload_capa;
+}
+
+static int
+igb_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
+ uint64_t queue_supported = igb_get_tx_queue_offloads_capa(dev);
+ uint64_t port_supported = igb_get_tx_port_offloads_capa(dev);
+
+ if ((requested & (queue_supported | port_supported)) != requested)
+ return 0;
+
+ if ((port_offloads ^ requested) & port_supported)
+ return 0;
+
+ return 1;
+}
+
int
eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
struct e1000_hw *hw;
uint32_t size;
+ if (!igb_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+ PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported port offloads 0x%" PRIx64
+ " or supported queue offloads 0x%" PRIx64,
+ (void *)dev,
+ tx_conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ igb_get_tx_port_offloads_capa(dev),
+ igb_get_tx_queue_offloads_capa(dev));
+ return -ENOTSUP;
+ }
+
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/*
dev->tx_pkt_burst = eth_igb_xmit_pkts;
dev->tx_pkt_prepare = &eth_igb_prep_pkts;
dev->data->tx_queues[queue_idx] = txq;
+ txq->offloads = tx_conf->offloads;
return 0;
}
qinfo->conf.tx_thresh.pthresh = txq->pthresh;
qinfo->conf.tx_thresh.hthresh = txq->hthresh;
qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+ qinfo->conf.offloads = txq->offloads;
}
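/*
 * Illustrative sketch, not part of this patch: with qinfo->conf.offloads
 * filled in above, an application can read back the offloads actually
 * applied to a queue. The function and variable names below are
 * assumptions made only for this example.
 */
#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

static void
example_dump_tx_queue_offloads(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_txq_info qinfo;

	if (rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo) == 0)
		printf("port %u txq %u offloads 0x%" PRIx64 "\n",
		       port_id, queue_id, qinfo.conf.offloads);
}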
int