static void
qede_assign_rxtx_handlers(struct rte_eth_dev *dev)
{
+ uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
struct qede_dev *qdev = dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
+ bool use_tx_offload = false;
if (ECORE_IS_CMT(edev)) {
dev->rx_pkt_burst = qede_recv_pkts_cmt;
dev->rx_pkt_burst = qede_recv_pkts_regular;
}
- dev->tx_pkt_burst = qede_xmit_pkts;
+ use_tx_offload = !!(tx_offloads &
+ (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
+ DEV_TX_OFFLOAD_TCP_TSO | /* tso */
+ DEV_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */
+
+ if (use_tx_offload) {
+ DP_INFO(edev, "Assigning qede_xmit_pkts\n");
+ dev->tx_pkt_burst = qede_xmit_pkts;
+ } else {
+ DP_INFO(edev, "Assigning qede_xmit_pkts_regular\n");
+ dev->tx_pkt_burst = qede_xmit_pkts_regular;
+ }
}
static void
qede_reset_queue_stats(qdev, true);
/* Newer SR-IOV PF driver expects RX/TX queues to be started before
 * enabling RSS. Hence RSS configuration is deferred up to this point.
* Also, we would like to retain similar behavior in PF case, so we
* don't do PF/VF specific check here.
*/
if (eth_dev->data->dev_started)
qede_dev_stop(eth_dev);
- qede_stop_vport(edev);
+ if (qdev->vport_started)
+ qede_stop_vport(edev);
qdev->vport_started = false;
qede_fdir_dealloc_resc(eth_dev);
qede_dealloc_fp_resc(eth_dev);