DP_ERR(edev, "rte_intr_ack failed\n");
}
+static void
+qede_assign_rxtx_handlers(struct rte_eth_dev *dev)
+{
+ uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+ struct qede_dev *qdev = dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ bool use_tx_offload = false;
+
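+ /* CMT devices (100G, two hardware functions) always need the
+ * dedicated CMT handlers, which service queues on both engines.
+ */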
+ if (ECORE_IS_CMT(edev)) {
+ dev->rx_pkt_burst = qede_recv_pkts_cmt;
+ dev->tx_pkt_burst = qede_xmit_pkts_cmt;
+ return;
+ }
+
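+ /* LRO aggregation and scattered Rx require the multi-segment
+ * capable Rx handler; everything else can use the leaner
+ * single-segment fast path.
+ */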
+ if (dev->data->lro || dev->data->scattered_rx) {
+ DP_INFO(edev, "Assigning qede_recv_pkts\n");
+ dev->rx_pkt_burst = qede_recv_pkts;
+ } else {
+ DP_INFO(edev, "Assigning qede_recv_pkts_regular\n");
+ dev->rx_pkt_burst = qede_recv_pkts_regular;
+ }
+
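+ /* The regular Tx path still covers plain L3/L4 checksum offloads;
+ * only tunnel checksums, TSO and VLAN insertion need the
+ * full-featured handler.
+ */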
+ use_tx_offload = !!(tx_offloads &
+ (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
+ DEV_TX_OFFLOAD_TCP_TSO | /* tso */
+ DEV_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */
+
+ if (use_tx_offload) {
+ DP_INFO(edev, "Assigning qede_xmit_pkts\n");
+ dev->tx_pkt_burst = qede_xmit_pkts;
+ } else {
+ DP_INFO(edev, "Assigning qede_xmit_pkts_regular\n");
+ dev->tx_pkt_burst = qede_xmit_pkts_regular;
+ }
+}
+
static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
params.update_vport_active_tx_flg = 1;
params.vport_active_rx_flg = flg;
params.vport_active_tx_flg = flg;
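+ /* When activating the vport while Tx switching is disabled by
+ * config, explicitly request the update that turns it off.
+ */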
- if (~qdev->enable_tx_switching & flg) {
+ if (!qdev->enable_tx_switching && flg) {
params.update_tx_switching_flg = 1;
params.tx_switching_flg = !flg;
}
/* Start/resume traffic */
qede_fastpath_start(edev);
+ qede_assign_rxtx_handlers(eth_dev);
DP_INFO(edev, "Device started\n");
return 0;
PMD_INIT_FUNC_TRACE(edev);
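+ /* RSS mode implies delivering the computed hash in each mbuf, so
+ * turn on the RSS hash offload whenever RSS is requested.
+ */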
+ if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
+ rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
/* We need at least one RX queue. There is no min check in
* rte_eth_dev_configure(), so we are checking it here.
*/
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_VLAN_STRIP);
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_RSS_HASH);
dev_info->rx_queue_offload_capa = 0;
/* TX offloads are on a per-packet basis, so they are applicable
};
if (eth_dev->rx_pkt_burst == qede_recv_pkts ||
+ eth_dev->rx_pkt_burst == qede_recv_pkts_regular ||
eth_dev->rx_pkt_burst == qede_recv_pkts_cmt)
return ptypes;
/* update max frame size */
dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
+
/* Reassign back */
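+ /* An MTU change can toggle scattered Rx, so the handler choice
+ * must be re-evaluated.
+ */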
+ qede_assign_rxtx_handlers(dev);
- if (ECORE_IS_CMT(edev)) {
- dev->rx_pkt_burst = qede_recv_pkts_cmt;
- dev->tx_pkt_burst = qede_xmit_pkts_cmt;
strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
QEDE_PMD_DRV_VER_STR_SIZE);
- if (ECORE_IS_CMT(edev)) {
- eth_dev->rx_pkt_burst = qede_recv_pkts_cmt;
- eth_dev->tx_pkt_burst = qede_xmit_pkts_cmt;
- } else {
- eth_dev->rx_pkt_burst = qede_recv_pkts;
- eth_dev->tx_pkt_burst = qede_xmit_pkts;
- }
-
+ qede_assign_rxtx_handlers(eth_dev);
eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
/* For CMT mode device do periodic polling for slowpath events.