X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fqede%2Fqede_ethdev.c;h=500440c2cad8c5f4290b5c894aa274cc48293f78;hb=4231be021a624bfcfbf0f9b3d77ab881c619e609;hp=575982fd08b9ffac12a0b1a211f3b67032c9f580;hpb=30b170b4a56f313f1668d4f583d9572ce48823c7;p=dpdk.git

diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 575982fd08..500440c2ca 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -267,6 +267,42 @@ qede_interrupt_handler(void *param)
 		DP_ERR(edev, "rte_intr_ack failed\n");
 }
 
+static void
+qede_assign_rxtx_handlers(struct rte_eth_dev *dev)
+{
+	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+	struct qede_dev *qdev = dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	bool use_tx_offload = false;
+
+	if (ECORE_IS_CMT(edev)) {
+		dev->rx_pkt_burst = qede_recv_pkts_cmt;
+		dev->tx_pkt_burst = qede_xmit_pkts_cmt;
+		return;
+	}
+
+	if (dev->data->lro || dev->data->scattered_rx) {
+		DP_INFO(edev, "Assigning qede_recv_pkts\n");
+		dev->rx_pkt_burst = qede_recv_pkts;
+	} else {
+		DP_INFO(edev, "Assigning qede_recv_pkts_regular\n");
+		dev->rx_pkt_burst = qede_recv_pkts_regular;
+	}
+
+	use_tx_offload = !!(tx_offloads &
+			    (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
+			     DEV_TX_OFFLOAD_TCP_TSO | /* tso */
+			     DEV_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */
+
+	if (use_tx_offload) {
+		DP_INFO(edev, "Assigning qede_xmit_pkts\n");
+		dev->tx_pkt_burst = qede_xmit_pkts;
+	} else {
+		DP_INFO(edev, "Assigning qede_xmit_pkts_regular\n");
+		dev->tx_pkt_burst = qede_xmit_pkts_regular;
+	}
+}
+
 static void
 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
 {
@@ -450,7 +486,7 @@ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
 	params.update_vport_active_tx_flg = 1;
 	params.vport_active_rx_flg = flg;
 	params.vport_active_tx_flg = flg;
-	if (~qdev->enable_tx_switching & flg) {
+	if ((qdev->enable_tx_switching == false) && (flg == true)) {
 		params.update_tx_switching_flg = 1;
 		params.tx_switching_flg = !flg;
 	}
@@ -1082,6 +1118,7 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
 
 	/* Start/resume traffic */
 	qede_fastpath_start(edev);
+	qede_assign_rxtx_handlers(eth_dev);
 	DP_INFO(edev, "Device started\n");
 
 	return 0;
@@ -1186,6 +1223,9 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE(edev);
 
+	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
+		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
 	/* We need to have min 1 RX queue.There is no min check in
 	 * rte_eth_dev_configure(), so we are checking it here.
 	 */
@@ -1306,7 +1346,8 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 				     DEV_RX_OFFLOAD_SCATTER |
 				     DEV_RX_OFFLOAD_JUMBO_FRAME |
 				     DEV_RX_OFFLOAD_VLAN_FILTER |
-				     DEV_RX_OFFLOAD_VLAN_STRIP);
+				     DEV_RX_OFFLOAD_VLAN_STRIP |
+				     DEV_RX_OFFLOAD_RSS_HASH);
 	dev_info->rx_queue_offload_capa = 0;
 
 	/* TX offloads are on a per-packet basis, so it is applicable
@@ -1988,6 +2029,7 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
 	};
 
 	if (eth_dev->rx_pkt_burst == qede_recv_pkts ||
+	    eth_dev->rx_pkt_burst == qede_recv_pkts_regular ||
 	    eth_dev->rx_pkt_burst == qede_recv_pkts_cmt)
 		return ptypes;
 
@@ -2283,7 +2325,9 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 
 	/* update max frame size */
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
 
+	/* Reassign back */
+	qede_assign_rxtx_handlers(dev);
 	if (ECORE_IS_CMT(edev)) {
 		dev->rx_pkt_burst = qede_recv_pkts_cmt;
 		dev->tx_pkt_burst = qede_xmit_pkts_cmt;
@@ -2491,14 +2535,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
 		QEDE_PMD_DRV_VER_STR_SIZE);
 
-	if (ECORE_IS_CMT(edev)) {
-		eth_dev->rx_pkt_burst = qede_recv_pkts_cmt;
-		eth_dev->tx_pkt_burst = qede_xmit_pkts_cmt;
-	} else {
-		eth_dev->rx_pkt_burst = qede_recv_pkts;
-		eth_dev->tx_pkt_burst = qede_xmit_pkts;
-	}
-
+	qede_assign_rxtx_handlers(eth_dev);
 	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
 
 	/* For CMT mode device do periodic polling for slowpath events.
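
Note on the hunk at @@ -450,7 +486,7 @@: the old test applied bitwise NOT to
a C bool, which only behaves because integer promotion happens to produce the
right bit pattern; the new test states the intent directly. A minimal
standalone sketch of the two forms (the local variables are illustrative
stand-ins for qdev->enable_tx_switching and flg, which are bool fields in the
driver):

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		/* Stand-ins for qdev->enable_tx_switching and flg. */
		bool enable_tx_switching = false;
		bool flg = true;

		/* Old form: '~' promotes the bool to int first (~0 is -1,
		 * ~1 is -2), so truth depends on promoted bit patterns. */
		if (~enable_tx_switching & flg)
			printf("old form: update tx switching\n");

		/* New form: reads directly as "tx switching is currently
		 * disabled and the vport is being activated". */
		if (enable_tx_switching == false && flg == true)
			printf("new form: update tx switching\n");

		return 0;
	}

For strict 0/1 bool operands the two forms yield the same truth table, so
this hunk is a readability fix; the functional changes live in the handler
assignment and RSS hunks.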
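
Note on the RSS hunks: qede_dev_configure() now forces
DEV_RX_OFFLOAD_RSS_HASH whenever ETH_MQ_RX_RSS_FLAG is set, and
qede_dev_info_get() advertises it, so the NIC-computed hash reaches the
application in each received mbuf. A minimal consumer-side sketch, assuming
the standard rte_eth_rx_burst()/rte_mbuf API of this DPDK era (the polling
loop itself is illustrative, not part of this patch):

	#include <inttypes.h>
	#include <stdio.h>
	#include <rte_ethdev.h>
	#include <rte_mbuf.h>

	static void
	poll_queue(uint16_t port_id, uint16_t queue_id)
	{
		struct rte_mbuf *pkts[32];
		uint16_t i, nb;

		nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
		for (i = 0; i < nb; i++) {
			/* With DEV_RX_OFFLOAD_RSS_HASH enabled, the driver
			 * sets PKT_RX_RSS_HASH and fills hash.rss. */
			if (pkts[i]->ol_flags & PKT_RX_RSS_HASH)
				printf("rss hash: 0x%08" PRIx32 "\n",
				       pkts[i]->hash.rss);
			rte_pktmbuf_free(pkts[i]);
		}
	}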