X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fqede%2Fqede_ethdev.c;h=c907d7fd83124a6aa302b18f0ebbe20e6b6e8fd9;hb=4f1cfda59ad3226837a1bedc7262abee2b0ae299;hp=1a95c01f916f745e2c1188bbd21f420841efe6cd;hpb=1df1bb5275393d757d45a2dcd48cff17a108bc88;p=dpdk.git diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c index 1a95c01f91..c907d7fd83 100644 --- a/drivers/net/qede/qede_ethdev.c +++ b/drivers/net/qede/qede_ethdev.c @@ -237,9 +237,6 @@ qede_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size) static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE]; size_t size; - if (fw_ver == NULL) - return 0; - if (IS_PF(edev)) snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s", QEDE_PMD_FW_VERSION); @@ -345,9 +342,9 @@ qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy) } use_tx_offload = !!(tx_offloads & - (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */ - DEV_TX_OFFLOAD_TCP_TSO | /* tso */ - DEV_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */ + (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */ + RTE_ETH_TX_OFFLOAD_TCP_TSO | /* tso */ + RTE_ETH_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */ if (use_tx_offload) { DP_INFO(edev, "Assigning qede_xmit_pkts\n"); @@ -1005,16 +1002,16 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; - if (mask & ETH_VLAN_STRIP_MASK) { - if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + if (mask & RTE_ETH_VLAN_STRIP_MASK) { + if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) (void)qede_vlan_stripping(eth_dev, 1); else (void)qede_vlan_stripping(eth_dev, 0); } - if (mask & ETH_VLAN_FILTER_MASK) { + if (mask & RTE_ETH_VLAN_FILTER_MASK) { /* VLAN filtering kicks in when a VLAN is added */ - if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { + if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { qede_vlan_filter_set(eth_dev, 0, 1); } else { if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */ @@ -1025,7 +1022,7 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) * enabled */ eth_dev->data->dev_conf.rxmode.offloads |= - DEV_RX_OFFLOAD_VLAN_FILTER; + RTE_ETH_RX_OFFLOAD_VLAN_FILTER; } else { qede_vlan_filter_set(eth_dev, 0, 0); } @@ -1072,11 +1069,11 @@ int qede_config_rss(struct rte_eth_dev *eth_dev) /* Configure default RETA */ memset(reta_conf, 0, sizeof(reta_conf)); for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) - reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX; + reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX; for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { - id = i / RTE_RETA_GROUP_SIZE; - pos = i % RTE_RETA_GROUP_SIZE; + id = i / RTE_ETH_RETA_GROUP_SIZE; + pos = i % RTE_ETH_RETA_GROUP_SIZE; q = i % QEDE_RSS_COUNT(eth_dev); reta_conf[id].reta[pos] = q; } @@ -1115,12 +1112,12 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev) } /* Configure TPA parameters */ - if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) { + if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { if (qede_enable_tpa(eth_dev, true)) return -EINVAL; /* Enable scatter mode for LRO */ if (!eth_dev->data->scattered_rx) - rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER; + rxmode->offloads |= RTE_ETH_RX_OFFLOAD_SCATTER; } /* Start queues */ @@ -1135,7 +1132,7 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev) * Also, we would like to retain similar behavior in PF case, so we * don't do PF/VF specific check here. 
 	 */
-	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+	if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
 		if (qede_config_rss(eth_dev))
 			goto err;
 
@@ -1163,12 +1160,13 @@ err:
 	return -1; /* common error code is < 0 */
 }
 
-static void qede_dev_stop(struct rte_eth_dev *eth_dev)
+static int qede_dev_stop(struct rte_eth_dev *eth_dev)
 {
 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 
 	PMD_INIT_FUNC_TRACE(edev);
+	eth_dev->data->dev_started = 0;
 
 	/* Bring the link down */
 	qede_dev_set_link_state(eth_dev, false);
@@ -1183,7 +1181,7 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev)
 
 	/* Disable vport */
 	if (qede_activate_vport(eth_dev, false))
-		return;
+		return 0;
 
 	if (qdev->enable_lro)
 		qede_enable_tpa(eth_dev, false);
@@ -1195,6 +1193,8 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev)
 	ecore_hw_stop_fastpath(edev); /* TBD - loop */
 
 	DP_INFO(edev, "Device is stopped\n");
+
+	return 0;
 }
 
 static const char * const valid_args[] = {
@@ -1272,8 +1272,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE(edev);
 
-	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	/* We need to have min 1 RX queue.There is no min check in
 	 * rte_eth_dev_configure(), so we are checking it here.
@@ -1291,8 +1291,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 		DP_NOTICE(edev, false,
 			  "Invalid devargs supplied, requested change will not take effect\n");
 
-	if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
-	      rxmode->mq_mode == ETH_MQ_RX_RSS)) {
+	if (!(rxmode->mq_mode == RTE_ETH_MQ_RX_NONE ||
+	      rxmode->mq_mode == RTE_ETH_MQ_RX_RSS)) {
 		DP_ERR(edev, "Unsupported multi-queue mode\n");
 		return -ENOTSUP;
 	}
@@ -1312,13 +1312,7 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 			return -ENOMEM;
 	}
 
-	/* If jumbo enabled adjust MTU */
-	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
-		eth_dev->data->mtu =
-			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
-			RTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
-
-	if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		eth_dev->data->scattered_rx = 1;
 
 	if (qede_start_vport(qdev, eth_dev->data->mtu))
@@ -1327,8 +1321,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 	qdev->mtu = eth_dev->data->mtu;
 
 	/* Enable VLAN offloads by default */
-	ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
-				    ETH_VLAN_FILTER_MASK);
+	ret = qede_vlan_offload_set(eth_dev, RTE_ETH_VLAN_STRIP_MASK |
+				    RTE_ETH_VLAN_FILTER_MASK);
 	if (ret)
 		return ret;
 
@@ -1391,35 +1385,34 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
 	dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
 	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
-	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
-				     DEV_RX_OFFLOAD_UDP_CKSUM |
-				     DEV_RX_OFFLOAD_TCP_CKSUM |
-				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-				     DEV_RX_OFFLOAD_TCP_LRO |
-				     DEV_RX_OFFLOAD_KEEP_CRC |
-				     DEV_RX_OFFLOAD_SCATTER |
-				     DEV_RX_OFFLOAD_JUMBO_FRAME |
-				     DEV_RX_OFFLOAD_VLAN_FILTER |
-				     DEV_RX_OFFLOAD_VLAN_STRIP |
-				     DEV_RX_OFFLOAD_RSS_HASH);
+	dev_info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+				     RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+				     RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+				     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				     RTE_ETH_RX_OFFLOAD_TCP_LRO |
+				     RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+				     RTE_ETH_RX_OFFLOAD_SCATTER |
+				     RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+				     RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+				     RTE_ETH_RX_OFFLOAD_RSS_HASH);
 	dev_info->rx_queue_offload_capa = 0;
 
 	/* TX offloads are on a per-packet basis, so it is applicable
 	 * to both at port and queue levels.
 	 */
-	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
-				     DEV_TX_OFFLOAD_IPV4_CKSUM |
-				     DEV_TX_OFFLOAD_UDP_CKSUM |
-				     DEV_TX_OFFLOAD_TCP_CKSUM |
-				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-				     DEV_TX_OFFLOAD_MULTI_SEGS |
-				     DEV_TX_OFFLOAD_TCP_TSO |
-				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+	dev_info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+				     RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+				     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				     RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+				     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				     RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+				     RTE_ETH_TX_OFFLOAD_TCP_TSO |
+				     RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+				     RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO);
 	dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
-		.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
+		.offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
 	};
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -1431,17 +1424,17 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 	memset(&link, 0, sizeof(struct qed_link_output));
 	qdev->ops->common->get_link(edev, &link);
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
-		speed_cap |= ETH_LINK_SPEED_1G;
+		speed_cap |= RTE_ETH_LINK_SPEED_1G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
-		speed_cap |= ETH_LINK_SPEED_10G;
+		speed_cap |= RTE_ETH_LINK_SPEED_10G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
-		speed_cap |= ETH_LINK_SPEED_25G;
+		speed_cap |= RTE_ETH_LINK_SPEED_25G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
-		speed_cap |= ETH_LINK_SPEED_40G;
+		speed_cap |= RTE_ETH_LINK_SPEED_40G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
-		speed_cap |= ETH_LINK_SPEED_50G;
+		speed_cap |= RTE_ETH_LINK_SPEED_50G;
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
-		speed_cap |= ETH_LINK_SPEED_100G;
+		speed_cap |= RTE_ETH_LINK_SPEED_100G;
 	dev_info->speed_capa = speed_cap;
 
 	return 0;
@@ -1468,10 +1461,10 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
 	/* Link Mode */
 	switch (q_link.duplex) {
 	case QEDE_DUPLEX_HALF:
-		link_duplex = ETH_LINK_HALF_DUPLEX;
+		link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 		break;
 	case QEDE_DUPLEX_FULL:
-		link_duplex = ETH_LINK_FULL_DUPLEX;
+		link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 		break;
 	case QEDE_DUPLEX_UNKNOWN:
 	default:
@@ -1480,11 +1473,11 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
 	link.link_duplex = link_duplex;
 
 	/* Link Status */
-	link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+	link.link_status = q_link.link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
 	/* AN */
 	link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
-			 ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+			 RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
 
 	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
 		link.link_speed, link.link_duplex,
@@ -1549,6 +1542,7 @@ static int qede_dev_close(struct rte_eth_dev *eth_dev)
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	int ret = 0;
 
 	PMD_INIT_FUNC_TRACE(edev);
 
@@ -1562,7 +1556,7 @@ static int qede_dev_close(struct rte_eth_dev *eth_dev)
 	 * can release all the resources and device can be brought up newly
 	 */
 	if (eth_dev->data->dev_started)
-		qede_dev_stop(eth_dev);
+		ret = qede_dev_stop(eth_dev);
 
 	if (qdev->vport_started)
 		qede_stop_vport(edev);
@@ -1593,11 +1587,7 @@ static int qede_dev_close(struct rte_eth_dev *eth_dev)
 	if (ECORE_IS_CMT(edev))
 		rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
 
-	eth_dev->dev_ops = NULL;
-	eth_dev->rx_pkt_burst = NULL;
-	eth_dev->tx_pkt_burst = NULL;
-
-	return 0;
+	return ret;
 }
 
 static int
@@ -1885,6 +1875,8 @@ static int qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
 	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
 	enum _ecore_status_t ecore_status;
 
+	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
+		type = QED_FILTER_RX_MODE_TYPE_PROMISC;
 	ecore_status = qed_configure_filter_rx_mode(eth_dev, type);
 
 	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
@@ -2020,12 +2012,12 @@ static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Pause is assumed to be supported (SUPPORTED_Pause) */
-	if (fc_conf->mode == RTE_FC_FULL)
+	if (fc_conf->mode == RTE_ETH_FC_FULL)
 		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
 					QED_LINK_PAUSE_RX_ENABLE);
-	if (fc_conf->mode == RTE_FC_TX_PAUSE)
+	if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE)
 		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
-	if (fc_conf->mode == RTE_FC_RX_PAUSE)
+	if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE)
 		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
 
 	params.link_up = true;
@@ -2049,13 +2041,13 @@ static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 
 	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
 					 QED_LINK_PAUSE_TX_ENABLE))
-		fc_conf->mode = RTE_FC_FULL;
+		fc_conf->mode = RTE_ETH_FC_FULL;
 	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
 	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
+		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
 	else
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 
 	return 0;
 }
@@ -2096,14 +2088,14 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
 {
 	*rss_caps = 0;
-	*rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
-	*rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
-	*rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
-	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
-	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
+	*rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
 }
 
 int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
@@ -2137,8 +2129,10 @@ int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
 	/* RSS hash key */
 	if (key) {
 		if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
-			DP_ERR(edev, "RSS key length exceeds limit\n");
-			return -EINVAL;
+			len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
+			DP_NOTICE(edev, false,
+				  "RSS key length too big, trimmed to %d\n",
+				  len);
 		}
 		DP_INFO(edev, "Applying user supplied hash key\n");
 		rss_params.update_rss_key = 1;
@@ -2227,7 +2221,7 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
 	uint8_t entry;
 	int rc = 0;
 
-	if (reta_size > ETH_RSS_RETA_SIZE_128) {
+	if (reta_size > RTE_ETH_RSS_RETA_SIZE_128) {
 		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
 		       reta_size);
 		return -EINVAL;
@@ -2251,8 +2245,8 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
 
 	for_each_hwfn(edev, i) {
 		for (j = 0; j < reta_size; j++) {
-			idx = j / RTE_RETA_GROUP_SIZE;
-			shift = j % RTE_RETA_GROUP_SIZE;
+			idx = j / RTE_ETH_RETA_GROUP_SIZE;
+			shift = j % RTE_ETH_RETA_GROUP_SIZE;
 			if (reta_conf[idx].mask & (1ULL << shift)) {
 				entry = reta_conf[idx].reta[shift];
 				fid = entry * edev->num_hwfns + i;
@@ -2288,15 +2282,15 @@ static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
 	uint16_t i, idx, shift;
 	uint8_t entry;
 
-	if (reta_size > ETH_RSS_RETA_SIZE_128) {
+	if (reta_size > RTE_ETH_RSS_RETA_SIZE_128) {
 		DP_ERR(edev, "reta_size %d is not supported\n",
 		       reta_size);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift)) {
 			entry = qdev->rss_ind_table[i];
 			reta_conf[idx].reta[shift] = entry;
@@ -2312,28 +2306,15 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 {
 	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-	struct rte_eth_dev_info dev_info = {0};
 	struct qede_fastpath *fp;
-	uint32_t max_rx_pkt_len;
 	uint32_t frame_size;
 	uint16_t bufsz;
 	bool restart = false;
 	int i, rc;
 
 	PMD_INIT_FUNC_TRACE(edev);
-	rc = qede_dev_info_get(dev, &dev_info);
-	if (rc != 0) {
-		DP_ERR(edev, "Error during getting ethernet device info\n");
-		return rc;
-	}
-	max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
-	frame_size = max_rx_pkt_len;
-	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) {
-		DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
-		       mtu, dev_info.max_rx_pktlen - RTE_ETHER_HDR_LEN -
-		       QEDE_ETH_OVERHEAD);
-		return -EINVAL;
-	}
+
+	frame_size = mtu + QEDE_MAX_ETHER_HDR_LEN;
 	if (!dev->data->scattered_rx &&
 	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
 		DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
 			dev->data->min_rx_buf_size);
 		return -EINVAL;
 	}
@@ -2342,7 +2323,9 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	if (dev->data->dev_started) {
 		dev->data->dev_started = 0;
-		qede_dev_stop(dev);
+		rc = qede_dev_stop(dev);
+		if (rc != 0)
+			return rc;
 		restart = true;
 	}
 	rte_delay_ms(1000);
@@ -2365,19 +2348,12 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 			fp->rxq->rx_buf_size = rc;
 		}
 	}
-	if (max_rx_pkt_len > RTE_ETHER_MAX_LEN)
-		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-	else
-		dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
 	if (!dev->data->dev_started && restart) {
 		qede_dev_start(dev);
 		dev->data->dev_started = 1;
 	}
 
-	/* update max frame size */
-	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
-
 	return 0;
 }
 
@@ -2393,13 +2369,25 @@ qede_dev_reset(struct rte_eth_dev *dev)
 	return qede_eth_dev_init(dev);
 }
 
+static void
+qede_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+	qede_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+static void
+qede_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+	qede_tx_queue_release(dev->data->tx_queues[qid]);
+}
+
 static const struct eth_dev_ops qede_eth_dev_ops = {
 	.dev_configure = qede_dev_configure,
 	.dev_infos_get = qede_dev_info_get,
 	.rx_queue_setup = qede_rx_queue_setup,
-	.rx_queue_release = qede_rx_queue_release,
+	.rx_queue_release = qede_dev_rx_queue_release,
 	.tx_queue_setup = qede_tx_queue_setup,
-	.tx_queue_release = qede_tx_queue_release,
+	.tx_queue_release = qede_dev_tx_queue_release,
 	.dev_start = qede_dev_start,
 	.dev_reset = qede_dev_reset,
 	.dev_set_link_up = qede_dev_set_link_up,
@@ -2430,7 +2418,7 @@ static const struct eth_dev_ops qede_eth_dev_ops = {
 	.reta_update  = qede_rss_reta_update,
 	.reta_query  = qede_rss_reta_query,
 	.mtu_set = qede_set_mtu,
-	.filter_ctrl = qede_dev_filter_ctrl,
+	.flow_ops_get = qede_dev_flow_ops_get,
 	.udp_tunnel_port_add = qede_udp_dst_port_add,
 	.udp_tunnel_port_del = qede_udp_dst_port_del,
 	.fw_version_get = qede_fw_version_get,
@@ -2441,9 +2429,9 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
 	.dev_configure = qede_dev_configure,
 	.dev_infos_get = qede_dev_info_get,
 	.rx_queue_setup = qede_rx_queue_setup,
-	.rx_queue_release = qede_rx_queue_release,
+	.rx_queue_release = qede_dev_rx_queue_release,
 	.tx_queue_setup = qede_tx_queue_setup,
-	.tx_queue_release = qede_tx_queue_release,
+	.tx_queue_release = qede_dev_tx_queue_release,
 	.dev_start = qede_dev_start,
 	.dev_reset = qede_dev_reset,
 	.dev_set_link_up = qede_dev_set_link_up,
@@ -2543,6 +2531,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 	}
 
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
 	/* @DPDK */
 	edev->vendor_id = pci_dev->id.vendor_id;
@@ -2729,16 +2718,16 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 	adapter->ipgre.num_filters = 0;
 	if (is_vf) {
 		adapter->vxlan.enable = true;
-		adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
-					     ETH_TUNNEL_FILTER_IVLAN;
+		adapter->vxlan.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+					     RTE_ETH_TUNNEL_FILTER_IVLAN;
 		adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
 		adapter->geneve.enable = true;
-		adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
-					      ETH_TUNNEL_FILTER_IVLAN;
+		adapter->geneve.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+					      RTE_ETH_TUNNEL_FILTER_IVLAN;
 		adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
 		adapter->ipgre.enable = true;
-		adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
-					     ETH_TUNNEL_FILTER_IVLAN;
+		adapter->ipgre.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+					     RTE_ETH_TUNNEL_FILTER_IVLAN;
 	} else {
 		adapter->vxlan.enable = false;
 		adapter->geneve.enable = false;
@@ -2746,18 +2735,8 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 		qed_ops->sriov_configure(edev, pci_dev->max_vfs);
 	}
 
-	/* Pass the information to the rte_eth_dev_close() that it should also
-	 * release the private port resources.
- */ - eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; - - DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n", - adapter->primary_mac.addr_bytes[0], - adapter->primary_mac.addr_bytes[1], - adapter->primary_mac.addr_bytes[2], - adapter->primary_mac.addr_bytes[3], - adapter->primary_mac.addr_bytes[4], - adapter->primary_mac.addr_bytes[5]); + DP_INFO(edev, "MAC address : " RTE_ETHER_ADDR_PRT_FMT "\n", + RTE_ETHER_ADDR_BYTES(&adapter->primary_mac)); DP_INFO(edev, "Device initialized\n"); @@ -2893,5 +2872,5 @@ RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci"); RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map); RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci"); -RTE_LOG_REGISTER(qede_logtype_init, pmd.net.qede.init, NOTICE); -RTE_LOG_REGISTER(qede_logtype_driver, pmd.net.qede.driver, NOTICE); +RTE_LOG_REGISTER_SUFFIX(qede_logtype_init, init, NOTICE); +RTE_LOG_REGISTER_SUFFIX(qede_logtype_driver, driver, NOTICE);