diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 1190e0643b..d00c1d9a02 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -7,10 +7,157 @@
  */
 
 #include "qede_ethdev.h"
+#include <rte_alarm.h>
 
 /* Globals */
 static const struct qed_eth_ops *qed_ops;
 static const char *drivername = "qede pmd";
+static int64_t timer_period = 1;
+
+struct rte_qede_xstats_name_off {
+	char name[RTE_ETH_XSTATS_NAME_SIZE];
+	uint64_t offset;
+};
+
+static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
+	{"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)},
+	{"rx_multicast_bytes",
+		offsetof(struct ecore_eth_stats, rx_mcast_bytes)},
+	{"rx_broadcast_bytes",
+		offsetof(struct ecore_eth_stats, rx_bcast_bytes)},
+	{"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)},
+	{"rx_multicast_packets",
+		offsetof(struct ecore_eth_stats, rx_mcast_pkts)},
+	{"rx_broadcast_packets",
+		offsetof(struct ecore_eth_stats, rx_bcast_pkts)},
+
+	{"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)},
+	{"tx_multicast_bytes",
+		offsetof(struct ecore_eth_stats, tx_mcast_bytes)},
+	{"tx_broadcast_bytes",
+		offsetof(struct ecore_eth_stats, tx_bcast_bytes)},
+	{"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)},
+	{"tx_multicast_packets",
+		offsetof(struct ecore_eth_stats, tx_mcast_pkts)},
+	{"tx_broadcast_packets",
+		offsetof(struct ecore_eth_stats, tx_bcast_pkts)},
+
+	{"rx_64_byte_packets",
+		offsetof(struct ecore_eth_stats, rx_64_byte_packets)},
+	{"rx_65_to_127_byte_packets",
+		offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)},
+	{"rx_128_to_255_byte_packets",
+		offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)},
+	{"rx_256_to_511_byte_packets",
+		offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)},
+	{"rx_512_to_1023_byte_packets",
+		offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)},
+	{"rx_1024_to_1518_byte_packets",
+		offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)},
+	{"rx_1519_to_1522_byte_packets",
+		offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)},
+	{"rx_1519_to_2047_byte_packets",
+		offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)},
+	{"rx_2048_to_4095_byte_packets",
+		offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)},
+	{"rx_4096_to_9216_byte_packets",
+		offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)},
+	{"rx_9217_to_16383_byte_packets",
+		offsetof(struct ecore_eth_stats,
+			 rx_9217_to_16383_byte_packets)},
+	{"tx_64_byte_packets",
+		offsetof(struct ecore_eth_stats, tx_64_byte_packets)},
+	{"tx_65_to_127_byte_packets",
+		offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)},
+	{"tx_128_to_255_byte_packets",
+		offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)},
+	{"tx_256_to_511_byte_packets",
+		offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)},
+	{"tx_512_to_1023_byte_packets",
+		offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)},
+	{"tx_1024_to_1518_byte_packets",
+		offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)},
+	{"tx_1519_to_2047_byte_packets",
+		offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)},
{"tx_2048_to_4095_byte_packets", + offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)}, + {"tx_4096_to_9216_byte_packets", + offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)}, + {"tx_9217_to_16383_byte_packets", + offsetof(struct ecore_eth_stats, + tx_9217_to_16383_byte_packets)}, + + {"rx_mac_crtl_frames", + offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)}, + {"tx_mac_control_frames", + offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)}, + {"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)}, + {"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)}, + {"rx_priority_flow_control_frames", + offsetof(struct ecore_eth_stats, rx_pfc_frames)}, + {"tx_priority_flow_control_frames", + offsetof(struct ecore_eth_stats, tx_pfc_frames)}, + + {"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)}, + {"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)}, + {"rx_carrier_errors", + offsetof(struct ecore_eth_stats, rx_carrier_errors)}, + {"rx_oversize_packet_errors", + offsetof(struct ecore_eth_stats, rx_oversize_packets)}, + {"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)}, + {"rx_undersize_packet_errors", + offsetof(struct ecore_eth_stats, rx_undersize_packets)}, + {"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)}, + {"rx_host_buffer_not_available", + offsetof(struct ecore_eth_stats, no_buff_discards)}, + /* Number of packets discarded because they are bigger than MTU */ + {"rx_packet_too_big_discards", + offsetof(struct ecore_eth_stats, packet_too_big_discard)}, + {"rx_ttl_zero_discards", + offsetof(struct ecore_eth_stats, ttl0_discard)}, + {"rx_multi_function_tag_filter_discards", + offsetof(struct ecore_eth_stats, mftag_filter_discards)}, + {"rx_mac_filter_discards", + offsetof(struct ecore_eth_stats, mac_filter_discards)}, + {"rx_hw_buffer_truncates", + offsetof(struct ecore_eth_stats, brb_truncates)}, + {"rx_hw_buffer_discards", + offsetof(struct ecore_eth_stats, brb_discards)}, + {"tx_lpi_entry_count", + offsetof(struct ecore_eth_stats, tx_lpi_entry_count)}, + {"tx_total_collisions", + offsetof(struct ecore_eth_stats, tx_total_collisions)}, + {"tx_error_drop_packets", + offsetof(struct ecore_eth_stats, tx_err_drop_pkts)}, + + {"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)}, + {"rx_mac_unicast_packets", + offsetof(struct ecore_eth_stats, rx_mac_uc_packets)}, + {"rx_mac_multicast_packets", + offsetof(struct ecore_eth_stats, rx_mac_mc_packets)}, + {"rx_mac_broadcast_packets", + offsetof(struct ecore_eth_stats, rx_mac_bc_packets)}, + {"rx_mac_frames_ok", + offsetof(struct ecore_eth_stats, rx_mac_frames_ok)}, + {"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)}, + {"tx_mac_unicast_packets", + offsetof(struct ecore_eth_stats, tx_mac_uc_packets)}, + {"tx_mac_multicast_packets", + offsetof(struct ecore_eth_stats, tx_mac_mc_packets)}, + {"tx_mac_broadcast_packets", + offsetof(struct ecore_eth_stats, tx_mac_bc_packets)}, + + {"lro_coalesced_packets", + offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)}, + {"lro_coalesced_events", + offsetof(struct ecore_eth_stats, tpa_coalesced_events)}, + {"lro_aborts_num", + offsetof(struct ecore_eth_stats, tpa_aborts_num)}, + {"lro_not_coalesced_packets", + offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)}, + {"lro_coalesced_bytes", + offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)}, +}; static void qede_interrupt_action(struct ecore_hwfn *p_hwfn) { @@ -143,6 +290,14 @@ qede_mac_addr_set(struct 
@@ -143,6 +290,14 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	int rc;
 
+	if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
+					       mac_addr->addr_bytes)) {
+		DP_ERR(edev, "Setting MAC address is not allowed\n");
+		ether_addr_copy(&qdev->primary_mac,
+				&eth_dev->data->mac_addrs[0]);
+		return;
+	}
+
 	/* First remove the primary mac */
 	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
 				   qdev->primary_mac.addr_bytes);
@@ -350,6 +505,21 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 		return -EINVAL;
 	}
 
+	/* Check requirements for 100G mode */
+	if (edev->num_hwfns > 1) {
+		if (eth_dev->data->nb_rx_queues < 2) {
+			DP_NOTICE(edev, false,
+				  "100G mode requires minimum two queues\n");
+			return -EINVAL;
+		}
+
+		if ((eth_dev->data->nb_rx_queues % 2) != 0) {
+			DP_NOTICE(edev, false,
+				  "100G mode requires even number of queues\n");
+			return -EINVAL;
+		}
+	}
+
 	qdev->num_rss = eth_dev->data->nb_rx_queues;
 
 	/* Initial state */
@@ -418,7 +588,10 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 	dev_info->max_rx_queues = (uint16_t)QEDE_MAX_RSS_CNT(qdev);
 	dev_info->max_tx_queues = dev_info->max_rx_queues;
 	dev_info->max_mac_addrs = qdev->dev_info.num_mac_addrs;
-	dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev);
+	if (IS_VF(edev))
+		dev_info->max_vfs = 0;
+	else
+		dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev);
 	dev_info->driver_name = qdev->drv_ver;
 	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
 	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
@@ -529,6 +702,26 @@ static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
 		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
 }
 
+static void qede_poll_sp_sb_cb(void *param)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	int rc;
+
+	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
+	qede_interrupt_action(&edev->hwfns[1]);
+
+	rc = rte_eal_alarm_set(timer_period * US_PER_S,
+			       qede_poll_sp_sb_cb,
+			       (void *)eth_dev);
+	if (rc != 0) {
+		DP_ERR(edev, "Unable to start periodic"
+			     " timer rc %d\n", rc);
+		assert(false && "Unable to start periodic timer");
+	}
+}
+
 static void qede_dev_close(struct rte_eth_dev *eth_dev)
 {
 	struct qede_dev *qdev = eth_dev->data->dev_private;
@@ -561,6 +754,9 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
 	rte_intr_callback_unregister(&eth_dev->pci_dev->intr_handle,
 				     qede_interrupt_handler, (void *)eth_dev);
 
+	if (edev->num_hwfns > 1)
+		rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
+
 	qdev->state = QEDE_CLOSE;
 }
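qede_poll_sp_sb_cb() above re-arms itself because rte_eal_alarm_set() is a one-shot timer: a UIO device gets a single interrupt vector, while a CMT (100G) device has two engines whose slowpath status blocks both need servicing, so the second engine is polled periodically. A hedged sketch of the self-re-arming pattern outside the driver (the callback name and 1 s period are illustrative):

#include <rte_alarm.h>

#define POLL_PERIOD_US 1000000	/* 1 s, mirroring timer_period above */

static void poll_cb(void *arg)
{
	/* ... service the extra engine here ... */

	/* One-shot alarm: the callback must reschedule itself
	 * to get periodic behavior.
	 */
	if (rte_eal_alarm_set(POLL_PERIOD_US, poll_cb, arg) != 0) {
		/* Polling chain is broken; report and recover as needed. */
	}
}

Arming the first alarm once, and cancelling it with rte_eal_alarm_cancel() on teardown as qede_dev_close() does, keeps exactly one callback outstanding.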
@@ -600,15 +796,52 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
 	    stats.tx_mcast_bytes + stats.tx_bcast_bytes;
 
 	eth_stats->oerrors = stats.tx_err_drop_pkts;
+}
 
-	DP_INFO(edev,
-		"no_buff_discards=%" PRIu64 ""
-		" mac_filter_discards=%" PRIu64 ""
-		" brb_truncates=%" PRIu64 ""
-		" brb_discards=%" PRIu64 "\n",
-		stats.no_buff_discards,
-		stats.mac_filter_discards,
-		stats.brb_truncates, stats.brb_discards);
+static int
+qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
+		      struct rte_eth_xstat_name *xstats_names,
+		      __rte_unused unsigned int limit)
+{
+	unsigned int i, stat_cnt = RTE_DIM(qede_xstats_strings);
+
+	if (xstats_names != NULL)
+		for (i = 0; i < stat_cnt; i++)
+			snprintf(xstats_names[i].name,
+				 sizeof(xstats_names[i].name),
+				 "%s",
+				 qede_xstats_strings[i].name);
+
+	return stat_cnt;
+}
+
+static int
+qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+		unsigned int n)
+{
+	struct qede_dev *qdev = dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	struct ecore_eth_stats stats;
+	unsigned int num = RTE_DIM(qede_xstats_strings);
+
+	if (n < num)
+		return num;
+
+	qdev->ops->get_vport_stats(edev, &stats);
+
+	/* Copy only as many entries as the table defines; looping to the
+	 * caller-supplied n could read past qede_xstats_strings[].
+	 */
+	for (num = 0; num < RTE_DIM(qede_xstats_strings); num++)
+		xstats[num].value = *(u64 *)(((char *)&stats) +
+					     qede_xstats_strings[num].offset);
+
+	return num;
+}
+
+static void
+qede_reset_xstats(struct rte_eth_dev *dev)
+{
+	struct qede_dev *qdev = dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+
+	ecore_reset_vport_stats(edev);
 }
 
 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
@@ -742,6 +975,170 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
 	return NULL;
 }
 
+int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
+			 struct rte_eth_rss_conf *rss_conf)
+{
+	struct qed_update_vport_params vport_update_params;
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	uint8_t rss_caps;
+	uint32_t *key = (uint32_t *)rss_conf->rss_key;
+	uint64_t hf = rss_conf->rss_hf;
+
+	if (hf == 0)
+		DP_ERR(edev, "hash function 0 will disable RSS\n");
+
+	rss_caps = 0;
+	rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
+	rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
+	rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
+	rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
+	rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
+	rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
+
+	/* If the requested hash functions map to none supported, bail out */
+	if (rss_caps == 0 && hf != 0)
+		return -EINVAL;
+
+	memset(&vport_update_params, 0, sizeof(vport_update_params));
+
+	if (key != NULL)
+		memcpy(qdev->rss_params.rss_key, rss_conf->rss_key,
+		       rss_conf->rss_key_len);
+
+	qdev->rss_params.rss_caps = rss_caps;
+	memcpy(&vport_update_params.rss_params, &qdev->rss_params,
+	       sizeof(vport_update_params.rss_params));
+	vport_update_params.update_rss_flg = 1;
+	vport_update_params.vport_id = 0;
+
+	return qdev->ops->vport_update(edev, &vport_update_params);
+}
+
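From the application side, the rss_hash_update hook above is reached through rte_eth_dev_rss_hash_update(). A minimal sketch, assuming a configured port (the port id and hash selection are illustrative; this era of the API takes a uint8_t port id):

#include <rte_ethdev.h>

static int enable_ip_tcp_rss(uint8_t port_id)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,	/* NULL keeps the current key */
		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP |
			  ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP,
	};

	/* Dispatches to qede_rss_hash_update() via eth_dev_ops */
	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}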
+int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+			   struct rte_eth_rss_conf *rss_conf)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	uint64_t hf;
+
+	if (rss_conf->rss_key_len < sizeof(qdev->rss_params.rss_key))
+		return -EINVAL;
+
+	if (rss_conf->rss_key)
+		memcpy(rss_conf->rss_key, qdev->rss_params.rss_key,
+		       sizeof(qdev->rss_params.rss_key));
+
+	hf = 0;
+	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4) ?
+			ETH_RSS_IPV4 : 0;
+	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6) ?
+			ETH_RSS_IPV6 : 0;
+	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6) ?
+			ETH_RSS_IPV6_EX : 0;
+	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4_TCP) ?
+			ETH_RSS_NONFRAG_IPV4_TCP : 0;
+	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
+			ETH_RSS_NONFRAG_IPV6_TCP : 0;
+	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
+			ETH_RSS_IPV6_TCP_EX : 0;
+
+	rss_conf->rss_hf = hf;
+
+	return 0;
+}
+
+int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
+			 struct rte_eth_rss_reta_entry64 *reta_conf,
+			 uint16_t reta_size)
+{
+	struct qed_update_vport_params vport_update_params;
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	uint16_t i, idx, shift;
+
+	if (reta_size > ETH_RSS_RETA_SIZE_128) {
+		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
+		       reta_size);
+		return -EINVAL;
+	}
+
+	memset(&vport_update_params, 0, sizeof(vport_update_params));
+
+	/* Apply the new entries before snapshotting rss_params, so the
+	 * updated indirection table is what gets sent to the vport.
+	 */
+	for (i = 0; i < reta_size; i++) {
+		idx = i / RTE_RETA_GROUP_SIZE;
+		shift = i % RTE_RETA_GROUP_SIZE;
+		if (reta_conf[idx].mask & (1ULL << shift)) {
+			uint8_t entry = reta_conf[idx].reta[shift];
+			qdev->rss_params.rss_ind_table[i] = entry;
+		}
+	}
+
+	memcpy(&vport_update_params.rss_params, &qdev->rss_params,
+	       sizeof(vport_update_params.rss_params));
+	vport_update_params.update_rss_flg = 1;
+	vport_update_params.vport_id = 0;
+
+	return qdev->ops->vport_update(edev, &vport_update_params);
+}
+
+int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
+			struct rte_eth_rss_reta_entry64 *reta_conf,
+			uint16_t reta_size)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	uint16_t i, idx, shift;
+
+	if (reta_size > ETH_RSS_RETA_SIZE_128) {
+		struct ecore_dev *edev = &qdev->edev;
+		DP_ERR(edev, "reta_size %d is not supported\n", reta_size);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < reta_size; i++) {
+		idx = i / RTE_RETA_GROUP_SIZE;
+		shift = i % RTE_RETA_GROUP_SIZE;
+		if (reta_conf[idx].mask & (1ULL << shift)) {
+			uint8_t entry = qdev->rss_params.rss_ind_table[i];
+			reta_conf[idx].reta[shift] = entry;
+		}
+	}
+
+	return 0;
+}
+
+int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	uint32_t frame_size;
+	struct qede_dev *qdev = dev->data->dev_private;
+	struct rte_eth_dev_info dev_info = {0};
+
+	qede_dev_info_get(dev, &dev_info);
+
+	/* VLAN_TAG = 4 */
+	frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4;
+
+	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+		return -EINVAL;
+
+	if (!dev->data->scattered_rx &&
+	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+		return -EINVAL;
+
+	if (frame_size > ETHER_MAX_LEN)
+		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+	else
+		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+
+	/* update max frame size */
+	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+	qdev->mtu = mtu;
+	qede_dev_stop(dev);
+	qede_dev_start(dev);
+
+	return 0;
+}
+
 static const struct eth_dev_ops qede_eth_dev_ops = {
 	.dev_configure = qede_dev_configure,
 	.dev_infos_get = qede_dev_info_get,
@@ -761,6 +1158,9 @@ static const struct eth_dev_ops qede_eth_dev_ops = {
 	.dev_close = qede_dev_close,
 	.stats_get = qede_get_stats,
 	.stats_reset = qede_reset_stats,
+	.xstats_get = qede_get_xstats,
+	.xstats_reset = qede_reset_xstats,
+	.xstats_get_names = qede_get_xstats_names,
 	.mac_addr_add = qede_mac_addr_add,
 	.mac_addr_remove = qede_mac_addr_remove,
 	.mac_addr_set = qede_mac_addr_set,
@@ -769,6 +1169,43 @@ static const struct eth_dev_ops qede_eth_dev_ops = {
 	.flow_ctrl_set = qede_flow_ctrl_set,
 	.flow_ctrl_get = qede_flow_ctrl_get,
 	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
+	.rss_hash_update = qede_rss_hash_update,
+	.rss_hash_conf_get = qede_rss_hash_conf_get,
+	.reta_update = qede_rss_reta_update,
+	.reta_query = qede_rss_reta_query,
+	.mtu_set = qede_set_mtu,
+};
+
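qede_rss_reta_update()/query() above use the standard grouped layout: reta_size entries split into rte_eth_rss_reta_entry64 groups of RTE_RETA_GROUP_SIZE (64), each with a validity mask. A hedged application-side sketch that spreads the full 128-entry table round-robin over the Rx queues (the port id and queue count are illustrative):

#include <string.h>
#include <rte_ethdev.h>

static int spread_reta(uint8_t port_id, uint16_t nb_rx_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
						  RTE_RETA_GROUP_SIZE];
	uint16_t i, idx, shift;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		reta_conf[idx].mask |= 1ULL << shift;	/* mark entry valid */
		reta_conf[idx].reta[shift] = i % nb_rx_queues;
	}

	/* Dispatches to qede_rss_reta_update() via eth_dev_ops */
	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
					   ETH_RSS_RETA_SIZE_128);
}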
+static const struct eth_dev_ops qede_eth_vf_dev_ops = {
+	.dev_configure = qede_dev_configure,
+	.dev_infos_get = qede_dev_info_get,
+	.rx_queue_setup = qede_rx_queue_setup,
+	.rx_queue_release = qede_rx_queue_release,
+	.tx_queue_setup = qede_tx_queue_setup,
+	.tx_queue_release = qede_tx_queue_release,
+	.dev_start = qede_dev_start,
+	.dev_set_link_up = qede_dev_set_link_up,
+	.dev_set_link_down = qede_dev_set_link_down,
+	.link_update = qede_link_update,
+	.promiscuous_enable = qede_promiscuous_enable,
+	.promiscuous_disable = qede_promiscuous_disable,
+	.allmulticast_enable = qede_allmulticast_enable,
+	.allmulticast_disable = qede_allmulticast_disable,
+	.dev_stop = qede_dev_stop,
+	.dev_close = qede_dev_close,
+	.stats_get = qede_get_stats,
+	.stats_reset = qede_reset_stats,
+	.xstats_get = qede_get_xstats,
+	.xstats_reset = qede_reset_xstats,
+	.xstats_get_names = qede_get_xstats_names,
+	.vlan_offload_set = qede_vlan_offload_set,
+	.vlan_filter_set = qede_vlan_filter_set,
+	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
+	.rss_hash_update = qede_rss_hash_update,
+	.rss_hash_conf_get = qede_rss_hash_conf_get,
+	.reta_update = qede_rss_reta_update,
+	.reta_query = qede_rss_reta_query,
+	.mtu_set = qede_set_mtu,
 };
 
 static void qede_update_pf_params(struct ecore_dev *edev)
@@ -861,9 +1298,26 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 	params.drv_eng = QEDE_ENGINEERING_VERSION;
 	strncpy((char *)params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
 
+	/* For CMT mode device do periodic polling for slowpath events.
+	 * This is required since uio device uses only one MSI-x
+	 * interrupt vector but we need one for each engine.
+	 */
+	if (edev->num_hwfns > 1) {
+		rc = rte_eal_alarm_set(timer_period * US_PER_S,
+				       qede_poll_sp_sb_cb,
+				       (void *)eth_dev);
+		if (rc != 0) {
+			DP_ERR(edev, "Unable to start periodic"
+				     " timer rc %d\n", rc);
+			return -EINVAL;
+		}
+	}
+
 	rc = qed_ops->common->slowpath_start(edev, &params);
 	if (rc) {
 		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
+		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
+				     (void *)eth_dev);
 		return -ENODEV;
 	}
 
@@ -872,6 +1326,8 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
 		qed_ops->common->slowpath_stop(edev);
 		qed_ops->common->remove(edev);
+		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
+				     (void *)eth_dev);
 		return -ENODEV;
 	}
 
@@ -884,7 +1340,8 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 		    (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
 				       ECORE_MAC);
 	else
-		adapter->dev_info.num_mac_addrs = 1;
+		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
+					     &adapter->dev_info.num_mac_addrs);
 
 	/* Allocate memory for storing MAC addr */
 	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
@@ -896,14 +1353,40 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 		DP_ERR(edev, "Failed to allocate MAC address\n");
 		qed_ops->common->slowpath_stop(edev);
 		qed_ops->common->remove(edev);
+		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
+				     (void *)eth_dev);
 		return -ENOMEM;
 	}
 
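The block below decides the port's primary MAC: a PF copies it from hw_info, while a VF reads the PF's bulletin board and accepts a forced MAC if one is present. Either way the result lands in eth_dev->data->mac_addrs[0], which an application can read back with the standard call (a hedged sketch; the port id is illustrative):

#include <stdio.h>
#include <rte_ethdev.h>

static void show_port_mac(uint8_t port_id)
{
	struct ether_addr mac;

	rte_eth_macaddr_get(port_id, &mac);	/* copies mac_addrs[0] */
	printf("port %u MAC %02x:%02x:%02x:%02x:%02x:%02x\n", port_id,
	       mac.addr_bytes[0], mac.addr_bytes[1], mac.addr_bytes[2],
	       mac.addr_bytes[3], mac.addr_bytes[4], mac.addr_bytes[5]);
}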
-	ether_addr_copy((struct ether_addr *)edev->hwfns[0].
-			hw_info.hw_mac_addr, &eth_dev->data->mac_addrs[0]);
+	if (!is_vf) {
+		ether_addr_copy((struct ether_addr *)edev->hwfns[0].
+				hw_info.hw_mac_addr,
+				&eth_dev->data->mac_addrs[0]);
+		ether_addr_copy(&eth_dev->data->mac_addrs[0],
+				&adapter->primary_mac);
+	} else {
+		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
+				       &bulletin_change);
+		if (bulletin_change) {
+			is_mac_exist =
+			    ecore_vf_bulletin_get_forced_mac(
+						ECORE_LEADING_HWFN(edev),
+						vf_mac,
+						&is_mac_forced);
+			if (is_mac_exist && is_mac_forced) {
+				DP_INFO(edev, "VF macaddr received from PF\n");
+				ether_addr_copy((struct ether_addr *)&vf_mac,
+						&eth_dev->data->mac_addrs[0]);
+				ether_addr_copy(&eth_dev->data->mac_addrs[0],
+						&adapter->primary_mac);
+			} else {
+				DP_NOTICE(edev, false,
+					  "No VF macaddr assigned\n");
+			}
+		}
+	}
 
-	eth_dev->dev_ops = &qede_eth_dev_ops;
+	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
 
 	if (do_once) {
 		qede_print_adapter_info(adapter);
@@ -987,16 +1470,20 @@ static struct rte_pci_id pci_id_qede_map[] = {
 	{
 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25)
 	},
+	{
+		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_100)
+	},
 	{.vendor_id = 0,}
 };
 
 static struct eth_driver rte_qedevf_pmd = {
 	.pci_drv = {
-		    .name = "rte_qedevf_pmd",
 		    .id_table = pci_id_qedevf_map,
 		    .drv_flags =
 		    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
-		    },
+		    .probe = rte_eth_dev_pci_probe,
+		    .remove = rte_eth_dev_pci_remove,
+		   },
 	.eth_dev_init = qedevf_eth_dev_init,
 	.eth_dev_uninit = qedevf_eth_dev_uninit,
 	.dev_private_size = sizeof(struct qede_dev),
@@ -1004,43 +1491,18 @@ static struct eth_driver rte_qedevf_pmd = {
 
 static struct eth_driver rte_qede_pmd = {
 	.pci_drv = {
-		    .name = "rte_qede_pmd",
 		    .id_table = pci_id_qede_map,
 		    .drv_flags =
 		    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
-		    },
+		    .probe = rte_eth_dev_pci_probe,
+		    .remove = rte_eth_dev_pci_remove,
+		   },
 	.eth_dev_init = qede_eth_dev_init,
 	.eth_dev_uninit = qede_eth_dev_uninit,
 	.dev_private_size = sizeof(struct qede_dev),
 };
 
-static int
-rte_qedevf_pmd_init(const char *name __rte_unused,
-		    const char *params __rte_unused)
-{
-	rte_eth_driver_register(&rte_qedevf_pmd);
-
-	return 0;
-}
-
-static int
-rte_qede_pmd_init(const char *name __rte_unused,
-		  const char *params __rte_unused)
-{
-	rte_eth_driver_register(&rte_qede_pmd);
-
-	return 0;
-}
-
-static struct rte_driver rte_qedevf_driver = {
-	.type = PMD_PDEV,
-	.init = rte_qede_pmd_init
-};
-
-static struct rte_driver rte_qede_driver = {
-	.type = PMD_PDEV,
-	.init = rte_qedevf_pmd_init
-};
-
-PMD_REGISTER_DRIVER(rte_qede_driver);
-PMD_REGISTER_DRIVER(rte_qedevf_driver);
+DRIVER_REGISTER_PCI(net_qede, rte_qede_pmd.pci_drv);
+DRIVER_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
+DRIVER_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd.pci_drv);
+DRIVER_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
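With xstats_get_names/xstats_get registered in both the PF and VF ops tables, an application retrieves the counters defined in qede_xstats_strings[] with the usual two-call pattern: query the count, allocate, then fetch names and values. A hedged sketch against this era's API (uint8_t port ids; the signatures changed in later DPDK releases):

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

static void dump_xstats(uint8_t port_id)
{
	struct rte_eth_xstat_name *names;
	struct rte_eth_xstat *values;
	int i, n;

	n = rte_eth_xstats_get_names(port_id, NULL, 0);	/* count only */
	if (n <= 0)
		return;

	names = calloc(n, sizeof(*names));
	values = calloc(n, sizeof(*values));
	if (names != NULL && values != NULL &&
	    rte_eth_xstats_get_names(port_id, names, n) == n &&
	    rte_eth_xstats_get(port_id, values, n) == n) {
		for (i = 0; i < n; i++)
			printf("%s: %" PRIu64 "\n",
			       names[i].name, values[i].value);
	}

	free(names);
	free(values);
}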