X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fqede%2Fqede_ethdev.c;h=68c8c899b25fd3297d05dd38c585f529232e4cef;hb=c6dd1eb8a55c1f66da8398508ea8a749799eef18;hp=3e1a62c9f9a6f83d7086800bccfef17e7ae4d84a;hpb=413ecf29425174f25383ea43fba003fd651ddc89;p=dpdk.git

diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 3e1a62c9f9..68c8c899b2 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
  *
  * See LICENSE.qede_pmd for copyright and licensing details.
  */
@@ -16,7 +16,7 @@ int qede_logtype_init;
 int qede_logtype_driver;
 
 static const struct qed_eth_ops *qed_ops;
-static int64_t timer_period = 1;
+#define QEDE_SP_TIMER_PERIOD 10000 /* 100ms */
 
 /* VXLAN tunnel classification mapping */
 const struct _qede_udp_tunn_types {
@@ -338,6 +338,24 @@ static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
 	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
 }
 
+static void
+qede_interrupt_handler_intx(void *param)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	u64 status;
+
+	/* Check if our device actually raised an interrupt */
+	status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev));
+	if (status & 0x1) {
+		qede_interrupt_action(ECORE_LEADING_HWFN(edev));
+
+		if (rte_intr_enable(eth_dev->intr_handle))
+			DP_ERR(edev, "rte_intr_enable failed\n");
+	}
+}
+
 static void
 qede_interrupt_handler(void *param)
 {
@@ -518,14 +536,9 @@ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
 	params.update_vport_active_tx_flg = 1;
 	params.vport_active_rx_flg = flg;
 	params.vport_active_tx_flg = flg;
-	if (!qdev->enable_tx_switching) {
-		if ((QEDE_NPAR_TX_SWITCHING != NULL) ||
-		    ((QEDE_VF_TX_SWITCHING != NULL) && IS_VF(edev))) {
-			params.update_tx_switching_flg = 1;
-			params.tx_switching_flg = !flg;
-			DP_INFO(edev, "%s tx-switching is disabled\n",
-				QEDE_NPAR_TX_SWITCHING ? "NPAR" : "VF");
-		}
+	if (~qdev->enable_tx_switching & flg) {
+		params.update_tx_switching_flg = 1;
+		params.tx_switching_flg = !flg;
 	}
 	for_each_hwfn(edev, i) {
 		p_hwfn = &edev->hwfns[i];
@@ -603,37 +616,6 @@ int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
 	return 0;
 }
 
-/* Update MTU via vport-update without doing port restart.
- * The vport must be deactivated before calling this API.
- */
-int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
-{
-	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
-	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-	struct ecore_sp_vport_update_params params;
-	struct ecore_hwfn *p_hwfn;
-	int rc;
-	int i;
-
-	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
-	params.vport_id = 0;
-	params.mtu = mtu;
-	params.vport_id = 0;
-	for_each_hwfn(edev, i) {
-		p_hwfn = &edev->hwfns[i];
-		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
-		rc = ecore_sp_vport_update(p_hwfn, &params,
-				ECORE_SPQ_MODE_EBLOCK, NULL);
-		if (rc != ECORE_SUCCESS) {
-			DP_ERR(edev, "Failed to update MTU\n");
-			return -1;
-		}
-	}
-	DP_INFO(edev, "MTU updated to %u\n", mtu);
-
-	return 0;
-}
-
 static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
 {
 	memset(ucast, 0, sizeof(struct ecore_filter_ucast));
@@ -857,10 +839,10 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
 			      ETHER_ADDR_LEN) == 0) &&
 		     ucast->vni == tmp->vni &&
 		     ucast->vlan == tmp->vlan) {
-			DP_ERR(edev, "Unicast MAC is already added"
-			       " with vlan = %u, vni = %u\n",
-			       ucast->vlan, ucast->vni);
-			return -EEXIST;
+			DP_INFO(edev, "Unicast MAC is already added"
+				" with vlan = %u, vni = %u\n",
+				ucast->vlan, ucast->vni);
+			return 0;
 		}
 	}
 	u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
@@ -993,7 +975,11 @@ qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
 	struct ecore_filter_ucast ucast;
 	int re;
 
+	if (!is_valid_assigned_ether_addr(mac_addr))
+		return -EINVAL;
+
 	qede_set_ucast_cmn_params(&ucast);
+	ucast.opcode = ECORE_FILTER_ADD;
 	ucast.type = ECORE_FILTER_MAC;
 	ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
 	re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
@@ -1015,6 +1001,9 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
 		return;
 	}
 
+	if (!is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
+		return;
+
 	qede_set_ucast_cmn_params(&ucast);
 	ucast.opcode = ECORE_FILTER_REMOVE;
 	ucast.type = ECORE_FILTER_MAC;
@@ -1038,8 +1027,9 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
 		return -EPERM;
 	}
 
-	qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
-	return 0;
+	qede_mac_addr_remove(eth_dev, 0);
+
+	return qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
 }
 
 static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
@@ -1117,9 +1107,9 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
 
 		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
 			if (tmp->vid == vlan_id) {
-				DP_ERR(edev, "VLAN %u already configured\n",
-				       vlan_id);
-				return -EEXIST;
+				DP_INFO(edev, "VLAN %u already configured\n",
+					vlan_id);
+				return 0;
 			}
 		}
 
@@ -1296,6 +1286,12 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
 	PMD_INIT_FUNC_TRACE(edev);
 
+	/* Update MTU only if it has changed */
+	if (eth_dev->data->mtu != qdev->mtu) {
+		if (qede_update_mtu(eth_dev, qdev->mtu))
+			goto err;
+	}
+
 	/* Configure TPA parameters */
 	if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
 		if (qede_enable_tpa(eth_dev, true))
 			goto err;
@@ -1359,9 +1355,6 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev)
 	/* Disable traffic */
 	ecore_hw_stop_fastpath(edev); /* TBD - loop */
 
-	if (IS_PF(edev))
-		qede_mac_addr_remove(eth_dev, 0);
-
 	DP_INFO(edev, "Device is stopped\n");
 }
 
@@ -1387,8 +1380,12 @@ static int qede_args_check(const char *key, const char *val, void *opaque)
 	}
 
 	if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) ||
-	    (strcmp(QEDE_VF_TX_SWITCHING, key) == 0))
+	    ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) {
 		qdev->enable_tx_switching = !!tmp;
+		DP_INFO(edev, "Disabling %s tx-switching\n",
+			strcmp(QEDE_NPAR_TX_SWITCHING, key) ?
+			"VF" : "NPAR");
+	}
 
 	return ret;
 }
@@ -1463,7 +1460,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 
 	/* Parse devargs and fix up rxmode */
 	if (qede_args(eth_dev))
-		return -ENOTSUP;
+		DP_NOTICE(edev, false,
+			  "Invalid devargs supplied, requested change will not take effect\n");
 
 	if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
 	      rxmode->mq_mode == ETH_MQ_RX_RSS)) {
@@ -1496,8 +1494,7 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 
 	/* Enable VLAN offloads by default */
 	ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
-					     ETH_VLAN_FILTER_MASK |
-					     ETH_VLAN_EXTEND_MASK);
+					     ETH_VLAN_FILTER_MASK);
 	if (ret)
 		return ret;
 
@@ -1615,18 +1612,20 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
 {
 	struct qede_dev *qdev = eth_dev->data->dev_private;
 	struct ecore_dev *edev = &qdev->edev;
+	struct qed_link_output q_link;
+	struct rte_eth_link link;
 	uint16_t link_duplex;
-	struct qed_link_output link;
-	struct rte_eth_link *curr = &eth_dev->data->dev_link;
 
-	memset(&link, 0, sizeof(struct qed_link_output));
-	qdev->ops->common->get_link(edev, &link);
+	memset(&q_link, 0, sizeof(q_link));
+	memset(&link, 0, sizeof(link));
+
+	qdev->ops->common->get_link(edev, &q_link);
 
 	/* Link Speed */
-	curr->link_speed = link.speed;
+	link.link_speed = q_link.speed;
 
 	/* Link Mode */
-	switch (link.duplex) {
+	switch (q_link.duplex) {
 	case QEDE_DUPLEX_HALF:
 		link_duplex = ETH_LINK_HALF_DUPLEX;
 		break;
@@ -1637,21 +1636,20 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
 	default:
 		link_duplex = -1;
 	}
-	curr->link_duplex = link_duplex;
+	link.link_duplex = link_duplex;
 
 	/* Link Status */
-	curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
+	link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
 
 	/* AN */
-	curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
+	link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
 			     ETH_LINK_AUTONEG : ETH_LINK_FIXED;
 
 	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
-		curr->link_speed, curr->link_duplex,
-		curr->link_autoneg, curr->link_status);
+		link.link_speed, link.link_duplex,
+		link.link_autoneg, link.link_status);
 
-	/* return 0 means link status changed, -1 means not changed */
-	return ((curr->link_status == link.link_up) ? -1 : 0);
+	return rte_eth_linkstatus_set(eth_dev, &link);
 }
 
 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
@@ -1698,7 +1696,7 @@ static void qede_poll_sp_sb_cb(void *param)
 	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
 	qede_interrupt_action(&edev->hwfns[1]);
 
-	rc = rte_eal_alarm_set(timer_period * US_PER_S,
+	rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
 			       qede_poll_sp_sb_cb,
 			       (void *)eth_dev);
 	if (rc != 0) {
@@ -2056,6 +2054,70 @@ qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
 	return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num);
 }
 
+/* Update MTU via vport-update without doing port restart.
+ * The vport must be deactivated before calling this API.
+ */
+int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	struct ecore_hwfn *p_hwfn;
+	int rc;
+	int i;
+
+	if (IS_PF(edev)) {
+		struct ecore_sp_vport_update_params params;
+
+		memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+		params.vport_id = 0;
+		params.mtu = mtu;
+		params.vport_id = 0;
+		for_each_hwfn(edev, i) {
+			p_hwfn = &edev->hwfns[i];
+			params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+			rc = ecore_sp_vport_update(p_hwfn, &params,
+					ECORE_SPQ_MODE_EBLOCK, NULL);
+			if (rc != ECORE_SUCCESS)
+				goto err;
+		}
+	} else {
+		for_each_hwfn(edev, i) {
+			p_hwfn = &edev->hwfns[i];
+			rc = ecore_vf_pf_update_mtu(p_hwfn, mtu);
+			if (rc == ECORE_INVAL) {
+				DP_INFO(edev, "VF MTU Update TLV not supported\n");
+				/* Recreate vport */
+				rc = qede_start_vport(qdev, mtu);
+				if (rc != ECORE_SUCCESS)
+					goto err;
+
+				/* Restore config lost due to vport stop */
+				if (eth_dev->data->promiscuous)
+					qede_promiscuous_enable(eth_dev);
+				else
+					qede_promiscuous_disable(eth_dev);
+
+				if (eth_dev->data->all_multicast)
+					qede_allmulticast_enable(eth_dev);
+				else
+					qede_allmulticast_disable(eth_dev);
+
+				qede_vlan_offload_set(eth_dev,
+						      qdev->vlan_offload_mask);
+			} else if (rc != ECORE_SUCCESS) {
+				goto err;
+			}
+		}
+	}
+	DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? "PF" : "VF", mtu);
+
+	return 0;
+
+err:
+	DP_ERR(edev, "Failed to update MTU\n");
+	return -1;
+}
+
 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 			      struct rte_eth_fc_conf *fc_conf)
 {
@@ -2210,7 +2272,7 @@ int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
 	vport_update_params.vport_id = 0;
 	/* pass the L2 handles instead of qids */
 	for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
-		idx = qdev->rss_ind_table[i];
+		idx = i % QEDE_RSS_COUNT(qdev);
 		rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
 	}
 	vport_update_params.rss_params = &rss_params;
@@ -2458,12 +2520,8 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 		dev->data->dev_started = 0;
 		qede_dev_stop(dev);
 		restart = true;
-	} else {
-		if (IS_PF(edev))
-			qede_mac_addr_remove(dev, 0);
 	}
 	rte_delay_ms(1000);
-	qede_start_vport(qdev, mtu); /* Recreate vport */
 	qdev->mtu = mtu;
 
 	/* Fix up RX buf size for all queues of the port */
@@ -2487,22 +2545,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	else
 		dev->data->dev_conf.rxmode.jumbo_frame = 0;
 
-	/* Restore config lost due to vport stop */
-	if (IS_PF(edev))
-		qede_mac_addr_set(dev, &qdev->primary_mac);
-
-	if (dev->data->promiscuous)
-		qede_promiscuous_enable(dev);
-	else
-		qede_promiscuous_disable(dev);
-
-	if (dev->data->all_multicast)
-		qede_allmulticast_enable(dev);
-	else
-		qede_allmulticast_disable(dev);
-
-	qede_vlan_offload_set(dev, qdev->vlan_offload_mask);
-
 	if (!dev->data->dev_started && restart) {
 		qede_dev_start(dev);
 		dev->data->dev_started = 1;
@@ -2999,6 +3041,9 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
 	.mtu_set = qede_set_mtu,
 	.udp_tunnel_port_add = qede_udp_dst_port_add,
 	.udp_tunnel_port_del = qede_udp_dst_port_del,
+	.mac_addr_add = qede_mac_addr_add,
+	.mac_addr_remove = qede_mac_addr_remove,
+	.mac_addr_set = qede_mac_addr_set,
 };
 
 static void qede_update_pf_params(struct ecore_dev *edev)
@@ -3027,6 +3072,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 	/* Fix up ecore debug level */
 	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
 	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
+	uint32_t int_mode;
 	int rc;
 
 	/* Extract key data structures */
@@ -3071,8 +3117,22 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 		return -ENODEV;
 	}
 	qede_update_pf_params(edev);
-	rte_intr_callback_register(&pci_dev->intr_handle,
-				   qede_interrupt_handler, (void *)eth_dev);
+
+	switch (pci_dev->intr_handle.type) {
+	case RTE_INTR_HANDLE_UIO_INTX:
+	case RTE_INTR_HANDLE_VFIO_LEGACY:
+		int_mode = ECORE_INT_MODE_INTA;
+		rte_intr_callback_register(&pci_dev->intr_handle,
+					   qede_interrupt_handler_intx,
+					   (void *)eth_dev);
+		break;
+	default:
+		int_mode = ECORE_INT_MODE_MSIX;
+		rte_intr_callback_register(&pci_dev->intr_handle,
+					   qede_interrupt_handler,
+					   (void *)eth_dev);
+	}
+
 	if (rte_intr_enable(&pci_dev->intr_handle)) {
 		DP_ERR(edev, "rte_intr_enable() failed\n");
 		return -ENODEV;
@@ -3080,7 +3140,8 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 
 	/* Start the Slowpath-process */
 	memset(&params, 0, sizeof(struct qed_slowpath_params));
-	params.int_mode = ECORE_INT_MODE_MSIX;
+
+	params.int_mode = int_mode;
 	params.drv_major = QEDE_PMD_VERSION_MAJOR;
 	params.drv_minor = QEDE_PMD_VERSION_MINOR;
 	params.drv_rev = QEDE_PMD_VERSION_REVISION;
@@ -3093,7 +3154,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 	 * interrupt vector but we need one for each engine.
 	 */
 	if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
-		rc = rte_eal_alarm_set(timer_period * US_PER_S,
+		rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
 				       qede_poll_sp_sb_cb,
 				       (void *)eth_dev);
 		if (rc != 0) {
@@ -3163,7 +3224,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 							ECORE_LEADING_HWFN(edev),
 							vf_mac,
 							&is_mac_forced);
-			if (is_mac_exist && is_mac_forced) {
+			if (is_mac_exist) {
 				DP_INFO(edev, "VF macaddr received from PF\n");
 				ether_addr_copy((struct ether_addr *)&vf_mac,
 						&eth_dev->data->mac_addrs[0]);