diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 4e9e89fad9..071c44110a 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -11,11 +11,14 @@
 #include
 
 /* Globals */
+int qede_logtype_init;
+int qede_logtype_driver;
+
 static const struct qed_eth_ops *qed_ops;
 static int64_t timer_period = 1;
 
 /* VXLAN tunnel classification mapping */
-const struct _qede_vxlan_tunn_types {
+const struct _qede_udp_tunn_types {
 	uint16_t rte_filter_type;
 	enum ecore_filter_ucast_type qede_type;
 	enum ecore_tunn_clss qede_tunn_clss;
@@ -453,6 +456,13 @@ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
 	params.update_vport_active_tx_flg = 1;
 	params.vport_active_rx_flg = flg;
 	params.vport_active_tx_flg = flg;
+#ifndef RTE_LIBRTE_QEDE_VF_TX_SWITCH
+	if (IS_VF(edev)) {
+		params.update_tx_switching_flg = 1;
+		params.tx_switching_flg = !flg;
+		DP_INFO(edev, "VF tx-switching is disabled\n");
+	}
+#endif
 	for_each_hwfn(edev, i) {
 		p_hwfn = &edev->hwfns[i];
 		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
@@ -463,7 +473,8 @@ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
 			break;
 		}
 	}
-	DP_INFO(edev, "vport %s\n", flg ? "activated" : "deactivated");
+	DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");
+
 	return rc;
 }
 
@@ -474,8 +485,8 @@ qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
 	/* Enable LRO in split mode */
 	sge_tpa_params->tpa_ipv4_en_flg = enable;
 	sge_tpa_params->tpa_ipv6_en_flg = enable;
-	sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
-	sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
+	sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
+	sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
 	/* set if tpa enable changes */
 	sge_tpa_params->update_tpa_en_flg = 1;
 	/* set if tpa parameters should be handled */
@@ -520,7 +531,7 @@ int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
 			return -1;
 		}
 	}
-
+	qdev->enable_lro = flg;
 	DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
 
 	return 0;
@@ -602,15 +613,123 @@ qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
 	return ecore_filter_accept_cmd(edev, 0, flags, false, false,
 			ECORE_SPQ_MODE_CB, NULL);
 }
-static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn,
-	uint8_t clss, bool mode, bool mask)
+
+static int
+qede_tunnel_update(struct qede_dev *qdev,
+		   struct ecore_tunnel_info *tunn_info)
 {
-	memset(p_tunn, 0, sizeof(struct ecore_tunnel_info));
-	p_tunn->vxlan.b_update_mode = mode;
-	p_tunn->vxlan.b_mode_enabled = mask;
-	p_tunn->b_update_rx_cls = true;
-	p_tunn->b_update_tx_cls = true;
-	p_tunn->vxlan.tun_cls = clss;
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	enum _ecore_status_t rc = ECORE_INVAL;
+	struct ecore_hwfn *p_hwfn;
+	struct ecore_ptt *p_ptt;
+	int i;
+
+	for_each_hwfn(edev, i) {
+		p_hwfn = &edev->hwfns[i];
+		p_ptt = IS_PF(edev) ?
ecore_ptt_acquire(p_hwfn) : NULL; + rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, + tunn_info, ECORE_SPQ_MODE_CB, NULL); + if (IS_PF(edev)) + ecore_ptt_release(p_hwfn, p_ptt); + + if (rc != ECORE_SUCCESS) + break; + } + + return rc; +} + +static int +qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss, + bool enable) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + enum _ecore_status_t rc = ECORE_INVAL; + struct ecore_tunnel_info tunn; + + if (qdev->vxlan.enable == enable) + return ECORE_SUCCESS; + + memset(&tunn, 0, sizeof(struct ecore_tunnel_info)); + tunn.vxlan.b_update_mode = true; + tunn.vxlan.b_mode_enabled = enable; + tunn.b_update_rx_cls = true; + tunn.b_update_tx_cls = true; + tunn.vxlan.tun_cls = clss; + + tunn.vxlan_port.b_update_port = true; + tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0; + + rc = qede_tunnel_update(qdev, &tunn); + if (rc == ECORE_SUCCESS) { + qdev->vxlan.enable = enable; + qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0; + DP_INFO(edev, "vxlan is %s, UDP port = %d\n", + enable ? "enabled" : "disabled", qdev->vxlan.udp_port); + } else { + DP_ERR(edev, "Failed to update tunn_clss %u\n", + tunn.vxlan.tun_cls); + } + + return rc; +} + +static int +qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss, + bool enable) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + enum _ecore_status_t rc = ECORE_INVAL; + struct ecore_tunnel_info tunn; + + memset(&tunn, 0, sizeof(struct ecore_tunnel_info)); + tunn.l2_geneve.b_update_mode = true; + tunn.l2_geneve.b_mode_enabled = enable; + tunn.ip_geneve.b_update_mode = true; + tunn.ip_geneve.b_mode_enabled = enable; + tunn.l2_geneve.tun_cls = clss; + tunn.ip_geneve.tun_cls = clss; + tunn.b_update_rx_cls = true; + tunn.b_update_tx_cls = true; + + tunn.geneve_port.b_update_port = true; + tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0; + + rc = qede_tunnel_update(qdev, &tunn); + if (rc == ECORE_SUCCESS) { + qdev->geneve.enable = enable; + qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0; + DP_INFO(edev, "GENEVE is %s, UDP port = %d\n", + enable ? 
"enabled" : "disabled", qdev->geneve.udp_port); + } else { + DP_ERR(edev, "Failed to update tunn_clss %u\n", + clss); + } + + return rc; +} + +static int +qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss, + enum rte_eth_tunnel_type tunn_type, bool enable) +{ + int rc = -EINVAL; + + switch (tunn_type) { + case RTE_TUNNEL_TYPE_VXLAN: + rc = qede_vxlan_enable(eth_dev, clss, enable); + break; + case RTE_TUNNEL_TYPE_GENEVE: + rc = qede_geneve_enable(eth_dev, clss, enable); + break; + default: + rc = -EINVAL; + break; + } + + return rc; } static int @@ -975,7 +1094,7 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev, return rc; } -static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) +static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); @@ -1013,6 +1132,8 @@ static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n", mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter); + + return 0; } static void qede_prandom_bytes(uint32_t *buff) @@ -1078,6 +1199,7 @@ static void qede_fastpath_start(struct ecore_dev *edev) static int qede_dev_start(struct rte_eth_dev *eth_dev) { + struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); @@ -1088,10 +1210,15 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev) if (qede_update_mtu(eth_dev, qdev->new_mtu)) goto err; qdev->mtu = qdev->new_mtu; - /* If MTU has changed then update TPA too */ - if (qdev->enable_lro) - if (qede_enable_tpa(eth_dev, true)) - goto err; + } + + /* Configure TPA parameters */ + if (rxmode->enable_lro) { + if (qede_enable_tpa(eth_dev, true)) + return -EINVAL; + /* Enable scatter mode for LRO */ + if (!rxmode->enable_scatter) + eth_dev->data->scattered_rx = 1; } /* Start queues */ @@ -1103,7 +1230,7 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev) * Also, we would like to retain similar behavior in PF case, so we * don't do PF/VF specific check here. */ - if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) + if (rxmode->mq_mode == ETH_MQ_RX_RSS) if (qede_config_rss(eth_dev)) goto err; @@ -1114,6 +1241,9 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev) /* Bring-up the link */ qede_dev_set_link_state(eth_dev, true); + /* Update link status */ + qede_link_update(eth_dev, 0); + /* Start/resume traffic */ qede_fastpath_start(edev); @@ -1139,7 +1269,6 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev) if (qdev->enable_lro) qede_enable_tpa(eth_dev, false); - /* TODO: Do we need disable LRO or RSS */ /* Stop queues */ qede_stop_queues(eth_dev); @@ -1157,11 +1286,12 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev) struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; + int ret; PMD_INIT_FUNC_TRACE(edev); /* Check requirements for 100G mode */ - if (edev->num_hwfns > 1) { + if (ECORE_IS_CMT(edev)) { if (eth_dev->data->nb_rx_queues < 2 || eth_dev->data->nb_tx_queues < 2) { DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n"); @@ -1176,6 +1306,14 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev) } } + /* We need to have min 1 RX queue.There is no min check in + * rte_eth_dev_configure(), so we are checking it here. 
+ */ + if (eth_dev->data->nb_rx_queues == 0) { + DP_ERR(edev, "Minimum one RX queue is required\n"); + return -EINVAL; + } + /* Sanity checks and throw warnings */ if (rxmode->enable_scatter) eth_dev->data->scattered_rx = 1; @@ -1226,20 +1364,12 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev) qdev->mtu = rxmode->max_rx_pkt_len; qdev->new_mtu = qdev->mtu; - /* Configure TPA parameters */ - if (rxmode->enable_lro) { - if (qede_enable_tpa(eth_dev, true)) - return -EINVAL; - /* Enable scatter mode for LRO */ - if (!rxmode->enable_scatter) - eth_dev->data->scattered_rx = 1; - } - qdev->enable_lro = rxmode->enable_lro; - /* Enable VLAN offloads by default */ - qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK | + ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | ETH_VLAN_EXTEND_MASK); + if (ret) + return ret; DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n", QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev)); @@ -1310,7 +1440,8 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev, DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_TCP_TSO | - DEV_TX_OFFLOAD_VXLAN_TNL_TSO); + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO); memset(&link, 0, sizeof(struct qed_link_output)); qdev->ops->common->get_link(edev, &link); @@ -1330,7 +1461,7 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev, } /* return 0 means link status changed, -1 means not changed */ -static int +int qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete) { struct qede_dev *qdev = eth_dev->data->dev_private; @@ -1456,11 +1587,11 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev) rte_intr_disable(&pci_dev->intr_handle); rte_intr_callback_unregister(&pci_dev->intr_handle, qede_interrupt_handler, (void *)eth_dev); - if (edev->num_hwfns > 1) + if (ECORE_IS_CMT(edev)) rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev); } -static void +static int qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats) { struct qede_dev *qdev = eth_dev->data->dev_private; @@ -1544,6 +1675,8 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats) if (j == txq_stat_cntrs) break; } + + return 0; } static unsigned @@ -1806,8 +1939,23 @@ static const uint32_t * qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev) { static const uint32_t ptypes[] = { + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L2_ETHER_VLAN, RTE_PTYPE_L3_IPV4, RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_TUNNEL_VXLAN, + RTE_PTYPE_L4_FRAG, + RTE_PTYPE_TUNNEL_GENEVE, + /* Inner */ + RTE_PTYPE_INNER_L2_ETHER, + RTE_PTYPE_INNER_L2_ETHER_VLAN, + RTE_PTYPE_INNER_L3_IPV4, + RTE_PTYPE_INNER_L3_IPV6, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_INNER_L4_FRAG, RTE_PTYPE_UNKNOWN }; @@ -2035,7 +2183,7 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev, params->update_rss_config = 1; /* Fix up RETA for CMT mode device */ - if (edev->num_hwfns > 1) + if (ECORE_IS_CMT(edev)) qdev->rss_enable = qede_update_rss_parm_cmt(edev, params); vport_update_params.vport_id = 0; @@ -2148,50 +2296,182 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) } static int -qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev, - struct rte_eth_udp_tunnel *tunnel_udp, - bool add) +qede_udp_dst_port_del(struct rte_eth_dev *eth_dev, + struct rte_eth_udp_tunnel *tunnel_udp) { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); struct ecore_tunnel_info tunn; /* @DPDK */ - 
struct ecore_hwfn *p_hwfn;
-	int rc, i;
+	uint16_t udp_port;
+	int rc;
 
 	PMD_INIT_FUNC_TRACE(edev);
 
 	memset(&tunn, 0, sizeof(tunn));
-	if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
+
+	switch (tunnel_udp->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
+			DP_ERR(edev, "UDP port %u doesn't exist\n",
+			       tunnel_udp->udp_port);
+			return ECORE_INVAL;
+		}
+		udp_port = 0;
+
 		tunn.vxlan_port.b_update_port = true;
-		tunn.vxlan_port.port = (add) ? tunnel_udp->udp_port :
-						QEDE_VXLAN_DEF_PORT;
-		for_each_hwfn(edev, i) {
-			p_hwfn = &edev->hwfns[i];
-			rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
-						ECORE_SPQ_MODE_CB, NULL);
-			if (rc != ECORE_SUCCESS) {
-				DP_ERR(edev, "Unable to config UDP port %u\n",
-				       tunn.vxlan_port.port);
-				return rc;
-			}
+		tunn.vxlan_port.port = udp_port;
+
+		rc = qede_tunnel_update(qdev, &tunn);
+		if (rc != ECORE_SUCCESS) {
+			DP_ERR(edev, "Unable to config UDP port %u\n",
+			       tunn.vxlan_port.port);
+			return rc;
 		}
+
+		qdev->vxlan.udp_port = udp_port;
+		/* If the request is to delete the UDP port and the number of
+		 * VXLAN filters has reached 0, then VXLAN offload can be
+		 * disabled.
+		 */
+		if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
+			return qede_vxlan_enable(eth_dev,
+					ECORE_TUNN_CLSS_MAC_VLAN, false);
+
+		break;
+
+	case RTE_TUNNEL_TYPE_GENEVE:
+		if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
+			DP_ERR(edev, "UDP port %u doesn't exist\n",
+			       tunnel_udp->udp_port);
+			return ECORE_INVAL;
+		}
+
+		udp_port = 0;
+
+		tunn.geneve_port.b_update_port = true;
+		tunn.geneve_port.port = udp_port;
+
+		rc = qede_tunnel_update(qdev, &tunn);
+		if (rc != ECORE_SUCCESS) {
+			DP_ERR(edev, "Unable to config UDP port %u\n",
+			       tunn.geneve_port.port);
+			return rc;
+		}
+
+		qdev->geneve.udp_port = udp_port;
+		/* If the request is to delete the UDP port and the number of
+		 * GENEVE filters has reached 0, then GENEVE offload can be
+		 * disabled.
+		 */
+		if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
+			return qede_geneve_enable(eth_dev,
+					ECORE_TUNN_CLSS_MAC_VLAN, false);
+
+		break;
+
+	default:
+		return ECORE_INVAL;
 	}
 
 	return 0;
-}
 
-static int
-qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
-		      struct rte_eth_udp_tunnel *tunnel_udp)
-{
-	return qede_conf_udp_dst_port(eth_dev, tunnel_udp, false);
 }
-
 static int
 qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
 		      struct rte_eth_udp_tunnel *tunnel_udp)
 {
-	return qede_conf_udp_dst_port(eth_dev, tunnel_udp, true);
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	struct ecore_tunnel_info tunn; /* @DPDK */
+	uint16_t udp_port;
+	int rc;
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	memset(&tunn, 0, sizeof(tunn));
+
+	switch (tunnel_udp->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
+			DP_INFO(edev,
+				"UDP port %u for VXLAN was already configured\n",
+				tunnel_udp->udp_port);
+			return ECORE_SUCCESS;
+		}
+
+		/* Enable VXLAN tunnel with default MAC/VLAN classification if
+		 * it was not enabled while adding VXLAN filter before UDP port
+		 * update.
+ */ + if (!qdev->vxlan.enable) { + rc = qede_vxlan_enable(eth_dev, + ECORE_TUNN_CLSS_MAC_VLAN, true); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Failed to enable VXLAN " + "prior to updating UDP port\n"); + return rc; + } + } + udp_port = tunnel_udp->udp_port; + + tunn.vxlan_port.b_update_port = true; + tunn.vxlan_port.port = udp_port; + + rc = qede_tunnel_update(qdev, &tunn); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n", + udp_port); + return rc; + } + + DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port); + + qdev->vxlan.udp_port = udp_port; + break; + + case RTE_TUNNEL_TYPE_GENEVE: + if (qdev->geneve.udp_port == tunnel_udp->udp_port) { + DP_INFO(edev, + "UDP port %u for GENEVE was already configured\n", + tunnel_udp->udp_port); + return ECORE_SUCCESS; + } + + /* Enable GENEVE tunnel with default MAC/VLAN classification if + * it was not enabled while adding GENEVE filter before UDP port + * update. + */ + if (!qdev->geneve.enable) { + rc = qede_geneve_enable(eth_dev, + ECORE_TUNN_CLSS_MAC_VLAN, true); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Failed to enable GENEVE " + "prior to updating UDP port\n"); + return rc; + } + } + udp_port = tunnel_udp->udp_port; + + tunn.geneve_port.b_update_port = true; + tunn.geneve_port.port = udp_port; + + rc = qede_tunnel_update(qdev, &tunn); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n", + udp_port); + return rc; + } + + DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port); + + qdev->geneve.udp_port = udp_port; + break; + + default: + return ECORE_INVAL; + } + + return 0; } static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type, @@ -2258,116 +2538,117 @@ qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast, return ECORE_SUCCESS; } -static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev, - enum rte_filter_op filter_op, - const struct rte_eth_tunnel_filter_conf *conf) +static int +_qede_tunn_filter_config(struct rte_eth_dev *eth_dev, + const struct rte_eth_tunnel_filter_conf *conf, + __attribute__((unused)) enum rte_filter_op filter_op, + enum ecore_tunn_clss *clss, + bool add) { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); - struct ecore_tunnel_info tunn; - struct ecore_hwfn *p_hwfn; + struct ecore_filter_ucast ucast = {0}; enum ecore_filter_ucast_type type; - enum ecore_tunn_clss clss; - struct ecore_filter_ucast ucast; + uint16_t filter_type = 0; char str[80]; - uint16_t filter_type; - int rc, i; - - PMD_INIT_FUNC_TRACE(edev); + int rc; - filter_type = conf->filter_type | qdev->vxlan_filter_type; - /* First determine if the given filter classification is supported */ - qede_get_ecore_tunn_params(filter_type, &type, &clss, str); - if (clss == MAX_ECORE_TUNN_CLSS) { - DP_ERR(edev, "Wrong filter type\n"); + filter_type = conf->filter_type; + /* Determine if the given filter classification is supported */ + qede_get_ecore_tunn_params(filter_type, &type, clss, str); + if (*clss == MAX_ECORE_TUNN_CLSS) { + DP_ERR(edev, "Unsupported filter type\n"); return -EINVAL; } /* Init tunnel ucast params */ rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type); if (rc != ECORE_SUCCESS) { - DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n", - conf->filter_type); + DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n", + conf->filter_type); return rc; } DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n", str, filter_op, ucast.type); - switch (filter_op) { - case RTE_ETH_FILTER_ADD: - 
ucast.opcode = ECORE_FILTER_ADD; - /* Skip MAC/VLAN if filter is based on VNI */ - if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) { - rc = qede_mac_int_ops(eth_dev, &ucast, 1); - if (rc == 0) { - /* Enable accept anyvlan */ - qede_config_accept_any_vlan(qdev, true); - } - } else { - rc = qede_ucast_filter(eth_dev, &ucast, 1); - if (rc == 0) - rc = ecore_filter_ucast_cmd(edev, &ucast, - ECORE_SPQ_MODE_CB, NULL); + ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE; + + /* Skip MAC/VLAN if filter is based on VNI */ + if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) { + rc = qede_mac_int_ops(eth_dev, &ucast, add); + if ((rc == 0) && add) { + /* Enable accept anyvlan */ + qede_config_accept_any_vlan(qdev, true); } + } else { + rc = qede_ucast_filter(eth_dev, &ucast, add); + if (rc == 0) + rc = ecore_filter_ucast_cmd(edev, &ucast, + ECORE_SPQ_MODE_CB, NULL); + } - if (rc != ECORE_SUCCESS) - return rc; + return rc; +} - qdev->vxlan_filter_type = filter_type; +static int +qede_tunn_filter_config(struct rte_eth_dev *eth_dev, + enum rte_filter_op filter_op, + const struct rte_eth_tunnel_filter_conf *conf) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS; + bool add; + int rc; - DP_INFO(edev, "Enabling VXLAN tunneling\n"); - qede_set_cmn_tunn_param(&tunn, clss, true, true); - for_each_hwfn(edev, i) { - p_hwfn = &edev->hwfns[i]; - rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, - &tunn, ECORE_SPQ_MODE_CB, NULL); - if (rc != ECORE_SUCCESS) { - DP_ERR(edev, "Failed to update tunn_clss %u\n", - tunn.vxlan.tun_cls); - } - } - qdev->num_tunn_filters++; /* Filter added successfully */ - break; + PMD_INIT_FUNC_TRACE(edev); + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + add = true; + break; case RTE_ETH_FILTER_DELETE: - ucast.opcode = ECORE_FILTER_REMOVE; + add = false; + break; + default: + DP_ERR(edev, "Unsupported operation %d\n", filter_op); + return -EINVAL; + } - if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) { - rc = qede_mac_int_ops(eth_dev, &ucast, 0); - } else { - rc = qede_ucast_filter(eth_dev, &ucast, 0); - if (rc == 0) - rc = ecore_filter_ucast_cmd(edev, &ucast, - ECORE_SPQ_MODE_CB, NULL); + if (IS_VF(edev)) + return qede_tunn_enable(eth_dev, + ECORE_TUNN_CLSS_MAC_VLAN, + conf->tunnel_type, add); + + rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add); + if (rc != ECORE_SUCCESS) + return rc; + + if (add) { + if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) { + qdev->vxlan.num_filters++; + qdev->vxlan.filter_type = conf->filter_type; + } else { /* GENEVE */ + qdev->geneve.num_filters++; + qdev->geneve.filter_type = conf->filter_type; } - if (rc != ECORE_SUCCESS) - return rc; - qdev->vxlan_filter_type = filter_type; - qdev->num_tunn_filters--; + if (!qdev->vxlan.enable || !qdev->geneve.enable) + return qede_tunn_enable(eth_dev, clss, + conf->tunnel_type, + true); + } else { + if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) + qdev->vxlan.num_filters--; + else /*GENEVE*/ + qdev->geneve.num_filters--; /* Disable VXLAN if VXLAN filters become 0 */ - if (qdev->num_tunn_filters == 0) { - DP_INFO(edev, "Disabling VXLAN tunneling\n"); - - /* Use 0 as tunnel mode */ - qede_set_cmn_tunn_param(&tunn, clss, false, true); - for_each_hwfn(edev, i) { - p_hwfn = &edev->hwfns[i]; - rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn, - ECORE_SPQ_MODE_CB, NULL); - if (rc != ECORE_SUCCESS) { - DP_ERR(edev, - "Failed to update tunn_clss %u\n", - tunn.vxlan.tun_cls); - break; - } - } - } - break; - 
default: - DP_ERR(edev, "Unsupported operation %d\n", filter_op); - return -EINVAL; + if ((qdev->vxlan.num_filters == 0) || + (qdev->geneve.num_filters == 0)) + return qede_tunn_enable(eth_dev, clss, + conf->tunnel_type, + false); } - DP_INFO(edev, "Current VXLAN filters %d\n", qdev->num_tunn_filters); return 0; } @@ -2386,13 +2667,13 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev, case RTE_ETH_FILTER_TUNNEL: switch (filter_conf->tunnel_type) { case RTE_TUNNEL_TYPE_VXLAN: + case RTE_TUNNEL_TYPE_GENEVE: DP_INFO(edev, "Packet steering to the specified Rx queue" - " is not supported with VXLAN tunneling"); - return(qede_vxlan_tunn_config(eth_dev, filter_op, + " is not supported with UDP tunneling"); + return(qede_tunn_filter_config(eth_dev, filter_op, filter_conf)); /* Place holders for future tunneling support */ - case RTE_TUNNEL_TYPE_GENEVE: case RTE_TUNNEL_TYPE_TEREDO: case RTE_TUNNEL_TYPE_NVGRE: case RTE_TUNNEL_TYPE_IP_IN_GRE: @@ -2495,6 +2776,8 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = { .reta_update = qede_rss_reta_update, .reta_query = qede_rss_reta_query, .mtu_set = qede_set_mtu, + .udp_tunnel_port_add = qede_udp_dst_port_add, + .udp_tunnel_port_del = qede_udp_dst_port_del, }; static void qede_update_pf_params(struct ecore_dev *edev) @@ -2527,6 +2810,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) /* Extract key data structures */ adapter = eth_dev->data->dev_private; + adapter->ethdev = eth_dev; edev = &adapter->edev; pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); pci_addr = pci_dev->addr; @@ -2587,7 +2871,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) * This is required since uio device uses only one MSI-x * interrupt vector but we need one for each engine. */ - if (edev->num_hwfns > 1 && IS_PF(edev)) { + if (ECORE_IS_CMT(edev) && IS_PF(edev)) { rc = rte_eal_alarm_set(timer_period * US_PER_S, qede_poll_sp_sb_cb, (void *)eth_dev); @@ -2844,3 +3128,15 @@ RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci"); RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map); RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci"); + +RTE_INIT(qede_init_log); +static void +qede_init_log(void) +{ + qede_logtype_init = rte_log_register("pmd.qede.init"); + if (qede_logtype_init >= 0) + rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE); + qede_logtype_driver = rte_log_register("pmd.qede.driver"); + if (qede_logtype_driver >= 0) + rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE); +}
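
For reference: a minimal sketch of how an application exercises the UDP
tunnel port ops this patch wires into qede_eth_vf_dev_ops, using only the
generic ethdev API of this DPDK era. The helper name, port id and UDP port
number are illustrative, and the port is assumed to be already configured
and started.

    #include <rte_ethdev.h>

    /* Illustrative helper: program a non-default VXLAN UDP port, then
     * remove it again. The add path lands in qede_udp_dst_port_add(),
     * which first enables VXLAN offload with default MAC/VLAN
     * classification if it is not enabled yet; the delete path lands in
     * qede_udp_dst_port_del(), which drops the offload once the last
     * filter for that tunnel type is gone.
     */
    static int
    toggle_vxlan_udp_port(uint16_t port_id, uint16_t udp_port)
    {
            struct rte_eth_udp_tunnel tunnel_udp = {
                    .udp_port = udp_port,
                    .prot_type = RTE_TUNNEL_TYPE_VXLAN,
            };
            int ret;

            ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);
            if (ret != 0)
                    return ret;

            return rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel_udp);
    }

The dynamic log types registered in qede_init_log() can likewise be raised
at run time through the EAL --log-level option (for example
--log-level=pmd.qede.driver,8; the exact syntax depends on the DPDK release
in use).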