diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index e20df36546..0c9f6590e1 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -5,6 +5,7 @@
  */
 
 #include "qede_ethdev.h"
+#include <rte_string_fns.h>
 #include <rte_alarm.h>
 #include <rte_version.h>
 #include <rte_kvargs.h>
@@ -124,6 +125,8 @@ static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
 		offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
 	{"rx_mac_filter_discards",
 		offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
+	{"rx_gft_filter_drop",
+		offsetof(struct ecore_eth_stats_common, gft_filter_drop)},
 	{"rx_hw_buffer_truncates",
 		offsetof(struct ecore_eth_stats_common, brb_truncates)},
 	{"rx_hw_buffer_discards",
@@ -247,8 +250,8 @@ qede_interrupt_handler_intx(void *param)
 	if (status & 0x1) {
 		qede_interrupt_action(ECORE_LEADING_HWFN(edev));
 
-		if (rte_intr_enable(eth_dev->intr_handle))
-			DP_ERR(edev, "rte_intr_enable failed\n");
+		if (rte_intr_ack(eth_dev->intr_handle))
+			DP_ERR(edev, "rte_intr_ack failed\n");
 	}
 }
 
@@ -260,8 +263,8 @@ qede_interrupt_handler(void *param)
 	struct ecore_dev *edev = &qdev->edev;
 
 	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
-	if (rte_intr_enable(eth_dev->intr_handle))
-		DP_ERR(edev, "rte_intr_enable failed\n");
+	if (rte_intr_ack(eth_dev->intr_handle))
+		DP_ERR(edev, "rte_intr_ack failed\n");
 }
 
 static void
@@ -297,12 +300,13 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
 		 (info->mfw_rev >> 16) & 0xff,
 		 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
 	DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
-	DP_INFO(edev, " Firmware file : %s\n", fw_file);
+	DP_INFO(edev, " Firmware file : %s\n", qede_fw_file);
 	DP_INFO(edev, "*********************************\n");
 }
 
 static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
 {
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	unsigned int i = 0, j = 0, qid;
 	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
@@ -310,12 +314,12 @@ static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
 
 	DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");
 
-	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(dev),
 			       RTE_ETHDEV_QUEUE_STAT_CNTRS);
-	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(dev),
 			       RTE_ETHDEV_QUEUE_STAT_CNTRS);
 
-	for_each_rss(qid) {
+	for (qid = 0; qid < qdev->num_rx_queues; qid++) {
 		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
 			     offsetof(struct qede_rx_queue, rcv_pkts), 0,
 			    sizeof(uint64_t));
@@ -341,7 +345,7 @@ static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
 
 	i = 0;
 
-	for_each_tss(qid) {
+	for (qid = 0; qid < qdev->num_tx_queues; qid++) {
 		txq = qdev->fp_array[qid].txq;
 
 		OSAL_MEMSET((uint64_t *)(uintptr_t)
@@ -558,13 +562,13 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	struct qede_ucast_entry *tmp = NULL;
 	struct qede_ucast_entry *u;
-	struct ether_addr *mac_addr;
+	struct rte_ether_addr *mac_addr;
 
-	mac_addr = (struct ether_addr *)ucast->mac;
+	mac_addr = (struct rte_ether_addr *)ucast->mac;
 	if (add) {
 		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
 			if ((memcmp(mac_addr, &tmp->mac,
-				    ETHER_ADDR_LEN) == 0) &&
+				    RTE_ETHER_ADDR_LEN) == 0) &&
 			     ucast->vni == tmp->vni &&
 			     ucast->vlan == tmp->vlan) {
 				DP_INFO(edev, "Unicast MAC is already added"
@@ -579,7 +583,7 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
 			DP_ERR(edev, "Did not allocate memory for ucast\n");
 			return -ENOMEM;
 		}
-		ether_addr_copy(mac_addr, &u->mac);
+		rte_ether_addr_copy(mac_addr, &u->mac);
 		u->vlan = ucast->vlan;
 		u->vni = ucast->vni;
 		SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
@@ -587,7 +591,7 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
 	} else {
 		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
 			if ((memcmp(mac_addr, &tmp->mac,
-				    ETHER_ADDR_LEN) == 0) &&
+				    RTE_ETHER_ADDR_LEN) == 0) &&
 			    ucast->vlan == tmp->vlan &&
 			    ucast->vni == tmp->vni)
 				break;
@@ -604,8 +608,9 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
 }
 
 static int
-qede_add_mcast_filters(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
-		       uint32_t mc_addrs_num)
+qede_add_mcast_filters(struct rte_eth_dev *eth_dev,
+		       struct rte_ether_addr *mc_addrs,
+		       uint32_t mc_addrs_num)
 {
 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
@@ -621,14 +626,14 @@ qede_add_mcast_filters(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
 			DP_ERR(edev, "Did not allocate memory for mcast\n");
 			return -ENOMEM;
 		}
-		ether_addr_copy(&mc_addrs[i], &m->mac);
+		rte_ether_addr_copy(&mc_addrs[i], &m->mac);
 		SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
 	}
 	memset(&mcast, 0, sizeof(mcast));
 	mcast.num_mc_addrs = mc_addrs_num;
 	mcast.opcode = ECORE_FILTER_ADD;
 	for (i = 0; i < mc_addrs_num; i++)
-		ether_addr_copy(&mc_addrs[i], (struct ether_addr *)
+		rte_ether_addr_copy(&mc_addrs[i], (struct rte_ether_addr *)
 							&mcast.mac[i]);
 	rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
 	if (rc != ECORE_SUCCESS) {
@@ -653,7 +658,8 @@ static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
 	mcast.opcode = ECORE_FILTER_REMOVE;
 	j = 0;
 	SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
-		ether_addr_copy(&tmp->mac, (struct ether_addr *)&mcast.mac[j]);
+		rte_ether_addr_copy(&tmp->mac,
+				    (struct rte_ether_addr *)&mcast.mac[j]);
 		j++;
 	}
 	rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
@@ -700,19 +706,19 @@ qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
 }
 
 static int
-qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
+qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr,
 		  __rte_unused uint32_t index, __rte_unused uint32_t pool)
 {
 	struct ecore_filter_ucast ucast;
 	int re;
 
-	if (!is_valid_assigned_ether_addr(mac_addr))
+	if (!rte_is_valid_assigned_ether_addr(mac_addr))
 		return -EINVAL;
 
 	qede_set_ucast_cmn_params(&ucast);
 	ucast.opcode = ECORE_FILTER_ADD;
 	ucast.type = ECORE_FILTER_MAC;
-	ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
+	rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)&ucast.mac);
 	re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
 	return re;
 }
@@ -732,7 +738,7 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
 		return;
 	}
 
-	if (!is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
+	if (!rte_is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
 		return;
 
 	qede_set_ucast_cmn_params(&ucast);
@@ -740,14 +746,14 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
 	ucast.type = ECORE_FILTER_MAC;
 
 	/* Use the index maintained by rte */
-	ether_addr_copy(&eth_dev->data->mac_addrs[index],
-			(struct ether_addr *)&ucast.mac);
+	rte_ether_addr_copy(&eth_dev->data->mac_addrs[index],
+			    (struct rte_ether_addr *)&ucast.mac);
 
 	qede_mac_int_ops(eth_dev, &ucast, false);
 }
 
 static int
-qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
+qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr)
 {
 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
@@ -988,7 +994,7 @@ int qede_config_rss(struct rte_eth_dev *eth_dev)
 	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
 		id = i / RTE_RETA_GROUP_SIZE;
 		pos = i % RTE_RETA_GROUP_SIZE;
-		q = i % QEDE_RSS_COUNT(qdev);
+		q = i % QEDE_RSS_COUNT(eth_dev);
 		reta_conf[id].reta[pos] = q;
 	}
 	if (qede_rss_reta_update(eth_dev, &reta_conf[0],
@@ -1162,22 +1168,6 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE(edev);
 
-	/* Check requirements for 100G mode */
-	if (ECORE_IS_CMT(edev)) {
-		if (eth_dev->data->nb_rx_queues < 2 ||
-		    eth_dev->data->nb_tx_queues < 2) {
-			DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
-			return -EINVAL;
-		}
-
-		if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
-		    (eth_dev->data->nb_tx_queues % 2 != 0)) {
-			DP_ERR(edev,
-			       "100G mode needs even no. of RX/TX queues\n");
-			return -EINVAL;
-		}
-	}
-
 	/* We need to have min 1 RX queue.There is no min check in
 	 * rte_eth_dev_configure(), so we are checking it here.
 	 */
@@ -1204,8 +1194,9 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 		return -ENOTSUP;
 
 	qede_dealloc_fp_resc(eth_dev);
-	qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
-	qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
+	qdev->num_tx_queues = eth_dev->data->nb_tx_queues * edev->num_hwfns;
+	qdev->num_rx_queues = eth_dev->data->nb_rx_queues * edev->num_hwfns;
+
 	if (qede_alloc_fp_resc(qdev))
 		return -ENOMEM;
 
@@ -1213,7 +1204,7 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 		eth_dev->data->mtu =
 			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
-			ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
+			RTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
 
 	if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
 		eth_dev->data->scattered_rx = 1;
@@ -1230,7 +1221,12 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 		return ret;
 
 	DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
-			QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
+			QEDE_RSS_COUNT(eth_dev), QEDE_TSS_COUNT(eth_dev));
+
+	if (ECORE_IS_CMT(edev))
+		DP_INFO(edev, "Actual HW queues for CMT mode - RX = %d TX = %d\n",
+			qdev->num_rx_queues, qdev->num_tx_queues);
+
 	return 0;
 }
 
@@ -1250,7 +1246,7 @@ static const struct rte_eth_desc_lim qede_tx_desc_lim = {
 	.nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
 };
 
-static void
+static int
 qede_dev_info_get(struct rte_eth_dev *eth_dev,
 		  struct rte_eth_dev_info *dev_info)
 {
@@ -1272,6 +1268,10 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 	else
 		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
 			QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
+	/* Since CMT mode internally doubles the number of queues */
+	if (ECORE_IS_CMT(edev))
+		dev_info->max_rx_queues = dev_info->max_rx_queues / 2;
+
 	dev_info->max_tx_queues = dev_info->max_rx_queues;
 
 	dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
@@ -1330,6 +1330,8 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
 		speed_cap |= ETH_LINK_SPEED_100G;
 	dev_info->speed_capa = speed_cap;
+
+	return 0;
 }
 
 /* return 0 means link status changed, -1 means not changed */
@@ -1378,33 +1380,39 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
 	return rte_eth_linkstatus_set(eth_dev, &link);
 }
 
-static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
+static int qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
 {
 	struct qede_dev *qdev = eth_dev->data->dev_private;
 	struct ecore_dev *edev = &qdev->edev;
 	enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
+	enum _ecore_status_t ecore_status;
 
 	PMD_INIT_FUNC_TRACE(edev);
 
 	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
 		type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
 
-	qed_configure_filter_rx_mode(eth_dev, type);
+	ecore_status = qed_configure_filter_rx_mode(eth_dev, type);
+
+	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
 }
 
-static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
+static int qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
 {
 	struct qede_dev *qdev = eth_dev->data->dev_private;
 	struct ecore_dev *edev = &qdev->edev;
+	enum _ecore_status_t ecore_status;
 
 	PMD_INIT_FUNC_TRACE(edev);
 
 	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
-		qed_configure_filter_rx_mode(eth_dev,
+		ecore_status = qed_configure_filter_rx_mode(eth_dev,
 				QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
 	else
-		qed_configure_filter_rx_mode(eth_dev,
+		ecore_status = qed_configure_filter_rx_mode(eth_dev,
 				QED_FILTER_RX_MODE_TYPE_REGULAR);
+
+	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
 }
 
 static void qede_poll_sp_sb_cb(void *param)
@@ -1423,7 +1431,6 @@
 	if (rc != 0) {
 		DP_ERR(edev, "Unable to start periodic"
 			     " timer rc %d\n", rc);
-		assert(false && "Unable to start periodic timer");
 	}
 }
 
@@ -1480,7 +1487,7 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
 	struct qede_dev *qdev = eth_dev->data->dev_private;
 	struct ecore_dev *edev = &qdev->edev;
 	struct ecore_eth_stats stats;
-	unsigned int i = 0, j = 0, qid;
+	unsigned int i = 0, j = 0, qid, idx, hw_fn;
 	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
 	struct qede_tx_queue *txq;
 
@@ -1516,44 +1523,59 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
 	eth_stats->oerrors = stats.common.tx_err_drop_pkts;
 
 	/* Queue stats */
-	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(eth_dev),
 				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
-	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(eth_dev),
 				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
-	if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
-	    (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
+	if (rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(eth_dev) ||
+	    txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(eth_dev))
 		DP_VERBOSE(edev, ECORE_MSG_DEBUG,
 		       "Not all the queue stats will be displayed. Set"
 		       " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
 		       " appropriately and retry.\n");
 
-	for_each_rss(qid) {
-		eth_stats->q_ipackets[i] =
-			*(uint64_t *)(
-				((char *)(qdev->fp_array[qid].rxq)) +
-				offsetof(struct qede_rx_queue,
-				rcv_pkts));
-		eth_stats->q_errors[i] =
-			*(uint64_t *)(
-				((char *)(qdev->fp_array[qid].rxq)) +
-				offsetof(struct qede_rx_queue,
-				rx_hw_errors)) +
-			*(uint64_t *)(
-				((char *)(qdev->fp_array[qid].rxq)) +
-				offsetof(struct qede_rx_queue,
-				rx_alloc_errors));
+	for (qid = 0; qid < eth_dev->data->nb_rx_queues; qid++) {
+		eth_stats->q_ipackets[i] = 0;
+		eth_stats->q_errors[i] = 0;
+
+		for_each_hwfn(edev, hw_fn) {
+			idx = qid * edev->num_hwfns + hw_fn;
+
+			eth_stats->q_ipackets[i] +=
+				*(uint64_t *)
+				(((char *)(qdev->fp_array[idx].rxq)) +
+				 offsetof(struct qede_rx_queue,
+					  rcv_pkts));
+			eth_stats->q_errors[i] +=
+				*(uint64_t *)
+				(((char *)(qdev->fp_array[idx].rxq)) +
+				 offsetof(struct qede_rx_queue,
+					  rx_hw_errors)) +
+				*(uint64_t *)
+				(((char *)(qdev->fp_array[idx].rxq)) +
+				 offsetof(struct qede_rx_queue,
+					  rx_alloc_errors));
+		}
+
 		i++;
 		if (i == rxq_stat_cntrs)
 			break;
 	}
 
-	for_each_tss(qid) {
-		txq = qdev->fp_array[qid].txq;
-		eth_stats->q_opackets[j] =
-			*((uint64_t *)(uintptr_t)
-				(((uint64_t)(uintptr_t)(txq)) +
-				 offsetof(struct qede_tx_queue,
-					  xmit_pkts)));
+	for (qid = 0; qid < eth_dev->data->nb_tx_queues; qid++) {
+		eth_stats->q_opackets[j] = 0;
+
+		for_each_hwfn(edev, hw_fn) {
+			idx = qid * edev->num_hwfns + hw_fn;
+
+			txq = qdev->fp_array[idx].txq;
+			eth_stats->q_opackets[j] +=
+				*((uint64_t *)(uintptr_t)
+				  (((uint64_t)(uintptr_t)(txq)) +
+				   offsetof(struct qede_tx_queue,
+					    xmit_pkts)));
+		}
+
 		j++;
 		if (j == txq_stat_cntrs)
 			break;
@@ -1564,18 +1586,18 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
 
 static unsigned
 qede_get_xstats_count(struct qede_dev *qdev) {
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
+
 	if (ECORE_IS_BB(&qdev->edev))
 		return RTE_DIM(qede_xstats_strings) +
 		       RTE_DIM(qede_bb_xstats_strings) +
 		       (RTE_DIM(qede_rxq_xstats_strings) *
-			RTE_MIN(QEDE_RSS_COUNT(qdev),
-				RTE_ETHDEV_QUEUE_STAT_CNTRS));
+			QEDE_RSS_COUNT(dev) * qdev->edev.num_hwfns);
 	else
 		return RTE_DIM(qede_xstats_strings) +
 		       RTE_DIM(qede_ah_xstats_strings) +
 		       (RTE_DIM(qede_rxq_xstats_strings) *
-			RTE_MIN(QEDE_RSS_COUNT(qdev),
-				RTE_ETHDEV_QUEUE_STAT_CNTRS));
+			QEDE_RSS_COUNT(dev));
 }
 
 static int
@@ -1586,45 +1608,43 @@ qede_get_xstats_names(struct rte_eth_dev *dev,
 	struct qede_dev *qdev = dev->data->dev_private;
 	struct ecore_dev *edev = &qdev->edev;
 	const unsigned int stat_cnt = qede_get_xstats_count(qdev);
-	unsigned int i, qid, stat_idx = 0;
-	unsigned int rxq_stat_cntrs;
-
-	if (xstats_names != NULL) {
-		for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
-			snprintf(xstats_names[stat_idx].name,
-				sizeof(xstats_names[stat_idx].name),
-				"%s",
-				qede_xstats_strings[i].name);
+	unsigned int i, qid, hw_fn, stat_idx = 0;
+
+	if (xstats_names == NULL)
+		return stat_cnt;
+
+	for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
+		strlcpy(xstats_names[stat_idx].name,
+			qede_xstats_strings[i].name,
+			sizeof(xstats_names[stat_idx].name));
+		stat_idx++;
+	}
+
+	if (ECORE_IS_BB(edev)) {
+		for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
+			strlcpy(xstats_names[stat_idx].name,
+				qede_bb_xstats_strings[i].name,
+				sizeof(xstats_names[stat_idx].name));
 			stat_idx++;
 		}
-
-		if (ECORE_IS_BB(edev)) {
-			for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
-				snprintf(xstats_names[stat_idx].name,
-					sizeof(xstats_names[stat_idx].name),
-					"%s",
-					qede_bb_xstats_strings[i].name);
-				stat_idx++;
-			}
-		} else {
-			for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
-				snprintf(xstats_names[stat_idx].name,
-					sizeof(xstats_names[stat_idx].name),
-					"%s",
-					qede_ah_xstats_strings[i].name);
-				stat_idx++;
-			}
+	} else {
+		for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
+			strlcpy(xstats_names[stat_idx].name,
+				qede_ah_xstats_strings[i].name,
+				sizeof(xstats_names[stat_idx].name));
+			stat_idx++;
 		}
+	}
 
-		rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
-					 RTE_ETHDEV_QUEUE_STAT_CNTRS);
-		for (qid = 0; qid < rxq_stat_cntrs; qid++) {
+	for (qid = 0; qid < QEDE_RSS_COUNT(dev); qid++) {
+		for_each_hwfn(edev, hw_fn) {
 			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
 				snprintf(xstats_names[stat_idx].name,
-					sizeof(xstats_names[stat_idx].name),
-					"%.4s%d%s",
-					qede_rxq_xstats_strings[i].name, qid,
-					qede_rxq_xstats_strings[i].name + 4);
+					 RTE_ETH_XSTATS_NAME_SIZE,
+					 "%.4s%d.%d%s",
+					 qede_rxq_xstats_strings[i].name,
+					 hw_fn, qid,
+					 qede_rxq_xstats_strings[i].name + 4);
 				stat_idx++;
 			}
 		}
@@ -1641,8 +1661,7 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	struct ecore_dev *edev = &qdev->edev;
 	struct ecore_eth_stats stats;
 	const unsigned int num = qede_get_xstats_count(qdev);
-	unsigned int i, qid, stat_idx = 0;
-	unsigned int rxq_stat_cntrs;
+	unsigned int i, qid, hw_fn, fpidx, stat_idx = 0;
 
 	if (n < num)
 		return num;
@@ -1674,24 +1693,24 @@
 		}
 	}
 
-	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
-				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
-	for (qid = 0; qid < rxq_stat_cntrs; qid++) {
-		for_each_rss(qid) {
+	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
+		for_each_hwfn(edev, hw_fn) {
 			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
-				xstats[stat_idx].value = *(uint64_t *)(
-					((char *)(qdev->fp_array[qid].rxq)) +
+				fpidx = qid * edev->num_hwfns + hw_fn;
+				xstats[stat_idx].value = *(uint64_t *)
+					(((char *)(qdev->fp_array[fpidx].rxq)) +
 					 qede_rxq_xstats_strings[i].offset);
 				xstats[stat_idx].id = stat_idx;
 				stat_idx++;
 			}
+		}
 	}
 
 	return stat_idx;
 }
 
-static void
+static int
 qede_reset_xstats(struct rte_eth_dev *dev)
 {
 	struct qede_dev *qdev = dev->data->dev_private;
@@ -1699,6 +1718,8 @@
 
 	ecore_reset_vport_stats(edev);
 	qede_reset_queue_stats(qdev, true);
+
+	return 0;
 }
 
 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
@@ -1728,39 +1749,49 @@ static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
 	return qede_dev_set_link_state(eth_dev, false);
 }
 
-static void qede_reset_stats(struct rte_eth_dev *eth_dev)
+static int qede_reset_stats(struct rte_eth_dev *eth_dev)
 {
 	struct qede_dev *qdev = eth_dev->data->dev_private;
 	struct ecore_dev *edev = &qdev->edev;
 
 	ecore_reset_vport_stats(edev);
 	qede_reset_queue_stats(qdev, false);
+
+	return 0;
 }
 
-static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
+static int qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
 {
 	enum qed_filter_rx_mode_type type =
 	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+	enum _ecore_status_t ecore_status;
 
 	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
 		type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
 
-	qed_configure_filter_rx_mode(eth_dev, type);
+	ecore_status = qed_configure_filter_rx_mode(eth_dev, type);
+
+	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
 }
 
-static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
+static int qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
 {
+	enum _ecore_status_t ecore_status;
+
 	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
-		qed_configure_filter_rx_mode(eth_dev,
+		ecore_status = qed_configure_filter_rx_mode(eth_dev,
 				QED_FILTER_RX_MODE_TYPE_PROMISC);
 	else
-		qed_configure_filter_rx_mode(eth_dev,
+		ecore_status = qed_configure_filter_rx_mode(eth_dev,
 				QED_FILTER_RX_MODE_TYPE_REGULAR);
+
+	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
 }
 
 static int
-qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
-		      uint32_t mc_addrs_num)
+qede_set_mc_addr_list(struct rte_eth_dev *eth_dev,
+		      struct rte_ether_addr *mc_addrs,
+		      uint32_t mc_addrs_num)
 {
 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
@@ -1773,7 +1804,7 @@ qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
 	}
 
 	for (i = 0; i < mc_addrs_num; i++) {
-		if (!is_multicast_ether_addr(&mc_addrs[i])) {
+		if (!rte_is_multicast_ether_addr(&mc_addrs[i])) {
 			DP_ERR(edev, "Not a valid multicast MAC\n");
 			return -EINVAL;
 		}
@@ -1938,7 +1969,8 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
 		RTE_PTYPE_UNKNOWN
 	};
 
-	if (eth_dev->rx_pkt_burst == qede_recv_pkts)
+	if (eth_dev->rx_pkt_burst == qede_recv_pkts ||
+	    eth_dev->rx_pkt_burst == qede_recv_pkts_cmt)
 		return ptypes;
 
 	return NULL;
@@ -1968,8 +2000,7 @@ int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
 	uint32_t *key = (uint32_t *)rss_conf->rss_key;
 	uint64_t hf = rss_conf->rss_hf;
 	uint8_t len = rss_conf->rss_key_len;
-	uint8_t idx;
-	uint8_t i;
+	uint8_t idx, i, j, fpidx;
 	int rc;
 
 	memset(&vport_update_params, 0, sizeof(vport_update_params));
@@ -2003,14 +2034,18 @@ int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
 	/* tbl_size has to be set with capabilities */
 	rss_params.rss_table_size_log = 7;
 	vport_update_params.vport_id = 0;
-	/* pass the L2 handles instead of qids */
-	for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
-		idx = i % QEDE_RSS_COUNT(qdev);
-		rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
-	}
-	vport_update_params.rss_params = &rss_params;
 
 	for_each_hwfn(edev, i) {
+		/* pass the L2 handles instead of qids */
+		for (j = 0 ; j < ECORE_RSS_IND_TABLE_SIZE ; j++) {
+			idx = j % QEDE_RSS_COUNT(eth_dev);
+			fpidx = idx * edev->num_hwfns + i;
+			rss_params.rss_ind_table[j] =
+				qdev->fp_array[fpidx].rxq->handle;
+		}
+
+		vport_update_params.rss_params = &rss_params;
+
 		p_hwfn = &edev->hwfns[i];
 		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
 		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
@@ -2062,61 +2097,6 @@ static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
 	return 0;
 }
 
-static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
-				     struct ecore_rss_params *rss)
-{
-	int i, fn;
-	bool rss_mode = 1; /* enable */
-	struct ecore_queue_cid *cid;
-	struct ecore_rss_params *t_rss;
-
-	/* In regular scenario, we'd simply need to take input handlers.
-	 * But in CMT, we'd have to split the handlers according to the
-	 * engine they were configured on. We'd then have to understand
-	 * whether RSS is really required, since 2-queues on CMT doesn't
-	 * require RSS.
-	 */
-
-	/* CMT should be round-robin */
-	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
-		cid = rss->rss_ind_table[i];
-
-		if (cid->p_owner == ECORE_LEADING_HWFN(edev))
-			t_rss = &rss[0];
-		else
-			t_rss = &rss[1];
-
-		t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
-	}
-
-	t_rss = &rss[1];
-	t_rss->update_rss_ind_table = 1;
-	t_rss->rss_table_size_log = 7;
-	t_rss->update_rss_config = 1;
-
-	/* Make sure RSS is actually required */
-	for_each_hwfn(edev, fn) {
-		for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
-		     i++) {
-			if (rss[fn].rss_ind_table[i] !=
-			    rss[fn].rss_ind_table[0])
-				break;
-		}
-
-		if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
-			DP_INFO(edev,
-				"CMT - 1 queue per-hwfn; Disabling RSS\n");
-			rss_mode = 0;
-			goto out;
-		}
-	}
-
-out:
-	t_rss->rss_enable = rss_mode;
-
-	return rss_mode;
-}
-
 int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
 			 struct rte_eth_rss_reta_entry64 *reta_conf,
 			 uint16_t reta_size)
@@ -2125,8 +2105,8 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	struct ecore_sp_vport_update_params vport_update_params;
 	struct ecore_rss_params *params;
+	uint16_t i, j, idx, fid, shift;
 	struct ecore_hwfn *p_hwfn;
-	uint16_t i, idx, shift;
 	uint8_t entry;
 	int rc = 0;
 
@@ -2137,40 +2117,36 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
 	}
 
 	memset(&vport_update_params, 0, sizeof(vport_update_params));
-	params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
-			     RTE_CACHE_LINE_SIZE);
+	params = rte_zmalloc("qede_rss", sizeof(*params), RTE_CACHE_LINE_SIZE);
 	if (params == NULL) {
 		DP_ERR(edev, "failed to allocate memory\n");
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
-		if (reta_conf[idx].mask & (1ULL << shift)) {
-			entry = reta_conf[idx].reta[shift];
-			/* Pass rxq handles to ecore */
-			params->rss_ind_table[i] =
-				qdev->fp_array[entry].rxq->handle;
-			/* Update the local copy for RETA query command */
-			qdev->rss_ind_table[i] = entry;
-		}
-	}
-
 	params->update_rss_ind_table = 1;
 	params->rss_table_size_log = 7;
 	params->update_rss_config = 1;
 
-	/* Fix up RETA for CMT mode device */
-	if (ECORE_IS_CMT(edev))
-		qdev->rss_enable = qede_update_rss_parm_cmt(edev,
-							    params);
 	vport_update_params.vport_id = 0;
 	/* Use the current value of rss_enable */
 	params->rss_enable = qdev->rss_enable;
 	vport_update_params.rss_params = params;
 
 	for_each_hwfn(edev, i) {
+		for (j = 0; j < reta_size; j++) {
+			idx = j / RTE_RETA_GROUP_SIZE;
+			shift = j % RTE_RETA_GROUP_SIZE;
+			if (reta_conf[idx].mask & (1ULL << shift)) {
+				entry = reta_conf[idx].reta[shift];
+				fid = entry * edev->num_hwfns + i;
+				/* Pass rxq handles to ecore */
+				params->rss_ind_table[j] =
+					qdev->fp_array[fid].rxq->handle;
+				/* Update the local copy for RETA query cmd */
+				qdev->rss_ind_table[j] = entry;
+			}
+		}
+
 		p_hwfn = &edev->hwfns[i];
 		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
 		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
@@ -2228,12 +2204,16 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	int i, rc;
 
 	PMD_INIT_FUNC_TRACE(edev);
-	qede_dev_info_get(dev, &dev_info);
+	rc = qede_dev_info_get(dev, &dev_info);
+	if (rc != 0) {
+		DP_ERR(edev, "Error during getting ethernet device info\n");
+		return rc;
+	}
 	max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
 	frame_size = max_rx_pkt_len;
-	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
+	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) {
DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n", - mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN - + mtu, dev_info.max_rx_pktlen - RTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD); return -EINVAL; } @@ -2257,7 +2237,7 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) qdev->mtu = mtu; /* Fix up RX buf size for all queues of the port */ - for_each_rss(i) { + for (i = 0; i < qdev->num_rx_queues; i++) { fp = &qdev->fp_array[i]; if (fp->rxq != NULL) { bufsz = (uint16_t)rte_pktmbuf_data_room_size( @@ -2273,7 +2253,7 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) fp->rxq->rx_buf_size = rc; } } - if (max_rx_pkt_len > ETHER_MAX_LEN) + if (max_rx_pkt_len > RTE_ETHER_MAX_LEN) dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; else dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; @@ -2286,9 +2266,13 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) /* update max frame size */ dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len; /* Reassign back */ - dev->rx_pkt_burst = qede_recv_pkts; - dev->tx_pkt_burst = qede_xmit_pkts; - + if (ECORE_IS_CMT(edev)) { + dev->rx_pkt_burst = qede_recv_pkts_cmt; + dev->tx_pkt_burst = qede_xmit_pkts_cmt; + } else { + dev->rx_pkt_burst = qede_recv_pkts; + dev->tx_pkt_burst = qede_xmit_pkts; + } return 0; } @@ -2407,7 +2391,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) struct qed_slowpath_params params; static bool do_once = true; uint8_t bulletin_change; - uint8_t vf_mac[ETHER_ADDR_LEN]; + uint8_t vf_mac[RTE_ETHER_ADDR_LEN]; uint8_t is_mac_forced; bool is_mac_exist; /* Fix up ecore debug level */ @@ -2429,10 +2413,6 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) pci_addr.bus, pci_addr.devid, pci_addr.function, eth_dev->data->port_id); - eth_dev->rx_pkt_burst = qede_recv_pkts; - eth_dev->tx_pkt_burst = qede_xmit_pkts; - eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts; - if (rte_eal_process_type() != RTE_PROC_PRIMARY) { DP_ERR(edev, "Skipping device init from secondary process\n"); return 0; @@ -2490,6 +2470,16 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) strncpy((char *)params.name, QEDE_PMD_VER_PREFIX, QEDE_PMD_DRV_VER_STR_SIZE); + if (ECORE_IS_CMT(edev)) { + eth_dev->rx_pkt_burst = qede_recv_pkts_cmt; + eth_dev->tx_pkt_burst = qede_xmit_pkts_cmt; + } else { + eth_dev->rx_pkt_burst = qede_recv_pkts; + eth_dev->tx_pkt_burst = qede_xmit_pkts; + } + + eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts; + /* For CMT mode device do periodic polling for slowpath events. * This is required since uio device uses only one MSI-x * interrupt vector but we need one for each engine. @@ -2537,7 +2527,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) /* Allocate memory for storing MAC addr */ eth_dev->data->mac_addrs = rte_zmalloc(edev->name, - (ETHER_ADDR_LEN * + (RTE_ETHER_ADDR_LEN * adapter->dev_info.num_mac_filters), RTE_CACHE_LINE_SIZE); @@ -2551,10 +2541,10 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) } if (!is_vf) { - ether_addr_copy((struct ether_addr *)edev->hwfns[0]. + rte_ether_addr_copy((struct rte_ether_addr *)edev->hwfns[0]. 
hw_info.hw_mac_addr, ð_dev->data->mac_addrs[0]); - ether_addr_copy(ð_dev->data->mac_addrs[0], + rte_ether_addr_copy(ð_dev->data->mac_addrs[0], &adapter->primary_mac); } else { ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev), @@ -2567,10 +2557,12 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) &is_mac_forced); if (is_mac_exist) { DP_INFO(edev, "VF macaddr received from PF\n"); - ether_addr_copy((struct ether_addr *)&vf_mac, - ð_dev->data->mac_addrs[0]); - ether_addr_copy(ð_dev->data->mac_addrs[0], - &adapter->primary_mac); + rte_ether_addr_copy( + (struct rte_ether_addr *)&vf_mac, + ð_dev->data->mac_addrs[0]); + rte_ether_addr_copy( + ð_dev->data->mac_addrs[0], + &adapter->primary_mac); } else { DP_ERR(edev, "No VF macaddr assigned\n"); } @@ -2593,7 +2585,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) SLIST_INIT(&adapter->vlan_list_head); SLIST_INIT(&adapter->uc_list_head); SLIST_INIT(&adapter->mc_list_head); - adapter->mtu = ETHER_MTU; + adapter->mtu = RTE_ETHER_MTU; adapter->vport_started = false; /* VF tunnel offloads is enabled by default in PF driver */