X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnxt%2Fbnxt_ethdev.c;h=c8bdc311dd2bef96cc48c506602f93a9a96fb776;hb=b7435d660a8c;hp=9d5974568578665edfbd2def1ea20c8dfb934614;hpb=49947a13ba9e2dfa37bc6c20c1a3313470493134;p=dpdk.git diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c index 9d59745685..c8bdc311dd 100644 --- a/drivers/net/bnxt/bnxt_ethdev.c +++ b/drivers/net/bnxt/bnxt_ethdev.c @@ -53,6 +53,7 @@ #include "bnxt_txr.h" #include "bnxt_vnic.h" #include "hsi_struct_def_dpdk.h" +#include "bnxt_nvm_defs.h" #define DRV_MODULE_NAME "bnxt" static const char bnxt_version[] = @@ -60,6 +61,7 @@ static const char bnxt_version[] = #define PCI_VENDOR_ID_BROADCOM 0x14E4 +#define BROADCOM_DEV_ID_STRATUS_NIC_VF 0x1609 #define BROADCOM_DEV_ID_STRATUS_NIC 0x1614 #define BROADCOM_DEV_ID_57414_VF 0x16c1 #define BROADCOM_DEV_ID_57301 0x16c8 @@ -96,6 +98,8 @@ static const char bnxt_version[] = #define BROADCOM_DEV_ID_57416_MF 0x16ee static const struct rte_pci_id bnxt_pci_id_map[] = { + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, + BROADCOM_DEV_ID_STRATUS_NIC_VF) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) }, @@ -198,8 +202,16 @@ static int bnxt_init_chip(struct bnxt *bp) { unsigned int i, rss_idx, fw_idx; struct rte_eth_link new; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint32_t intr_vector = 0; + uint32_t queue_id, base = BNXT_MISC_VEC_ID; + uint32_t vec = BNXT_MISC_VEC_ID; int rc; + /* disable uio/vfio intr/eventfd mapping */ + rte_intr_disable(intr_handle); + if (bp->eth_dev->data->mtu > ETHER_MTU) { bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 1; bp->flags |= BNXT_FLAG_JUMBO; @@ -295,13 +307,55 @@ static int bnxt_init_chip(struct bnxt *bp) else bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0); } - rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0]); + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL); if (rc) { RTE_LOG(ERR, PMD, "HWRM cfa l2 rx mask failure rc: %x\n", rc); goto err_out; } + /* check and configure queue intr-vector mapping */ + if ((rte_intr_cap_multiple(intr_handle) || + !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) && + bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) { + intr_vector = bp->eth_dev->data->nb_rx_queues; + RTE_LOG(INFO, PMD, "%s(): intr_vector = %d\n", __func__, + intr_vector); + if (intr_vector > bp->rx_cp_nr_rings) { + RTE_LOG(ERR, PMD, "At most %d intr queues supported", + bp->rx_cp_nr_rings); + return -ENOTSUP; + } + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -1; + } + + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + bp->eth_dev->data->nb_rx_queues * + sizeof(int), 0); + if (intr_handle->intr_vec == NULL) { + RTE_LOG(ERR, PMD, "Failed to allocate %d rx_queues" + " intr_vec", bp->eth_dev->data->nb_rx_queues); + return -ENOMEM; + } + RTE_LOG(DEBUG, PMD, "%s(): intr_handle->intr_vec = %p " + "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n", + __func__, intr_handle->intr_vec, intr_handle->nb_efd, + intr_handle->max_intr); + } + + for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues; + queue_id++) { + intr_handle->intr_vec[queue_id] = vec; + if (vec < base + intr_handle->nb_efd - 1) + vec++; + } + + /* enable uio/vfio intr/eventfd mapping */ + 
rte_intr_enable(intr_handle); + rc = bnxt_get_hwrm_link_config(bp, &new); if (rc) { RTE_LOG(ERR, PMD, "HWRM Get link config failure rc: %x\n", rc); @@ -357,6 +411,7 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, { struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; uint16_t max_vnics, i, j, vpool, vrxq; + unsigned int max_rx_rings; dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); @@ -367,9 +422,14 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, /* PF/VF specifics */ if (BNXT_PF(bp)) dev_info->max_vfs = bp->pdev->max_vfs; - dev_info->max_rx_queues = bp->max_rx_rings; - dev_info->max_tx_queues = bp->max_tx_rings; + max_rx_rings = RTE_MIN(bp->max_vnics, RTE_MIN(bp->max_l2_ctx, + RTE_MIN(bp->max_rsscos_ctx, + bp->max_stat_ctx))); + /* For the sake of symmetry, max_rx_queues = max_tx_queues */ + dev_info->max_rx_queues = max_rx_rings; + dev_info->max_tx_queues = max_rx_rings; dev_info->reta_size = bp->max_rsscos_ctx; + dev_info->hash_key_size = 40; max_vnics = bp->max_vnics; /* Fast path specifics */ @@ -411,6 +471,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, }; eth_dev->data->dev_conf.intr_conf.lsc = 1; + eth_dev->data->dev_conf.intr_conf.rxq = 1; + /* *INDENT-ON* */ /* @@ -607,7 +669,7 @@ static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, if (filter->mac_index == index) { STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next); - bnxt_hwrm_clear_filter(bp, filter); + bnxt_hwrm_clear_l2_filter(bp, filter); filter->mac_index = INVALID_MAC_INDEX; memset(&filter->l2_addr, 0, ETHER_ADDR_LEN); @@ -654,7 +716,7 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev, STAILQ_INSERT_TAIL(&vnic->filter, filter, next); filter->mac_index = index; memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN); - return bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter); + return bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); } int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) @@ -703,7 +765,7 @@ static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev) vnic = &bp->vnic_info[0]; vnic->flags |= BNXT_VNIC_INFO_PROMISC; - bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic); + bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); } static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev) @@ -717,7 +779,7 @@ static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev) vnic = &bp->vnic_info[0]; vnic->flags &= ~BNXT_VNIC_INFO_PROMISC; - bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic); + bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); } static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev) @@ -731,7 +793,7 @@ static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev) vnic = &bp->vnic_info[0]; vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; - bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic); + bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); } static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev) @@ -745,7 +807,7 @@ static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev) vnic = &bp->vnic_info[0]; vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; - bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic); + bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); } static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, @@ -824,11 +886,15 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, */ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { if (!rss_conf->rss_hf) - return -EINVAL; + RTE_LOG(ERR, PMD, "Hash type NONE\n"); } else { if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT) 
return -EINVAL; } + + bp->flags |= BNXT_FLAG_UPDATE_HASH; + memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf)); + if (rss_conf->rss_hf & ETH_RSS_IPV4) hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) @@ -1144,7 +1210,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) /* Must delete the filter */ STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next); - bnxt_hwrm_clear_filter(bp, filter); + bnxt_hwrm_clear_l2_filter(bp, filter); STAILQ_INSERT_TAIL( &bp->free_filter_list, filter, next); @@ -1170,7 +1236,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) memcpy(new_filter->l2_addr, filter->l2_addr, ETHER_ADDR_LEN); /* MAC only filter */ - rc = bnxt_hwrm_set_filter(bp, + rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, new_filter); if (rc) @@ -1222,7 +1288,7 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) /* Must delete the MAC filter */ STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next); - bnxt_hwrm_clear_filter(bp, filter); + bnxt_hwrm_clear_l2_filter(bp, filter); filter->l2_ovlan = 0; STAILQ_INSERT_TAIL( &bp->free_filter_list, @@ -1245,8 +1311,9 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) new_filter->l2_ovlan = vlan_id; new_filter->l2_ovlan_mask = 0xF000; new_filter->enables |= en; - rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, - new_filter); + rc = bnxt_hwrm_set_l2_filter(bp, + vnic->fw_vnic_id, + new_filter); if (rc) goto exit; RTE_LOG(INFO, PMD, @@ -1325,7 +1392,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr) /* Default Filter is at Index 0 */ if (filter->mac_index != 0) continue; - rc = bnxt_hwrm_clear_filter(bp, filter); + rc = bnxt_hwrm_clear_l2_filter(bp, filter); if (rc) break; memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN); @@ -1334,7 +1401,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr) filter->enables |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK; - rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter); + rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); if (rc) break; filter->mac_index = 0; @@ -1369,7 +1436,7 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, vnic->mc_addr_cnt = i; allmulti: - return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic); + return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); } static int @@ -1514,6 +1581,694 @@ bnxt_dev_led_off_op(struct rte_eth_dev *dev) return bnxt_hwrm_port_led_cfg(bp, false); } +static uint32_t +bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + uint32_t desc = 0, raw_cons = 0, cons; + struct bnxt_cp_ring_info *cpr; + struct bnxt_rx_queue *rxq; + struct rx_pkt_cmpl *rxcmp; + uint16_t cmp_type; + uint8_t cmp = 1; + bool valid; + + rxq = dev->data->rx_queues[rx_queue_id]; + cpr = rxq->cp_ring; + valid = cpr->valid; + + while (raw_cons < rxq->nb_rx_desc) { + cons = RING_CMP(cpr->cp_ring_struct, raw_cons); + rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; + + if (!CMPL_VALID(rxcmp, valid)) + goto nothing_to_do; + valid = FLIP_VALID(cons, cpr->cp_ring_struct->ring_mask, valid); + cmp_type = CMP_TYPE(rxcmp); + if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_END) { + cmp = (rte_le_to_cpu_32( + ((struct rx_tpa_end_cmpl *) + (rxcmp))->agg_bufs_v1) & + RX_TPA_END_CMPL_AGG_BUFS_MASK) >> + RX_TPA_END_CMPL_AGG_BUFS_SFT; + desc++; + } else if (cmp_type == 0x11) { + desc++; + cmp = (rxcmp->agg_bufs_v1 & + 
RX_PKT_CMPL_AGG_BUFS_MASK) >> + RX_PKT_CMPL_AGG_BUFS_SFT; + } else { + cmp = 1; + } +nothing_to_do: + raw_cons += cmp ? cmp : 2; + } + + return desc; +} + +static int +bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset) +{ + struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue; + struct bnxt_rx_ring_info *rxr; + struct bnxt_cp_ring_info *cpr; + struct bnxt_sw_rx_bd *rx_buf; + struct rx_pkt_cmpl *rxcmp; + uint32_t cons, cp_cons; + + if (!rxq) + return -EINVAL; + + cpr = rxq->cp_ring; + rxr = rxq->rx_ring; + + if (offset >= rxq->nb_rx_desc) + return -EINVAL; + + cons = RING_CMP(cpr->cp_ring_struct, offset); + cp_cons = cpr->cp_raw_cons; + rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; + + if (cons > cp_cons) { + if (CMPL_VALID(rxcmp, cpr->valid)) + return RTE_ETH_RX_DESC_DONE; + } else { + if (CMPL_VALID(rxcmp, !cpr->valid)) + return RTE_ETH_RX_DESC_DONE; + } + rx_buf = &rxr->rx_buf_ring[cons]; + if (rx_buf->mbuf == NULL) + return RTE_ETH_RX_DESC_UNAVAIL; + + + return RTE_ETH_RX_DESC_AVAIL; +} + +static int +bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset) +{ + struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue; + struct bnxt_tx_ring_info *txr; + struct bnxt_cp_ring_info *cpr; + struct bnxt_sw_tx_bd *tx_buf; + struct tx_pkt_cmpl *txcmp; + uint32_t cons, cp_cons; + + if (!txq) + return -EINVAL; + + cpr = txq->cp_ring; + txr = txq->tx_ring; + + if (offset >= txq->nb_tx_desc) + return -EINVAL; + + cons = RING_CMP(cpr->cp_ring_struct, offset); + txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; + cp_cons = cpr->cp_raw_cons; + + if (cons > cp_cons) { + if (CMPL_VALID(txcmp, cpr->valid)) + return RTE_ETH_TX_DESC_UNAVAIL; + } else { + if (CMPL_VALID(txcmp, !cpr->valid)) + return RTE_ETH_TX_DESC_UNAVAIL; + } + tx_buf = &txr->tx_buf_ring[cons]; + if (tx_buf->mbuf == NULL) + return RTE_ETH_TX_DESC_DONE; + + return RTE_ETH_TX_DESC_FULL; +} + +static struct bnxt_filter_info * +bnxt_match_and_validate_ether_filter(struct bnxt *bp, + struct rte_eth_ethertype_filter *efilter, + struct bnxt_vnic_info *vnic0, + struct bnxt_vnic_info *vnic, + int *ret) +{ + struct bnxt_filter_info *mfilter = NULL; + int match = 0; + *ret = 0; + + if (efilter->ether_type != ETHER_TYPE_IPv4 && + efilter->ether_type != ETHER_TYPE_IPv6) { + RTE_LOG(ERR, PMD, "unsupported ether_type(0x%04x) in" + " ethertype filter.", efilter->ether_type); + *ret = -EINVAL; + } + if (efilter->queue >= bp->rx_nr_rings) { + RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue); + *ret = -EINVAL; + } + + vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); + vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]); + if (vnic == NULL) { + RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue); + *ret = -EINVAL; + } + + if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) { + STAILQ_FOREACH(mfilter, &vnic0->filter, next) { + if ((!memcmp(efilter->mac_addr.addr_bytes, + mfilter->l2_addr, ETHER_ADDR_LEN) && + mfilter->flags == + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP && + mfilter->ethertype == efilter->ether_type)) { + match = 1; + break; + } + } + } else { + STAILQ_FOREACH(mfilter, &vnic->filter, next) + if ((!memcmp(efilter->mac_addr.addr_bytes, + mfilter->l2_addr, ETHER_ADDR_LEN) && + mfilter->ethertype == efilter->ether_type && + mfilter->flags == + HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) { + match = 1; + break; + } + } + + if (match) + *ret = -EEXIST; + + return mfilter; +} + +static int +bnxt_ethertype_filter(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct bnxt *bp = 
(struct bnxt *)dev->data->dev_private; + struct rte_eth_ethertype_filter *efilter = + (struct rte_eth_ethertype_filter *)arg; + struct bnxt_filter_info *bfilter, *filter1; + struct bnxt_vnic_info *vnic, *vnic0; + int ret; + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.", + filter_op); + return -EINVAL; + } + + vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); + vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]); + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + bnxt_match_and_validate_ether_filter(bp, efilter, + vnic0, vnic, &ret); + if (ret < 0) + return ret; + + bfilter = bnxt_get_unused_filter(bp); + if (bfilter == NULL) { + RTE_LOG(ERR, PMD, + "Not enough resources for a new filter.\n"); + return -ENOMEM; + } + bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER; + memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes, + ETHER_ADDR_LEN); + memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes, + ETHER_ADDR_LEN); + bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR; + bfilter->ethertype = efilter->ether_type; + bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; + + filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0); + if (filter1 == NULL) { + ret = -1; + goto cleanup; + } + bfilter->enables |= + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; + bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id; + + bfilter->dst_id = vnic->fw_vnic_id; + + if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) { + bfilter->flags = + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; + } + + ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter); + if (ret) + goto cleanup; + STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next); + break; + case RTE_ETH_FILTER_DELETE: + filter1 = bnxt_match_and_validate_ether_filter(bp, efilter, + vnic0, vnic, &ret); + if (ret == -EEXIST) { + ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1); + + STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info, + next); + bnxt_free_filter(bp, filter1); + } else if (ret == 0) { + RTE_LOG(ERR, PMD, "No matching filter found\n"); + } + break; + default: + RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op); + ret = -EINVAL; + goto error; + } + return ret; +cleanup: + bnxt_free_filter(bp, bfilter); +error: + return ret; +} + +static inline int +parse_ntuple_filter(struct bnxt *bp, + struct rte_eth_ntuple_filter *nfilter, + struct bnxt_filter_info *bfilter) +{ + uint32_t en = 0; + + if (nfilter->queue >= bp->rx_nr_rings) { + RTE_LOG(ERR, PMD, "Invalid queue %d\n", nfilter->queue); + return -EINVAL; + } + + switch (nfilter->dst_port_mask) { + case UINT16_MAX: + bfilter->dst_port_mask = -1; + bfilter->dst_port = nfilter->dst_port; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT | + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; + break; + default: + RTE_LOG(ERR, PMD, "invalid dst_port mask."); + return -EINVAL; + } + + bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; + en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; + + switch (nfilter->proto_mask) { + case UINT8_MAX: + if (nfilter->proto == 17) /* IPPROTO_UDP */ + bfilter->ip_protocol = 17; + else if (nfilter->proto == 6) /* IPPROTO_TCP */ + bfilter->ip_protocol = 6; + else + return -EINVAL; + en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; + break; + default: + RTE_LOG(ERR, PMD, "invalid protocol mask."); + return -EINVAL; + } + + switch (nfilter->dst_ip_mask) { + case UINT32_MAX: + bfilter->dst_ipaddr_mask[0] = -1; + bfilter->dst_ipaddr[0] = nfilter->dst_ip; + en |= 
NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR | + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; + break; + default: + RTE_LOG(ERR, PMD, "invalid dst_ip mask."); + return -EINVAL; + } + + switch (nfilter->src_ip_mask) { + case UINT32_MAX: + bfilter->src_ipaddr_mask[0] = -1; + bfilter->src_ipaddr[0] = nfilter->src_ip; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; + break; + default: + RTE_LOG(ERR, PMD, "invalid src_ip mask."); + return -EINVAL; + } + + switch (nfilter->src_port_mask) { + case UINT16_MAX: + bfilter->src_port_mask = -1; + bfilter->src_port = nfilter->src_port; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; + break; + default: + RTE_LOG(ERR, PMD, "invalid src_port mask."); + return -EINVAL; + } + + //TODO Priority + //nfilter->priority = (uint8_t)filter->priority; + + bfilter->enables = en; + return 0; +} + +static struct bnxt_filter_info* +bnxt_match_ntuple_filter(struct bnxt_vnic_info *vnic, + struct bnxt_filter_info *bfilter) +{ + struct bnxt_filter_info *mfilter = NULL; + + STAILQ_FOREACH(mfilter, &vnic->filter, next) { + if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] && + bfilter->src_ipaddr_mask[0] == + mfilter->src_ipaddr_mask[0] && + bfilter->src_port == mfilter->src_port && + bfilter->src_port_mask == mfilter->src_port_mask && + bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] && + bfilter->dst_ipaddr_mask[0] == + mfilter->dst_ipaddr_mask[0] && + bfilter->dst_port == mfilter->dst_port && + bfilter->dst_port_mask == mfilter->dst_port_mask && + bfilter->flags == mfilter->flags && + bfilter->enables == mfilter->enables) + return mfilter; + } + return NULL; +} + +static int +bnxt_cfg_ntuple_filter(struct bnxt *bp, + struct rte_eth_ntuple_filter *nfilter, + enum rte_filter_op filter_op) +{ + struct bnxt_filter_info *bfilter, *mfilter, *filter1; + struct bnxt_vnic_info *vnic, *vnic0; + int ret; + + if (nfilter->flags != RTE_5TUPLE_FLAGS) { + RTE_LOG(ERR, PMD, "only 5tuple is supported."); + return -EINVAL; + } + + if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) { + RTE_LOG(ERR, PMD, "Ntuple filter: TCP flags not supported\n"); + return -EINVAL; + } + + bfilter = bnxt_get_unused_filter(bp); + if (bfilter == NULL) { + RTE_LOG(ERR, PMD, + "Not enough resources for a new filter.\n"); + return -ENOMEM; + } + ret = parse_ntuple_filter(bp, nfilter, bfilter); + if (ret < 0) + goto free_filter; + + vnic = STAILQ_FIRST(&bp->ff_pool[nfilter->queue]); + vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); + filter1 = STAILQ_FIRST(&vnic0->filter); + if (filter1 == NULL) { + ret = -1; + goto free_filter; + } + + bfilter->dst_id = vnic->fw_vnic_id; + bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id; + bfilter->enables |= + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; + bfilter->ethertype = 0x800; + bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; + + mfilter = bnxt_match_ntuple_filter(vnic, bfilter); + + if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD) { + RTE_LOG(ERR, PMD, "filter exists."); + ret = -EEXIST; + goto free_filter; + } + if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) { + RTE_LOG(ERR, PMD, "filter doesn't exist."); + ret = -ENOENT; + goto free_filter; + } + + if (filter_op == RTE_ETH_FILTER_ADD) { + bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER; + ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter); + if (ret) + goto free_filter; + STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next); + } else { + ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter); + + 
STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, + next); + bnxt_free_filter(bp, mfilter); + bfilter->fw_l2_filter_id = -1; + bnxt_free_filter(bp, bfilter); + } + + return 0; +free_filter: + bfilter->fw_l2_filter_id = -1; + bnxt_free_filter(bp, bfilter); + return ret; +} + +static int +bnxt_ntuple_filter(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + int ret; + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = bnxt_cfg_ntuple_filter(bp, + (struct rte_eth_ntuple_filter *)arg, + filter_op); + break; + case RTE_ETH_FILTER_DELETE: + ret = bnxt_cfg_ntuple_filter(bp, + (struct rte_eth_ntuple_filter *)arg, + filter_op); + break; + default: + RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; +} +static int +bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, void *arg) +{ + int ret = 0; + + switch (filter_type) { + case RTE_ETH_FILTER_FDIR: + case RTE_ETH_FILTER_TUNNEL: + /* FALLTHROUGH */ + RTE_LOG(ERR, PMD, + "filter type: %d: To be implemented\n", filter_type); + break; + case RTE_ETH_FILTER_NTUPLE: + ret = bnxt_ntuple_filter(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_ETHERTYPE: + ret = bnxt_ethertype_filter(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + *(const void **)arg = &bnxt_flow_ops; + break; + default: + RTE_LOG(ERR, PMD, + "Filter type (%d) not supported", filter_type); + ret = -EINVAL; + break; + } + return ret; +} + +static const uint32_t * +bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + RTE_PTYPE_L2_ETHER_VLAN, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_L4_ICMP, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_INNER_L4_ICMP, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == bnxt_recv_pkts) + return ptypes; + return NULL; +} + + + +static int +bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + int rc; + uint32_t dir_entries; + uint32_t entry_length; + + RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x\n", + __func__, bp->pdev->addr.domain, bp->pdev->addr.bus, + bp->pdev->addr.devid, bp->pdev->addr.function); + + rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); + if (rc != 0) + return rc; + + return dir_entries * entry_length; +} + +static int +bnxt_get_eeprom_op(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *in_eeprom) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + uint32_t index; + uint32_t offset; + + RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d " + "len = %d\n", __func__, bp->pdev->addr.domain, + bp->pdev->addr.bus, bp->pdev->addr.devid, + bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length); + + if (in_eeprom->offset == 0) /* special offset value to get directory */ + return bnxt_get_nvram_directory(bp, in_eeprom->length, + in_eeprom->data); + + index = in_eeprom->offset >> 24; + offset = in_eeprom->offset & 0xffffff; + + if (index != 0) + 
return bnxt_hwrm_get_nvram_item(bp, index - 1, offset, + in_eeprom->length, in_eeprom->data); + + return 0; +} + +static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) +{ + switch (dir_type) { + case BNX_DIR_TYPE_CHIMP_PATCH: + case BNX_DIR_TYPE_BOOTCODE: + case BNX_DIR_TYPE_BOOTCODE_2: + case BNX_DIR_TYPE_APE_FW: + case BNX_DIR_TYPE_APE_PATCH: + case BNX_DIR_TYPE_KONG_FW: + case BNX_DIR_TYPE_KONG_PATCH: + case BNX_DIR_TYPE_BONO_FW: + case BNX_DIR_TYPE_BONO_PATCH: + return true; + } + + return false; +} + +static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) +{ + switch (dir_type) { + case BNX_DIR_TYPE_AVS: + case BNX_DIR_TYPE_EXP_ROM_MBA: + case BNX_DIR_TYPE_PCIE: + case BNX_DIR_TYPE_TSCF_UCODE: + case BNX_DIR_TYPE_EXT_PHY: + case BNX_DIR_TYPE_CCM: + case BNX_DIR_TYPE_ISCSI_BOOT: + case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: + case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: + return true; + } + + return false; +} + +static bool bnxt_dir_type_is_executable(uint16_t dir_type) +{ + return bnxt_dir_type_is_ape_bin_format(dir_type) || + bnxt_dir_type_is_other_exec_format(dir_type); +} + +static int +bnxt_set_eeprom_op(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *in_eeprom) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + uint8_t index, dir_op; + uint16_t type, ext, ordinal, attr; + + RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d " + "len = %d\n", __func__, bp->pdev->addr.domain, + bp->pdev->addr.bus, bp->pdev->addr.devid, + bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length); + + if (!BNXT_PF(bp)) { + RTE_LOG(ERR, PMD, "NVM write not supported from a VF\n"); + return -EINVAL; + } + + type = in_eeprom->magic >> 16; + + if (type == 0xffff) { /* special value for directory operations */ + index = in_eeprom->magic & 0xff; + dir_op = in_eeprom->magic >> 8; + if (index == 0) + return -EINVAL; + switch (dir_op) { + case 0x0e: /* erase */ + if (in_eeprom->offset != ~in_eeprom->magic) + return -EINVAL; + return bnxt_hwrm_erase_nvram_directory(bp, index - 1); + default: + return -EINVAL; + } + } + + /* Create or re-write an NVM item: */ + if (bnxt_dir_type_is_executable(type) == true) + return -EOPNOTSUPP; + ext = in_eeprom->magic & 0xffff; + ordinal = in_eeprom->offset >> 16; + attr = in_eeprom->offset & 0xffff; + + return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr, + in_eeprom->data, in_eeprom->length); + return 0; +} + /* * Initialization */ @@ -1532,6 +2287,8 @@ static const struct eth_dev_ops bnxt_dev_ops = { .rx_queue_release = bnxt_rx_queue_release_op, .tx_queue_setup = bnxt_tx_queue_setup_op, .tx_queue_release = bnxt_tx_queue_release_op, + .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op, + .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op, .reta_update = bnxt_reta_update_op, .reta_query = bnxt_reta_query_op, .rss_hash_update = bnxt_rss_hash_update_op, @@ -1561,6 +2318,16 @@ static const struct eth_dev_ops bnxt_dev_ops = { .txq_info_get = bnxt_txq_info_get_op, .dev_led_on = bnxt_dev_led_on_op, .dev_led_off = bnxt_dev_led_off_op, + .xstats_get_by_id = bnxt_dev_xstats_get_by_id_op, + .xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op, + .rx_queue_count = bnxt_rx_queue_count_op, + .rx_descriptor_status = bnxt_rx_descriptor_status_op, + .tx_descriptor_status = bnxt_tx_descriptor_status_op, + .filter_ctrl = bnxt_filter_ctrl_op, + .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op, + .get_eeprom_length = bnxt_get_eeprom_length_op, + .get_eeprom = bnxt_get_eeprom_op, + .set_eeprom = bnxt_set_eeprom_op, }; 
 static bool bnxt_vf_pciid(uint16_t id)
@@ -1569,7 +2336,8 @@ static bool bnxt_vf_pciid(uint16_t id)
 	    id == BROADCOM_DEV_ID_57406_VF ||
 	    id == BROADCOM_DEV_ID_5731X_VF ||
 	    id == BROADCOM_DEV_ID_5741X_VF ||
-	    id == BROADCOM_DEV_ID_57414_VF)
+	    id == BROADCOM_DEV_ID_57414_VF ||
+	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF)
 		return true;
 	return false;
 }
@@ -1639,6 +2407,9 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
 	rte_atomic64_init(&bp->rx_mbuf_alloc_fail);
 	bp->dev_stopped = 1;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		goto skip_init;
+
 	if (bnxt_vf_pciid(pci_dev->id.device_id))
 		bp->flags |= BNXT_FLAG_VF;
 
@@ -1648,7 +2419,10 @@
 			"Board initialization failed rc: %x\n", rc);
 		goto error;
 	}
+skip_init:
 	eth_dev->dev_ops = &bnxt_dev_ops;
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
 	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
 	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
 
@@ -1873,6 +2647,9 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
 	struct bnxt *bp = eth_dev->data->dev_private;
 	int rc;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -EPERM;
+
 	bnxt_disable_int(bp);
 	bnxt_free_int(bp);
 	bnxt_free_mem(bp);
@@ -1899,24 +2676,6 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
 	return rc;
 }
 
-int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg)
-{
-	struct rte_pmd_bnxt_mb_event_param cb_param;
-
-	cb_param.retval = RTE_PMD_BNXT_MB_EVENT_PROCEED;
-	cb_param.vf_id = vf_id;
-	cb_param.msg = msg;
-
-	_rte_eth_dev_callback_process(bp->eth_dev, RTE_ETH_EVENT_VF_MBOX,
-			&cb_param);
-
-	/* Default to approve */
-	if (cb_param.retval == RTE_PMD_BNXT_MB_EVENT_PROCEED)
-		cb_param.retval = RTE_PMD_BNXT_MB_EVENT_NOOP_ACK;
-
-	return cb_param.retval == RTE_PMD_BNXT_MB_EVENT_NOOP_ACK ? true : false;
-}
-
 static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	struct rte_pci_device *pci_dev)
 {
@@ -1940,7 +2699,7 @@ static struct rte_pci_driver bnxt_rte_pmd = {
 static bool
 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
 {
-	if (strcmp(dev->data->drv_name, drv->driver.name))
+	if (strcmp(dev->device->driver->name, drv->driver.name))
 		return false;
 
 	return true;
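
Usage note (not part of the patch above): the Rx-queue interrupt plumbing added in bnxt_init_chip() and the new rx_queue_intr_enable/disable dev_ops are consumed through the generic ethdev interrupt API. A minimal application-side sketch follows, assuming DPDK 17.x-era APIs, one port with one Rx queue, and a hypothetical helper name rx_wait_then_poll(); error handling and full device configuration are omitted.

/*
 * Illustrative only -- not driver code from this patch.  Arms the Rx-queue
 * interrupt, sleeps until the NIC signals traffic, then returns to polling.
 */
#include <string.h>
#include <rte_ethdev.h>
#include <rte_interrupts.h>

static void rx_wait_then_poll(uint8_t port_id, uint16_t queue_id)
{
	struct rte_eth_conf conf;
	struct rte_epoll_event ev;

	memset(&conf, 0, sizeof(conf));
	conf.intr_conf.rxq = 1;	/* request per-Rx-queue interrupt vectors */
	rte_eth_dev_configure(port_id, 1, 1, &conf);
	/* ... rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup(), rte_eth_dev_start() ... */

	/* register the queue's eventfd with this thread's epoll instance */
	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
				  RTE_INTR_EVENT_ADD, NULL);

	rte_eth_dev_rx_intr_enable(port_id, queue_id);	/* arm the interrupt */
	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);	/* sleep until traffic */
	rte_eth_dev_rx_intr_disable(port_id, queue_id);	/* back to polling mode */
	/* drain the queue with rte_eth_rx_burst() before re-arming */
}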