+allmulti:
+ return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
+}
+
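+/*
+ * bp->fw_ver packs the HWRM firmware version into its top three bytes
+ * (major.minor.update); render it as a dotted version string.
+ */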
+static int
+bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
+ uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
+ uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
+ int ret;
+
+ ret = snprintf(fw_version, fw_size, "%d.%d.%d",
+ fw_major, fw_minor, fw_updt);
+
+ ret += 1; /* account for the terminating '\0' */
+ if (fw_size < (size_t)ret)
+ return ret;
+
+ return 0;
+}
+
+static void
+bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct bnxt_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = 0;
+ qinfo->conf.rx_deferred_start = 0;
+}
+
+static void
+bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct bnxt_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.tx_rs_thresh = 0;
+ qinfo->conf.txq_flags = txq->txq_flags;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
+static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
+{
+ struct bnxt *bp = eth_dev->data->dev_private;
+ struct rte_eth_dev_info dev_info;
+ uint32_t max_dev_mtu;
+ int rc = 0;
+ uint32_t i;
+
+ bnxt_dev_info_get_op(eth_dev, &dev_info);
+ max_dev_mtu = dev_info.max_rx_pktlen -
+ ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2;
+
+ if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) {
+ RTE_LOG(ERR, PMD, "MTU requested must be within (%d, %d)\n",
+ ETHER_MIN_MTU, max_dev_mtu);
+ return -EINVAL;
+ }
+
+ if (new_mtu > ETHER_MTU) {
+ bp->flags |= BNXT_FLAG_JUMBO;
+ eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ } else {
+ eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ bp->flags &= ~BNXT_FLAG_JUMBO;
+ }
+
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
+ new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
+
+ eth_dev->data->mtu = new_mtu;
+ RTE_LOG(INFO, PMD, "New MTU is %d\n", eth_dev->data->mtu);
+
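+ /* Propagate the new MRU (MTU plus L2 header, CRC and two VLAN tags)
+ * to every VNIC so the hardware accepts the larger frames.
+ */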
+ for (i = 0; i < bp->nr_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
+ if (rc)
+ break;
+
+ rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+static int
+bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ uint16_t vlan = bp->vlan;
+ int rc;
+
+ if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "PVID cannot be modified for this function\n");
+ return -ENOTSUP;
+ }
+ bp->vlan = on ? pvid : 0;
+
+ rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
+ if (rc)
+ bp->vlan = vlan;
+ return rc;
+}
+
+static int
+bnxt_dev_led_on_op(struct rte_eth_dev *dev)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+
+ return bnxt_hwrm_port_led_cfg(bp, true);
+}
+
+static int
+bnxt_dev_led_off_op(struct rte_eth_dev *dev)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+
+ return bnxt_hwrm_port_led_cfg(bp, false);
+}
+
+static uint32_t
+bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ uint32_t desc = 0, raw_cons = 0, cons;
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_rx_queue *rxq;
+ struct rx_pkt_cmpl *rxcmp;
+ uint16_t cmp_type;
+ uint8_t cmp = 1;
+ bool valid;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ cpr = rxq->cp_ring;
+ valid = cpr->valid;
+
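+ /* Scan the completion ring and count packet (and TPA) completions
+ * that are still valid for the current phase; each completion may
+ * span several aggregation buffer entries.
+ */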
+ while (raw_cons < rxq->nb_rx_desc) {
+ cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
+ rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+
+ if (!CMPL_VALID(rxcmp, valid))
+ goto nothing_to_do;
+ valid = FLIP_VALID(cons, cpr->cp_ring_struct->ring_mask, valid);
+ cmp_type = CMP_TYPE(rxcmp);
+ if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_END) {
+ cmp = (rte_le_to_cpu_32(
+ ((struct rx_tpa_end_cmpl *)
+ (rxcmp))->agg_bufs_v1) &
+ RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
+ RX_TPA_END_CMPL_AGG_BUFS_SFT;
+ desc++;
+ } else if (cmp_type == 0x11) { /* RX L2 completion */
+ desc++;
+ cmp = (rxcmp->agg_bufs_v1 &
+ RX_PKT_CMPL_AGG_BUFS_MASK) >>
+ RX_PKT_CMPL_AGG_BUFS_SFT;
+ } else {
+ cmp = 1;
+ }
+nothing_to_do:
+ raw_cons += cmp ? cmp : 2;
+ }
+
+ return desc;
+}
+
+static int
+bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
+{
+ struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_sw_rx_bd *rx_buf;
+ struct rx_pkt_cmpl *rxcmp;
+ uint32_t cons, cp_cons;
+
+ if (!rxq)
+ return -EINVAL;
+
+ cpr = rxq->cp_ring;
+ rxr = rxq->rx_ring;
+
+ if (offset >= rxq->nb_rx_desc)
+ return -EINVAL;
+
+ cons = RING_CMP(cpr->cp_ring_struct, offset);
+ cp_cons = cpr->cp_raw_cons;
+ rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+
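+ /* The valid-bit phase flips on each pass through the completion
+ * ring, so entries at or before the raw consumer index are checked
+ * against the inverted phase.
+ */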
+ if (cons > cp_cons) {
+ if (CMPL_VALID(rxcmp, cpr->valid))
+ return RTE_ETH_RX_DESC_DONE;
+ } else {
+ if (CMPL_VALID(rxcmp, !cpr->valid))
+ return RTE_ETH_RX_DESC_DONE;
+ }
+ rx_buf = &rxr->rx_buf_ring[cons];
+ if (rx_buf->mbuf == NULL)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+static int
+bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
+{
+ struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_sw_tx_bd *tx_buf;
+ struct tx_pkt_cmpl *txcmp;
+ uint32_t cons, cp_cons;
+
+ if (!txq)
+ return -EINVAL;
+
+ cpr = txq->cp_ring;
+ txr = txq->tx_ring;
+
+ if (offset >= txq->nb_tx_desc)
+ return -EINVAL;
+
+ cons = RING_CMP(cpr->cp_ring_struct, offset);
+ txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+ cp_cons = cpr->cp_raw_cons;
+
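+ /* As on the RX side, the expected valid-bit phase depends on whether
+ * the entry lies beyond the current raw consumer index.
+ */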
+ if (cons > cp_cons) {
+ if (CMPL_VALID(txcmp, cpr->valid))
+ return RTE_ETH_TX_DESC_UNAVAIL;
+ } else {
+ if (CMPL_VALID(txcmp, !cpr->valid))
+ return RTE_ETH_TX_DESC_UNAVAIL;
+ }
+ tx_buf = &txr->tx_buf_ring[cons];
+ if (tx_buf->mbuf == NULL)
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
+static struct bnxt_filter_info *
+bnxt_match_and_validate_ether_filter(struct bnxt *bp,
+ struct rte_eth_ethertype_filter *efilter,
+ struct bnxt_vnic_info *vnic0,
+ struct bnxt_vnic_info *vnic,
+ int *ret)
+{
+ struct bnxt_filter_info *mfilter = NULL;
+ int match = 0;
+ *ret = 0;
+
+ if (efilter->ether_type != ETHER_TYPE_IPv4 &&
+ efilter->ether_type != ETHER_TYPE_IPv6) {
+ RTE_LOG(ERR, PMD, "unsupported ether_type(0x%04x) in"
+ " ethertype filter.", efilter->ether_type);
+ *ret = -EINVAL;
+ }
+ if (efilter->queue >= bp->rx_nr_rings) {
+ RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
+ *ret = -EINVAL;
+ return NULL;
+ }
+
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
+ if (vnic == NULL) {
+ RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
+ *ret = -EINVAL;
+ return NULL;
+ }
+
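+ /* DROP filters are tracked on the default VNIC's filter list, while
+ * ordinary RX-path filters live on the destination queue's VNIC.
+ */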
+ if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
+ if ((!memcmp(efilter->mac_addr.addr_bytes,
+ mfilter->l2_addr, ETHER_ADDR_LEN) &&
+ mfilter->flags ==
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
+ mfilter->ethertype == efilter->ether_type)) {
+ match = 1;
+ break;
+ }
+ }
+ } else {
+ STAILQ_FOREACH(mfilter, &vnic->filter, next)
+ if ((!memcmp(efilter->mac_addr.addr_bytes,
+ mfilter->l2_addr, ETHER_ADDR_LEN) &&
+ mfilter->ethertype == efilter->ether_type &&
+ mfilter->flags ==
+ HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
+ match = 1;
+ break;
+ }
+ }
+
+ if (match)
+ *ret = -EEXIST;
+
+ return mfilter;
+}
+
+static int
+bnxt_ethertype_filter(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_ethertype_filter *efilter =
+ (struct rte_eth_ethertype_filter *)arg;
+ struct bnxt_filter_info *bfilter, *filter1;
+ struct bnxt_vnic_info *vnic, *vnic0;
+ int ret;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
+ filter_op);
+ return -EINVAL;
+ }
+
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ bnxt_match_and_validate_ether_filter(bp, efilter,
+ vnic0, vnic, &ret);
+ if (ret < 0)
+ return ret;
+
+ bfilter = bnxt_get_unused_filter(bp);
+ if (bfilter == NULL) {
+ RTE_LOG(ERR, PMD,
+ "Not enough resources for a new filter.\n");
+ return -ENOMEM;
+ }
+ bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
+ memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
+ ETHER_ADDR_LEN);
+ memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
+ ETHER_ADDR_LEN);
+ bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
+ bfilter->ethertype = efilter->ether_type;
+ bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+
+ filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0);
+ if (filter1 == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ bfilter->enables |=
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
+ bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+
+ bfilter->dst_id = vnic->fw_vnic_id;
+
+ if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ bfilter->flags =
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
+ }
+
+ ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
+ if (ret)
+ goto cleanup;
+ STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ filter1 = bnxt_match_and_validate_ether_filter(bp, efilter,
+ vnic0, vnic, &ret);
+ if (ret == -EEXIST) {
+ ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1);
+
+ STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info,
+ next);
+ bnxt_free_filter(bp, filter1);
+ } else if (ret == 0) {
+ RTE_LOG(ERR, PMD, "No matching filter found\n");
+ ret = -ENOENT;
+ }
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
+ ret = -EINVAL;
+ goto error;
+ }
+ return ret;
+cleanup:
+ bnxt_free_filter(bp, bfilter);
+error:
+ return ret;
+}
+
+static inline int
+parse_ntuple_filter(struct bnxt *bp,
+ struct rte_eth_ntuple_filter *nfilter,
+ struct bnxt_filter_info *bfilter)
+{
+ uint32_t en = 0;
+
+ if (nfilter->queue >= bp->rx_nr_rings) {
+ RTE_LOG(ERR, PMD, "Invalid queue %d\n", nfilter->queue);
+ return -EINVAL;
+ }
+
+ switch (nfilter->dst_port_mask) {
+ case UINT16_MAX:
+ bfilter->dst_port_mask = -1;
+ bfilter->dst_port = nfilter->dst_port;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "invalid dst_port mask.");
+ return -EINVAL;
+ }
+
+ bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
+ en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+
+ switch (nfilter->proto_mask) {
+ case UINT8_MAX:
+ if (nfilter->proto == 17) /* IPPROTO_UDP */
+ bfilter->ip_protocol = 17;
+ else if (nfilter->proto == 6) /* IPPROTO_TCP */
+ bfilter->ip_protocol = 6;
+ else
+ return -EINVAL;
+ en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "invalid protocol mask.");
+ return -EINVAL;
+ }
+
+ switch (nfilter->dst_ip_mask) {
+ case UINT32_MAX:
+ bfilter->dst_ipaddr_mask[0] = -1;
+ bfilter->dst_ipaddr[0] = nfilter->dst_ip;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "invalid dst_ip mask.");
+ return -EINVAL;
+ }
+
+ switch (nfilter->src_ip_mask) {
+ case UINT32_MAX:
+ bfilter->src_ipaddr_mask[0] = -1;
+ bfilter->src_ipaddr[0] = nfilter->src_ip;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "invalid src_ip mask.");
+ return -EINVAL;
+ }
+
+ switch (nfilter->src_port_mask) {
+ case UINT16_MAX:
+ bfilter->src_port_mask = -1;
+ bfilter->src_port = nfilter->src_port;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "invalid src_port mask.");
+ return -EINVAL;
+ }
+
+ /* TODO: support nfilter->priority */
+
+ bfilter->enables = en;
+ return 0;
+}
+
+static struct bnxt_filter_info*
+bnxt_match_ntuple_filter(struct bnxt_vnic_info *vnic,
+ struct bnxt_filter_info *bfilter)
+{
+ struct bnxt_filter_info *mfilter = NULL;
+
+ STAILQ_FOREACH(mfilter, &vnic->filter, next) {
+ if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
+ bfilter->src_ipaddr_mask[0] ==
+ mfilter->src_ipaddr_mask[0] &&
+ bfilter->src_port == mfilter->src_port &&
+ bfilter->src_port_mask == mfilter->src_port_mask &&
+ bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
+ bfilter->dst_ipaddr_mask[0] ==
+ mfilter->dst_ipaddr_mask[0] &&
+ bfilter->dst_port == mfilter->dst_port &&
+ bfilter->dst_port_mask == mfilter->dst_port_mask &&
+ bfilter->flags == mfilter->flags &&
+ bfilter->enables == mfilter->enables)
+ return mfilter;
+ }
+ return NULL;
+}
+
+static int
+bnxt_cfg_ntuple_filter(struct bnxt *bp,
+ struct rte_eth_ntuple_filter *nfilter,
+ enum rte_filter_op filter_op)
+{
+ struct bnxt_filter_info *bfilter, *mfilter, *filter1;
+ struct bnxt_vnic_info *vnic, *vnic0;
+ int ret;
+
+ if (nfilter->flags != RTE_5TUPLE_FLAGS) {
+ RTE_LOG(ERR, PMD, "only 5tuple is supported.");
+ return -EINVAL;
+ }
+
+ if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
+ RTE_LOG(ERR, PMD, "Ntuple filter: TCP flags not supported\n");
+ return -EINVAL;
+ }
+
+ bfilter = bnxt_get_unused_filter(bp);
+ if (bfilter == NULL) {
+ RTE_LOG(ERR, PMD,
+ "Not enough resources for a new filter.\n");
+ return -ENOMEM;
+ }
+ ret = parse_ntuple_filter(bp, nfilter, bfilter);
+ if (ret < 0)
+ goto free_filter;
+
+ vnic = STAILQ_FIRST(&bp->ff_pool[nfilter->queue]);
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ filter1 = STAILQ_FIRST(&vnic0->filter);
+ if (filter1 == NULL) {
+ ret = -EINVAL;
+ goto free_filter;
+ }
+
+ bfilter->dst_id = vnic->fw_vnic_id;
+ bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+ bfilter->enables |=
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
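+ /* Only IPv4 flows are matched here, so force the ethertype. */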
+ bfilter->ethertype = ETHER_TYPE_IPv4;
+ bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+
+ mfilter = bnxt_match_ntuple_filter(vnic, bfilter);
+
+ if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD) {
+ RTE_LOG(ERR, PMD, "filter exists.");
+ ret = -EEXIST;
+ goto free_filter;
+ }
+ if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
+ RTE_LOG(ERR, PMD, "filter doesn't exist.");
+ ret = -ENOENT;
+ goto free_filter;
+ }
+
+ if (filter_op == RTE_ETH_FILTER_ADD) {
+ bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
+ ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
+ if (ret)
+ goto free_filter;
+ STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
+ } else {
+ ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);
+
+ STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info,
+ next);
+ bnxt_free_filter(bp, mfilter);
+ bfilter->fw_l2_filter_id = -1;
+ bnxt_free_filter(bp, bfilter);
+ }
+
+ return 0;
+free_filter:
+ bfilter->fw_l2_filter_id = -1;
+ bnxt_free_filter(bp, bfilter);
+ return ret;
+}
+
+static int
+bnxt_ntuple_filter(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ int ret;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ case RTE_ETH_FILTER_DELETE:
+ ret = bnxt_cfg_ntuple_filter(bp,
+ (struct rte_eth_ntuple_filter *)arg,
+ filter_op);
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int
+bnxt_filter_ctrl_op(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op, void *arg)
+{
+ int ret = 0;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_FDIR:
+ case RTE_ETH_FILTER_TUNNEL:
+ RTE_LOG(ERR, PMD,
+ "filter type: %d: To be implemented\n", filter_type);
+ break;
+ case RTE_ETH_FILTER_NTUPLE:
+ ret = bnxt_ntuple_filter(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = bnxt_ethertype_filter(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &bnxt_flow_ops;
+ break;
+ default:
+ RTE_LOG(ERR, PMD,
+ "Filter type (%d) not supported", filter_type);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static const uint32_t *
+bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER_VLAN,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_ICMP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L4_ICMP,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == bnxt_recv_pkts)
+ return ptypes;
+ return NULL;
+}
+
+static int
+bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ int rc;
+ uint32_t dir_entries;
+ uint32_t entry_length;
+
+ RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x\n",
+ __func__, bp->pdev->addr.domain, bp->pdev->addr.bus,
+ bp->pdev->addr.devid, bp->pdev->addr.function);
+
+ rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
+ if (rc != 0)
+ return rc;
+
+ return dir_entries * entry_length;
+}
+
+static int
+bnxt_get_eeprom_op(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *in_eeprom)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ uint32_t index;
+ uint32_t offset;
+
+ RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
+ "len = %d\n", __func__, bp->pdev->addr.domain,
+ bp->pdev->addr.bus, bp->pdev->addr.devid,
+ bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
+
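+ /* in_eeprom->offset encodes the NVM directory index in its top byte
+ * and the byte offset within that item in the low 24 bits; offset 0
+ * returns the directory itself.
+ */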
+ if (in_eeprom->offset == 0) /* special offset value to get directory */
+ return bnxt_get_nvram_directory(bp, in_eeprom->length,
+ in_eeprom->data);
+
+ index = in_eeprom->offset >> 24;
+ offset = in_eeprom->offset & 0xffffff;
+
+ if (index != 0)
+ return bnxt_hwrm_get_nvram_item(bp, index - 1, offset,
+ in_eeprom->length, in_eeprom->data);
+
+ return 0;
+}
+
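+/*
+ * Directory entry types that hold executable device firmware; these must not
+ * be rewritten through the EEPROM interface.
+ */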
+static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
+{
+ switch (dir_type) {
+ case BNX_DIR_TYPE_CHIMP_PATCH:
+ case BNX_DIR_TYPE_BOOTCODE:
+ case BNX_DIR_TYPE_BOOTCODE_2:
+ case BNX_DIR_TYPE_APE_FW:
+ case BNX_DIR_TYPE_APE_PATCH:
+ case BNX_DIR_TYPE_KONG_FW:
+ case BNX_DIR_TYPE_KONG_PATCH:
+ case BNX_DIR_TYPE_BONO_FW:
+ case BNX_DIR_TYPE_BONO_PATCH:
+ return true;
+ }
+
+ return false;
+}
+
+static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
+{
+ switch (dir_type) {
+ case BNX_DIR_TYPE_AVS:
+ case BNX_DIR_TYPE_EXP_ROM_MBA:
+ case BNX_DIR_TYPE_PCIE:
+ case BNX_DIR_TYPE_TSCF_UCODE:
+ case BNX_DIR_TYPE_EXT_PHY:
+ case BNX_DIR_TYPE_CCM:
+ case BNX_DIR_TYPE_ISCSI_BOOT:
+ case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
+ case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
+ return true;
+ }
+
+ return false;
+}
+
+static bool bnxt_dir_type_is_executable(uint16_t dir_type)
+{
+ return bnxt_dir_type_is_ape_bin_format(dir_type) ||
+ bnxt_dir_type_is_other_exec_format(dir_type);
+}
+
+static int
+bnxt_set_eeprom_op(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *in_eeprom)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ uint8_t index, dir_op;
+ uint16_t type, ext, ordinal, attr;
+
+ RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
+ "len = %d\n", __func__, bp->pdev->addr.domain,
+ bp->pdev->addr.bus, bp->pdev->addr.devid,
+ bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD, "NVM write not supported from a VF\n");
+ return -EINVAL;
+ }
+
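+ /* The upper 16 bits of 'magic' carry the directory entry type; the
+ * special value 0xffff selects a directory operation, with the opcode
+ * and entry index packed into the low 16 bits.
+ */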
+ type = in_eeprom->magic >> 16;
+
+ if (type == 0xffff) { /* special value for directory operations */
+ index = in_eeprom->magic & 0xff;
+ dir_op = in_eeprom->magic >> 8;
+ if (index == 0)
+ return -EINVAL;
+ switch (dir_op) {
+ case 0x0e: /* erase */
+ if (in_eeprom->offset != ~in_eeprom->magic)
+ return -EINVAL;
+ return bnxt_hwrm_erase_nvram_directory(bp, index - 1);
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* Create or re-write an NVM item: */
+ if (bnxt_dir_type_is_executable(type))
+ return -EOPNOTSUPP;
+ ext = in_eeprom->magic & 0xffff;
+ ordinal = in_eeprom->offset >> 16;
+ attr = in_eeprom->offset & 0xffff;
+
+ return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
+ in_eeprom->data, in_eeprom->length);
+}
+
+/*
+ * Initialization
+ */
+
+static const struct eth_dev_ops bnxt_dev_ops = {
+ .dev_infos_get = bnxt_dev_info_get_op,
+ .dev_close = bnxt_dev_close_op,
+ .dev_configure = bnxt_dev_configure_op,
+ .dev_start = bnxt_dev_start_op,
+ .dev_stop = bnxt_dev_stop_op,
+ .dev_set_link_up = bnxt_dev_set_link_up_op,
+ .dev_set_link_down = bnxt_dev_set_link_down_op,
+ .stats_get = bnxt_stats_get_op,
+ .stats_reset = bnxt_stats_reset_op,
+ .rx_queue_setup = bnxt_rx_queue_setup_op,
+ .rx_queue_release = bnxt_rx_queue_release_op,
+ .tx_queue_setup = bnxt_tx_queue_setup_op,
+ .tx_queue_release = bnxt_tx_queue_release_op,
+ .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
+ .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
+ .reta_update = bnxt_reta_update_op,
+ .reta_query = bnxt_reta_query_op,
+ .rss_hash_update = bnxt_rss_hash_update_op,
+ .rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
+ .link_update = bnxt_link_update_op,
+ .promiscuous_enable = bnxt_promiscuous_enable_op,
+ .promiscuous_disable = bnxt_promiscuous_disable_op,
+ .allmulticast_enable = bnxt_allmulticast_enable_op,
+ .allmulticast_disable = bnxt_allmulticast_disable_op,
+ .mac_addr_add = bnxt_mac_addr_add_op,
+ .mac_addr_remove = bnxt_mac_addr_remove_op,
+ .flow_ctrl_get = bnxt_flow_ctrl_get_op,
+ .flow_ctrl_set = bnxt_flow_ctrl_set_op,
+ .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op,
+ .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op,
+ .vlan_filter_set = bnxt_vlan_filter_set_op,
+ .vlan_offload_set = bnxt_vlan_offload_set_op,
+ .vlan_pvid_set = bnxt_vlan_pvid_set_op,
+ .mtu_set = bnxt_mtu_set_op,
+ .mac_addr_set = bnxt_set_default_mac_addr_op,
+ .xstats_get = bnxt_dev_xstats_get_op,
+ .xstats_get_names = bnxt_dev_xstats_get_names_op,
+ .xstats_reset = bnxt_dev_xstats_reset_op,
+ .fw_version_get = bnxt_fw_version_get,
+ .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
+ .rxq_info_get = bnxt_rxq_info_get_op,
+ .txq_info_get = bnxt_txq_info_get_op,
+ .dev_led_on = bnxt_dev_led_on_op,
+ .dev_led_off = bnxt_dev_led_off_op,
+ .xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
+ .xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
+ .rx_queue_count = bnxt_rx_queue_count_op,
+ .rx_descriptor_status = bnxt_rx_descriptor_status_op,
+ .tx_descriptor_status = bnxt_tx_descriptor_status_op,
+ .filter_ctrl = bnxt_filter_ctrl_op,
+ .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
+ .get_eeprom_length = bnxt_get_eeprom_length_op,
+ .get_eeprom = bnxt_get_eeprom_op,
+ .set_eeprom = bnxt_set_eeprom_op,
+};
+
+static bool bnxt_vf_pciid(uint16_t id)
+{
+ if (id == BROADCOM_DEV_ID_57304_VF ||
+ id == BROADCOM_DEV_ID_57406_VF ||
+ id == BROADCOM_DEV_ID_5731X_VF ||
+ id == BROADCOM_DEV_ID_5741X_VF ||
+ id == BROADCOM_DEV_ID_57414_VF ||
+ id == BROADCOM_DEV_ID_STRATUS_NIC_VF)
+ return true;
+ return false;
+}
+
+static int bnxt_init_board(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = eth_dev->data->dev_private;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ int rc;
+
+ /* Verify that PCI BAR 0 has been mapped by the EAL */
+ if (!pci_dev->mem_resource[0].addr) {
+ RTE_LOG(ERR, PMD,
+ "Cannot find PCI device base address, aborting\n");
+ rc = -ENODEV;
+ goto init_err_disable;
+ }
+
+ bp->eth_dev = eth_dev;
+ bp->pdev = pci_dev;
+
+ bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
+ if (!bp->bar0) {
+ RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
+ rc = -ENOMEM;
+ goto init_err_release;
+ }
+ return 0;
+
+init_err_release:
+ bp->bar0 = NULL;
+
+init_err_disable:
+
+ return rc;
+}
+
+static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
+
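+/*
+ * Clear the bit for HWRM command 'x' in the PF's VF-request-forwarding
+ * bitmap, letting VFs issue that command to the firmware directly instead
+ * of having it forwarded to the PF driver.
+ */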
+#define ALLOW_FUNC(x) \
+ { \
+ typeof(x) arg = (x); \
+ bp->pf.vf_req_fwd[((arg) >> 5)] &= \
+ ~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
+ }
+static int
+bnxt_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz = NULL;
+ static int version_printed;
+ uint32_t total_alloc_len;
+ phys_addr_t mz_phys_addr;
+ struct bnxt *bp;
+ int rc;
+
+ if (version_printed++ == 0)
+ RTE_LOG(INFO, PMD, "%s\n", bnxt_version);
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
+
+ bp = eth_dev->data->dev_private;
+
+ rte_atomic64_init(&bp->rx_mbuf_alloc_fail);
+ bp->dev_stopped = 1;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ goto skip_init;
+
+ if (bnxt_vf_pciid(pci_dev->id.device_id))
+ bp->flags |= BNXT_FLAG_VF;
+
+ rc = bnxt_init_board(eth_dev);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "Board initialization failed rc: %x\n", rc);
+ goto error;
+ }
+skip_init:
+ eth_dev->dev_ops = &bnxt_dev_ops;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+ eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
+ eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
+
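+ /* For PFs (other than the NS2), allocate DMA-able memzones whose
+ * physical addresses are passed to the firmware for RX and TX port
+ * statistics; reuse an existing zone if one survives a previous run.
+ */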
+ if (BNXT_PF(bp) && pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+ "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
+ pci_dev->addr.bus, pci_dev->addr.devid,
+ pci_dev->addr.function, "rx_port_stats");
+ mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+ mz = rte_memzone_lookup(mz_name);
+ total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
+ sizeof(struct rx_port_stats) + 512);
+ if (!mz) {
+ mz = rte_memzone_reserve(mz_name, total_alloc_len,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY);
+ if (mz == NULL)
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->phys_addr;
+ if ((unsigned long)mz->addr == mz_phys_addr) {
+ RTE_LOG(WARNING, PMD,
+ "Memzone physical address same as virtual.\n");
+ RTE_LOG(WARNING, PMD,
+ "Using rte_mem_virt2phy()\n");
+ mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ if (mz_phys_addr == 0) {
+ RTE_LOG(ERR, PMD,
+ "unable to map address to physical memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ bp->rx_mem_zone = (const void *)mz;
+ bp->hw_rx_port_stats = mz->addr;
+ bp->hw_rx_port_stats_map = mz_phys_addr;
+
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+ "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
+ pci_dev->addr.bus, pci_dev->addr.devid,
+ pci_dev->addr.function, "tx_port_stats");
+ mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+ mz = rte_memzone_lookup(mz_name);
+ total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
+ sizeof(struct tx_port_stats) + 512);
+ if (!mz) {
+ mz = rte_memzone_reserve(mz_name, total_alloc_len,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY);
+ if (mz == NULL)
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->phys_addr;
+ if ((unsigned long)mz->addr == mz_phys_addr) {
+ RTE_LOG(WARNING, PMD,
+ "Memzone physical address same as virtual.\n");
+ RTE_LOG(WARNING, PMD,
+ "Using rte_mem_virt2phy()\n");
+ mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ if (mz_phys_addr == 0) {
+ RTE_LOG(ERR, PMD,
+ "unable to map address to physical memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ bp->tx_mem_zone = (const void *)mz;
+ bp->hw_tx_port_stats = mz->addr;
+ bp->hw_tx_port_stats_map = mz_phys_addr;
+
+ bp->flags |= BNXT_FLAG_PORT_STATS;
+ }
+
+ rc = bnxt_alloc_hwrm_resources(bp);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "hwrm resource allocation failure rc: %x\n", rc);
+ goto error_free;
+ }
+ rc = bnxt_hwrm_ver_get(bp);
+ if (rc)
+ goto error_free;
+ bnxt_hwrm_queue_qportcfg(bp);
+
+ bnxt_hwrm_func_qcfg(bp);
+
+ /* Get the MAX capabilities for this function */
+ rc = bnxt_hwrm_func_qcaps(bp);
+ if (rc) {