+}
+
+/* Stop the device: mark the link down and shut down NIC resources */
+static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ if (bp->eth_dev->data->dev_started) {
+ /* TBD: STOP HW queues DMA */
+ eth_dev->data->dev_link.link_status = 0;
+ }
+ bnxt_shutdown_nic(bp);
+}
+
+static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
+ uint32_t index)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
+ struct bnxt_vnic_info *vnic;
+ struct bnxt_filter_info *filter, *temp_filter;
+ int i;
+
+ /*
+ * Loop through all VNICs from the specified filter flow pools to
+ * remove the corresponding MAC addr filter
+ */
+ for (i = 0; i < MAX_FF_POOLS; i++) {
+ if (!(pool_mask & (1ULL << i)))
+ continue;
+
+ STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
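+ /*
+ * Save the next pointer up front: a matching filter is
+ * unlinked and recycled onto the free list, which rewrites
+ * its next linkage.
+ */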
+ filter = STAILQ_FIRST(&vnic->filter);
+ while (filter) {
+ temp_filter = STAILQ_NEXT(filter, next);
+ if (filter->mac_index == index) {
+ STAILQ_REMOVE(&vnic->filter, filter,
+ bnxt_filter_info, next);
+ bnxt_hwrm_clear_filter(bp, filter);
+ filter->mac_index = INVALID_MAC_INDEX;
+ memset(&filter->l2_addr, 0,
+ ETHER_ADDR_LEN);
+ STAILQ_INSERT_TAIL(
+ &bp->free_filter_list,
+ filter, next);
+ }
+ filter = temp_filter;
+ }
+ }
+ }
+}
+
+static void bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
+ struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
+ struct bnxt_filter_info *filter;
+
+ if (!vnic) {
+ RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
+ return;
+ }
+ /* Reject the request if a filter for this MAC index already exists */
+ STAILQ_FOREACH(filter, &vnic->filter, next) {
+ if (filter->mac_index == index) {
+ RTE_LOG(ERR, PMD,
+ "MAC addr already existed for pool %d\n", pool);
+ return;
+ }
+ }
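+ /* No filter yet for this index: take one from the free list */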
+ filter = bnxt_alloc_filter(bp);
+ if (!filter) {
+ RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+ return;
+ }
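+ /*
+ * Attach the requested MAC address to the new l2 filter and
+ * program it into the hardware via HWRM.
+ */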
+ STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+ filter->mac_index = index;
+ memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
+ bnxt_hwrm_set_filter(bp, vnic, filter);
+}
+
+static int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
+ int wait_to_complete)
+{
+ int rc = 0;
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct rte_eth_link new;
+ unsigned int cnt = BNXT_LINK_WAIT_CNT;
+
+ memset(&new, 0, sizeof(new));
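+ /*
+ * Query the firmware for link status. When wait_to_complete is
+ * set, poll up to BNXT_LINK_WAIT_CNT times, sleeping
+ * BNXT_LINK_WAIT_INTERVAL ms between attempts, until the link
+ * comes up.
+ */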
+ do {
+ /* Retrieve link info from hardware */
+ rc = bnxt_get_hwrm_link_config(bp, &new);
+ if (rc) {
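+ /* On failure, report a conservative 100M full-duplex link */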
+ new.link_speed = ETH_LINK_SPEED_100M;
+ new.link_duplex = ETH_LINK_FULL_DUPLEX;
+ RTE_LOG(ERR, PMD,
+ "Failed to retrieve link rc = 0x%x!", rc);
+ goto out;
+ }
+ if (!wait_to_complete)
+ break;
+
+ rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
+
+ } while (!new.link_status && cnt--);
+
+ /* Timed out or success */
+ if (new.link_status) {
+ /* Update only if success */
+ eth_dev->data->dev_link.link_duplex = new.link_duplex;
+ eth_dev->data->dev_link.link_speed = new.link_speed;
+ }
+ eth_dev->data->dev_link.link_status = new.link_status;
+out:
+ return rc;
+}
+
+static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_vnic_info *vnic;
+
+ if (bp->vnic_info == NULL)
+ return;
+
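+ /*
+ * Promiscuous mode is tracked on the default VNIC (index 0);
+ * the HWRM call pushes the updated rx mask to the hardware.
+ */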
+ vnic = &bp->vnic_info[0];
+
+ vnic->flags |= BNXT_VNIC_INFO_PROMISC;
+ bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
+}
+
+static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_vnic_info *vnic;
+
+ if (bp->vnic_info == NULL)
+ return;
+
+ vnic = &bp->vnic_info[0];
+
+ vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
+ bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
+}
+
+static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_vnic_info *vnic;
+
+ if (bp->vnic_info == NULL)
+ return;
+
+ vnic = &bp->vnic_info[0];
+
+ vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
+ bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
+}
+
+static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_vnic_info *vnic;
+
+ if (bp->vnic_info == NULL)
+ return;
+
+ vnic = &bp->vnic_info[0];
+
+ vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
+ bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
+}
+
+static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_vnic_info *vnic;
+ int i;
+
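+ /*
+ * RETA updates are only valid when RSS was enabled at
+ * dev_configure time.
+ */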
+ if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+ return -EINVAL;
+
+ if (reta_size != HW_HASH_INDEX_SIZE) {
+ RTE_LOG(ERR, PMD, "The configured hash table lookup size "
+ "(%d) must equal the size supported by the hardware "
+ "(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
+ return -EINVAL;
+ }
+ /* Update the RSS VNIC(s) */
+ for (i = 0; i < MAX_FF_POOLS; i++) {
+ STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
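+ /*
+ * Note: reta_size is a count of table entries, but this
+ * copies reta_size bytes (see the TODO in bnxt_reta_query_op).
+ */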
+ memcpy(vnic->rss_table, reta_conf, reta_size);
+
+ bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+ }
+ }
+ return 0;
+}
+
+static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+
+ /* Retrieve from the default VNIC */
+ if (!vnic)
+ return -EINVAL;
+ if (!vnic->rss_table)
+ return -EINVAL;
+
+ if (reta_size != HW_HASH_INDEX_SIZE) {
+ RTE_LOG(ERR, PMD, "The configured hash table lookup size "
+ "(%d) must equal the size supported by the hardware "
+ "(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
+ return -EINVAL;
+ }
+ /* TODO: revisit copying here from the u64 reta_conf groups to the u16 rss_table */
+ memcpy(reta_conf, vnic->rss_table, reta_size);
+
+ return 0;
+}
+
+static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_vnic_info *vnic;
+ uint16_t hash_type = 0;
+ int i;
+
+ /*
+ * If the requested RSS enablement does not match what was set
+ * at dev_configure time, return -EINVAL.
+ */
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ if (!rss_conf->rss_hf)
+ return -EINVAL;
+ } else {
+ if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
+ return -EINVAL;
+ }
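+ /* Translate the rte_eth RSS hash flags into HWRM hash type bits */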
+ if (rss_conf->rss_hf & ETH_RSS_IPV4)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
+ if (rss_conf->rss_hf & ETH_RSS_IPV6)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
+
+ /* Update the RSS VNIC(s) */
+ for (i = 0; i < MAX_FF_POOLS; i++) {
+ STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+ vnic->hash_type = hash_type;
+
+ /*
+ * Use the supplied key if the key length is
+ * acceptable and the rss_key is not NULL
+ */
+ if (rss_conf->rss_key &&
+ rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
+ memcpy(vnic->rss_hash_key, rss_conf->rss_key,
+ rss_conf->rss_key_len);
+
+ bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+ }
+ }
+ return 0;
+}
+
+static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ int len;
+ uint32_t hash_types;
+
+ /* RSS configuration is the same for all VNICs */
+ if (vnic && vnic->rss_hash_key) {
+ if (rss_conf->rss_key) {
+ len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
+ rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
+ memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
+ }
+
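+ /*
+ * Translate the HWRM hash type bits back into rte_eth RSS
+ * flags; any bit left over means the firmware reported a hash
+ * type this driver does not recognize.
+ */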
+ hash_types = vnic->hash_type;
+ rss_conf->rss_hf = 0;
+ if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
+ rss_conf->rss_hf |= ETH_RSS_IPV4;
+ hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
+ }
+ if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
+ rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ hash_types &=
+ ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
+ }
+ if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
+ rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ hash_types &=
+ ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
+ }
+ if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
+ rss_conf->rss_hf |= ETH_RSS_IPV6;
+ hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
+ }
+ if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
+ rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ hash_types &=
+ ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
+ }
+ if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
+ rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ hash_types &=
+ ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
+ }
+ if (hash_types) {
+ RTE_LOG(ERR, PMD,
+ "Unknwon RSS config from firmware (%08x), RSS disabled",
+ vnic->hash_type);
+ return -ENOTSUP;
+ }
+ } else {
+ rss_conf->rss_hf = 0;
+ }
+ return 0;
+}
+
+static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_link link_info;
+ int rc;
+
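+ /*
+ * bnxt_get_hwrm_link_config() refreshes bp->link_info as a
+ * side effect; the pause fields read below come from that
+ * refresh.
+ */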
+ rc = bnxt_get_hwrm_link_config(bp, &link_info);
+ if (rc)
+ return rc;
+
+ memset(fc_conf, 0, sizeof(*fc_conf));
+ if (bp->link_info.auto_pause)
+ fc_conf->autoneg = 1;
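+ /* Map the reported HWRM pause bits onto the rte_eth flow control modes */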
+ switch (bp->link_info.pause) {
+ case 0:
+ fc_conf->mode = RTE_FC_NONE;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ break;
+ case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
+ HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
+ fc_conf->mode = RTE_FC_FULL;
+ break;
+ }
+ return 0;
+}
+
+static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+
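+ /*
+ * When autoneg is requested, advertise pause via auto_pause and
+ * clear force_pause; otherwise force the pause setting directly
+ * via force_pause.
+ */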
+ switch (fc_conf->mode) {
+ case RTE_FC_NONE:
+ bp->link_info.auto_pause = 0;
+ bp->link_info.force_pause = 0;
+ break;
+ case RTE_FC_RX_PAUSE:
+ if (fc_conf->autoneg) {
+ bp->link_info.auto_pause =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
+ bp->link_info.force_pause = 0;
+ } else {
+ bp->link_info.auto_pause = 0;
+ bp->link_info.force_pause =
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
+ }
+ break;
+ case RTE_FC_TX_PAUSE:
+ if (fc_conf->autoneg) {
+ bp->link_info.auto_pause =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
+ bp->link_info.force_pause = 0;
+ } else {
+ bp->link_info.auto_pause = 0;
+ bp->link_info.force_pause =
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
+ }
+ break;
+ case RTE_FC_FULL:
+ if (fc_conf->autoneg) {
+ bp->link_info.auto_pause =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
+ bp->link_info.force_pause = 0;
+ } else {
+ bp->link_info.auto_pause = 0;
+ bp->link_info.force_pause =
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
+ }
+ break;
+ }
+ return bnxt_set_hwrm_link_config(bp, true);