net/bnxt: representor (bnxt_reps.c) updates — NOTE(review): the original page title here read "net/mlx5: add C++ include guard to public header", which belongs to an unrelated commit; the diff below touches only drivers/net/bnxt/bnxt_reps.c
[dpdk.git] / drivers / net / bnxt / bnxt_reps.c
index d948745..5e140f0 100644 (file)
@@ -29,22 +29,26 @@ static const struct eth_dev_ops bnxt_rep_dev_ops = {
        .dev_stop = bnxt_rep_dev_stop_op,
        .stats_get = bnxt_rep_stats_get_op,
        .stats_reset = bnxt_rep_stats_reset_op,
-       .filter_ctrl = bnxt_filter_ctrl_op
+       .flow_ops_get = bnxt_flow_ops_get_op
 };
 
 uint16_t
 bnxt_vfr_recv(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *mbuf)
 {
-       struct rte_mbuf **prod_rx_buf;
+       struct bnxt_representor *vfr_bp = NULL;
        struct bnxt_rx_ring_info *rep_rxr;
-       struct bnxt_rx_queue *rep_rxq;
        struct rte_eth_dev *vfr_eth_dev;
-       struct bnxt_representor *vfr_bp;
+       struct rte_mbuf **prod_rx_buf;
+       struct bnxt_rx_queue *rep_rxq;
        uint16_t mask;
        uint8_t que;
 
        vfr_eth_dev = &rte_eth_devices[port_id];
-       vfr_bp = vfr_eth_dev->data->dev_private;
+       vfr_bp = vfr_eth_dev ? vfr_eth_dev->data->dev_private : NULL;
+
+       if (unlikely(vfr_bp == NULL))
+               return 1;
+
        /* If rxq_id happens to be > nr_rings, use ring 0 */
        que = queue_id < vfr_bp->rx_nr_rings ? queue_id : 0;
        rep_rxq = vfr_bp->rx_queues[que];
@@ -104,7 +108,7 @@ bnxt_rep_rx_burst(void *rx_queue,
 static uint16_t
 bnxt_rep_tx_burst(void *tx_queue,
                     struct rte_mbuf **tx_pkts,
-                    __rte_unused uint16_t nb_pkts)
+                    uint16_t nb_pkts)
 {
        struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;
        struct bnxt_tx_queue *ptxq;
@@ -187,6 +191,7 @@ int bnxt_representor_init(struct rte_eth_dev *eth_dev, void *params)
        eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR |
                                        RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        eth_dev->data->representor_id = rep_params->vf_id;
+       eth_dev->data->backer_port_id = rep_params->parent_dev->data->port_id;
 
        rte_eth_random_addr(vf_rep_bp->dflt_mac_addr);
        memcpy(vf_rep_bp->mac_addr, vf_rep_bp->dflt_mac_addr,
@@ -209,7 +214,6 @@ int bnxt_representor_init(struct rte_eth_dev *eth_dev, void *params)
        eth_dev->data->dev_link.link_status = link->link_status;
        eth_dev->data->dev_link.link_autoneg = link->link_autoneg;
 
-       PMD_DRV_LOG(INFO, "calling bnxt_print_link_info\n");
        bnxt_print_link_info(eth_dev);
 
        PMD_DRV_LOG(INFO,
@@ -315,6 +319,12 @@ static int bnxt_tf_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
                BNXT_TF_DBG(ERR, "Invalid arguments\n");
                return 0;
        }
+       /* update the port id so you can backtrack to ethdev */
+       vfr->dpdk_port_id = vfr_ethdev->data->port_id;
+
+       /* If pair is present, then delete the pair */
+       if (bnxt_hwrm_cfa_pair_exists(parent_bp, vfr))
+               (void)bnxt_hwrm_cfa_pair_free(parent_bp, vfr);
 
        /* Update the ULP portdata base with the new VFR interface */
        rc = ulp_port_db_dev_port_intf_update(parent_bp->ulp_ctx, vfr_ethdev);
@@ -385,6 +395,26 @@ static int bnxt_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
        return rc;
 }
 
+static void bnxt_vfr_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
+{
+       struct rte_mbuf **sw_ring;
+       unsigned int i;
+
+       if (!rxq || !rxq->rx_ring)
+               return;
+
+       sw_ring = rxq->rx_ring->rx_buf_ring;
+       if (sw_ring) {
+               for (i = 0; i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
+                       if (sw_ring[i]) {
+                               if (sw_ring[i] != &rxq->fake_mbuf)
+                                       rte_pktmbuf_free_seg(sw_ring[i]);
+                               sw_ring[i] = NULL;
+                       }
+               }
+       }
+}
+
 static void bnxt_rep_free_rx_mbufs(struct bnxt_representor *rep_bp)
 {
        struct bnxt_rx_queue *rxq;
@@ -392,7 +422,7 @@ static void bnxt_rep_free_rx_mbufs(struct bnxt_representor *rep_bp)
 
        for (i = 0; i < rep_bp->rx_nr_rings; i++) {
                rxq = rep_bp->rx_queues[i];
-               bnxt_rx_queue_release_mbufs(rxq);
+               bnxt_vfr_rx_queue_release_mbufs(rxq);
        }
 }
 
@@ -479,8 +509,7 @@ int bnxt_rep_dev_stop_op(struct rte_eth_dev *eth_dev)
        struct bnxt_representor *vfr_bp = eth_dev->data->dev_private;
 
        /* Avoid crashes as we are about to free queues */
-       eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
-       eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
+       bnxt_stop_rxtx(eth_dev);
 
        BNXT_TF_DBG(DEBUG, "BNXT Port:%d VFR stop\n", eth_dev->data->port_id);
 
@@ -525,6 +554,7 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
        dev_info->max_tx_queues = max_rx_rings;
        dev_info->reta_size = bnxt_rss_hash_tbl_size(parent_bp);
        dev_info->hash_key_size = 40;
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
        /* MTU specifics */
        dev_info->min_mtu = RTE_ETHER_MIN_MTU;
@@ -536,7 +566,7 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
 
        dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
        if (parent_bp->flags & BNXT_FLAG_PTP_SUPPORTED)
-               dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+               dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
        dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
        dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
 
@@ -548,7 +578,7 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
        return 0;
 }
 
-int bnxt_rep_dev_configure_op(__rte_unused struct rte_eth_dev *eth_dev)
+int bnxt_rep_dev_configure_op(struct rte_eth_dev *eth_dev)
 {
        struct bnxt_representor *rep_bp = eth_dev->data->dev_private;
 
@@ -630,7 +660,7 @@ int bnxt_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
        if (eth_dev->data->rx_queues) {
                rxq = eth_dev->data->rx_queues[queue_idx];
                if (rxq)
-                       bnxt_rx_queue_release_op(rxq);
+                       bnxt_rx_queue_release_op(eth_dev, queue_idx);
        }
 
        rxq = rte_zmalloc_socket("bnxt_vfr_rx_queue",
@@ -641,6 +671,8 @@ int bnxt_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
                return -ENOMEM;
        }
 
+       eth_dev->data->rx_queues[queue_idx] = rxq;
+
        rxq->nb_rx_desc = nb_desc;
 
        rc = bnxt_init_rep_rx_ring(rxq, socket_id);
@@ -660,20 +692,19 @@ int bnxt_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
        rxq->rx_ring->rx_buf_ring = buf_ring;
        rxq->queue_id = queue_idx;
        rxq->port_id = eth_dev->data->port_id;
-       eth_dev->data->rx_queues[queue_idx] = rxq;
 
        return 0;
 
 out:
        if (rxq)
-               bnxt_rep_rx_queue_release_op(rxq);
+               bnxt_rep_rx_queue_release_op(eth_dev, queue_idx);
 
        return rc;
 }
 
-void bnxt_rep_rx_queue_release_op(void *rx_queue)
+void bnxt_rep_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-       struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
+       struct bnxt_rx_queue *rxq = dev->data->rx_queues[queue_idx];
 
        if (!rxq)
                return;
@@ -728,8 +759,8 @@ int bnxt_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 
        if (eth_dev->data->tx_queues) {
                vfr_txq = eth_dev->data->tx_queues[queue_idx];
-               bnxt_rep_tx_queue_release_op(vfr_txq);
-               vfr_txq = NULL;
+               if (vfr_txq != NULL)
+                       bnxt_rep_tx_queue_release_op(eth_dev, queue_idx);
        }
 
        vfr_txq = rte_zmalloc_socket("bnxt_vfr_tx_queue",
@@ -758,15 +789,16 @@ int bnxt_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
        return 0;
 }
 
-void bnxt_rep_tx_queue_release_op(void *tx_queue)
+void bnxt_rep_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-       struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;
+       struct bnxt_vf_rep_tx_queue *vfr_txq = dev->data->tx_queues[queue_idx];
 
        if (!vfr_txq)
                return;
 
        rte_free(vfr_txq->txq);
        rte_free(vfr_txq);
+       dev->data->tx_queues[queue_idx] = NULL;
 }
 
 int bnxt_rep_stats_get_op(struct rte_eth_dev *eth_dev,
@@ -818,7 +850,7 @@ int bnxt_rep_stop_all(struct bnxt *bp)
        if (!bp->rep_info)
                return 0;
 
-       for (vf_id = 0; vf_id < BNXT_MAX_VF_REPS; vf_id++) {
+       for (vf_id = 0; vf_id < BNXT_MAX_VF_REPS(bp); vf_id++) {
                rep_eth_dev = bp->rep_info[vf_id].vfr_eth_dev;
                if (!rep_eth_dev)
                        continue;