/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2020 Broadcom
+ * Copyright(c) 2014-2021 Broadcom
* All rights reserved.
*/
.dev_stop = bnxt_rep_dev_stop_op,
.stats_get = bnxt_rep_stats_get_op,
.stats_reset = bnxt_rep_stats_reset_op,
- .filter_ctrl = bnxt_filter_ctrl_op
+ .flow_ops_get = bnxt_flow_ops_get_op
};
uint16_t
mask = rep_rxr->rx_ring_struct->ring_mask;
/* Put this mbuf on the RxQ of the Representor */
- prod_rx_buf = &rep_rxr->rx_buf_ring[rep_rxr->rx_prod & mask];
- if (!*prod_rx_buf) {
+ prod_rx_buf = &rep_rxr->rx_buf_ring[rep_rxr->rx_raw_prod & mask];
+ if (*prod_rx_buf == NULL) {
*prod_rx_buf = mbuf;
vfr_bp->rx_bytes[que] += mbuf->pkt_len;
vfr_bp->rx_pkts[que]++;
- rep_rxr->rx_prod++;
+ rep_rxr->rx_raw_prod++;
} else {
/* Representor Rx ring full, drop pkt */
vfr_bp->rx_drop_bytes[que] += mbuf->pkt_len;
static uint16_t
bnxt_rep_tx_burst(void *tx_queue,
struct rte_mbuf **tx_pkts,
- __rte_unused uint16_t nb_pkts)
+ uint16_t nb_pkts)
{
struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;
struct bnxt_tx_queue *ptxq;
eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR |
RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
eth_dev->data->representor_id = rep_params->vf_id;
+ eth_dev->data->backer_port_id = rep_params->parent_dev->data->port_id;
rte_eth_random_addr(vf_rep_bp->dflt_mac_addr);
memcpy(vf_rep_bp->mac_addr, vf_rep_bp->dflt_mac_addr,
return rc;
}
+/*
+ * Free all mbufs held in a representor Rx queue's software buffer ring.
+ *
+ * Walks every slot of the Rx ring; real mbufs are freed one segment at a
+ * time with rte_pktmbuf_free_seg() and the slot is cleared so the ring can
+ * be torn down or reused safely.  The queue's fake_mbuf placeholder is
+ * skipped — it is embedded in the queue structure, not pool-allocated, so
+ * it must never be handed to the mbuf free path (NOTE(review): inferred
+ * from the skip; confirm against struct bnxt_rx_queue).
+ *
+ * Tolerates a NULL queue, a NULL rx_ring, or a NULL buffer ring (no-op).
+ */
+static void bnxt_vfr_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
+{
+	struct rte_mbuf **sw_ring;
+	unsigned int i;
+
+	if (!rxq || !rxq->rx_ring)
+		return;
+
+	sw_ring = rxq->rx_ring->rx_buf_ring;
+	if (sw_ring) {
+		for (i = 0; i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
+			if (sw_ring[i]) {
+				/* Skip the placeholder entry; free real mbufs only. */
+				if (sw_ring[i] != &rxq->fake_mbuf)
+					rte_pktmbuf_free_seg(sw_ring[i]);
+				sw_ring[i] = NULL;
+			}
+		}
+	}
+}
+
static void bnxt_rep_free_rx_mbufs(struct bnxt_representor *rep_bp)
{
struct bnxt_rx_queue *rxq;
for (i = 0; i < rep_bp->rx_nr_rings; i++) {
rxq = rep_bp->rx_queues[i];
- bnxt_rx_queue_release_mbufs(rxq);
+ bnxt_vfr_rx_queue_release_mbufs(rxq);
}
}
dev_info->max_tx_queues = max_rx_rings;
dev_info->reta_size = bnxt_rss_hash_tbl_size(parent_bp);
dev_info->hash_key_size = 40;
+ dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
/* MTU specifics */
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
if (parent_bp->flags & BNXT_FLAG_PTP_SUPPORTED)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
return 0;
}
-int bnxt_rep_dev_configure_op(__rte_unused struct rte_eth_dev *eth_dev)
+int bnxt_rep_dev_configure_op(struct rte_eth_dev *eth_dev)
{
struct bnxt_representor *rep_bp = eth_dev->data->dev_private;
if (eth_dev->data->rx_queues) {
rxq = eth_dev->data->rx_queues[queue_idx];
if (rxq)
- bnxt_rx_queue_release_op(rxq);
+ bnxt_rx_queue_release_op(eth_dev, queue_idx);
}
rxq = rte_zmalloc_socket("bnxt_vfr_rx_queue",
return -ENOMEM;
}
+ eth_dev->data->rx_queues[queue_idx] = rxq;
+
rxq->nb_rx_desc = nb_desc;
rc = bnxt_init_rep_rx_ring(rxq, socket_id);
rxq->rx_ring->rx_buf_ring = buf_ring;
rxq->queue_id = queue_idx;
rxq->port_id = eth_dev->data->port_id;
- eth_dev->data->rx_queues[queue_idx] = rxq;
return 0;
out:
if (rxq)
- bnxt_rep_rx_queue_release_op(rxq);
+ bnxt_rep_rx_queue_release_op(eth_dev, queue_idx);
return rc;
}
-void bnxt_rep_rx_queue_release_op(void *rx_queue)
+void bnxt_rep_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
{
- struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
+ struct bnxt_rx_queue *rxq = dev->data->rx_queues[queue_idx];
if (!rxq)
return;
if (eth_dev->data->tx_queues) {
vfr_txq = eth_dev->data->tx_queues[queue_idx];
- bnxt_rep_tx_queue_release_op(vfr_txq);
- vfr_txq = NULL;
+ if (vfr_txq != NULL)
+ bnxt_rep_tx_queue_release_op(eth_dev, queue_idx);
}
vfr_txq = rte_zmalloc_socket("bnxt_vfr_tx_queue",
return 0;
}
-void bnxt_rep_tx_queue_release_op(void *tx_queue)
+void bnxt_rep_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
{
- struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;
+ struct bnxt_vf_rep_tx_queue *vfr_txq = dev->data->tx_queues[queue_idx];
if (!vfr_txq)
return;
rte_free(vfr_txq->txq);
rte_free(vfr_txq);
+ dev->data->tx_queues[queue_idx] = NULL;
}
int bnxt_rep_stats_get_op(struct rte_eth_dev *eth_dev,