static int bnxt_restore_vlan_filters(struct bnxt *bp);
static void bnxt_dev_recover(void *arg);
static void bnxt_free_error_recovery_info(struct bnxt *bp);
+static void bnxt_free_rep_info(struct bnxt *bp);
int is_bnxt_in_error(struct bnxt *bp)
{
static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
{
- int rc;
+ int rc = 0;
rc = bnxt_init_fw(bp);
if (rc)
{
pthread_mutex_destroy(&bp->flow_lock);
pthread_mutex_destroy(&bp->def_cp_lock);
+ if (bp->rep_info)
+ pthread_mutex_destroy(&bp->rep_info->vfr_lock);
}
static int
bnxt_uninit_locks(bp);
bnxt_free_flow_stats_info(bp);
+ bnxt_free_rep_info(bp);
rte_free(bp->ptp_cfg);
bp->ptp_cfg = NULL;
return rc;
return ret;
}
-static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
- struct rte_pci_device *pci_dev)
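+/* Free the representor array and the cfa_code to VF index map allocated
+ * by bnxt_init_rep_info().
+ */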
+static void bnxt_free_rep_info(struct bnxt *bp)
{
- char name[RTE_ETH_NAME_MAX_LEN];
- struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
- struct rte_eth_dev *backing_eth_dev, *vf_rep_eth_dev;
- uint16_t num_rep;
- int i, ret = 0;
- struct bnxt *backing_bp;
+ rte_free(bp->rep_info);
+ bp->rep_info = NULL;
+ rte_free(bp->cfa_code_map);
+ bp->cfa_code_map = NULL;
+}
- if (pci_dev->device.devargs) {
- ret = rte_eth_devargs_parse(pci_dev->device.devargs->args,
- ð_da);
- if (ret)
- return ret;
- }
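+/* Allocate the per-VF representor array and the cfa_code to VF index map on
+ * the backing (PF/trusted VF) device and initialize the vfr_lock. Returns
+ * early if the representor info has already been allocated.
+ */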
+static int bnxt_init_rep_info(struct bnxt *bp)
+{
+ int i = 0, rc;
- num_rep = eth_da.nb_representor_ports;
- PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n",
- num_rep);
+ if (bp->rep_info)
+ return 0;
- /* We could come here after first level of probe is already invoked
- * as part of an application bringup(OVS-DPDK vswitchd), so first check
- * for already allocated eth_dev for the backing device (PF/Trusted VF)
- */
- backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
- if (backing_eth_dev == NULL) {
- ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
- sizeof(struct bnxt),
- eth_dev_pci_specific_init, pci_dev,
- bnxt_dev_init, NULL);
+ bp->rep_info = rte_zmalloc("bnxt_rep_info",
+ sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS,
+ 0);
+ if (!bp->rep_info) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n");
+ return -ENOMEM;
+ }
+ bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map",
+ sizeof(*bp->cfa_code_map) *
+ BNXT_MAX_CFA_CODE, 0);
+ if (!bp->cfa_code_map) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n");
+ bnxt_free_rep_info(bp);
+ return -ENOMEM;
+ }
- if (ret || !num_rep)
- return ret;
+ for (i = 0; i < BNXT_MAX_CFA_CODE; i++)
+ bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID;
+
+ rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n");
+ bnxt_free_rep_info(bp);
+ return rc;
}
+ return rc;
+}
+
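+/* Create the VF representor ethdevs requested through the "representor"
+ * devargs on the backing PF/trusted VF device.
+ */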
+static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev,
+ struct rte_eth_devargs eth_da,
+ struct rte_eth_dev *backing_eth_dev)
+{
+ struct rte_eth_dev *vf_rep_eth_dev;
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct bnxt *backing_bp;
+ uint16_t num_rep;
+ int i, ret = 0;
+ num_rep = eth_da.nb_representor_ports;
if (num_rep > BNXT_MAX_VF_REPS) {
PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n",
- eth_da.nb_representor_ports, BNXT_MAX_VF_REPS);
- ret = -EINVAL;
- return ret;
+ num_rep, BNXT_MAX_VF_REPS);
+ return -EINVAL;
}
- /* probe representor ports now */
- if (!backing_eth_dev)
- backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
- if (backing_eth_dev == NULL) {
- ret = -ENODEV;
- return ret;
+ if (num_rep > RTE_MAX_ETHPORTS) {
+ PMD_DRV_LOG(ERR,
+ "nb_representor_ports = %d > %d MAX ETHPORTS\n",
+ num_rep, RTE_MAX_ETHPORTS);
+ return -EINVAL;
}
+
backing_bp = backing_eth_dev->data->dev_private;
if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) {
/* Returning an error is not an option.
* Applications are not handling this correctly
*/
- return ret;
+ return 0;
}
- for (i = 0; i < eth_da.nb_representor_ports; i++) {
+ if (bnxt_init_rep_info(backing_bp))
+ return 0;
+
+ for (i = 0; i < num_rep; i++) {
struct bnxt_vf_representor representor = {
.vf_id = eth_da.representor_ports[i],
.switch_domain_id = backing_bp->switch_domain_id,
- .parent_priv = backing_bp
+ .parent_dev = backing_eth_dev
};
if (representor.vf_id >= BNXT_MAX_VF_REPS) {
return ret;
}
+static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
+ struct rte_eth_dev *backing_eth_dev;
+ uint16_t num_rep;
+ int ret = 0;
+
+ if (pci_dev->device.devargs) {
+ ret = rte_eth_devargs_parse(pci_dev->device.devargs->args,
+ ð_da);
+ if (ret)
+ return ret;
+ }
+
+ num_rep = eth_da.nb_representor_ports;
+ PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n",
+ num_rep);
+
+ /* We could come here after first level of probe is already invoked
+ * as part of an application bringup (OVS-DPDK vswitchd), so first check
+ * for already allocated eth_dev for the backing device (PF/Trusted VF)
+ */
+ backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (backing_eth_dev == NULL) {
+ ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
+ sizeof(struct bnxt),
+ eth_dev_pci_specific_init, pci_dev,
+ bnxt_dev_init, NULL);
+
+ if (ret || !num_rep)
+ return ret;
+
+ backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
+ }
+
+ /* probe representor ports now */
+ ret = bnxt_rep_port_probe(pci_dev, eth_da, backing_eth_dev);
+
+ return ret;
+}
+
static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
{
struct rte_eth_dev *eth_dev;
#include "bnxt.h"
#include "bnxt_ring.h"
#include "bnxt_reps.h"
+#include "bnxt_rxq.h"
+#include "bnxt_rxr.h"
+#include "bnxt_txq.h"
+#include "bnxt_txr.h"
+#include "bnxt_hwrm.h"
#include "hsi_struct_def_dpdk.h"
static const struct eth_dev_ops bnxt_vf_rep_dev_ops = {
.dev_configure = bnxt_vf_rep_dev_configure_op,
.dev_start = bnxt_vf_rep_dev_start_op,
.rx_queue_setup = bnxt_vf_rep_rx_queue_setup_op,
+ .rx_queue_release = bnxt_vf_rep_rx_queue_release_op,
.tx_queue_setup = bnxt_vf_rep_tx_queue_setup_op,
+ .tx_queue_release = bnxt_vf_rep_tx_queue_release_op,
.link_update = bnxt_vf_rep_link_update_op,
.dev_close = bnxt_vf_rep_dev_close_op,
- .dev_stop = bnxt_vf_rep_dev_stop_op
+ .dev_stop = bnxt_vf_rep_dev_stop_op,
+ .stats_get = bnxt_vf_rep_stats_get_op,
+ .stats_reset = bnxt_vf_rep_stats_reset_op,
};
-static uint16_t
-bnxt_vf_rep_rx_burst(__rte_unused void *rx_queue,
- __rte_unused struct rte_mbuf **rx_pkts,
- __rte_unused uint16_t nb_pkts)
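+/* Redirect an mbuf whose cfa_code maps to a VF representor onto that
+ * representor's software Rx ring. Returns 1 when the cfa_code does not
+ * belong to a representor so the caller handles the mbuf as normal Rx,
+ * 0 when the mbuf has been consumed (or dropped) by the representor.
+ */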
+uint16_t
+bnxt_vfr_recv(struct bnxt *bp, uint16_t cfa_code, uint16_t queue_id,
+ struct rte_mbuf *mbuf)
{
+ struct bnxt_sw_rx_bd *prod_rx_buf;
+ struct bnxt_rx_ring_info *rep_rxr;
+ struct bnxt_rx_queue *rep_rxq;
+ struct rte_eth_dev *vfr_eth_dev;
+ struct bnxt_vf_representor *vfr_bp;
+ uint16_t vf_id;
+ uint16_t mask;
+ uint8_t que;
+
+ vf_id = bp->cfa_code_map[cfa_code];
+ /* cfa_code is invalid OR vf_id >= MAX REP. Assume normal Rx */
+ if (vf_id == BNXT_VF_IDX_INVALID || vf_id >= BNXT_MAX_VF_REPS)
+ return 1;
+ vfr_eth_dev = bp->rep_info[vf_id].vfr_eth_dev;
+ if (!vfr_eth_dev)
+ return 1;
+ vfr_bp = vfr_eth_dev->data->dev_private;
+ if (vfr_bp->rx_cfa_code != cfa_code) {
+ /* cfa_code is not meant for this VF rep; treat as normal Rx */
+ return 1;
+ }
+ /* If rxq_id happens to be > max rep_queue, use rxq0 */
+ que = queue_id < BNXT_MAX_VF_REP_RINGS ? queue_id : 0;
+ rep_rxq = vfr_bp->rx_queues[que];
+ rep_rxr = rep_rxq->rx_ring;
+ mask = rep_rxr->rx_ring_struct->ring_mask;
+
+ /* Put this mbuf on the RxQ of the Representor */
+ prod_rx_buf =
+ &rep_rxr->rx_buf_ring[rep_rxr->rx_prod++ & mask];
+ if (!prod_rx_buf->mbuf) {
+ prod_rx_buf->mbuf = mbuf;
+ vfr_bp->rx_bytes[que] += mbuf->pkt_len;
+ vfr_bp->rx_pkts[que]++;
+ } else {
+ vfr_bp->rx_drop_bytes[que] += mbuf->pkt_len;
+ vfr_bp->rx_drop_pkts[que]++;
+ rte_pktmbuf_free(mbuf); /* Representor Rx ring full, drop pkt */
+ }
+
return 0;
}
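+/* Representor Rx burst: drain mbufs that bnxt_vfr_recv() placed on this
+ * representor's software Rx ring; no hardware access is involved.
+ */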
static uint16_t
-bnxt_vf_rep_tx_burst(__rte_unused void *tx_queue,
- __rte_unused struct rte_mbuf **tx_pkts,
+bnxt_vf_rep_rx_burst(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct bnxt_rx_queue *rxq = rx_queue;
+ struct bnxt_sw_rx_bd *cons_rx_buf;
+ struct bnxt_rx_ring_info *rxr;
+ uint16_t nb_rx_pkts = 0;
+ uint16_t mask, i;
+
+ if (!rxq)
+ return 0;
+
+ rxr = rxq->rx_ring;
+ mask = rxr->rx_ring_struct->ring_mask;
+ for (i = 0; i < nb_pkts; i++) {
+ cons_rx_buf = &rxr->rx_buf_ring[rxr->rx_cons & mask];
+ if (!cons_rx_buf->mbuf)
+ return nb_rx_pkts;
+ rx_pkts[nb_rx_pkts] = cons_rx_buf->mbuf;
+ rx_pkts[nb_rx_pkts]->port = rxq->port_id;
+ cons_rx_buf->mbuf = NULL;
+ nb_rx_pkts++;
+ rxr->rx_cons++;
+ }
+
+ return nb_rx_pkts;
+}
+
+static uint16_t
+bnxt_vf_rep_tx_burst(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
- __rte_unused uint16_t nb_pkts)
+ uint16_t nb_pkts)
{
+ struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;
+ struct bnxt_tx_queue *ptxq;
+ struct bnxt *parent;
+ struct bnxt_vf_representor *vf_rep_bp;
+ int qid;
+ int rc;
+ int i;
+
+ if (!vfr_txq)
+ return 0;
+
+ qid = vfr_txq->txq->queue_id;
+ vf_rep_bp = vfr_txq->bp;
+ parent = vf_rep_bp->parent_dev->data->dev_private;
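+ /*
+ * Transmit through the parent device's Tx queue of the same index,
+ * serialized with other representors via vfr_lock and with this
+ * representor's tx_cfa_action applied for the duration of the burst.
+ */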
+ pthread_mutex_lock(&parent->rep_info->vfr_lock);
+ ptxq = parent->tx_queues[qid];
+
+ ptxq->tx_cfa_action = vf_rep_bp->tx_cfa_action;
+
+ for (i = 0; i < nb_pkts; i++) {
+ vf_rep_bp->tx_bytes[qid] += tx_pkts[i]->pkt_len;
+ vf_rep_bp->tx_pkts[qid]++;
+ }
+
+ rc = bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts);
+ ptxq->tx_cfa_action = 0;
+ pthread_mutex_unlock(&parent->rep_info->vfr_lock);
+
+ return rc;
- return 0;
}
vf_rep_bp->vf_id = rep_params->vf_id;
vf_rep_bp->switch_domain_id = rep_params->switch_domain_id;
- vf_rep_bp->parent_priv = rep_params->parent_priv;
+ vf_rep_bp->parent_dev = rep_params->parent_dev;
eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
eth_dev->data->representor_id = rep_params->vf_id;
eth_dev->rx_pkt_burst = bnxt_vf_rep_rx_burst;
eth_dev->tx_pkt_burst = bnxt_vf_rep_tx_burst;
/* Link state. Inherited from PF or trusted VF */
- parent_bp = vf_rep_bp->parent_priv;
+ parent_bp = vf_rep_bp->parent_dev->data->dev_private;
link = &parent_bp->eth_dev->data->dev_link;
eth_dev->data->dev_link.link_speed = link->link_speed;
uint16_t vf_id;
eth_dev->data->mac_addrs = NULL;
+ eth_dev->dev_ops = NULL;
- parent_bp = rep->parent_priv;
- if (parent_bp) {
- parent_bp->num_reps--;
- vf_id = rep->vf_id;
+ parent_bp = rep->parent_dev->data->dev_private;
+ if (!parent_bp)
+ return 0;
+
+ parent_bp->num_reps--;
+ vf_id = rep->vf_id;
+ if (parent_bp->rep_info)
memset(&parent_bp->rep_info[vf_id], 0,
sizeof(parent_bp->rep_info[vf_id]));
/* mark that this representor has been freed */
- }
- eth_dev->dev_ops = NULL;
return 0;
}
struct rte_eth_link *link;
int rc;
- parent_bp = rep->parent_priv;
+ parent_bp = rep->parent_dev->data->dev_private;
rc = bnxt_link_update_op(parent_bp->eth_dev, wait_to_compl);
/* Link state. Inherited from PF or trusted VF */
return rc;
}
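+/* Allocate the representor's CFA rules in FW (the HWRM call is stubbed out
+ * below pending the TFLIB ULP API) and record its rx_cfa_code in the
+ * parent's cfa_code map so Rx traffic can be steered to the representor.
+ */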
+static int bnxt_vfr_alloc(struct bnxt_vf_representor *vfr)
+{
+ int rc = 0;
+ struct bnxt *parent_bp;
+
+ if (!vfr || !vfr->parent_dev) {
+ PMD_DRV_LOG(ERR,
+ "No memory allocated for representor\n");
+ return -ENOMEM;
+ }
+
+ parent_bp = vfr->parent_dev->data->dev_private;
+
+ /* Check if representor has been already allocated in FW */
+ if (vfr->tx_cfa_action && vfr->rx_cfa_code)
+ return 0;
+
+ /*
+ * Alloc VF rep rules in CFA after default VNIC is created.
+ * Otherwise the FW will create the VF-rep rules with
+ * default drop action.
+ */
+
+ /*
+ * This is where we need to replace invoking an HWRM cmd
+ * with the new TFLIB ULP API to do more/less the same job
+ rc = bnxt_hwrm_cfa_vfr_alloc(parent_bp,
+ vfr->vf_id,
+ &vfr->tx_cfa_action,
+ &vfr->rx_cfa_code);
+ */
+ if (!rc) {
+ parent_bp->cfa_code_map[vfr->rx_cfa_code] = vfr->vf_id;
+ PMD_DRV_LOG(DEBUG, "allocated representor %d in FW\n",
+ vfr->vf_id);
+ } else {
+ PMD_DRV_LOG(ERR,
+ "Failed to alloc representor %d in FW\n",
+ vfr->vf_id);
+ }
+
+ return rc;
+}
+
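+/* Release any mbufs still queued on the representor's software Rx rings. */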
+static void bnxt_vf_rep_free_rx_mbufs(struct bnxt_vf_representor *rep_bp)
+{
+ struct bnxt_rx_queue *rxq;
+ unsigned int i;
+
+ for (i = 0; i < rep_bp->rx_nr_rings; i++) {
+ rxq = rep_bp->rx_queues[i];
+ bnxt_rx_queue_release_mbufs(rxq);
+ }
+}
+
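+/* Start the representor: allocate its FW resources and install the real
+ * Rx/Tx burst handlers; on failure report link down and free queued mbufs.
+ */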
int bnxt_vf_rep_dev_start_op(struct rte_eth_dev *eth_dev)
{
- bnxt_vf_rep_link_update_op(eth_dev, 1);
+ struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
+ int rc;
- return 0;
+ rc = bnxt_vfr_alloc(rep_bp);
+
+ if (!rc) {
+ eth_dev->rx_pkt_burst = &bnxt_vf_rep_rx_burst;
+ eth_dev->tx_pkt_burst = &bnxt_vf_rep_tx_burst;
+
+ bnxt_vf_rep_link_update_op(eth_dev, 1);
+ } else {
+ eth_dev->data->dev_link.link_status = 0;
+ bnxt_vf_rep_free_rx_mbufs(rep_bp);
+ }
+
+ return rc;
+}
+
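+/* Undo bnxt_vfr_alloc(): free the representor's CFA rules in FW (stubbed
+ * out pending TFLIB) and invalidate its entry in the parent's cfa_code map.
+ */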
+static int bnxt_vfr_free(struct bnxt_vf_representor *vfr)
+{
+ int rc = 0;
+ struct bnxt *parent_bp;
+
+ if (!vfr || !vfr->parent_dev) {
+ PMD_DRV_LOG(ERR,
+ "No memory allocated for representor\n");
+ return -ENOMEM;
+ }
+
+ parent_bp = vfr->parent_dev->data->dev_private;
+
+ /* Check if the representor has already been freed in FW */
+ if (!vfr->tx_cfa_action && !vfr->rx_cfa_code)
+ return 0;
+
+ /*
+ * This is where we need to replace invoking an HWRM cmd
+ * with the new TFLIB ULP API to do more/less the same job
+ rc = bnxt_hwrm_cfa_vfr_free(parent_bp,
+ vfr->vf_id);
+ */
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "Failed to free representor %d in FW\n",
+ vfr->vf_id);
+ return rc;
+ }
+
+ parent_bp->cfa_code_map[vfr->rx_cfa_code] = BNXT_VF_IDX_INVALID;
+ PMD_DRV_LOG(DEBUG, "freed representor %d in FW\n",
+ vfr->vf_id);
+ vfr->tx_cfa_action = 0;
+ vfr->rx_cfa_code = 0;
+
+ return rc;
}
-void bnxt_vf_rep_dev_stop_op(__rte_unused struct rte_eth_dev *eth_dev)
+void bnxt_vf_rep_dev_stop_op(struct rte_eth_dev *eth_dev)
{
+ struct bnxt_vf_representor *vfr_bp = eth_dev->data->dev_private;
+
+ /* Avoid crashes as we are about to free queues */
+ eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
+ eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
+
+ bnxt_vfr_free(vfr_bp);
+
+ if (eth_dev->data->dev_started)
+ eth_dev->data->dev_link.link_status = 0;
+
+ bnxt_vf_rep_free_rx_mbufs(vfr_bp);
}
void bnxt_vf_rep_dev_close_op(struct rte_eth_dev *eth_dev)
int rc = 0;
/* MAC Specifics */
- parent_bp = rep_bp->parent_priv;
+ parent_bp = rep_bp->parent_dev->data->dev_private;
if (!parent_bp) {
PMD_DRV_LOG(ERR, "Rep parent NULL!\n");
return rc;
-int bnxt_vf_rep_dev_configure_op(__rte_unused struct rte_eth_dev *eth_dev)
+int bnxt_vf_rep_dev_configure_op(struct rte_eth_dev *eth_dev)
{
+ struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
+
PMD_DRV_LOG(DEBUG, "Representor dev_configure_op\n");
+ rep_bp->rx_queues = (void *)eth_dev->data->rx_queues;
+ rep_bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
+ rep_bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
+
+ return 0;
+}
+
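+/* Representor Rx queues are software-only rings sized to match the parent
+ * Rx queue of the same index; they are filled by bnxt_vfr_recv() and
+ * drained by bnxt_vf_rep_rx_burst().
+ */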
+int bnxt_vf_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ __rte_unused struct rte_mempool *mp)
+{
+ struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
+ struct bnxt *parent_bp = rep_bp->parent_dev->data->dev_private;
+ struct bnxt_rx_queue *parent_rxq;
+ struct bnxt_rx_queue *rxq;
+ struct bnxt_sw_rx_bd *buf_ring;
+ int rc = 0;
+
+ if (queue_idx >= BNXT_MAX_VF_REP_RINGS) {
+ PMD_DRV_LOG(ERR,
+ "Cannot create Rx ring %d. %d rings available\n",
+ queue_idx, BNXT_MAX_VF_REP_RINGS);
+ return -EINVAL;
+ }
+
+ if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
+ PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
+ return -EINVAL;
+ }
+
+ parent_rxq = parent_bp->rx_queues[queue_idx];
+ if (!parent_rxq) {
+ PMD_DRV_LOG(ERR, "Parent RxQ has not been configured yet\n");
+ return -EINVAL;
+ }
+
+ if (nb_desc != parent_rxq->nb_rx_desc) {
+ PMD_DRV_LOG(ERR, "nb_desc %d do not match parent rxq", nb_desc);
+ return -EINVAL;
+ }
+
+ if (eth_dev->data->rx_queues) {
+ rxq = eth_dev->data->rx_queues[queue_idx];
+ if (rxq)
+ bnxt_rx_queue_release_op(rxq);
+ }
+
+ rxq = rte_zmalloc_socket("bnxt_vfr_rx_queue",
+ sizeof(struct bnxt_rx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!rxq) {
+ PMD_DRV_LOG(ERR, "bnxt_vfr_rx_queue allocation failed!\n");
+ return -ENOMEM;
+ }
+
+ rxq->nb_rx_desc = nb_desc;
+
+ rc = bnxt_init_rx_ring_struct(rxq, socket_id);
+ if (rc)
+ goto out;
+
+ buf_ring = rte_zmalloc_socket("bnxt_rx_vfr_buf_ring",
+ sizeof(struct bnxt_sw_rx_bd) *
+ rxq->rx_ring->rx_ring_struct->ring_size,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!buf_ring) {
+ PMD_DRV_LOG(ERR, "bnxt_rx_vfr_buf_ring allocation failed!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rxq->rx_ring->rx_buf_ring = buf_ring;
+ rxq->queue_id = queue_idx;
+ rxq->port_id = eth_dev->data->port_id;
+ eth_dev->data->rx_queues[queue_idx] = rxq;
+
+ return 0;
+
+out:
+ if (rxq)
+ bnxt_rx_queue_release_op(rxq);
+
+ return rc;
+}
+
+void bnxt_vf_rep_rx_queue_release_op(void *rx_queue)
+{
+ struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
+
+ if (!rxq)
+ return;
+
+ bnxt_rx_queue_release_mbufs(rxq);
+
+ bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
+ bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
+ bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
+
+ rte_free(rxq);
+}
+
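+/* Representor Tx queues only record the queue id and owning representor;
+ * bnxt_vf_rep_tx_burst() redirects the actual transmit to the parent Tx
+ * queue of the same index.
+ */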
+int bnxt_vf_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+ struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
+ struct bnxt *parent_bp = rep_bp->parent_dev->data->dev_private;
+ struct bnxt_tx_queue *parent_txq, *txq;
+ struct bnxt_vf_rep_tx_queue *vfr_txq;
+
+ if (queue_idx >= BNXT_MAX_VF_REP_RINGS) {
+ PMD_DRV_LOG(ERR,
+ "Cannot create Tx rings %d. %d rings available\n",
+ queue_idx, BNXT_MAX_VF_REP_RINGS);
+ return -EINVAL;
+ }
+
+ if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
+ PMD_DRV_LOG(ERR, "nb_desc %d is invalid", nb_desc);
+ return -EINVAL;
+ }
+
+ parent_txq = parent_bp->tx_queues[queue_idx];
+ if (!parent_txq) {
+ PMD_DRV_LOG(ERR, "Parent TxQ has not been configured yet\n");
+ return -EINVAL;
+ }
+
+ if (nb_desc != parent_txq->nb_tx_desc) {
+ PMD_DRV_LOG(ERR, "nb_desc %d do not match parent txq", nb_desc);
+ return -EINVAL;
+ }
+
+ if (eth_dev->data->tx_queues) {
+ vfr_txq = eth_dev->data->tx_queues[queue_idx];
+ bnxt_vf_rep_tx_queue_release_op(vfr_txq);
+ vfr_txq = NULL;
+ }
+
+ vfr_txq = rte_zmalloc_socket("bnxt_vfr_tx_queue",
+ sizeof(struct bnxt_vf_rep_tx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!vfr_txq) {
+ PMD_DRV_LOG(ERR, "bnxt_vfr_tx_queue allocation failed!");
+ return -ENOMEM;
+ }
+ txq = rte_zmalloc_socket("bnxt_tx_queue",
+ sizeof(struct bnxt_tx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!txq) {
+ PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!");
+ rte_free(vfr_txq);
+ return -ENOMEM;
+ }
+
+ txq->nb_tx_desc = nb_desc;
+ txq->queue_id = queue_idx;
+ txq->port_id = eth_dev->data->port_id;
+ vfr_txq->txq = txq;
+ vfr_txq->bp = rep_bp;
+ eth_dev->data->tx_queues[queue_idx] = vfr_txq;
+
return 0;
}
-int bnxt_vf_rep_rx_queue_setup_op(__rte_unused struct rte_eth_dev *eth_dev,
- __rte_unused uint16_t queue_idx,
- __rte_unused uint16_t nb_desc,
- __rte_unused unsigned int socket_id,
- __rte_unused const struct rte_eth_rxconf *
- rx_conf,
- __rte_unused struct rte_mempool *mp)
+void bnxt_vf_rep_tx_queue_release_op(void *tx_queue)
+{
+ struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;
+
+ if (!vfr_txq)
+ return;
+
+ rte_free(vfr_txq->txq);
+ rte_free(vfr_txq);
+}
+
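+/* Report the per-queue software counters maintained by the representor
+ * Rx/Tx datapath.
+ */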
+int bnxt_vf_rep_stats_get_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_stats *stats)
{
+ struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
+ int i;
+
+ memset(stats, 0, sizeof(*stats));
+ for (i = 0; i < BNXT_MAX_VF_REP_RINGS; i++) {
+ stats->obytes += rep_bp->tx_bytes[i];
+ stats->opackets += rep_bp->tx_pkts[i];
+ stats->ibytes += rep_bp->rx_bytes[i];
+ stats->ipackets += rep_bp->rx_pkts[i];
+ stats->imissed += rep_bp->rx_drop_pkts[i];
+
+ stats->q_ipackets[i] = rep_bp->rx_pkts[i];
+ stats->q_ibytes[i] = rep_bp->rx_bytes[i];
+ stats->q_opackets[i] = rep_bp->tx_pkts[i];
+ stats->q_obytes[i] = rep_bp->tx_bytes[i];
+ stats->q_errors[i] = rep_bp->rx_drop_pkts[i];
+ }
+
return 0;
}
-int bnxt_vf_rep_tx_queue_setup_op(__rte_unused struct rte_eth_dev *eth_dev,
- __rte_unused uint16_t queue_idx,
- __rte_unused uint16_t nb_desc,
- __rte_unused unsigned int socket_id,
- __rte_unused const struct rte_eth_txconf *
- tx_conf)
+int bnxt_vf_rep_stats_reset_op(struct rte_eth_dev *eth_dev)
{
+ struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
+ int i;
+
+ for (i = 0; i < BNXT_MAX_VF_REP_RINGS; i++) {
+ rep_bp->tx_pkts[i] = 0;
+ rep_bp->tx_bytes[i] = 0;
+ rep_bp->rx_pkts[i] = 0;
+ rep_bp->rx_bytes[i] = 0;
+ rep_bp->rx_drop_pkts[i] = 0;
+ }
return 0;
}