00e44bce549f53f2f0a3a9e1321bd2183f9f59a2
[dpdk.git] / drivers / net / bnxt / bnxt_reps.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2020 Broadcom
3  * All rights reserved.
4  */
5
6 #include "bnxt.h"
7 #include "bnxt_ring.h"
8 #include "bnxt_reps.h"
9 #include "bnxt_rxq.h"
10 #include "bnxt_rxr.h"
11 #include "bnxt_txq.h"
12 #include "bnxt_txr.h"
13 #include "bnxt_hwrm.h"
14 #include "hsi_struct_def_dpdk.h"
15 #include "bnxt_tf_common.h"
16 #include "ulp_port_db.h"
17 #include "ulp_flow_db.h"
18
/* Ethdev ops table for bnxt VF representor ports.
 * Representors have no hardware datapath of their own; traffic is relayed
 * through the parent device, so only the control-path callbacks below are
 * provided (burst functions are installed separately on the ethdev).
 */
static const struct eth_dev_ops bnxt_vf_rep_dev_ops = {
	.dev_infos_get = bnxt_vf_rep_dev_info_get_op,
	.dev_configure = bnxt_vf_rep_dev_configure_op,
	.dev_start = bnxt_vf_rep_dev_start_op,
	.rx_queue_setup = bnxt_vf_rep_rx_queue_setup_op,
	.rx_queue_release = bnxt_vf_rep_rx_queue_release_op,
	.tx_queue_setup = bnxt_vf_rep_tx_queue_setup_op,
	.tx_queue_release = bnxt_vf_rep_tx_queue_release_op,
	.link_update = bnxt_vf_rep_link_update_op,
	.dev_close = bnxt_vf_rep_dev_close_op,
	.dev_stop = bnxt_vf_rep_dev_stop_op,
	.stats_get = bnxt_vf_rep_stats_get_op,
	.stats_reset = bnxt_vf_rep_stats_reset_op,
	.filter_ctrl = bnxt_filter_ctrl_op
};
34
35 uint16_t
36 bnxt_vfr_recv(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *mbuf)
37 {
38         struct rte_mbuf **prod_rx_buf;
39         struct bnxt_rx_ring_info *rep_rxr;
40         struct bnxt_rx_queue *rep_rxq;
41         struct rte_eth_dev *vfr_eth_dev;
42         struct bnxt_vf_representor *vfr_bp;
43         uint16_t mask;
44         uint8_t que;
45
46         vfr_eth_dev = &rte_eth_devices[port_id];
47         vfr_bp = vfr_eth_dev->data->dev_private;
48         /* If rxq_id happens to be > max rep_queue, use rxq0 */
49         que = queue_id < BNXT_MAX_VF_REP_RINGS ? queue_id : 0;
50         rep_rxq = vfr_bp->rx_queues[que];
51         rep_rxr = rep_rxq->rx_ring;
52         mask = rep_rxr->rx_ring_struct->ring_mask;
53
54         /* Put this mbuf on the RxQ of the Representor */
55         prod_rx_buf = &rep_rxr->rx_buf_ring[rep_rxr->rx_prod++ & mask];
56         if (!*prod_rx_buf) {
57                 *prod_rx_buf = mbuf;
58                 vfr_bp->rx_bytes[que] += mbuf->pkt_len;
59                 vfr_bp->rx_pkts[que]++;
60         } else {
61                 vfr_bp->rx_drop_bytes[que] += mbuf->pkt_len;
62                 vfr_bp->rx_drop_pkts[que]++;
63                 rte_pktmbuf_free(mbuf); /* Representor Rx ring full, drop pkt */
64         }
65
66         return 0;
67 }
68
69 static uint16_t
70 bnxt_vf_rep_rx_burst(void *rx_queue,
71                      struct rte_mbuf **rx_pkts,
72                      uint16_t nb_pkts)
73 {
74         struct bnxt_rx_queue *rxq = rx_queue;
75         struct rte_mbuf **cons_rx_buf;
76         struct bnxt_rx_ring_info *rxr;
77         uint16_t nb_rx_pkts = 0;
78         uint16_t mask, i;
79
80         if (!rxq)
81                 return 0;
82
83         rxr = rxq->rx_ring;
84         mask = rxr->rx_ring_struct->ring_mask;
85         for (i = 0; i < nb_pkts; i++) {
86                 cons_rx_buf = &rxr->rx_buf_ring[rxr->rx_cons & mask];
87                 if (*cons_rx_buf == NULL)
88                         return nb_rx_pkts;
89                 rx_pkts[nb_rx_pkts] = *cons_rx_buf;
90                 rx_pkts[nb_rx_pkts]->port = rxq->port_id;
91                 *cons_rx_buf = NULL;
92                 nb_rx_pkts++;
93                 rxr->rx_cons++;
94         }
95
96         return nb_rx_pkts;
97 }
98
99 static uint16_t
100 bnxt_vf_rep_tx_burst(void *tx_queue,
101                      struct rte_mbuf **tx_pkts,
102                      __rte_unused uint16_t nb_pkts)
103 {
104         struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;
105         struct bnxt_tx_queue *ptxq;
106         struct bnxt *parent;
107         struct  bnxt_vf_representor *vf_rep_bp;
108         int qid;
109         int rc;
110         int i;
111
112         if (!vfr_txq)
113                 return 0;
114
115         qid = vfr_txq->txq->queue_id;
116         vf_rep_bp = vfr_txq->bp;
117         parent = vf_rep_bp->parent_dev->data->dev_private;
118         pthread_mutex_lock(&parent->rep_info->vfr_lock);
119         ptxq = parent->tx_queues[qid];
120
121         ptxq->vfr_tx_cfa_action = vf_rep_bp->vfr_tx_cfa_action;
122
123         for (i = 0; i < nb_pkts; i++) {
124                 vf_rep_bp->tx_bytes[qid] += tx_pkts[i]->pkt_len;
125                 vf_rep_bp->tx_pkts[qid]++;
126         }
127
128         rc = bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts);
129         ptxq->vfr_tx_cfa_action = 0;
130         pthread_mutex_unlock(&parent->rep_info->vfr_lock);
131
132         return rc;
133 }
134
135 static int
136 bnxt_get_dflt_vnic_svif(struct bnxt *bp, struct bnxt_vf_representor *vf_rep_bp)
137 {
138         struct bnxt_rep_info *rep_info;
139         int rc;
140
141         rc = bnxt_hwrm_get_dflt_vnic_svif(bp, vf_rep_bp->fw_fid,
142                                           &vf_rep_bp->dflt_vnic_id,
143                                           &vf_rep_bp->svif);
144         if (rc) {
145                 PMD_DRV_LOG(ERR, "Failed to get default vnic id of VF\n");
146                 vf_rep_bp->dflt_vnic_id = BNXT_DFLT_VNIC_ID_INVALID;
147                 vf_rep_bp->svif = BNXT_SVIF_INVALID;
148         } else {
149                 PMD_DRV_LOG(INFO, "vf_rep->dflt_vnic_id = %d\n",
150                                 vf_rep_bp->dflt_vnic_id);
151         }
152         if (vf_rep_bp->dflt_vnic_id != BNXT_DFLT_VNIC_ID_INVALID &&
153             vf_rep_bp->svif != BNXT_SVIF_INVALID) {
154                 rep_info = &bp->rep_info[vf_rep_bp->vf_id];
155                 rep_info->conduit_valid = true;
156         }
157
158         return rc;
159 }
160
/* Per-port init callback for a VF representor ethdev.
 *
 * @params carries a template bnxt_vf_representor (vf_id, switch domain,
 * parent device) prepared by the caller; its fields are copied into the
 * freshly allocated private data.  A random MAC is generated for the
 * representor and link state is mirrored from the parent (PF or trusted
 * VF).  Always returns 0.
 */
int bnxt_vf_representor_init(struct rte_eth_dev *eth_dev, void *params)
{
	struct bnxt_vf_representor *vf_rep_bp = eth_dev->data->dev_private;
	struct bnxt_vf_representor *rep_params =
				 (struct bnxt_vf_representor *)params;
	struct rte_eth_link *link;
	struct bnxt *parent_bp;

	vf_rep_bp->vf_id = rep_params->vf_id;
	vf_rep_bp->switch_domain_id = rep_params->switch_domain_id;
	vf_rep_bp->parent_dev = rep_params->parent_dev;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
	eth_dev->data->representor_id = rep_params->vf_id;

	/* Representor has no real MAC; advertise a random one */
	rte_eth_random_addr(vf_rep_bp->dflt_mac_addr);
	memcpy(vf_rep_bp->mac_addr, vf_rep_bp->dflt_mac_addr,
	       sizeof(vf_rep_bp->mac_addr));
	eth_dev->data->mac_addrs =
		(struct rte_ether_addr *)&vf_rep_bp->mac_addr;
	eth_dev->dev_ops = &bnxt_vf_rep_dev_ops;

	/* No data-path, but need stub Rx/Tx functions to avoid crash
	 * when testing with ovs-dpdk
	 */
	eth_dev->rx_pkt_burst = bnxt_vf_rep_rx_burst;
	eth_dev->tx_pkt_burst = bnxt_vf_rep_tx_burst;
	/* Link state. Inherited from PF or trusted VF */
	parent_bp = vf_rep_bp->parent_dev->data->dev_private;
	link = &parent_bp->eth_dev->data->dev_link;

	eth_dev->data->dev_link.link_speed = link->link_speed;
	eth_dev->data->dev_link.link_duplex = link->link_duplex;
	eth_dev->data->dev_link.link_status = link->link_status;
	eth_dev->data->dev_link.link_autoneg = link->link_autoneg;

	PMD_DRV_LOG(INFO, "calling bnxt_print_link_info\n");
	bnxt_print_link_info(eth_dev);

	/* Pass the information to the rte_eth_dev_close() that it should also
	 * release the private port resources.
	 */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
	PMD_DRV_LOG(INFO,
		    "Switch domain id %d: Representor Device %d init done\n",
		    vf_rep_bp->switch_domain_id, vf_rep_bp->vf_id);

	/* FW function id of the VF = vf_id offset by the parent's first VF */
	vf_rep_bp->fw_fid = rep_params->vf_id + parent_bp->first_vf_id;
	PMD_DRV_LOG(INFO, "vf_rep->fw_fid = %d\n", vf_rep_bp->fw_fid);

	return 0;
}
213
214 int bnxt_vf_representor_uninit(struct rte_eth_dev *eth_dev)
215 {
216         struct bnxt *parent_bp;
217         struct bnxt_vf_representor *rep =
218                 (struct bnxt_vf_representor *)eth_dev->data->dev_private;
219
220         uint16_t vf_id;
221
222         eth_dev->data->mac_addrs = NULL;
223         eth_dev->dev_ops = NULL;
224
225         parent_bp = rep->parent_dev->data->dev_private;
226         if (!parent_bp)
227                 return 0;
228
229         parent_bp->num_reps--;
230         vf_id = rep->vf_id;
231         if (parent_bp->rep_info)
232                 memset(&parent_bp->rep_info[vf_id], 0,
233                        sizeof(parent_bp->rep_info[vf_id]));
234                 /* mark that this representor has been freed */
235         return 0;
236 }
237
238 int bnxt_vf_rep_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_compl)
239 {
240         struct bnxt *parent_bp;
241         struct bnxt_vf_representor *rep =
242                 (struct bnxt_vf_representor *)eth_dev->data->dev_private;
243         struct rte_eth_link *link;
244         int rc;
245
246         parent_bp = rep->parent_dev->data->dev_private;
247         if (!parent_bp)
248                 return 0;
249
250         rc = bnxt_link_update_op(parent_bp->eth_dev, wait_to_compl);
251
252         /* Link state. Inherited from PF or trusted VF */
253         link = &parent_bp->eth_dev->data->dev_link;
254
255         eth_dev->data->dev_link.link_speed = link->link_speed;
256         eth_dev->data->dev_link.link_duplex = link->link_duplex;
257         eth_dev->data->dev_link.link_status = link->link_status;
258         eth_dev->data->dev_link.link_autoneg = link->link_autoneg;
259         bnxt_print_link_info(eth_dev);
260
261         return rc;
262 }
263
/* TRUFLOW-side allocation for a VFR: register the port with the ULP port
 * database, install the VFR's default flow rules and allocate the CFA
 * representor resources in FW.
 *
 * NOTE(review): returns 0 (success) when parent_bp or its ulp_ctx is
 * missing — callers then log "allocated representor" for a port nothing
 * was done for; confirm this is intentional.
 */
static int bnxt_tf_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
{
	int rc;
	struct bnxt_vf_representor *vfr = vfr_ethdev->data->dev_private;
	struct rte_eth_dev *parent_dev = vfr->parent_dev;
	struct bnxt *parent_bp = parent_dev->data->dev_private;

	if (!parent_bp || !parent_bp->ulp_ctx) {
		BNXT_TF_DBG(ERR, "Invalid arguments\n");
		return 0;
	}

	/* Update the ULP portdata base with the new VFR interface */
	rc = ulp_port_db_dev_port_intf_update(parent_bp->ulp_ctx, vfr_ethdev);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to update ulp port details vfr:%u\n",
			    vfr->vf_id);
		return rc;
	}

	/* Create the default rules for the VFR */
	rc = bnxt_ulp_create_vfr_default_rules(vfr_ethdev);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to create VFR default rules vfr:%u\n",
			    vfr->vf_id);
		return rc;
	}
	/* update the port id so you can backtrack to ethdev */
	vfr->dpdk_port_id = vfr_ethdev->data->port_id;
	rc = bnxt_hwrm_cfa_vfr_alloc(parent_bp, vfr->vf_id);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed in hwrm vfr alloc vfr:%u rc=%d\n",
			    vfr->vf_id, rc);
		/* Roll back the default rules created above */
		(void)bnxt_ulp_delete_vfr_default_rules(vfr);
	}

	return rc;
}
302
/* Allocate the representor in FW (idempotent): skips work if the VFR's
 * Tx CFA action is already set, otherwise delegates to bnxt_tf_vfr_alloc().
 *
 * Returns 0 on success or if already allocated; -ENOMEM/-EIO on bad state;
 * otherwise the bnxt_tf_vfr_alloc() result.
 */
static int bnxt_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
{
	int rc = 0;
	struct bnxt_vf_representor *vfr = vfr_ethdev->data->dev_private;
	struct bnxt *parent_bp;

	if (!vfr || !vfr->parent_dev) {
		PMD_DRV_LOG(ERR,
				"No memory allocated for representor\n");
		return -ENOMEM;
	}

	/* NOTE(review): a NULL parent_bp passes this check (only the
	 * "parent present but no ulp_ctx" case errors out) and falls
	 * through to bnxt_tf_vfr_alloc(), which treats it as a no-op
	 * success — verify that is the intended behavior.
	 */
	parent_bp = vfr->parent_dev->data->dev_private;
	if (parent_bp && !parent_bp->ulp_ctx) {
		PMD_DRV_LOG(ERR,
			    "ulp context not allocated for parent\n");
		return -EIO;
	}

	/* Check if representor has been already allocated in FW */
	if (vfr->vfr_tx_cfa_action)
		return 0;

	/*
	 * Alloc VF rep rules in CFA after default VNIC is created.
	 * Otherwise the FW will create the VF-rep rules with
	 * default drop action.
	 */
	rc = bnxt_tf_vfr_alloc(vfr_ethdev);
	if (!rc)
		PMD_DRV_LOG(DEBUG, "allocated representor %d in FW\n",
			    vfr->vf_id);
	else
		PMD_DRV_LOG(ERR,
			    "Failed to alloc representor %d in FW\n",
			    vfr->vf_id);

	return rc;
}
342
343 static void bnxt_vf_rep_free_rx_mbufs(struct bnxt_vf_representor *rep_bp)
344 {
345         struct bnxt_rx_queue *rxq;
346         unsigned int i;
347
348         for (i = 0; i < rep_bp->rx_nr_rings; i++) {
349                 rxq = rep_bp->rx_queues[i];
350                 bnxt_rx_queue_release_mbufs(rxq);
351         }
352 }
353
/* dev_start handler for a representor port.
 *
 * Ensures the conduit to the backing VF (default VNIC id + SVIF) is known
 * before allocating the representor in FW, then installs the real burst
 * functions and mirrors the parent's link state.
 * Returns 0 on success, an error code otherwise.
 */
int bnxt_vf_rep_dev_start_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
	struct bnxt_rep_info *rep_info;
	struct bnxt *parent_bp;
	int rc;

	parent_bp = rep_bp->parent_dev->data->dev_private;
	rep_info = &parent_bp->rep_info[rep_bp->vf_id];

	/* Serialize conduit discovery so concurrent starts of the same
	 * representor don't race on rep_info->conduit_valid.
	 */
	pthread_mutex_lock(&rep_info->vfr_start_lock);
	if (!rep_info->conduit_valid) {
		rc = bnxt_get_dflt_vnic_svif(parent_bp, rep_bp);
		if (rc || !rep_info->conduit_valid) {
			pthread_mutex_unlock(&rep_info->vfr_start_lock);
			return rc;
		}
	}
	pthread_mutex_unlock(&rep_info->vfr_start_lock);

	rc = bnxt_vfr_alloc(eth_dev);
	if (rc) {
		/* FW alloc failed: report link down and drop any mbufs
		 * already queued on the representor Rx rings.
		 */
		eth_dev->data->dev_link.link_status = 0;
		bnxt_vf_rep_free_rx_mbufs(rep_bp);
		return rc;
	}
	eth_dev->rx_pkt_burst = &bnxt_vf_rep_rx_burst;
	eth_dev->tx_pkt_burst = &bnxt_vf_rep_tx_burst;
	bnxt_vf_rep_link_update_op(eth_dev, 1);

	return 0;
}
386
/* Delete the default flow rules created for this VFR at alloc time
 * (see bnxt_tf_vfr_alloc); CFA teardown is done separately by the caller.
 */
static int bnxt_tf_vfr_free(struct bnxt_vf_representor *vfr)
{
	return bnxt_ulp_delete_vfr_default_rules(vfr);
}
391
/* Free the representor's FW resources (idempotent): deletes the VFR's
 * default rules, clears the cached Tx CFA action and releases the CFA
 * representor in FW.
 *
 * NOTE(review): the "freed representor" DEBUG log is emitted even when
 * bnxt_tf_vfr_free() failed, and its rc is then overwritten by the
 * bnxt_hwrm_cfa_vfr_free() result — confirm the rule-deletion error is
 * deliberately non-fatal.
 */
static int bnxt_vfr_free(struct bnxt_vf_representor *vfr)
{
	int rc = 0;
	struct bnxt *parent_bp;

	if (!vfr || !vfr->parent_dev) {
		PMD_DRV_LOG(ERR,
			    "No memory allocated for representor\n");
		return -ENOMEM;
	}

	parent_bp = vfr->parent_dev->data->dev_private;
	if (!parent_bp)
		return 0;

	/* Check if representor has been already freed in FW */
	if (!vfr->vfr_tx_cfa_action)
		return 0;

	rc = bnxt_tf_vfr_free(vfr);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "Failed to free representor %d in FW\n",
			    vfr->vf_id);
	}

	PMD_DRV_LOG(DEBUG, "freed representor %d in FW\n",
		    vfr->vf_id);
	/* Clearing this marks the representor as freed (see the
	 * idempotency check above and in bnxt_vfr_alloc).
	 */
	vfr->vfr_tx_cfa_action = 0;

	rc = bnxt_hwrm_cfa_vfr_free(parent_bp, vfr->vf_id);

	return rc;
}
426
427 void bnxt_vf_rep_dev_stop_op(struct rte_eth_dev *eth_dev)
428 {
429         struct bnxt_vf_representor *vfr_bp = eth_dev->data->dev_private;
430
431         /* Avoid crashes as we are about to free queues */
432         eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
433         eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
434
435         bnxt_vfr_free(vfr_bp);
436
437         if (eth_dev->data->dev_started)
438                 eth_dev->data->dev_link.link_status = 0;
439
440         bnxt_vf_rep_free_rx_mbufs(vfr_bp);
441 }
442
/* dev_close handler: representor close is just the uninit path */
void bnxt_vf_rep_dev_close_op(struct rte_eth_dev *eth_dev)
{
	bnxt_vf_representor_uninit(eth_dev);
}
447
/* dev_infos_get handler: capabilities are mostly derived from the parent
 * device, since the representor's traffic flows through it.
 */
int bnxt_vf_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
				struct rte_eth_dev_info *dev_info)
{
	struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
	struct bnxt *parent_bp;
	unsigned int max_rx_rings;
	int rc = 0;

	/* MAC Specifics */
	parent_bp = rep_bp->parent_dev->data->dev_private;
	if (!parent_bp) {
		PMD_DRV_LOG(ERR, "Rep parent NULL!\n");
		/* NOTE(review): rc is still 0 here, so a missing parent is
		 * reported as success with dev_info untouched — confirm
		 * callers expect that rather than a negative errno.
		 */
		return rc;
	}
	PMD_DRV_LOG(DEBUG, "Representor dev_info_get_op\n");
	dev_info->max_mac_addrs = parent_bp->max_l2_ctx;
	dev_info->max_hash_mac_addrs = 0;

	max_rx_rings = BNXT_MAX_VF_REP_RINGS;
	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
	dev_info->max_rx_queues = max_rx_rings;
	dev_info->max_tx_queues = max_rx_rings;
	dev_info->reta_size = bnxt_rss_hash_tbl_size(parent_bp);
	dev_info->hash_key_size = 40;

	/* MTU specifics */
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->max_mtu = BNXT_MAX_MTU;

	/* Fast path specifics */
	dev_info->min_rx_bufsize = 1;
	dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;

	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
	if (parent_bp->flags & BNXT_FLAG_PTP_SUPPORTED)
		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;

	return 0;
}
489
490 int bnxt_vf_rep_dev_configure_op(__rte_unused struct rte_eth_dev *eth_dev)
491 {
492         struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
493
494         PMD_DRV_LOG(DEBUG, "Representor dev_configure_op\n");
495         rep_bp->rx_queues = (void *)eth_dev->data->rx_queues;
496         rep_bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
497         rep_bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
498
499         return 0;
500 }
501
502 int bnxt_vf_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
503                           uint16_t queue_idx,
504                           uint16_t nb_desc,
505                           unsigned int socket_id,
506                           __rte_unused const struct rte_eth_rxconf *rx_conf,
507                           __rte_unused struct rte_mempool *mp)
508 {
509         struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
510         struct bnxt *parent_bp = rep_bp->parent_dev->data->dev_private;
511         struct bnxt_rx_queue *parent_rxq;
512         struct bnxt_rx_queue *rxq;
513         struct rte_mbuf **buf_ring;
514         int rc = 0;
515
516         if (queue_idx >= BNXT_MAX_VF_REP_RINGS) {
517                 PMD_DRV_LOG(ERR,
518                             "Cannot create Rx ring %d. %d rings available\n",
519                             queue_idx, BNXT_MAX_VF_REP_RINGS);
520                 return -EINVAL;
521         }
522
523         if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
524                 PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
525                 return -EINVAL;
526         }
527
528         if (!parent_bp->rx_queues) {
529                 PMD_DRV_LOG(ERR, "Parent Rx qs not configured yet\n");
530                 return -EINVAL;
531         }
532
533         parent_rxq = parent_bp->rx_queues[queue_idx];
534         if (!parent_rxq) {
535                 PMD_DRV_LOG(ERR, "Parent RxQ has not been configured yet\n");
536                 return -EINVAL;
537         }
538
539         if (nb_desc != parent_rxq->nb_rx_desc) {
540                 PMD_DRV_LOG(ERR, "nb_desc %d do not match parent rxq", nb_desc);
541                 return -EINVAL;
542         }
543
544         if (eth_dev->data->rx_queues) {
545                 rxq = eth_dev->data->rx_queues[queue_idx];
546                 if (rxq)
547                         bnxt_rx_queue_release_op(rxq);
548         }
549
550         rxq = rte_zmalloc_socket("bnxt_vfr_rx_queue",
551                                  sizeof(struct bnxt_rx_queue),
552                                  RTE_CACHE_LINE_SIZE, socket_id);
553         if (!rxq) {
554                 PMD_DRV_LOG(ERR, "bnxt_vfr_rx_queue allocation failed!\n");
555                 return -ENOMEM;
556         }
557
558         rxq->nb_rx_desc = nb_desc;
559
560         rc = bnxt_init_rx_ring_struct(rxq, socket_id);
561         if (rc)
562                 goto out;
563
564         buf_ring = rte_zmalloc_socket("bnxt_rx_vfr_buf_ring",
565                                       sizeof(struct rte_mbuf *) *
566                                       rxq->rx_ring->rx_ring_struct->ring_size,
567                                       RTE_CACHE_LINE_SIZE, socket_id);
568         if (!buf_ring) {
569                 PMD_DRV_LOG(ERR, "bnxt_rx_vfr_buf_ring allocation failed!\n");
570                 rc = -ENOMEM;
571                 goto out;
572         }
573
574         rxq->rx_ring->rx_buf_ring = buf_ring;
575         rxq->queue_id = queue_idx;
576         rxq->port_id = eth_dev->data->port_id;
577         eth_dev->data->rx_queues[queue_idx] = rxq;
578
579         return 0;
580
581 out:
582         if (rxq)
583                 bnxt_rx_queue_release_op(rxq);
584
585         return rc;
586 }
587
/* rx_queue_release handler for representor Rx queues: frees queued mbufs,
 * the ring structs created by bnxt_init_rx_ring_struct() and the queue.
 *
 * NOTE(review): rx_buf_ring was rte_zmalloc'd separately in
 * bnxt_vf_rep_rx_queue_setup_op(); unless bnxt_free_ring() releases it
 * through the ring's vmem pointer, it leaks here — verify against
 * bnxt_ring.c.
 */
void bnxt_vf_rep_rx_queue_release_op(void *rx_queue)
{
	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;

	if (!rxq)
		return;

	bnxt_rx_queue_release_mbufs(rxq);

	bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
	bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
	bnxt_free_ring(rxq->cp_ring->cp_ring_struct);

	rte_free(rxq);
}
603
604 int bnxt_vf_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
605                           uint16_t queue_idx,
606                           uint16_t nb_desc,
607                           unsigned int socket_id,
608                           __rte_unused const struct rte_eth_txconf *tx_conf)
609 {
610         struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
611         struct bnxt *parent_bp = rep_bp->parent_dev->data->dev_private;
612         struct bnxt_tx_queue *parent_txq, *txq;
613         struct bnxt_vf_rep_tx_queue *vfr_txq;
614
615         if (queue_idx >= BNXT_MAX_VF_REP_RINGS) {
616                 PMD_DRV_LOG(ERR,
617                             "Cannot create Tx rings %d. %d rings available\n",
618                             queue_idx, BNXT_MAX_VF_REP_RINGS);
619                 return -EINVAL;
620         }
621
622         if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
623                 PMD_DRV_LOG(ERR, "nb_desc %d is invalid", nb_desc);
624                 return -EINVAL;
625         }
626
627         if (!parent_bp->tx_queues) {
628                 PMD_DRV_LOG(ERR, "Parent Tx qs not configured yet\n");
629                 return -EINVAL;
630         }
631
632         parent_txq = parent_bp->tx_queues[queue_idx];
633         if (!parent_txq) {
634                 PMD_DRV_LOG(ERR, "Parent TxQ has not been configured yet\n");
635                 return -EINVAL;
636         }
637
638         if (nb_desc != parent_txq->nb_tx_desc) {
639                 PMD_DRV_LOG(ERR, "nb_desc %d do not match parent txq", nb_desc);
640                 return -EINVAL;
641         }
642
643         if (eth_dev->data->tx_queues) {
644                 vfr_txq = eth_dev->data->tx_queues[queue_idx];
645                 bnxt_vf_rep_tx_queue_release_op(vfr_txq);
646                 vfr_txq = NULL;
647         }
648
649         vfr_txq = rte_zmalloc_socket("bnxt_vfr_tx_queue",
650                                      sizeof(struct bnxt_vf_rep_tx_queue),
651                                      RTE_CACHE_LINE_SIZE, socket_id);
652         if (!vfr_txq) {
653                 PMD_DRV_LOG(ERR, "bnxt_vfr_tx_queue allocation failed!");
654                 return -ENOMEM;
655         }
656         txq = rte_zmalloc_socket("bnxt_tx_queue",
657                                  sizeof(struct bnxt_tx_queue),
658                                  RTE_CACHE_LINE_SIZE, socket_id);
659         if (!txq) {
660                 PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!");
661                 rte_free(vfr_txq);
662                 return -ENOMEM;
663         }
664
665         txq->nb_tx_desc = nb_desc;
666         txq->queue_id = queue_idx;
667         txq->port_id = eth_dev->data->port_id;
668         vfr_txq->txq = txq;
669         vfr_txq->bp = rep_bp;
670         eth_dev->data->tx_queues[queue_idx] = vfr_txq;
671
672         return 0;
673 }
674
675 void bnxt_vf_rep_tx_queue_release_op(void *tx_queue)
676 {
677         struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;
678
679         if (!vfr_txq)
680                 return;
681
682         rte_free(vfr_txq->txq);
683         rte_free(vfr_txq);
684 }
685
686 int bnxt_vf_rep_stats_get_op(struct rte_eth_dev *eth_dev,
687                              struct rte_eth_stats *stats)
688 {
689         struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
690         int i;
691
692         memset(stats, 0, sizeof(*stats));
693         for (i = 0; i < BNXT_MAX_VF_REP_RINGS; i++) {
694                 stats->obytes += rep_bp->tx_bytes[i];
695                 stats->opackets += rep_bp->tx_pkts[i];
696                 stats->ibytes += rep_bp->rx_bytes[i];
697                 stats->ipackets += rep_bp->rx_pkts[i];
698                 stats->imissed += rep_bp->rx_drop_pkts[i];
699
700                 stats->q_ipackets[i] = rep_bp->rx_pkts[i];
701                 stats->q_ibytes[i] = rep_bp->rx_bytes[i];
702                 stats->q_opackets[i] = rep_bp->tx_pkts[i];
703                 stats->q_obytes[i] = rep_bp->tx_bytes[i];
704                 stats->q_errors[i] = rep_bp->rx_drop_pkts[i];
705         }
706
707         return 0;
708 }
709
710 int bnxt_vf_rep_stats_reset_op(struct rte_eth_dev *eth_dev)
711 {
712         struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
713         int i;
714
715         for (i = 0; i < BNXT_MAX_VF_REP_RINGS; i++) {
716                 rep_bp->tx_pkts[i] = 0;
717                 rep_bp->tx_bytes[i] = 0;
718                 rep_bp->rx_pkts[i] = 0;
719                 rep_bp->rx_bytes[i] = 0;
720                 rep_bp->rx_drop_pkts[i] = 0;
721         }
722         return 0;
723 }