[dpdk.git] drivers/net/bnxt/bnxt_reps.c @ 6695896dd252b8e883bc71a057a0de819f1049a6
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2020 Broadcom
3  * All rights reserved.
4  */
5
6 #include "bnxt.h"
7 #include "bnxt_ring.h"
8 #include "bnxt_reps.h"
9 #include "bnxt_rxq.h"
10 #include "bnxt_rxr.h"
11 #include "bnxt_txq.h"
12 #include "bnxt_txr.h"
13 #include "bnxt_hwrm.h"
14 #include "hsi_struct_def_dpdk.h"
15 #include "bnxt_tf_common.h"
16 #include "ulp_port_db.h"
17 #include "ulp_flow_db.h"
18
19 static const struct eth_dev_ops bnxt_vf_rep_dev_ops = {
20         .dev_infos_get = bnxt_vf_rep_dev_info_get_op,
21         .dev_configure = bnxt_vf_rep_dev_configure_op,
22         .dev_start = bnxt_vf_rep_dev_start_op,
23         .rx_queue_setup = bnxt_vf_rep_rx_queue_setup_op,
24         .rx_queue_release = bnxt_vf_rep_rx_queue_release_op,
25         .tx_queue_setup = bnxt_vf_rep_tx_queue_setup_op,
26         .tx_queue_release = bnxt_vf_rep_tx_queue_release_op,
27         .link_update = bnxt_vf_rep_link_update_op,
28         .dev_close = bnxt_vf_rep_dev_close_op,
29         .dev_stop = bnxt_vf_rep_dev_stop_op,
30         .stats_get = bnxt_vf_rep_stats_get_op,
31         .stats_reset = bnxt_vf_rep_stats_reset_op,
32         .filter_ctrl = bnxt_filter_ctrl_op
33 };
34
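/*
 * bnxt_vfr_recv() - hand a packet received on the parent port over to the
 * VF representor identified by @port_id.  Intended to be invoked from the
 * parent's Rx path: the mbuf is parked on the representor's software Rx
 * ring and later drained by bnxt_vf_rep_rx_burst().  If the slot is still
 * occupied the packet is dropped and counted; the function always returns 0.
 */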
35 uint16_t
36 bnxt_vfr_recv(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *mbuf)
37 {
38         struct bnxt_sw_rx_bd *prod_rx_buf;
39         struct bnxt_rx_ring_info *rep_rxr;
40         struct bnxt_rx_queue *rep_rxq;
41         struct rte_eth_dev *vfr_eth_dev;
42         struct bnxt_vf_representor *vfr_bp;
43         uint16_t mask;
44         uint8_t que;
45
46         vfr_eth_dev = &rte_eth_devices[port_id];
47         vfr_bp = vfr_eth_dev->data->dev_private;
48         /* If queue_id is beyond the supported representor rings, use rxq 0 */
49         que = queue_id < BNXT_MAX_VF_REP_RINGS ? queue_id : 0;
50         rep_rxq = vfr_bp->rx_queues[que];
51         rep_rxr = rep_rxq->rx_ring;
52         mask = rep_rxr->rx_ring_struct->ring_mask;
53
54         /* Put this mbuf on the RxQ of the Representor */
55         prod_rx_buf =
56                 &rep_rxr->rx_buf_ring[rep_rxr->rx_prod++ & mask];
57         if (!prod_rx_buf->mbuf) {
58                 prod_rx_buf->mbuf = mbuf;
59                 vfr_bp->rx_bytes[que] += mbuf->pkt_len;
60                 vfr_bp->rx_pkts[que]++;
61         } else {
62                 vfr_bp->rx_drop_bytes[que] += mbuf->pkt_len;
63                 vfr_bp->rx_drop_pkts[que]++;
64                 rte_pktmbuf_free(mbuf); /* Representor Rx ring full, drop pkt */
65         }
66
67         return 0;
68 }
69
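/*
 * Representor Rx burst: drain mbufs that bnxt_vfr_recv() queued on this
 * representor's software ring.  No hardware access happens here; the
 * packets were already received by the parent port.
 */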
70 static uint16_t
71 bnxt_vf_rep_rx_burst(void *rx_queue,
72                      struct rte_mbuf **rx_pkts,
73                      uint16_t nb_pkts)
74 {
75         struct bnxt_rx_queue *rxq = rx_queue;
76         struct bnxt_sw_rx_bd *cons_rx_buf;
77         struct bnxt_rx_ring_info *rxr;
78         uint16_t nb_rx_pkts = 0;
79         uint16_t mask, i;
80
81         if (!rxq)
82                 return 0;
83
84         rxr = rxq->rx_ring;
85         mask = rxr->rx_ring_struct->ring_mask;
86         for (i = 0; i < nb_pkts; i++) {
87                 cons_rx_buf = &rxr->rx_buf_ring[rxr->rx_cons & mask];
88                 if (!cons_rx_buf->mbuf)
89                         return nb_rx_pkts;
90                 rx_pkts[nb_rx_pkts] = cons_rx_buf->mbuf;
91                 rx_pkts[nb_rx_pkts]->port = rxq->port_id;
92                 cons_rx_buf->mbuf = NULL;
93                 nb_rx_pkts++;
94                 rxr->rx_cons++;
95         }
96
97         return nb_rx_pkts;
98 }
99
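/*
 * Representor Tx burst: transmit on behalf of the VF by borrowing the
 * parent PF Tx queue with the same queue id.  The representor's Tx CFA
 * action is installed on the parent queue for the duration of the burst
 * (under rep_info->vfr_lock) so that the packets are steered to the VF.
 */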
100 static uint16_t
101 bnxt_vf_rep_tx_burst(void *tx_queue,
102                      struct rte_mbuf **tx_pkts,
103                      uint16_t nb_pkts)
104 {
105         struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;
106         struct bnxt_tx_queue *ptxq;
107         struct bnxt *parent;
108         struct bnxt_vf_representor *vf_rep_bp;
109         int qid;
110         int rc;
111         int i;
112
113         if (!vfr_txq)
114                 return 0;
115
116         qid = vfr_txq->txq->queue_id;
117         vf_rep_bp = vfr_txq->bp;
118         parent = vf_rep_bp->parent_dev->data->dev_private;
119         pthread_mutex_lock(&parent->rep_info->vfr_lock);
120         ptxq = parent->tx_queues[qid];
121
122         ptxq->vfr_tx_cfa_action = vf_rep_bp->vfr_tx_cfa_action;
123
124         for (i = 0; i < nb_pkts; i++) {
125                 vf_rep_bp->tx_bytes[qid] += tx_pkts[i]->pkt_len;
126                 vf_rep_bp->tx_pkts[qid]++;
127         }
128
129         rc = bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts);
130         ptxq->vfr_tx_cfa_action = 0;
131         pthread_mutex_unlock(&parent->rep_info->vfr_lock);
132
133         return rc;
134 }
135
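/*
 * Query the firmware for the VF's default VNIC id and SVIF.  The
 * representor conduit is marked valid only once both values are known;
 * on failure the fields are set to their "invalid" markers.
 */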
136 static int
137 bnxt_get_dflt_vnic_svif(struct bnxt *bp, struct bnxt_vf_representor *vf_rep_bp)
138 {
139         struct bnxt_rep_info *rep_info;
140         int rc;
141
142         rc = bnxt_hwrm_get_dflt_vnic_svif(bp, vf_rep_bp->fw_fid,
143                                           &vf_rep_bp->dflt_vnic_id,
144                                           &vf_rep_bp->svif);
145         if (rc) {
146                 PMD_DRV_LOG(ERR, "Failed to get default vnic id of VF\n");
147                 vf_rep_bp->dflt_vnic_id = BNXT_DFLT_VNIC_ID_INVALID;
148                 vf_rep_bp->svif = BNXT_SVIF_INVALID;
149         } else {
150                 PMD_DRV_LOG(INFO, "vf_rep->dflt_vnic_id = %d\n",
151                                 vf_rep_bp->dflt_vnic_id);
152         }
153         if (vf_rep_bp->dflt_vnic_id != BNXT_DFLT_VNIC_ID_INVALID &&
154             vf_rep_bp->svif != BNXT_SVIF_INVALID) {
155                 rep_info = &bp->rep_info[vf_rep_bp->vf_id];
156                 rep_info->conduit_valid = true;
157         }
158
159         return rc;
160 }
161
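/*
 * Ethdev init callback for a VF representor port: copy the identity passed
 * in @params (VF id, switch domain, parent device), assign a random MAC
 * address, install the representor ops and burst handlers, inherit the
 * link state from the parent PF/trusted VF and derive the FW function id.
 */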
162 int bnxt_vf_representor_init(struct rte_eth_dev *eth_dev, void *params)
163 {
164         struct bnxt_vf_representor *vf_rep_bp = eth_dev->data->dev_private;
165         struct bnxt_vf_representor *rep_params =
166                                  (struct bnxt_vf_representor *)params;
167         struct rte_eth_link *link;
168         struct bnxt *parent_bp;
169
170         vf_rep_bp->vf_id = rep_params->vf_id;
171         vf_rep_bp->switch_domain_id = rep_params->switch_domain_id;
172         vf_rep_bp->parent_dev = rep_params->parent_dev;
173
174         eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
175         eth_dev->data->representor_id = rep_params->vf_id;
176
177         rte_eth_random_addr(vf_rep_bp->dflt_mac_addr);
178         memcpy(vf_rep_bp->mac_addr, vf_rep_bp->dflt_mac_addr,
179                sizeof(vf_rep_bp->mac_addr));
180         eth_dev->data->mac_addrs =
181                 (struct rte_ether_addr *)&vf_rep_bp->mac_addr;
182         eth_dev->dev_ops = &bnxt_vf_rep_dev_ops;
183
184         /* Install Rx/Tx burst handlers up front so an application such
185          * as ovs-dpdk polling the port before it is fully set up does not crash.
186          */
187         eth_dev->rx_pkt_burst = bnxt_vf_rep_rx_burst;
188         eth_dev->tx_pkt_burst = bnxt_vf_rep_tx_burst;
189         /* Link state. Inherited from PF or trusted VF */
190         parent_bp = vf_rep_bp->parent_dev->data->dev_private;
191         link = &parent_bp->eth_dev->data->dev_link;
192
193         eth_dev->data->dev_link.link_speed = link->link_speed;
194         eth_dev->data->dev_link.link_duplex = link->link_duplex;
195         eth_dev->data->dev_link.link_status = link->link_status;
196         eth_dev->data->dev_link.link_autoneg = link->link_autoneg;
197
198         PMD_DRV_LOG(INFO, "calling bnxt_print_link_info\n");
199         bnxt_print_link_info(eth_dev);
200
201         /* Tell rte_eth_dev_close() that it should also release the
202          * private port resources.
203          */
204         eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
205         PMD_DRV_LOG(INFO,
206                     "Switch domain id %d: Representor Device %d init done\n",
207                     vf_rep_bp->switch_domain_id, vf_rep_bp->vf_id);
208
209         vf_rep_bp->fw_fid = rep_params->vf_id + parent_bp->first_vf_id;
210         PMD_DRV_LOG(INFO, "vf_rep->fw_fid = %d\n", vf_rep_bp->fw_fid);
211
212         return 0;
213 }
214
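/*
 * Ethdev uninit callback for a VF representor: detach the MAC address and
 * ops pointers, decrement the parent's representor count and clear the
 * parent's rep_info slot for this VF.
 */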
215 int bnxt_vf_representor_uninit(struct rte_eth_dev *eth_dev)
216 {
217         struct bnxt *parent_bp;
218         struct bnxt_vf_representor *rep =
219                 (struct bnxt_vf_representor *)eth_dev->data->dev_private;
220
221         uint16_t vf_id;
222
223         eth_dev->data->mac_addrs = NULL;
224         eth_dev->dev_ops = NULL;
225
226         parent_bp = rep->parent_dev->data->dev_private;
227         if (!parent_bp)
228                 return 0;
229
230         parent_bp->num_reps--;
231         vf_id = rep->vf_id;
232         if (parent_bp->rep_info)
233                 memset(&parent_bp->rep_info[vf_id], 0,
234                        sizeof(parent_bp->rep_info[vf_id]));
235         /* The memset above marks this representor slot as freed */
236         return 0;
237 }
238
239 int bnxt_vf_rep_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_compl)
240 {
241         struct bnxt *parent_bp;
242         struct bnxt_vf_representor *rep =
243                 (struct bnxt_vf_representor *)eth_dev->data->dev_private;
244         struct rte_eth_link *link;
245         int rc;
246
247         parent_bp = rep->parent_dev->data->dev_private;
248         if (!parent_bp)
249                 return 0;
250
251         rc = bnxt_link_update_op(parent_bp->eth_dev, wait_to_compl);
252
253         /* Link state. Inherited from PF or trusted VF */
254         link = &parent_bp->eth_dev->data->dev_link;
255
256         eth_dev->data->dev_link.link_speed = link->link_speed;
257         eth_dev->data->dev_link.link_duplex = link->link_duplex;
258         eth_dev->data->dev_link.link_status = link->link_status;
259         eth_dev->data->dev_link.link_autoneg = link->link_autoneg;
260         bnxt_print_link_info(eth_dev);
261
262         return rc;
263 }
264
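/*
 * Create the TruFlow default rules that form the representor conduit: a
 * VFR->VF rule, whose CFA action is cached for use in the Tx path, and a
 * VF->VFR rule, followed by the HWRM CFA representor allocation.  Any
 * failure unwinds the rules created so far and returns -EIO.
 */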
265 static int bnxt_tf_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
266 {
267         int rc;
268         struct bnxt_vf_representor *vfr = vfr_ethdev->data->dev_private;
269         struct rte_eth_dev *parent_dev = vfr->parent_dev;
270         struct bnxt *parent_bp = parent_dev->data->dev_private;
271         uint16_t vfr_port_id = vfr_ethdev->data->port_id;
272         struct ulp_tlv_param param_list[] = {
273                 {
274                         .type = BNXT_ULP_DF_PARAM_TYPE_DEV_PORT_ID,
275                         .length = 2,
276                         .value = {(vfr_port_id >> 8) & 0xff, vfr_port_id & 0xff}
277                 },
278                 {
279                         .type = BNXT_ULP_DF_PARAM_TYPE_LAST,
280                         .length = 0,
281                         .value = {0}
282                 }
283         };
284
285         ulp_port_db_dev_port_intf_update(parent_bp->ulp_ctx, vfr_ethdev);
286
287         rc = ulp_default_flow_create(parent_dev, param_list,
288                                      BNXT_ULP_DF_TPL_VFREP_TO_VF,
289                                      &vfr->rep2vf_flow_id);
290         if (rc) {
291                 BNXT_TF_DBG(DEBUG,
292                             "Default flow rule creation for VFR->VF failed!\n");
293                 goto err;
294         }
295
296         BNXT_TF_DBG(DEBUG, "*** Default flow rule created for VFR->VF! ***\n");
297         BNXT_TF_DBG(DEBUG, "rep2vf_flow_id = %d\n", vfr->rep2vf_flow_id);
298         rc = ulp_default_flow_db_cfa_action_get(parent_bp->ulp_ctx,
299                                                 vfr->rep2vf_flow_id,
300                                                 &vfr->vfr_tx_cfa_action);
301         if (rc) {
302                 BNXT_TF_DBG(DEBUG,
303                             "Failed to get action_ptr for VFR->VF dflt rule\n");
304                 goto rep2vf_free;
305         }
306         BNXT_TF_DBG(DEBUG, "tx_cfa_action = %d\n", vfr->vfr_tx_cfa_action);
307         rc = ulp_default_flow_create(parent_dev, param_list,
308                                      BNXT_ULP_DF_TPL_VF_TO_VFREP,
309                                      &vfr->vf2rep_flow_id);
310         if (rc) {
311                 BNXT_TF_DBG(DEBUG,
312                             "Default flow rule creation for VF->VFR failed!\n");
313                 goto rep2vf_free;
314         }
315
316         BNXT_TF_DBG(DEBUG, "*** Default flow rule created for VF->VFR! ***\n");
317         BNXT_TF_DBG(DEBUG, "vf2rep_flow_id = %d\n", vfr->vf2rep_flow_id);
318
319         rc = bnxt_hwrm_cfa_vfr_alloc(parent_bp, vfr->vf_id);
320         if (rc)
321                 goto vf2rep_free;
322
323         return 0;
324
325 vf2rep_free:
326         ulp_default_flow_destroy(vfr->parent_dev, vfr->vf2rep_flow_id);
327 rep2vf_free:
328         ulp_default_flow_destroy(vfr->parent_dev, vfr->rep2vf_flow_id);
329 err:
330         return -EIO;
331 }
332
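/*
 * Validate the representor and its parent, then create the firmware-side
 * representor state through bnxt_tf_vfr_alloc().  A non-zero Tx CFA action
 * means the representor was already allocated and the call is a no-op.
 */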
333 static int bnxt_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
334 {
335         int rc = 0;
336         struct bnxt_vf_representor *vfr = vfr_ethdev->data->dev_private;
337         struct bnxt *parent_bp;
338
339         if (!vfr || !vfr->parent_dev) {
340                 PMD_DRV_LOG(ERR,
341                             "No memory allocated for representor\n");
342                 return -ENOMEM;
343         }
344
345         parent_bp = vfr->parent_dev->data->dev_private;
346         if (!parent_bp || !parent_bp->ulp_ctx) {
347                 PMD_DRV_LOG(ERR,
348                             "ulp context not allocated for parent\n");
349                 return -EIO;
350         }
351
352         /* Check if representor has been already allocated in FW */
353         if (vfr->vfr_tx_cfa_action)
354                 return 0;
355
356         /*
357          * Alloc VF rep rules in CFA after default VNIC is created.
358          * Otherwise the FW will create the VF-rep rules with
359          * default drop action.
360          */
361         rc = bnxt_tf_vfr_alloc(vfr_ethdev);
362         if (!rc)
363                 PMD_DRV_LOG(DEBUG, "allocated representor %d in FW\n",
364                             vfr->vf_id);
365         else
366                 PMD_DRV_LOG(ERR,
367                             "Failed to alloc representor %d in FW\n",
368                             vfr->vf_id);
369
370         return rc;
371 }
372
373 static void bnxt_vf_rep_free_rx_mbufs(struct bnxt_vf_representor *rep_bp)
374 {
375         struct bnxt_rx_queue *rxq;
376         unsigned int i;
377
378         for (i = 0; i < rep_bp->rx_nr_rings; i++) {
379                 rxq = rep_bp->rx_queues[i];
380                 bnxt_rx_queue_release_mbufs(rxq);
381         }
382 }
383
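/*
 * dev_start: if the conduit has already been validated the start is a
 * no-op; otherwise the VF's default VNIC/SVIF is resolved under
 * rep_info->vfr_start_lock, the representor rules are allocated in
 * firmware and the representor burst handlers are (re)installed
 * (dev_stop replaces them with dummies).
 */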
384 int bnxt_vf_rep_dev_start_op(struct rte_eth_dev *eth_dev)
385 {
386         struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
387         struct bnxt_rep_info *rep_info;
388         struct bnxt *parent_bp;
389         int rc;
390
391         parent_bp = rep_bp->parent_dev->data->dev_private;
392         rep_info = &parent_bp->rep_info[rep_bp->vf_id];
393
394         pthread_mutex_lock(&rep_info->vfr_start_lock);
395         if (rep_info->conduit_valid) {
396                 pthread_mutex_unlock(&rep_info->vfr_start_lock);
397                 return 0;
398         }
399         rc = bnxt_get_dflt_vnic_svif(parent_bp, rep_bp);
400         if (rc || !rep_info->conduit_valid) {
401                 pthread_mutex_unlock(&rep_info->vfr_start_lock);
402                 return rc;
403         }
404         pthread_mutex_unlock(&rep_info->vfr_start_lock);
405
406         rc = bnxt_vfr_alloc(eth_dev);
407         if (rc) {
408                 eth_dev->data->dev_link.link_status = 0;
409                 bnxt_vf_rep_free_rx_mbufs(rep_bp);
410                 return rc;
411         }
412         eth_dev->rx_pkt_burst = &bnxt_vf_rep_rx_burst;
413         eth_dev->tx_pkt_burst = &bnxt_vf_rep_tx_burst;
414         bnxt_vf_rep_link_update_op(eth_dev, 1);
415
416         return 0;
417 }
418
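/*
 * Destroy the two TruFlow default rules created by bnxt_tf_vfr_alloc().
 * Failures are only logged so that the rest of the teardown continues.
 */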
419 static int bnxt_tf_vfr_free(struct bnxt_vf_representor *vfr)
420 {
421         int rc = 0;
422
423         rc = ulp_default_flow_destroy(vfr->parent_dev,
424                                       vfr->rep2vf_flow_id);
425         if (rc)
426                 PMD_DRV_LOG(ERR,
427                             "default flow destroy failed rep2vf flowid: %d\n",
428                             vfr->rep2vf_flow_id);
429         rc = ulp_default_flow_destroy(vfr->parent_dev,
430                                       vfr->vf2rep_flow_id);
431         if (rc)
432                 PMD_DRV_LOG(ERR,
433                             "default flow destroy failed vf2rep flowid: %d\n",
434                             vfr->vf2rep_flow_id);
435         return 0;
436 }
437
438 static int bnxt_vfr_free(struct bnxt_vf_representor *vfr)
439 {
440         int rc = 0;
441         struct bnxt *parent_bp;
442
443         if (!vfr || !vfr->parent_dev) {
444                 PMD_DRV_LOG(ERR,
445                             "No memory allocated for representor\n");
446                 return -ENOMEM;
447         }
448
449         parent_bp = vfr->parent_dev->data->dev_private;
450         if (!parent_bp)
451                 return 0;
452
453         /* Check if representor has been already freed in FW */
454         if (!vfr->vfr_tx_cfa_action)
455                 return 0;
456
457         rc = bnxt_tf_vfr_free(vfr);
458         if (rc) {
459                 PMD_DRV_LOG(ERR,
460                             "Failed to free representor %d in FW\n",
461                             vfr->vf_id);
462                 return rc;
463         }
464
465         PMD_DRV_LOG(DEBUG, "freed representor %d in FW\n",
466                     vfr->vf_id);
467         vfr->vfr_tx_cfa_action = 0;
468
469         rc = bnxt_hwrm_cfa_vfr_free(parent_bp, vfr->vf_id);
470
471         return rc;
472 }
473
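/*
 * dev_stop: point the burst handlers at the dummy stubs first, then free
 * the representor state in firmware, mark the link down if the port was
 * started and release any mbufs still parked on the representor Rx rings.
 */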
474 void bnxt_vf_rep_dev_stop_op(struct rte_eth_dev *eth_dev)
475 {
476         struct bnxt_vf_representor *vfr_bp = eth_dev->data->dev_private;
477
478         /* Avoid crashes as we are about to free queues */
479         eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
480         eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
481
482         bnxt_vfr_free(vfr_bp);
483
484         if (eth_dev->data->dev_started)
485                 eth_dev->data->dev_link.link_status = 0;
486
487         bnxt_vf_rep_free_rx_mbufs(vfr_bp);
488 }
489
490 void bnxt_vf_rep_dev_close_op(struct rte_eth_dev *eth_dev)
491 {
492         bnxt_vf_representor_uninit(eth_dev);
493 }
494
495 int bnxt_vf_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
496                                 struct rte_eth_dev_info *dev_info)
497 {
498         struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
499         struct bnxt *parent_bp;
500         unsigned int max_rx_rings;
502
503         /* MAC Specifics */
504         parent_bp = rep_bp->parent_dev->data->dev_private;
505         if (!parent_bp) {
506                 PMD_DRV_LOG(ERR, "Rep parent NULL!\n");
507                 return -EINVAL;
508         }
509         PMD_DRV_LOG(DEBUG, "Representor dev_info_get_op\n");
510         dev_info->max_mac_addrs = parent_bp->max_l2_ctx;
511         dev_info->max_hash_mac_addrs = 0;
512
513         max_rx_rings = BNXT_MAX_VF_REP_RINGS;
514         /* For the sake of symmetry, max_rx_queues = max_tx_queues */
515         dev_info->max_rx_queues = max_rx_rings;
516         dev_info->max_tx_queues = max_rx_rings;
517         dev_info->reta_size = bnxt_rss_hash_tbl_size(parent_bp);
518         dev_info->hash_key_size = 40;
519
520         /* MTU specifics */
521         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
522         dev_info->max_mtu = BNXT_MAX_MTU;
523
524         /* Fast path specifics */
525         dev_info->min_rx_bufsize = 1;
526         dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;
527
528         dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
529         if (parent_bp->flags & BNXT_FLAG_PTP_SUPPORTED)
530                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
531         dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
532         dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
533
534         return 0;
535 }
536
537 int bnxt_vf_rep_dev_configure_op(struct rte_eth_dev *eth_dev)
538 {
539         struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
540
541         PMD_DRV_LOG(DEBUG, "Representor dev_configure_op\n");
542         rep_bp->rx_queues = (void *)eth_dev->data->rx_queues;
543         rep_bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
544         rep_bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
545
546         return 0;
547 }
548
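/*
 * Representor Rx queue setup.  The queue must mirror the parent Rx queue
 * with the same index (equal descriptor count); only software ring
 * structures and a buffer ring are allocated, since packets are delivered
 * by the parent port through bnxt_vfr_recv() rather than by hardware.
 */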
549 int bnxt_vf_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
550                           uint16_t queue_idx,
551                           uint16_t nb_desc,
552                           unsigned int socket_id,
553                           __rte_unused const struct rte_eth_rxconf *rx_conf,
554                           __rte_unused struct rte_mempool *mp)
555 {
556         struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
557         struct bnxt *parent_bp = rep_bp->parent_dev->data->dev_private;
558         struct bnxt_rx_queue *parent_rxq;
559         struct bnxt_rx_queue *rxq;
560         struct bnxt_sw_rx_bd *buf_ring;
561         int rc = 0;
562
563         if (queue_idx >= BNXT_MAX_VF_REP_RINGS) {
564                 PMD_DRV_LOG(ERR,
565                             "Cannot create Rx ring %d. %d rings available\n",
566                             queue_idx, BNXT_MAX_VF_REP_RINGS);
567                 return -EINVAL;
568         }
569
570         if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
571                 PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
572                 return -EINVAL;
573         }
574
575         if (!parent_bp->rx_queues) {
576                 PMD_DRV_LOG(ERR, "Parent Rx qs not configured yet\n");
577                 return -EINVAL;
578         }
579
580         parent_rxq = parent_bp->rx_queues[queue_idx];
581         if (!parent_rxq) {
582                 PMD_DRV_LOG(ERR, "Parent RxQ has not been configured yet\n");
583                 return -EINVAL;
584         }
585
586         if (nb_desc != parent_rxq->nb_rx_desc) {
587                 PMD_DRV_LOG(ERR, "nb_desc %d does not match parent rxq\n", nb_desc);
588                 return -EINVAL;
589         }
590
591         if (eth_dev->data->rx_queues) {
592                 rxq = eth_dev->data->rx_queues[queue_idx];
593                 if (rxq)
594                         bnxt_rx_queue_release_op(rxq);
595         }
596
597         rxq = rte_zmalloc_socket("bnxt_vfr_rx_queue",
598                                  sizeof(struct bnxt_rx_queue),
599                                  RTE_CACHE_LINE_SIZE, socket_id);
600         if (!rxq) {
601                 PMD_DRV_LOG(ERR, "bnxt_vfr_rx_queue allocation failed!\n");
602                 return -ENOMEM;
603         }
604
605         rxq->nb_rx_desc = nb_desc;
606
607         rc = bnxt_init_rx_ring_struct(rxq, socket_id);
608         if (rc)
609                 goto out;
610
611         buf_ring = rte_zmalloc_socket("bnxt_rx_vfr_buf_ring",
612                                       sizeof(struct bnxt_sw_rx_bd) *
613                                       rxq->rx_ring->rx_ring_struct->ring_size,
614                                       RTE_CACHE_LINE_SIZE, socket_id);
615         if (!buf_ring) {
616                 PMD_DRV_LOG(ERR, "bnxt_rx_vfr_buf_ring allocation failed!\n");
617                 rc = -ENOMEM;
618                 goto out;
619         }
620
621         rxq->rx_ring->rx_buf_ring = buf_ring;
622         rxq->queue_id = queue_idx;
623         rxq->port_id = eth_dev->data->port_id;
624         eth_dev->data->rx_queues[queue_idx] = rxq;
625
626         return 0;
627
628 out:
629         if (rxq)
630                 bnxt_rx_queue_release_op(rxq);
631
632         return rc;
633 }
634
635 void bnxt_vf_rep_rx_queue_release_op(void *rx_queue)
636 {
637         struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
638
639         if (!rxq)
640                 return;
641
642         bnxt_rx_queue_release_mbufs(rxq);
643
644         bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
645         bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
646         bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
647
648         rte_free(rxq);
649 }
650
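/*
 * Representor Tx queue setup.  Only bookkeeping structures are allocated;
 * actual transmission goes through the parent PF Tx queue of the same
 * index in bnxt_vf_rep_tx_burst().
 */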
651 int bnxt_vf_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
652                           uint16_t queue_idx,
653                           uint16_t nb_desc,
654                           unsigned int socket_id,
655                           __rte_unused const struct rte_eth_txconf *tx_conf)
656 {
657         struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
658         struct bnxt *parent_bp = rep_bp->parent_dev->data->dev_private;
659         struct bnxt_tx_queue *parent_txq, *txq;
660         struct bnxt_vf_rep_tx_queue *vfr_txq;
661
662         if (queue_idx >= BNXT_MAX_VF_REP_RINGS) {
663                 PMD_DRV_LOG(ERR,
664                             "Cannot create Tx ring %d. %d rings available\n",
665                             queue_idx, BNXT_MAX_VF_REP_RINGS);
666                 return -EINVAL;
667         }
668
669         if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
670                 PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
671                 return -EINVAL;
672         }
673
674         if (!parent_bp->tx_queues) {
675                 PMD_DRV_LOG(ERR, "Parent Tx qs not configured yet\n");
676                 return -EINVAL;
677         }
678
679         parent_txq = parent_bp->tx_queues[queue_idx];
680         if (!parent_txq) {
681                 PMD_DRV_LOG(ERR, "Parent TxQ has not been configured yet\n");
682                 return -EINVAL;
683         }
684
685         if (nb_desc != parent_txq->nb_tx_desc) {
686                 PMD_DRV_LOG(ERR, "nb_desc %d does not match parent txq\n", nb_desc);
687                 return -EINVAL;
688         }
689
690         if (eth_dev->data->tx_queues) {
691                 vfr_txq = eth_dev->data->tx_queues[queue_idx];
692                 bnxt_vf_rep_tx_queue_release_op(vfr_txq);
693                 vfr_txq = NULL;
694         }
695
696         vfr_txq = rte_zmalloc_socket("bnxt_vfr_tx_queue",
697                                      sizeof(struct bnxt_vf_rep_tx_queue),
698                                      RTE_CACHE_LINE_SIZE, socket_id);
699         if (!vfr_txq) {
700                 PMD_DRV_LOG(ERR, "bnxt_vfr_tx_queue allocation failed!\n");
701                 return -ENOMEM;
702         }
703         txq = rte_zmalloc_socket("bnxt_tx_queue",
704                                  sizeof(struct bnxt_tx_queue),
705                                  RTE_CACHE_LINE_SIZE, socket_id);
706         if (!txq) {
707                 PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!\n");
708                 rte_free(vfr_txq);
709                 return -ENOMEM;
710         }
711
712         txq->nb_tx_desc = nb_desc;
713         txq->queue_id = queue_idx;
714         txq->port_id = eth_dev->data->port_id;
715         vfr_txq->txq = txq;
716         vfr_txq->bp = rep_bp;
717         eth_dev->data->tx_queues[queue_idx] = vfr_txq;
718
719         return 0;
720 }
721
722 void bnxt_vf_rep_tx_queue_release_op(void *tx_queue)
723 {
724         struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;
725
726         if (!vfr_txq)
727                 return;
728
729         rte_free(vfr_txq->txq);
730         rte_free(vfr_txq);
731 }
732
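/*
 * Aggregate the per-ring software counters maintained by the representor
 * Rx/Tx burst paths into rte_eth_stats; representor ring-full drops are
 * reported as imissed and per-queue q_errors.
 */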
733 int bnxt_vf_rep_stats_get_op(struct rte_eth_dev *eth_dev,
734                              struct rte_eth_stats *stats)
735 {
736         struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
737         int i;
738
739         memset(stats, 0, sizeof(*stats));
740         for (i = 0; i < BNXT_MAX_VF_REP_RINGS; i++) {
741                 stats->obytes += rep_bp->tx_bytes[i];
742                 stats->opackets += rep_bp->tx_pkts[i];
743                 stats->ibytes += rep_bp->rx_bytes[i];
744                 stats->ipackets += rep_bp->rx_pkts[i];
745                 stats->imissed += rep_bp->rx_drop_pkts[i];
746
747                 stats->q_ipackets[i] = rep_bp->rx_pkts[i];
748                 stats->q_ibytes[i] = rep_bp->rx_bytes[i];
749                 stats->q_opackets[i] = rep_bp->tx_pkts[i];
750                 stats->q_obytes[i] = rep_bp->tx_bytes[i];
751                 stats->q_errors[i] = rep_bp->rx_drop_pkts[i];
752         }
753
754         return 0;
755 }
756
757 int bnxt_vf_rep_stats_reset_op(struct rte_eth_dev *eth_dev)
758 {
759         struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
760         int i;
761
762         for (i = 0; i < BNXT_MAX_VF_REP_RINGS; i++) {
763                 rep_bp->tx_pkts[i] = 0;
764                 rep_bp->tx_bytes[i] = 0;
765                 rep_bp->rx_pkts[i] = 0;
766                 rep_bp->rx_bytes[i] = 0;
767                 rep_bp->rx_drop_pkts[i] = 0;
768         }
769         return 0;
770 }