drivers/net/bnxt/bnxt_reps.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2020 Broadcom
 * All rights reserved.
 */

#include "bnxt.h"
#include "bnxt_ring.h"
#include "bnxt_reps.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_hwrm.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_tf_common.h"
#include "ulp_port_db.h"
#include "ulp_flow_db.h"

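/* eth_dev_ops table for VF representor ports; each callback below is
 * a representor-specific implementation.
 */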
static const struct eth_dev_ops bnxt_vf_rep_dev_ops = {
        .dev_infos_get = bnxt_vf_rep_dev_info_get_op,
        .dev_configure = bnxt_vf_rep_dev_configure_op,
        .dev_start = bnxt_vf_rep_dev_start_op,
        .rx_queue_setup = bnxt_vf_rep_rx_queue_setup_op,
        .rx_queue_release = bnxt_vf_rep_rx_queue_release_op,
        .tx_queue_setup = bnxt_vf_rep_tx_queue_setup_op,
        .tx_queue_release = bnxt_vf_rep_tx_queue_release_op,
        .link_update = bnxt_vf_rep_link_update_op,
        .dev_close = bnxt_vf_rep_dev_close_op,
        .dev_stop = bnxt_vf_rep_dev_stop_op,
        .stats_get = bnxt_vf_rep_stats_get_op,
        .stats_reset = bnxt_vf_rep_stats_reset_op,
        .filter_ctrl = bnxt_filter_ctrl_op
};

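/* Hand a received mbuf to the representor for the given port. This is
 * intended to be called from the parent device's Rx path when a packet
 * is identified as belonging to a VF representor. Returns 0 on success
 * (the mbuf is queued, or dropped and counted), non-zero on a bad port.
 */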
uint16_t
bnxt_vfr_recv(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *mbuf)
{
        struct bnxt_sw_rx_bd *prod_rx_buf;
        struct bnxt_rx_ring_info *rep_rxr;
        struct bnxt_rx_queue *rep_rxq;
        struct rte_eth_dev *vfr_eth_dev;
        struct bnxt_vf_representor *vfr_bp;
        uint16_t mask;
        uint8_t que;

        /* &rte_eth_devices[port_id] can never be NULL; validate the
         * port id itself instead.
         */
        if (!rte_eth_dev_is_valid_port(port_id))
                return 1;
        vfr_eth_dev = &rte_eth_devices[port_id];
        vfr_bp = vfr_eth_dev->data->dev_private;
        /* If queue_id is >= the number of rep rings, fall back to rxq0 */
        que = queue_id < BNXT_MAX_VF_REP_RINGS ? queue_id : 0;
        rep_rxq = vfr_bp->rx_queues[que];
        rep_rxr = rep_rxq->rx_ring;
        mask = rep_rxr->rx_ring_struct->ring_mask;

        /* Put this mbuf on the RxQ of the Representor */
        prod_rx_buf =
                &rep_rxr->rx_buf_ring[rep_rxr->rx_prod++ & mask];
        if (!prod_rx_buf->mbuf) {
                prod_rx_buf->mbuf = mbuf;
                vfr_bp->rx_bytes[que] += mbuf->pkt_len;
                vfr_bp->rx_pkts[que]++;
        } else {
                vfr_bp->rx_drop_bytes[que] += mbuf->pkt_len;
                vfr_bp->rx_drop_pkts[que]++;
                /* Representor Rx ring full, drop pkt. An mbuf must be
                 * returned with rte_pktmbuf_free(), not rte_free().
                 */
                rte_pktmbuf_free(mbuf);
        }

        return 0;
}

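/* Representor Rx burst: drain mbufs that bnxt_vfr_recv() staged on the
 * representor's software ring and hand them to the application.
 */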
static uint16_t
bnxt_vf_rep_rx_burst(void *rx_queue,
                     struct rte_mbuf **rx_pkts,
                     uint16_t nb_pkts)
{
        struct bnxt_rx_queue *rxq = rx_queue;
        struct bnxt_sw_rx_bd *cons_rx_buf;
        struct bnxt_rx_ring_info *rxr;
        uint16_t nb_rx_pkts = 0;
        uint16_t mask, i;

        if (!rxq)
                return 0;

        rxr = rxq->rx_ring;
        mask = rxr->rx_ring_struct->ring_mask;
        for (i = 0; i < nb_pkts; i++) {
                cons_rx_buf = &rxr->rx_buf_ring[rxr->rx_cons & mask];
                if (!cons_rx_buf->mbuf)
                        return nb_rx_pkts;
                rx_pkts[nb_rx_pkts] = cons_rx_buf->mbuf;
                rx_pkts[nb_rx_pkts]->port = rxq->port_id;
                cons_rx_buf->mbuf = NULL;
                nb_rx_pkts++;
                rxr->rx_cons++;
        }

        return nb_rx_pkts;
}

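/* Representor Tx burst: transmit on the parent device's Tx queue of the
 * same index, with the representor's CFA action set so the HW forwards
 * the packets to the corresponding VF. The parent's vfr_lock serializes
 * representors that share the parent queue.
 */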
static uint16_t
bnxt_vf_rep_tx_burst(void *tx_queue,
                     struct rte_mbuf **tx_pkts,
                     uint16_t nb_pkts)
{
        struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;
        struct bnxt_tx_queue *ptxq;
        struct bnxt *parent;
        struct bnxt_vf_representor *vf_rep_bp;
        int qid;
        int rc;
        int i;

        if (!vfr_txq)
                return 0;

        qid = vfr_txq->txq->queue_id;
        vf_rep_bp = vfr_txq->bp;
        parent = vf_rep_bp->parent_dev->data->dev_private;
        pthread_mutex_lock(&parent->rep_info->vfr_lock);
        ptxq = parent->tx_queues[qid];

        ptxq->vfr_tx_cfa_action = vf_rep_bp->vfr_tx_cfa_action;

        for (i = 0; i < nb_pkts; i++) {
                vf_rep_bp->tx_bytes[qid] += tx_pkts[i]->pkt_len;
                vf_rep_bp->tx_pkts[qid]++;
        }

        rc = bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts);
        ptxq->vfr_tx_cfa_action = 0;
        pthread_mutex_unlock(&parent->rep_info->vfr_lock);

        return rc;
}

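/* ethdev init callback for a VF representor port: record the VF id and
 * parent device, assign a random MAC address, install stub burst
 * handlers, inherit link state from the parent, and query the VF's
 * default VNIC and SVIF from FW.
 */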
int bnxt_vf_representor_init(struct rte_eth_dev *eth_dev, void *params)
{
        struct bnxt_vf_representor *vf_rep_bp = eth_dev->data->dev_private;
        struct bnxt_vf_representor *rep_params =
                                 (struct bnxt_vf_representor *)params;
        struct rte_eth_link *link;
        struct bnxt *parent_bp;
        int rc = 0;

        vf_rep_bp->vf_id = rep_params->vf_id;
        vf_rep_bp->switch_domain_id = rep_params->switch_domain_id;
        vf_rep_bp->parent_dev = rep_params->parent_dev;

        eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
        eth_dev->data->representor_id = rep_params->vf_id;

        rte_eth_random_addr(vf_rep_bp->dflt_mac_addr);
        memcpy(vf_rep_bp->mac_addr, vf_rep_bp->dflt_mac_addr,
               sizeof(vf_rep_bp->mac_addr));
        eth_dev->data->mac_addrs =
                (struct rte_ether_addr *)&vf_rep_bp->mac_addr;
        eth_dev->dev_ops = &bnxt_vf_rep_dev_ops;

        /* No data-path yet, but stub Rx/Tx handlers are needed to
         * avoid a crash when testing with ovs-dpdk.
         */
        eth_dev->rx_pkt_burst = bnxt_vf_rep_rx_burst;
        eth_dev->tx_pkt_burst = bnxt_vf_rep_tx_burst;
        /* Link state: inherited from the PF or trusted VF. */
        parent_bp = vf_rep_bp->parent_dev->data->dev_private;
        link = &parent_bp->eth_dev->data->dev_link;

        eth_dev->data->dev_link.link_speed = link->link_speed;
        eth_dev->data->dev_link.link_duplex = link->link_duplex;
        eth_dev->data->dev_link.link_status = link->link_status;
        eth_dev->data->dev_link.link_autoneg = link->link_autoneg;

        vf_rep_bp->fw_fid = rep_params->vf_id + parent_bp->first_vf_id;
        PMD_DRV_LOG(INFO, "vf_rep->fw_fid = %d\n", vf_rep_bp->fw_fid);
        rc = bnxt_hwrm_get_dflt_vnic_svif(parent_bp, vf_rep_bp->fw_fid,
                                          &vf_rep_bp->dflt_vnic_id,
                                          &vf_rep_bp->svif);
        if (rc)
                PMD_DRV_LOG(ERR, "Failed to get default vnic id of VF\n");
        else
                PMD_DRV_LOG(INFO, "vf_rep->dflt_vnic_id = %d\n",
                            vf_rep_bp->dflt_vnic_id);

        PMD_DRV_LOG(INFO, "calling bnxt_print_link_info\n");
        bnxt_print_link_info(eth_dev);

        /* Tell rte_eth_dev_close() that it should also release the
         * private port resources.
         */
        eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
        PMD_DRV_LOG(INFO,
                    "Switch domain id %d: Representor Device %d init done\n",
                    vf_rep_bp->switch_domain_id, vf_rep_bp->vf_id);

        return 0;
}

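/* ethdev uninit callback: detach MAC storage and ops, decrement the
 * parent's representor count, and clear this VF's rep_info slot.
 */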
int bnxt_vf_representor_uninit(struct rte_eth_dev *eth_dev)
{
        struct bnxt *parent_bp;
        struct bnxt_vf_representor *rep =
                (struct bnxt_vf_representor *)eth_dev->data->dev_private;
        uint16_t vf_id;

        eth_dev->data->mac_addrs = NULL;
        eth_dev->dev_ops = NULL;

        parent_bp = rep->parent_dev->data->dev_private;
        if (!parent_bp)
                return 0;

        parent_bp->num_reps--;
        vf_id = rep->vf_id;
        /* Mark this representor as freed. */
        if (parent_bp->rep_info)
                memset(&parent_bp->rep_info[vf_id], 0,
                       sizeof(parent_bp->rep_info[vf_id]));
        return 0;
}

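/* A representor has no physical link of its own: refresh the parent's
 * link and mirror it into the representor's device data.
 */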
int bnxt_vf_rep_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_compl)
{
        struct bnxt *parent_bp;
        struct bnxt_vf_representor *rep =
                (struct bnxt_vf_representor *)eth_dev->data->dev_private;
        struct rte_eth_link *link;
        int rc;

        parent_bp = rep->parent_dev->data->dev_private;
        rc = bnxt_link_update_op(parent_bp->eth_dev, wait_to_compl);

        /* Link state: inherited from the PF or trusted VF. */
        link = &parent_bp->eth_dev->data->dev_link;

        eth_dev->data->dev_link.link_speed = link->link_speed;
        eth_dev->data->dev_link.link_duplex = link->link_duplex;
        eth_dev->data->dev_link.link_status = link->link_status;
        eth_dev->data->dev_link.link_autoneg = link->link_autoneg;
        bnxt_print_link_info(eth_dev);

        return rc;
}

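/* Create the two TruFlow default rules that plumb a representor:
 * VFR->VF (whose CFA action is used on transmit) and VF->VFR (Rx
 * redirection), then allocate the VFR in FW. Each error path unwinds
 * the rules created so far.
 */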
static int bnxt_tf_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
{
        int rc;
        struct bnxt_vf_representor *vfr = vfr_ethdev->data->dev_private;
        struct rte_eth_dev *parent_dev = vfr->parent_dev;
        struct bnxt *parent_bp = parent_dev->data->dev_private;
        uint16_t vfr_port_id = vfr_ethdev->data->port_id;
        struct ulp_tlv_param param_list[] = {
                {
                        .type = BNXT_ULP_DF_PARAM_TYPE_DEV_PORT_ID,
                        .length = 2,
                        .value = {(vfr_port_id >> 8) & 0xff, vfr_port_id & 0xff}
                },
                {
                        .type = BNXT_ULP_DF_PARAM_TYPE_LAST,
                        .length = 0,
                        .value = {0}
                }
        };

        ulp_port_db_dev_port_intf_update(parent_bp->ulp_ctx, vfr_ethdev);

        rc = ulp_default_flow_create(parent_dev, param_list,
                                     BNXT_ULP_DF_TPL_VFREP_TO_VF,
                                     &vfr->rep2vf_flow_id);
        if (rc) {
                BNXT_TF_DBG(DEBUG,
                            "Default flow rule creation for VFR->VF failed!\n");
                goto err;
        }

        BNXT_TF_DBG(DEBUG, "*** Default flow rule created for VFR->VF! ***\n");
        BNXT_TF_DBG(DEBUG, "rep2vf_flow_id = %d\n", vfr->rep2vf_flow_id);
        rc = ulp_default_flow_db_cfa_action_get(parent_bp->ulp_ctx,
                                                vfr->rep2vf_flow_id,
                                                &vfr->vfr_tx_cfa_action);
        if (rc) {
                BNXT_TF_DBG(DEBUG,
                            "Failed to get action_ptr for VFR->VF dflt rule\n");
                goto rep2vf_free;
        }
        BNXT_TF_DBG(DEBUG, "tx_cfa_action = %d\n", vfr->vfr_tx_cfa_action);
        rc = ulp_default_flow_create(parent_dev, param_list,
                                     BNXT_ULP_DF_TPL_VF_TO_VFREP,
                                     &vfr->vf2rep_flow_id);
        if (rc) {
                BNXT_TF_DBG(DEBUG,
                            "Default flow rule creation for VF->VFR failed!\n");
                goto rep2vf_free;
        }

        BNXT_TF_DBG(DEBUG, "*** Default flow rule created for VF->VFR! ***\n");
        BNXT_TF_DBG(DEBUG, "vf2rep_flow_id = %d\n", vfr->vf2rep_flow_id);

        rc = bnxt_hwrm_cfa_vfr_alloc(parent_bp, vfr->vf_id);
        if (rc)
                goto vf2rep_free;

        return 0;

vf2rep_free:
        ulp_default_flow_destroy(vfr->parent_dev, vfr->vf2rep_flow_id);
rep2vf_free:
        ulp_default_flow_destroy(vfr->parent_dev, vfr->rep2vf_flow_id);
err:
        return -EIO;
}

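/* Idempotent wrapper around bnxt_tf_vfr_alloc(): skip the FW allocation
 * if the representor's CFA resources indicate it already exists.
 */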
static int bnxt_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
{
        int rc = 0;
        struct bnxt_vf_representor *vfr = vfr_ethdev->data->dev_private;

        if (!vfr || !vfr->parent_dev) {
                PMD_DRV_LOG(ERR,
                            "No memory allocated for representor\n");
                return -ENOMEM;
        }

        /* Check if the representor has already been allocated in FW */
        if (vfr->vfr_tx_cfa_action && vfr->rx_cfa_code)
                return 0;

        /*
         * Alloc VF rep rules in CFA after the default VNIC is created.
         * Otherwise the FW will create the VF-rep rules with a default
         * drop action.
         */
        rc = bnxt_tf_vfr_alloc(vfr_ethdev);
        if (!rc)
                PMD_DRV_LOG(DEBUG, "allocated representor %d in FW\n",
                            vfr->vf_id);
        else
                PMD_DRV_LOG(ERR,
                            "Failed to alloc representor %d in FW\n",
                            vfr->vf_id);

        return rc;
}

static void bnxt_vf_rep_free_rx_mbufs(struct bnxt_vf_representor *rep_bp)
{
        struct bnxt_rx_queue *rxq;
        unsigned int i;

        for (i = 0; i < rep_bp->rx_nr_rings; i++) {
                rxq = rep_bp->rx_queues[i];
                bnxt_rx_queue_release_mbufs(rxq);
        }
}

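/* dev_start callback: allocate the FW-side representor and switch from
 * the stub to the real burst handlers; on failure report link down and
 * release any mbufs already staged on the representor rings.
 */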
int bnxt_vf_rep_dev_start_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
        int rc;

        rc = bnxt_vfr_alloc(eth_dev);
        if (!rc) {
                eth_dev->rx_pkt_burst = &bnxt_vf_rep_rx_burst;
                eth_dev->tx_pkt_burst = &bnxt_vf_rep_tx_burst;

                bnxt_vf_rep_link_update_op(eth_dev, 1);
        } else {
                eth_dev->data->dev_link.link_status = 0;
                bnxt_vf_rep_free_rx_mbufs(rep_bp);
        }

        return rc;
}

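/* Destroy both TruFlow default rules for this representor. Failures are
 * logged but not propagated so that teardown always runs to completion.
 */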
static int bnxt_tf_vfr_free(struct bnxt_vf_representor *vfr)
{
        int rc = 0;

        rc = ulp_default_flow_destroy(vfr->parent_dev,
                                      vfr->rep2vf_flow_id);
        if (rc)
                PMD_DRV_LOG(ERR,
                            "default flow destroy failed rep2vf flowid: %d\n",
                            vfr->rep2vf_flow_id);
        rc = ulp_default_flow_destroy(vfr->parent_dev,
                                      vfr->vf2rep_flow_id);
        if (rc)
                PMD_DRV_LOG(ERR,
                            "default flow destroy failed vf2rep flowid: %d\n",
                            vfr->vf2rep_flow_id);
        return 0;
}

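/* Counterpart of bnxt_vfr_alloc(): destroy the default rules,
 * invalidate the cfa_code mapping, and free the VFR in FW. A no-op if
 * the representor was already freed.
 */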
static int bnxt_vfr_free(struct bnxt_vf_representor *vfr)
{
        int rc = 0;
        struct bnxt *parent_bp;

        if (!vfr || !vfr->parent_dev) {
                PMD_DRV_LOG(ERR,
                            "No memory allocated for representor\n");
                return -ENOMEM;
        }

        parent_bp = vfr->parent_dev->data->dev_private;

        /* Check if the representor has already been freed in FW */
        if (!vfr->vfr_tx_cfa_action && !vfr->rx_cfa_code)
                return 0;

        rc = bnxt_tf_vfr_free(vfr);
        if (rc) {
                PMD_DRV_LOG(ERR,
                            "Failed to free representor %d in FW\n",
                            vfr->vf_id);
                return rc;
        }

        parent_bp->cfa_code_map[vfr->rx_cfa_code] = BNXT_VF_IDX_INVALID;
        PMD_DRV_LOG(DEBUG, "freed representor %d in FW\n",
                    vfr->vf_id);
        vfr->vfr_tx_cfa_action = 0;
        vfr->rx_cfa_code = 0;

        rc = bnxt_hwrm_cfa_vfr_free(parent_bp, vfr->vf_id);

        return rc;
}

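/* dev_stop callback: point the burst handlers at the dummy routines
 * before freeing FW resources and queued mbufs, so that a concurrent
 * poll cannot touch freed queues.
 */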
void bnxt_vf_rep_dev_stop_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt_vf_representor *vfr_bp = eth_dev->data->dev_private;

        /* Avoid crashes as we are about to free queues */
        eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
        eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;

        bnxt_vfr_free(vfr_bp);

        if (eth_dev->data->dev_started)
                eth_dev->data->dev_link.link_status = 0;

        bnxt_vf_rep_free_rx_mbufs(vfr_bp);
}

void bnxt_vf_rep_dev_close_op(struct rte_eth_dev *eth_dev)
{
        bnxt_vf_representor_uninit(eth_dev);
}

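/* dev_infos_get callback: most limits are inherited from the parent
 * device; queue counts are capped at BNXT_MAX_VF_REP_RINGS.
 */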
int bnxt_vf_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
                                struct rte_eth_dev_info *dev_info)
{
        struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
        struct bnxt *parent_bp;
        uint16_t max_vnics, i, j, vpool, vrxq;
        unsigned int max_rx_rings;

        /* MAC Specifics */
        parent_bp = rep_bp->parent_dev->data->dev_private;
        if (!parent_bp) {
                PMD_DRV_LOG(ERR, "Rep parent NULL!\n");
                return -EINVAL;
        }
        PMD_DRV_LOG(DEBUG, "Representor dev_info_get_op\n");
        dev_info->max_mac_addrs = parent_bp->max_l2_ctx;
        dev_info->max_hash_mac_addrs = 0;

        max_rx_rings = BNXT_MAX_VF_REP_RINGS;
        /* For the sake of symmetry, max_rx_queues = max_tx_queues */
        dev_info->max_rx_queues = max_rx_rings;
        dev_info->max_tx_queues = max_rx_rings;
        dev_info->reta_size = bnxt_rss_hash_tbl_size(parent_bp);
        dev_info->hash_key_size = 40;
        max_vnics = parent_bp->max_vnics;

        /* MTU specifics */
        dev_info->min_mtu = RTE_ETHER_MIN_MTU;
        dev_info->max_mtu = BNXT_MAX_MTU;

        /* Fast path specifics */
        dev_info->min_rx_bufsize = 1;
        dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;

        dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
        if (parent_bp->flags & BNXT_FLAG_PTP_SUPPORTED)
                dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
        dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
        dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;

        /* *INDENT-OFF* */
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = 8,
                        .hthresh = 8,
                        .wthresh = 0,
                },
                .rx_free_thresh = 32,
                /* If no descriptors are available, pkts are dropped by default */
                .rx_drop_en = 1,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = 32,
                        .hthresh = 0,
                        .wthresh = 0,
                },
                .tx_free_thresh = 32,
                .tx_rs_thresh = 32,
        };
        eth_dev->data->dev_conf.intr_conf.lsc = 1;
        eth_dev->data->dev_conf.intr_conf.rxq = 1;

        dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
        dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
        dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
        dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;
        /* *INDENT-ON* */

        /*
         * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
         *       need further investigation.
         */

        /* VMDq resources */
        vpool = 64; /* ETH_64_POOLS */
        vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
        for (i = 0; i < 4; vpool >>= 1, i++) {
                if (max_vnics > vpool) {
                        for (j = 0; j < 5; vrxq >>= 1, j++) {
                                if (dev_info->max_rx_queues > vrxq) {
                                        if (vpool > vrxq)
                                                vpool = vrxq;
                                        goto found;
                                }
                        }
                        /* Not enough resources to support VMDq */
                        break;
                }
        }
        /* Not enough resources to support VMDq */
        vpool = 0;
        vrxq = 0;
found:
        dev_info->max_vmdq_pools = vpool;
        dev_info->vmdq_queue_num = vrxq;
        dev_info->vmdq_pool_base = 0;
        dev_info->vmdq_queue_base = 0;

        return 0;
}

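/* dev_configure callback: cache the queue array and ring counts; no HW
 * programming is required for a representor at configure time.
 */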
int bnxt_vf_rep_dev_configure_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;

        PMD_DRV_LOG(DEBUG, "Representor dev_configure_op\n");
        rep_bp->rx_queues = (void *)eth_dev->data->rx_queues;
        rep_bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
        rep_bp->rx_nr_rings = eth_dev->data->nb_rx_queues;

        return 0;
}

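/* Rx queue setup: a representor Rx queue shadows the parent Rx queue of
 * the same index (hence nb_desc must match the parent's); only a
 * software buffer ring is allocated, no mempool or HW ring.
 */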
int bnxt_vf_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
                          uint16_t queue_idx,
                          uint16_t nb_desc,
                          unsigned int socket_id,
                          __rte_unused const struct rte_eth_rxconf *rx_conf,
                          __rte_unused struct rte_mempool *mp)
{
        struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
        struct bnxt *parent_bp = rep_bp->parent_dev->data->dev_private;
        struct bnxt_rx_queue *parent_rxq;
        struct bnxt_rx_queue *rxq;
        struct bnxt_sw_rx_bd *buf_ring;
        int rc = 0;

        if (queue_idx >= BNXT_MAX_VF_REP_RINGS) {
                PMD_DRV_LOG(ERR,
                            "Cannot create Rx ring %d. %d rings available\n",
                            queue_idx, BNXT_MAX_VF_REP_RINGS);
                return -EINVAL;
        }

        if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
                PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
                return -EINVAL;
        }

        parent_rxq = parent_bp->rx_queues[queue_idx];
        if (!parent_rxq) {
                PMD_DRV_LOG(ERR, "Parent RxQ has not been configured yet\n");
                return -EINVAL;
        }

        if (nb_desc != parent_rxq->nb_rx_desc) {
                PMD_DRV_LOG(ERR, "nb_desc %d does not match parent rxq\n",
                            nb_desc);
                return -EINVAL;
        }

        if (eth_dev->data->rx_queues) {
                rxq = eth_dev->data->rx_queues[queue_idx];
                if (rxq)
                        bnxt_rx_queue_release_op(rxq);
        }

        rxq = rte_zmalloc_socket("bnxt_vfr_rx_queue",
                                 sizeof(struct bnxt_rx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (!rxq) {
                PMD_DRV_LOG(ERR, "bnxt_vfr_rx_queue allocation failed!\n");
                return -ENOMEM;
        }

        rxq->nb_rx_desc = nb_desc;

        rc = bnxt_init_rx_ring_struct(rxq, socket_id);
        if (rc)
                goto out;

        buf_ring = rte_zmalloc_socket("bnxt_rx_vfr_buf_ring",
                                      sizeof(struct bnxt_sw_rx_bd) *
                                      rxq->rx_ring->rx_ring_struct->ring_size,
                                      RTE_CACHE_LINE_SIZE, socket_id);
        if (!buf_ring) {
                PMD_DRV_LOG(ERR, "bnxt_rx_vfr_buf_ring allocation failed!\n");
                rc = -ENOMEM;
                goto out;
        }

        rxq->rx_ring->rx_buf_ring = buf_ring;
        rxq->queue_id = queue_idx;
        rxq->port_id = eth_dev->data->port_id;
        eth_dev->data->rx_queues[queue_idx] = rxq;

        return 0;

out:
        if (rxq)
                bnxt_rx_queue_release_op(rxq);

        return rc;
}

void bnxt_vf_rep_rx_queue_release_op(void *rx_queue)
{
        struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;

        if (!rxq)
                return;

        bnxt_rx_queue_release_mbufs(rxq);

        bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
        bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
        bnxt_free_ring(rxq->cp_ring->cp_ring_struct);

        rte_free(rxq);
}

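/* Tx queue setup: allocate bookkeeping structures only; transmits are
 * redirected at burst time to the parent's Tx queue of the same index,
 * hence nb_desc must match the parent's.
 */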
int bnxt_vf_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
                          uint16_t queue_idx,
                          uint16_t nb_desc,
                          unsigned int socket_id,
                          __rte_unused const struct rte_eth_txconf *tx_conf)
{
        struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
        struct bnxt *parent_bp = rep_bp->parent_dev->data->dev_private;
        struct bnxt_tx_queue *parent_txq, *txq;
        struct bnxt_vf_rep_tx_queue *vfr_txq;

        if (queue_idx >= BNXT_MAX_VF_REP_RINGS) {
                PMD_DRV_LOG(ERR,
                            "Cannot create Tx ring %d. %d rings available\n",
                            queue_idx, BNXT_MAX_VF_REP_RINGS);
                return -EINVAL;
        }

        if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
                PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
                return -EINVAL;
        }

        parent_txq = parent_bp->tx_queues[queue_idx];
        if (!parent_txq) {
                PMD_DRV_LOG(ERR, "Parent TxQ has not been configured yet\n");
                return -EINVAL;
        }

        if (nb_desc != parent_txq->nb_tx_desc) {
                PMD_DRV_LOG(ERR, "nb_desc %d does not match parent txq\n",
                            nb_desc);
                return -EINVAL;
        }

        if (eth_dev->data->tx_queues) {
                vfr_txq = eth_dev->data->tx_queues[queue_idx];
                bnxt_vf_rep_tx_queue_release_op(vfr_txq);
                vfr_txq = NULL;
        }

        vfr_txq = rte_zmalloc_socket("bnxt_vfr_tx_queue",
                                     sizeof(struct bnxt_vf_rep_tx_queue),
                                     RTE_CACHE_LINE_SIZE, socket_id);
        if (!vfr_txq) {
                PMD_DRV_LOG(ERR, "bnxt_vfr_tx_queue allocation failed!\n");
                return -ENOMEM;
        }
        txq = rte_zmalloc_socket("bnxt_tx_queue",
                                 sizeof(struct bnxt_tx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (!txq) {
                PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!\n");
                rte_free(vfr_txq);
                return -ENOMEM;
        }

        txq->nb_tx_desc = nb_desc;
        txq->queue_id = queue_idx;
        txq->port_id = eth_dev->data->port_id;
        vfr_txq->txq = txq;
        vfr_txq->bp = rep_bp;
        eth_dev->data->tx_queues[queue_idx] = vfr_txq;

        return 0;
}

void bnxt_vf_rep_tx_queue_release_op(void *tx_queue)
{
        struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;

        if (!vfr_txq)
                return;

        rte_free(vfr_txq->txq);
        rte_free(vfr_txq);
}

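/* Stats are maintained in software, per representor ring, by the burst
 * handlers and bnxt_vfr_recv(); imissed reflects mbufs dropped when a
 * representor Rx ring was full.
 */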
int bnxt_vf_rep_stats_get_op(struct rte_eth_dev *eth_dev,
                             struct rte_eth_stats *stats)
{
        struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
        int i;

        memset(stats, 0, sizeof(*stats));
        for (i = 0; i < BNXT_MAX_VF_REP_RINGS; i++) {
                stats->obytes += rep_bp->tx_bytes[i];
                stats->opackets += rep_bp->tx_pkts[i];
                stats->ibytes += rep_bp->rx_bytes[i];
                stats->ipackets += rep_bp->rx_pkts[i];
                stats->imissed += rep_bp->rx_drop_pkts[i];

                stats->q_ipackets[i] = rep_bp->rx_pkts[i];
                stats->q_ibytes[i] = rep_bp->rx_bytes[i];
                stats->q_opackets[i] = rep_bp->tx_pkts[i];
                stats->q_obytes[i] = rep_bp->tx_bytes[i];
                stats->q_errors[i] = rep_bp->rx_drop_pkts[i];
        }

        return 0;
}

int bnxt_vf_rep_stats_reset_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
        int i;

        for (i = 0; i < BNXT_MAX_VF_REP_RINGS; i++) {
                rep_bp->tx_pkts[i] = 0;
                rep_bp->tx_bytes[i] = 0;
                rep_bp->rx_pkts[i] = 0;
                rep_bp->rx_bytes[i] = 0;
                rep_bp->rx_drop_pkts[i] = 0;
                /* Also clear the drop-byte counter kept by bnxt_vfr_recv() */
                rep_bp->rx_drop_bytes[i] = 0;
        }
        return 0;
}