dpdk.git @ b199abb783961e4c1f7c1f401708dfc904c9281c: drivers/net/bnxt/bnxt_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2018 Broadcom
3  * All rights reserved.
4  */
5
6 #include <inttypes.h>
7 #include <stdbool.h>
8
9 #include <rte_dev.h>
10 #include <rte_ethdev_driver.h>
11 #include <rte_ethdev_pci.h>
12 #include <rte_malloc.h>
13 #include <rte_cycles.h>
14 #include <rte_alarm.h>
15 #include <rte_kvargs.h>
16 #include <rte_vect.h>
17
18 #include "bnxt.h"
19 #include "bnxt_filter.h"
20 #include "bnxt_hwrm.h"
21 #include "bnxt_irq.h"
22 #include "bnxt_reps.h"
23 #include "bnxt_ring.h"
24 #include "bnxt_rxq.h"
25 #include "bnxt_rxr.h"
26 #include "bnxt_stats.h"
27 #include "bnxt_txq.h"
28 #include "bnxt_txr.h"
29 #include "bnxt_vnic.h"
30 #include "hsi_struct_def_dpdk.h"
31 #include "bnxt_nvm_defs.h"
32 #include "bnxt_tf_common.h"
33 #include "ulp_flow_db.h"
34 #include "rte_pmd_bnxt.h"
35
36 #define DRV_MODULE_NAME         "bnxt"
37 static const char bnxt_version[] =
38         "Broadcom NetXtreme driver " DRV_MODULE_NAME;
39
40 /*
41  * The set of PCI devices this driver supports
42  */
43 static const struct rte_pci_id bnxt_pci_id_map[] = {
44         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
45                          BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
46         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
47                          BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
48         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
49         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
50         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
51         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
52         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
53         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
54         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
55         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
56         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
57         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
58         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
59         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
60         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
61         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
62         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
63         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
64         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
65         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
66         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
67         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
68         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
69         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
70         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
71         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
72         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
73         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
74         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
75         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
76         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
77         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
78         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
79         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
80         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
81         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
82         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
83         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
84         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
85         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
86         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) },
87         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) },
88         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) },
89         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) },
90         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) },
91         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) },
92         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) },
93         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF1) },
94         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) },
95         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) },
96         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) },
97         { .vendor_id = 0, /* sentinel */ },
98 };
99
100 #define BNXT_DEVARG_TRUFLOW     "host-based-truflow"
101 #define BNXT_DEVARG_FLOW_XSTAT  "flow-xstat"
102 #define BNXT_DEVARG_MAX_NUM_KFLOWS  "max-num-kflows"
103 #define BNXT_DEVARG_REPRESENTOR "representor"
104 #define BNXT_DEVARG_REP_BASED_PF  "rep-based-pf"
105 #define BNXT_DEVARG_REP_IS_PF  "rep-is-pf"
106 #define BNXT_DEVARG_REP_Q_R2F  "rep-q-r2f"
107 #define BNXT_DEVARG_REP_Q_F2R  "rep-q-f2r"
108 #define BNXT_DEVARG_REP_FC_R2F  "rep-fc-r2f"
109 #define BNXT_DEVARG_REP_FC_F2R  "rep-fc-f2r"
110
111 static const char *const bnxt_dev_args[] = {
112         BNXT_DEVARG_REPRESENTOR,
113         BNXT_DEVARG_TRUFLOW,
114         BNXT_DEVARG_FLOW_XSTAT,
115         BNXT_DEVARG_MAX_NUM_KFLOWS,
116         BNXT_DEVARG_REP_BASED_PF,
117         BNXT_DEVARG_REP_IS_PF,
118         BNXT_DEVARG_REP_Q_R2F,
119         BNXT_DEVARG_REP_Q_F2R,
120         BNXT_DEVARG_REP_FC_R2F,
121         BNXT_DEVARG_REP_FC_F2R,
122         NULL
123 };
124
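/*
 * Illustrative usage (the PCI address and values below are placeholders):
 * these devargs are supplied per device on the EAL command line, e.g. with
 * testpmd:
 *
 *   dpdk-testpmd -a 0000:0d:00.0,host-based-truflow=1,flow-xstat=1,max-num-kflows=64 -- -i
 *
 * Each value is range-checked by the BNXT_DEVARG_*_INVALID() macros and by
 * bnxt_devarg_max_num_kflow_invalid() below.
 */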
125 /*
126  * truflow == false to disable the feature
127  * truflow == true to enable the feature
128  */
129 #define BNXT_DEVARG_TRUFLOW_INVALID(truflow)    ((truflow) > 1)
130
131 /*
132  * flow_xstat == false to disable the feature
133  * flow_xstat == true to enable the feature
134  */
135 #define BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)      ((flow_xstat) > 1)
136
137 /*
138  * rep_is_pf == false to indicate VF representor
139  * rep_is_pf == true to indicate PF representor
140  */
141 #define BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)        ((rep_is_pf) > 1)
142
143 /*
144  * rep_based_pf == Physical index of the PF
145  */
146 #define BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)  ((rep_based_pf) > 15)
147 /*
148  * rep_q_r2f == Logical COS Queue index for the rep to endpoint direction
149  */
150 #define BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)        ((rep_q_r2f) > 3)
151
152 /*
153  * rep_q_f2r == Logical COS Queue index for the endpoint to rep direction
154  */
155 #define BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)        ((rep_q_f2r) > 3)
156
157 /*
158  * rep_fc_r2f == Flow control for the representor to endpoint direction
159  */
160 #define BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)      ((rep_fc_r2f) > 1)
161
162 /*
163  * rep_fc_f2r == Flow control for the endpoint to representor direction
164  */
165 #define BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)      ((rep_fc_f2r) > 1)
166
167 int bnxt_cfa_code_dynfield_offset = -1;
168
169 /*
170  * max_num_kflows must be >= 32
171  * and must be a power-of-2 supported value
172  * return: 1 -> invalid
173  *         0 -> valid
174  */
175 static int bnxt_devarg_max_num_kflow_invalid(uint16_t max_num_kflows)
176 {
177         if (max_num_kflows < 32 || !rte_is_power_of_2(max_num_kflows))
178                 return 1;
179         return 0;
180 }
181
182 static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
183 static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
184 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev);
185 static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev);
186 static void bnxt_cancel_fw_health_check(struct bnxt *bp);
187 static int bnxt_restore_vlan_filters(struct bnxt *bp);
188 static void bnxt_dev_recover(void *arg);
189 static void bnxt_free_error_recovery_info(struct bnxt *bp);
190 static void bnxt_free_rep_info(struct bnxt *bp);
191
192 int is_bnxt_in_error(struct bnxt *bp)
193 {
194         if (bp->flags & BNXT_FLAG_FATAL_ERROR)
195                 return -EIO;
196         if (bp->flags & BNXT_FLAG_FW_RESET)
197                 return -EBUSY;
198
199         return 0;
200 }
201
202 /***********************/
203
204 /*
205  * High level utility functions
206  */
207
208 static uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
209 {
210         if (!BNXT_CHIP_THOR(bp))
211                 return 1;
212
213         return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings,
214                                   BNXT_RSS_ENTRIES_PER_CTX_THOR) /
215                                     BNXT_RSS_ENTRIES_PER_CTX_THOR;
216 }
217
218 uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
219 {
220         if (!BNXT_CHIP_THOR(bp))
221                 return HW_HASH_INDEX_SIZE;
222
223         return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR;
224 }
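/*
 * Worked example, assuming BNXT_RSS_ENTRIES_PER_CTX_THOR is 64: a Thor
 * adapter configured with rx_nr_rings = 70 needs RTE_ALIGN_MUL_CEIL(70, 64)
 * / 64 = 2 RSS contexts, giving a 2 * 64 = 128-entry hash/redirection table.
 * Pre-Thor adapters always use one context with HW_HASH_INDEX_SIZE entries.
 */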
225
226 static void bnxt_free_parent_info(struct bnxt *bp)
227 {
228         rte_free(bp->parent);
229 }
230
231 static void bnxt_free_pf_info(struct bnxt *bp)
232 {
233         rte_free(bp->pf);
234 }
235
236 static void bnxt_free_link_info(struct bnxt *bp)
237 {
238         rte_free(bp->link_info);
239 }
240
241 static void bnxt_free_leds_info(struct bnxt *bp)
242 {
243         if (BNXT_VF(bp))
244                 return;
245
246         rte_free(bp->leds);
247         bp->leds = NULL;
248 }
249
250 static void bnxt_free_flow_stats_info(struct bnxt *bp)
251 {
252         rte_free(bp->flow_stat);
253         bp->flow_stat = NULL;
254 }
255
256 static void bnxt_free_cos_queues(struct bnxt *bp)
257 {
258         rte_free(bp->rx_cos_queue);
259         rte_free(bp->tx_cos_queue);
260 }
261
262 static void bnxt_free_mem(struct bnxt *bp, bool reconfig)
263 {
264         bnxt_free_filter_mem(bp);
265         bnxt_free_vnic_attributes(bp);
266         bnxt_free_vnic_mem(bp);
267
268         /* tx/rx rings are configured as part of *_queue_setup callbacks.
269          * If the number of rings changes across a FW update,
270          * we don't have much choice except to warn the user.
271          */
272         if (!reconfig) {
273                 bnxt_free_stats(bp);
274                 bnxt_free_tx_rings(bp);
275                 bnxt_free_rx_rings(bp);
276         }
277         bnxt_free_async_cp_ring(bp);
278         bnxt_free_rxtx_nq_ring(bp);
279
280         rte_free(bp->grp_info);
281         bp->grp_info = NULL;
282 }
283
284 static int bnxt_alloc_parent_info(struct bnxt *bp)
285 {
286         bp->parent = rte_zmalloc("bnxt_parent_info",
287                                  sizeof(struct bnxt_parent_info), 0);
288         if (bp->parent == NULL)
289                 return -ENOMEM;
290
291         return 0;
292 }
293
294 static int bnxt_alloc_pf_info(struct bnxt *bp)
295 {
296         bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0);
297         if (bp->pf == NULL)
298                 return -ENOMEM;
299
300         return 0;
301 }
302
303 static int bnxt_alloc_link_info(struct bnxt *bp)
304 {
305         bp->link_info =
306                 rte_zmalloc("bnxt_link_info", sizeof(struct bnxt_link_info), 0);
307         if (bp->link_info == NULL)
308                 return -ENOMEM;
309
310         return 0;
311 }
312
313 static int bnxt_alloc_leds_info(struct bnxt *bp)
314 {
315         if (BNXT_VF(bp))
316                 return 0;
317
318         bp->leds = rte_zmalloc("bnxt_leds",
319                                BNXT_MAX_LED * sizeof(struct bnxt_led_info),
320                                0);
321         if (bp->leds == NULL)
322                 return -ENOMEM;
323
324         return 0;
325 }
326
327 static int bnxt_alloc_cos_queues(struct bnxt *bp)
328 {
329         bp->rx_cos_queue =
330                 rte_zmalloc("bnxt_rx_cosq",
331                             BNXT_COS_QUEUE_COUNT *
332                             sizeof(struct bnxt_cos_queue_info),
333                             0);
334         if (bp->rx_cos_queue == NULL)
335                 return -ENOMEM;
336
337         bp->tx_cos_queue =
338                 rte_zmalloc("bnxt_tx_cosq",
339                             BNXT_COS_QUEUE_COUNT *
340                             sizeof(struct bnxt_cos_queue_info),
341                             0);
342         if (bp->tx_cos_queue == NULL)
343                 return -ENOMEM;
344
345         return 0;
346 }
347
348 static int bnxt_alloc_flow_stats_info(struct bnxt *bp)
349 {
350         bp->flow_stat = rte_zmalloc("bnxt_flow_xstat",
351                                     sizeof(struct bnxt_flow_stat_info), 0);
352         if (bp->flow_stat == NULL)
353                 return -ENOMEM;
354
355         return 0;
356 }
357
358 static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
359 {
360         int rc;
361
362         rc = bnxt_alloc_ring_grps(bp);
363         if (rc)
364                 goto alloc_mem_err;
365
366         rc = bnxt_alloc_async_ring_struct(bp);
367         if (rc)
368                 goto alloc_mem_err;
369
370         rc = bnxt_alloc_vnic_mem(bp);
371         if (rc)
372                 goto alloc_mem_err;
373
374         rc = bnxt_alloc_vnic_attributes(bp);
375         if (rc)
376                 goto alloc_mem_err;
377
378         rc = bnxt_alloc_filter_mem(bp);
379         if (rc)
380                 goto alloc_mem_err;
381
382         rc = bnxt_alloc_async_cp_ring(bp);
383         if (rc)
384                 goto alloc_mem_err;
385
386         rc = bnxt_alloc_rxtx_nq_ring(bp);
387         if (rc)
388                 goto alloc_mem_err;
389
390         if (BNXT_FLOW_XSTATS_EN(bp)) {
391                 rc = bnxt_alloc_flow_stats_info(bp);
392                 if (rc)
393                         goto alloc_mem_err;
394         }
395
396         return 0;
397
398 alloc_mem_err:
399         bnxt_free_mem(bp, reconfig);
400         return rc;
401 }
402
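/*
 * Bring up a single VNIC: allocate its ring group and, when RSS is enabled,
 * its RSS contexts; then program VLAN stripping, L2 filters, Rx queue
 * membership, RSS and LRO/TPA settings for it via HWRM.
 */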
403 static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
404 {
405         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
406         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
407         uint64_t rx_offloads = dev_conf->rxmode.offloads;
408         struct bnxt_rx_queue *rxq;
409         unsigned int j;
410         int rc;
411
412         rc = bnxt_vnic_grp_alloc(bp, vnic);
413         if (rc)
414                 goto err_out;
415
416         PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
417                     vnic_id, vnic, vnic->fw_grp_ids);
418
419         rc = bnxt_hwrm_vnic_alloc(bp, vnic);
420         if (rc)
421                 goto err_out;
422
423         /* Alloc RSS context only if RSS mode is enabled */
424         if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
425                 int j, nr_ctxs = bnxt_rss_ctxts(bp);
426
427                 rc = 0;
428                 for (j = 0; j < nr_ctxs; j++) {
429                         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j);
430                         if (rc)
431                                 break;
432                 }
433                 if (rc) {
434                         PMD_DRV_LOG(ERR,
435                                     "HWRM vnic %d ctx %d alloc failure rc: %x\n",
436                                     vnic_id, j, rc);
437                         goto err_out;
438                 }
439                 vnic->num_lb_ctxts = nr_ctxs;
440         }
441
442         /*
443          * Firmware sets pf pair in default vnic cfg. If the VLAN strip
444          * setting is not available at this time, it will not be
445          * configured correctly in the CFA.
446          */
447         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
448                 vnic->vlan_strip = true;
449         else
450                 vnic->vlan_strip = false;
451
452         rc = bnxt_hwrm_vnic_cfg(bp, vnic);
453         if (rc)
454                 goto err_out;
455
456         rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
457         if (rc)
458                 goto err_out;
459
460         for (j = 0; j < bp->rx_num_qs_per_vnic; j++) {
461                 rxq = bp->eth_dev->data->rx_queues[j];
462
463                 PMD_DRV_LOG(DEBUG,
464                             "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n",
465                             j, rxq->vnic, rxq->vnic->fw_grp_ids);
466
467                 if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start)
468                         rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
469                 else
470                         vnic->rx_queue_cnt++;
471         }
472
473         PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt);
474
475         rc = bnxt_vnic_rss_configure(bp, vnic);
476         if (rc)
477                 goto err_out;
478
479         bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
480
481         if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
482                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
483         else
484                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
485
486         return 0;
487 err_out:
488         PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
489                     vnic_id, rc);
490         return rc;
491 }
492
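/*
 * Register the four flow-counter DMA tables (Rx/Tx "in" and "out") with
 * firmware and enable CFA counter collection in both directions. The "out"
 * tables are zeroed first because firmware DMAs packet/byte counts into them.
 */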
493 static int bnxt_register_fc_ctx_mem(struct bnxt *bp)
494 {
495         int rc = 0;
496
497         rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma,
498                                 &bp->flow_stat->rx_fc_in_tbl.ctx_id);
499         if (rc)
500                 return rc;
501
502         PMD_DRV_LOG(DEBUG,
503                     "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p"
504                     " rx_fc_in_tbl.ctx_id = %d\n",
505                     bp->flow_stat->rx_fc_in_tbl.va,
506                     (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma),
507                     bp->flow_stat->rx_fc_in_tbl.ctx_id);
508
509         rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma,
510                                 &bp->flow_stat->rx_fc_out_tbl.ctx_id);
511         if (rc)
512                 return rc;
513
514         PMD_DRV_LOG(DEBUG,
515                     "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p"
516                     " rx_fc_out_tbl.ctx_id = %d\n",
517                     bp->flow_stat->rx_fc_out_tbl.va,
518                     (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma),
519                     bp->flow_stat->rx_fc_out_tbl.ctx_id);
520
521         rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma,
522                                 &bp->flow_stat->tx_fc_in_tbl.ctx_id);
523         if (rc)
524                 return rc;
525
526         PMD_DRV_LOG(DEBUG,
527                     "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p"
528                     " tx_fc_in_tbl.ctx_id = %d\n",
529                     bp->flow_stat->tx_fc_in_tbl.va,
530                     (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma),
531                     bp->flow_stat->tx_fc_in_tbl.ctx_id);
532
533         rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma,
534                                 &bp->flow_stat->tx_fc_out_tbl.ctx_id);
535         if (rc)
536                 return rc;
537
538         PMD_DRV_LOG(DEBUG,
539                     "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p"
540                     " tx_fc_out_tbl.ctx_id = %d\n",
541                     bp->flow_stat->tx_fc_out_tbl.va,
542                     (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma),
543                     bp->flow_stat->tx_fc_out_tbl.ctx_id);
544
545         memset(bp->flow_stat->rx_fc_out_tbl.va,
546                0,
547                bp->flow_stat->rx_fc_out_tbl.size);
548         rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
549                                        CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
550                                        bp->flow_stat->rx_fc_out_tbl.ctx_id,
551                                        bp->flow_stat->max_fc,
552                                        true);
553         if (rc)
554                 return rc;
555
556         memset(bp->flow_stat->tx_fc_out_tbl.va,
557                0,
558                bp->flow_stat->tx_fc_out_tbl.size);
559         rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
560                                        CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
561                                        bp->flow_stat->tx_fc_out_tbl.ctx_id,
562                                        bp->flow_stat->max_fc,
563                                        true);
564
565         return rc;
566 }
567
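/*
 * Allocate a zeroed, DMA-able buffer for a flow-counter table: the page is
 * locked and its IOVA resolved so the address can be handed to firmware.
 * Per the callers below, "in" tables use 4 bytes per counter (counter-id)
 * and "out" tables 16 bytes per counter (8-byte packet + 8-byte byte count).
 */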
568 static int bnxt_alloc_ctx_mem_buf(char *type, size_t size,
569                                   struct bnxt_ctx_mem_buf_info *ctx)
570 {
571         if (!ctx)
572                 return -EINVAL;
573
574         ctx->va = rte_zmalloc(type, size, 0);
575         if (ctx->va == NULL)
576                 return -ENOMEM;
577         rte_mem_lock_page(ctx->va);
578         ctx->size = size;
579         ctx->dma = rte_mem_virt2iova(ctx->va);
580         if (ctx->dma == RTE_BAD_IOVA)
581                 return -ENOMEM;
582
583         return 0;
584 }
585
586 static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
587 {
588         struct rte_pci_device *pdev = bp->pdev;
589         char type[RTE_MEMZONE_NAMESIZE];
590         uint16_t max_fc;
591         int rc = 0;
592
593         max_fc = bp->flow_stat->max_fc;
594
595         sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
596                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
597         /* 4 bytes for each counter-id */
598         rc = bnxt_alloc_ctx_mem_buf(type,
599                                     max_fc * 4,
600                                     &bp->flow_stat->rx_fc_in_tbl);
601         if (rc)
602                 return rc;
603
604         sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
605                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
606         /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
607         rc = bnxt_alloc_ctx_mem_buf(type,
608                                     max_fc * 16,
609                                     &bp->flow_stat->rx_fc_out_tbl);
610         if (rc)
611                 return rc;
612
613         sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
614                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
615         /* 4 bytes for each counter-id */
616         rc = bnxt_alloc_ctx_mem_buf(type,
617                                     max_fc * 4,
618                                     &bp->flow_stat->tx_fc_in_tbl);
619         if (rc)
620                 return rc;
621
622         sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
623                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
624         /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
625         rc = bnxt_alloc_ctx_mem_buf(type,
626                                     max_fc * 16,
627                                     &bp->flow_stat->tx_fc_out_tbl);
628         if (rc)
629                 return rc;
630
631         rc = bnxt_register_fc_ctx_mem(bp);
632
633         return rc;
634 }
635
636 static int bnxt_init_ctx_mem(struct bnxt *bp)
637 {
638         int rc = 0;
639
640         if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) ||
641             !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) ||
642             !BNXT_FLOW_XSTATS_EN(bp))
643                 return 0;
644
645         rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc);
646         if (rc)
647                 return rc;
648
649         rc = bnxt_init_fc_ctx_mem(bp);
650
651         return rc;
652 }
653
654 static int bnxt_update_phy_setting(struct bnxt *bp)
655 {
656         struct rte_eth_link new;
657         int rc;
658
659         rc = bnxt_get_hwrm_link_config(bp, &new);
660         if (rc) {
661                 PMD_DRV_LOG(ERR, "Failed to get link settings\n");
662                 return rc;
663         }
664
665         /*
666          * On BCM957508-N2100 adapters, FW does not allow any user other
667          * than the BMC to shut down the port, so bnxt_get_hwrm_link_config()
668          * always reports link up. Force a PHY update in that case.
669          */
670         if (!new.link_status || IS_BNXT_DEV_957508_N2100(bp)) {
671                 rc = bnxt_set_hwrm_link_config(bp, true);
672                 if (rc) {
673                         PMD_DRV_LOG(ERR, "Failed to update PHY settings\n");
674                         return rc;
675                 }
676         }
677
678         return rc;
679 }
680
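/*
 * Datapath bring-up for port start: set the jumbo-frame state, allocate
 * firmware stat contexts, rings and ring groups, configure CoS queues and
 * every VNIC, program the Rx mask, map per-queue interrupt vectors, and
 * finally update the PHY configuration and allocate the mark table.
 */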
681 static int bnxt_init_chip(struct bnxt *bp)
682 {
683         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
684         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
685         uint32_t intr_vector = 0;
686         uint32_t queue_id, base = BNXT_MISC_VEC_ID;
687         uint32_t vec = BNXT_MISC_VEC_ID;
688         unsigned int i, j;
689         int rc;
690
691         if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
692                 bp->eth_dev->data->dev_conf.rxmode.offloads |=
693                         DEV_RX_OFFLOAD_JUMBO_FRAME;
694                 bp->flags |= BNXT_FLAG_JUMBO;
695         } else {
696                 bp->eth_dev->data->dev_conf.rxmode.offloads &=
697                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
698                 bp->flags &= ~BNXT_FLAG_JUMBO;
699         }
700
701         /* THOR does not support ring groups.
702          * But we will use the array to save RSS context IDs.
703          */
704         if (BNXT_CHIP_THOR(bp))
705                 bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;
706
707         rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
708         if (rc) {
709                 PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
710                 goto err_out;
711         }
712
713         rc = bnxt_alloc_hwrm_rings(bp);
714         if (rc) {
715                 PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
716                 goto err_out;
717         }
718
719         rc = bnxt_alloc_all_hwrm_ring_grps(bp);
720         if (rc) {
721                 PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
722                 goto err_out;
723         }
724
725         if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
726                 goto skip_cosq_cfg;
727
728         for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
729                 if (bp->rx_cos_queue[i].id != 0xff) {
730                         struct bnxt_vnic_info *vnic = &bp->vnic_info[j++];
731
732                         if (!vnic) {
733                                 PMD_DRV_LOG(ERR,
734                                             "Num pools more than FW profile\n");
735                                 rc = -EINVAL;
736                                 goto err_out;
737                         }
738                         vnic->cos_queue_id = bp->rx_cos_queue[i].id;
739                         bp->rx_cosq_cnt++;
740                 }
741         }
742
743 skip_cosq_cfg:
744         rc = bnxt_mq_rx_configure(bp);
745         if (rc) {
746                 PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
747                 goto err_out;
748         }
749
750         /* VNIC configuration */
751         for (i = 0; i < bp->nr_vnics; i++) {
752                 rc = bnxt_setup_one_vnic(bp, i);
753                 if (rc)
754                         goto err_out;
755         }
756
757         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
758         if (rc) {
759                 PMD_DRV_LOG(ERR,
760                         "HWRM cfa l2 rx mask failure rc: %x\n", rc);
761                 goto err_out;
762         }
763
764         /* check and configure queue intr-vector mapping */
765         if ((rte_intr_cap_multiple(intr_handle) ||
766              !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
767             bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
768                 intr_vector = bp->eth_dev->data->nb_rx_queues;
769                 PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
770                 if (intr_vector > bp->rx_cp_nr_rings) {
771                         PMD_DRV_LOG(ERR, "At most %d intr queues supported",
772                                         bp->rx_cp_nr_rings);
773                         return -ENOTSUP;
774                 }
775                 rc = rte_intr_efd_enable(intr_handle, intr_vector);
776                 if (rc)
777                         return rc;
778         }
779
780         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
781                 intr_handle->intr_vec =
782                         rte_zmalloc("intr_vec",
783                                     bp->eth_dev->data->nb_rx_queues *
784                                     sizeof(int), 0);
785                 if (intr_handle->intr_vec == NULL) {
786                         PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
787                                 " intr_vec", bp->eth_dev->data->nb_rx_queues);
788                         rc = -ENOMEM;
789                         goto err_disable;
790                 }
791                 PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
792                         "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
793                          intr_handle->intr_vec, intr_handle->nb_efd,
794                         intr_handle->max_intr);
795                 for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
796                      queue_id++) {
797                         intr_handle->intr_vec[queue_id] =
798                                                         vec + BNXT_RX_VEC_START;
799                         if (vec < base + intr_handle->nb_efd - 1)
800                                 vec++;
801                 }
802         }
803
804         /* enable uio/vfio intr/eventfd mapping */
805         rc = rte_intr_enable(intr_handle);
806 #ifndef RTE_EXEC_ENV_FREEBSD
807         /* In FreeBSD OS, nic_uio driver does not support interrupts */
808         if (rc)
809                 goto err_free;
810 #endif
811
812         rc = bnxt_update_phy_setting(bp);
813         if (rc)
814                 goto err_free;
815
816         bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0);
817         if (!bp->mark_table)
818                 PMD_DRV_LOG(ERR, "Allocation of mark table failed\n");
819
820         return 0;
821
822 err_free:
823         rte_free(intr_handle->intr_vec);
824 err_disable:
825         rte_intr_efd_disable(intr_handle);
826 err_out:
827         /* Some error codes returned by FW may not be errno.h values */
828         if (rc > 0)
829                 rc = -EIO;
830
831         return rc;
832 }
833
834 static int bnxt_shutdown_nic(struct bnxt *bp)
835 {
836         bnxt_free_all_hwrm_resources(bp);
837         bnxt_free_all_filters(bp);
838         bnxt_free_all_vnics(bp);
839         return 0;
840 }
841
842 /*
843  * Device configuration and status function
844  */
845
846 uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
847 {
848         uint32_t link_speed = bp->link_info->support_speeds;
849         uint32_t speed_capa = 0;
850
851         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB)
852                 speed_capa |= ETH_LINK_SPEED_100M;
853         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD)
854                 speed_capa |= ETH_LINK_SPEED_100M_HD;
855         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB)
856                 speed_capa |= ETH_LINK_SPEED_1G;
857         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
858                 speed_capa |= ETH_LINK_SPEED_2_5G;
859         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB)
860                 speed_capa |= ETH_LINK_SPEED_10G;
861         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
862                 speed_capa |= ETH_LINK_SPEED_20G;
863         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
864                 speed_capa |= ETH_LINK_SPEED_25G;
865         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
866                 speed_capa |= ETH_LINK_SPEED_40G;
867         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
868                 speed_capa |= ETH_LINK_SPEED_50G;
869         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
870                 speed_capa |= ETH_LINK_SPEED_100G;
871         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G)
872                 speed_capa |= ETH_LINK_SPEED_50G;
873         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G)
874                 speed_capa |= ETH_LINK_SPEED_100G;
875         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G)
876                 speed_capa |= ETH_LINK_SPEED_200G;
877
878         if (bp->link_info->auto_mode ==
879             HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
880                 speed_capa |= ETH_LINK_SPEED_FIXED;
881         else
882                 speed_capa |= ETH_LINK_SPEED_AUTONEG;
883
884         return speed_capa;
885 }
886
887 static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
888                                 struct rte_eth_dev_info *dev_info)
889 {
890         struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
891         struct bnxt *bp = eth_dev->data->dev_private;
892         uint16_t max_vnics, i, j, vpool, vrxq;
893         unsigned int max_rx_rings;
894         int rc;
895
896         rc = is_bnxt_in_error(bp);
897         if (rc)
898                 return rc;
899
900         /* MAC Specifics */
901         dev_info->max_mac_addrs = bp->max_l2_ctx;
902         dev_info->max_hash_mac_addrs = 0;
903
904         /* PF/VF specifics */
905         if (BNXT_PF(bp))
906                 dev_info->max_vfs = pdev->max_vfs;
907
908         max_rx_rings = BNXT_MAX_RINGS(bp);
909         /* For the sake of symmetry, max_rx_queues = max_tx_queues */
910         dev_info->max_rx_queues = max_rx_rings;
911         dev_info->max_tx_queues = max_rx_rings;
912         dev_info->reta_size = bnxt_rss_hash_tbl_size(bp);
913         dev_info->hash_key_size = 40;
914         max_vnics = bp->max_vnics;
915
916         /* MTU specifics */
917         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
918         dev_info->max_mtu = BNXT_MAX_MTU;
919
920         /* Fast path specifics */
921         dev_info->min_rx_bufsize = 1;
922         dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;
923
924         dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
925         if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
926                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
927         dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
928         dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
929                                     dev_info->tx_queue_offload_capa;
930         dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
931
932         dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
933
934         /* *INDENT-OFF* */
935         dev_info->default_rxconf = (struct rte_eth_rxconf) {
936                 .rx_thresh = {
937                         .pthresh = 8,
938                         .hthresh = 8,
939                         .wthresh = 0,
940                 },
941                 .rx_free_thresh = 32,
942                 .rx_drop_en = BNXT_DEFAULT_RX_DROP_EN,
943         };
944
945         dev_info->default_txconf = (struct rte_eth_txconf) {
946                 .tx_thresh = {
947                         .pthresh = 32,
948                         .hthresh = 0,
949                         .wthresh = 0,
950                 },
951                 .tx_free_thresh = 32,
952                 .tx_rs_thresh = 32,
953         };
954         eth_dev->data->dev_conf.intr_conf.lsc = 1;
955
956         eth_dev->data->dev_conf.intr_conf.rxq = 1;
957         dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
958         dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
959         dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
960         dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;
961
962         if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
963                 dev_info->switch_info.name = eth_dev->device->name;
964                 dev_info->switch_info.domain_id = bp->switch_domain_id;
965                 dev_info->switch_info.port_id =
966                                 BNXT_PF(bp) ? BNXT_SWITCH_PORT_ID_PF :
967                                     BNXT_SWITCH_PORT_ID_TRUSTED_VF;
968         }
969
970         /* *INDENT-ON* */
971
972         /*
973          * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
974          *       need further investigation.
975          */
976
977         /* VMDq resources */
978         vpool = 64; /* ETH_64_POOLS */
979         vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
980         for (i = 0; i < 4; vpool >>= 1, i++) {
981                 if (max_vnics > vpool) {
982                         for (j = 0; j < 5; vrxq >>= 1, j++) {
983                                 if (dev_info->max_rx_queues > vrxq) {
984                                         if (vpool > vrxq)
985                                                 vpool = vrxq;
986                                         goto found;
987                                 }
988                         }
989                         /* Not enough resources to support VMDq */
990                         break;
991                 }
992         }
993         /* Not enough resources to support VMDq */
994         vpool = 0;
995         vrxq = 0;
996 found:
997         dev_info->max_vmdq_pools = vpool;
998         dev_info->vmdq_queue_num = vrxq;
999
1000         dev_info->vmdq_pool_base = 0;
1001         dev_info->vmdq_queue_base = 0;
1002
1003         return 0;
1004 }
1005
1006 /* Configure the device based on the configuration provided */
1007 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
1008 {
1009         struct bnxt *bp = eth_dev->data->dev_private;
1010         uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
1011         int rc;
1012
1013         bp->rx_queues = (void *)eth_dev->data->rx_queues;
1014         bp->tx_queues = (void *)eth_dev->data->tx_queues;
1015         bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
1016         bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
1017
1018         rc = is_bnxt_in_error(bp);
1019         if (rc)
1020                 return rc;
1021
1022         if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
1023                 rc = bnxt_hwrm_check_vf_rings(bp);
1024                 if (rc) {
1025                         PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
1026                         return -ENOSPC;
1027                 }
1028
1029                 /* If a resource has already been allocated - in this case
1030                  * the async completion ring - free it and reallocate it after
1031                  * resource reservation, so that the resource counts
1032                  * are calculated correctly.
1033                  */
1034
1035                 pthread_mutex_lock(&bp->def_cp_lock);
1036
1037                 if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
1038                         bnxt_disable_int(bp);
1039                         bnxt_free_cp_ring(bp, bp->async_cp_ring);
1040                 }
1041
1042                 rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
1043                 if (rc) {
1044                         PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
1045                         pthread_mutex_unlock(&bp->def_cp_lock);
1046                         return -ENOSPC;
1047                 }
1048
1049                 if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
1050                         rc = bnxt_alloc_async_cp_ring(bp);
1051                         if (rc) {
1052                                 pthread_mutex_unlock(&bp->def_cp_lock);
1053                                 return rc;
1054                         }
1055                         bnxt_enable_int(bp);
1056                 }
1057
1058                 pthread_mutex_unlock(&bp->def_cp_lock);
1059         } else {
1060                 /* legacy driver needs to get updated values */
1061                 rc = bnxt_hwrm_func_qcaps(bp);
1062                 if (rc) {
1063                         PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
1064                         return rc;
1065                 }
1066         }
1067
1068         /* Inherit new configurations */
1069         if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
1070             eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
1071             eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues
1072                 + BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings ||
1073             eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
1074             bp->max_stat_ctx)
1075                 goto resource_error;
1076
1077         if (BNXT_HAS_RING_GRPS(bp) &&
1078             (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
1079                 goto resource_error;
1080
1081         if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
1082             bp->max_vnics < eth_dev->data->nb_rx_queues)
1083                 goto resource_error;
1084
1085         bp->rx_cp_nr_rings = bp->rx_nr_rings;
1086         bp->tx_cp_nr_rings = bp->tx_nr_rings;
1087
1088         if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1089                 rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1090         eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;
1091
1092         if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1093                 eth_dev->data->mtu =
1094                         eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
1095                         RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
1096                         BNXT_NUM_VLANS;
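                /*
                 * Example of the derivation above, assuming RTE_ETHER_HDR_LEN = 14,
                 * RTE_ETHER_CRC_LEN = 4, VLAN_TAG_SIZE = 4 and BNXT_NUM_VLANS = 2:
                 * a max_rx_pkt_len of 9026 bytes yields an MTU of
                 * 9026 - 14 - 4 - 4 * 2 = 9000 bytes.
                 */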
1097                 bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
1098         }
1099         return 0;
1100
1101 resource_error:
1102         PMD_DRV_LOG(ERR,
1103                     "Insufficient resources to support requested config\n");
1104         PMD_DRV_LOG(ERR,
1105                     "Num Queues Requested: Tx %d, Rx %d\n",
1106                     eth_dev->data->nb_tx_queues,
1107                     eth_dev->data->nb_rx_queues);
1108         PMD_DRV_LOG(ERR,
1109                     "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
1110                     bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
1111                     bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
1112         return -ENOSPC;
1113 }
1114
1115 void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
1116 {
1117         struct rte_eth_link *link = &eth_dev->data->dev_link;
1118
1119         if (link->link_status)
1120                 PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
1121                         eth_dev->data->port_id,
1122                         (uint32_t)link->link_speed,
1123                         (link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
1124                         ("full-duplex") : ("half-duplex"));
1125         else
1126                 PMD_DRV_LOG(INFO, "Port %d Link Down\n",
1127                         eth_dev->data->port_id);
1128 }
1129
1130 /*
1131  * Determine whether the current configuration requires support for scattered
1132  * receive; return 1 if scattered receive is required and 0 if not.
1133  */
1134 static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
1135 {
1136         uint16_t buf_size;
1137         int i;
1138
1139         if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
1140                 return 1;
1141
1142         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1143                 struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i];
1144
1145                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
1146                                       RTE_PKTMBUF_HEADROOM);
1147                 if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size)
1148                         return 1;
1149         }
1150         return 0;
1151 }
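/*
 * Example for bnxt_scattered_rx(), assuming the default RTE_PKTMBUF_HEADROOM
 * of 128 bytes and a 2176-byte mbuf data room: buf_size works out to 2048, so
 * any max_rx_pkt_len larger than 2048 bytes makes scattered receive required.
 */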
1152
1153 static eth_rx_burst_t
1154 bnxt_receive_function(struct rte_eth_dev *eth_dev)
1155 {
1156         struct bnxt *bp = eth_dev->data->dev_private;
1157
1158 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
1159 #ifndef RTE_LIBRTE_IEEE1588
1160         /*
1161          * Vector mode receive can be enabled only if scattered rx is not
1162          * in use and the requested rx offloads fall within the set the
1163          * vector path supports (see the offload mask checked below).
1164          */
1165         if (!eth_dev->data->scattered_rx &&
1166             !(eth_dev->data->dev_conf.rxmode.offloads &
1167               ~(DEV_RX_OFFLOAD_VLAN_STRIP |
1168                 DEV_RX_OFFLOAD_KEEP_CRC |
1169                 DEV_RX_OFFLOAD_JUMBO_FRAME |
1170                 DEV_RX_OFFLOAD_IPV4_CKSUM |
1171                 DEV_RX_OFFLOAD_UDP_CKSUM |
1172                 DEV_RX_OFFLOAD_TCP_CKSUM |
1173                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
1174                 DEV_RX_OFFLOAD_RSS_HASH |
1175                 DEV_RX_OFFLOAD_VLAN_FILTER)) &&
1176             !BNXT_TRUFLOW_EN(bp) && BNXT_NUM_ASYNC_CPR(bp) &&
1177             rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
1178                 PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n",
1179                             eth_dev->data->port_id);
1180                 bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE;
1181                 return bnxt_recv_pkts_vec;
1182         }
1183         PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n",
1184                     eth_dev->data->port_id);
1185         PMD_DRV_LOG(INFO,
1186                     "Port %d scatter: %d rx offload: %" PRIX64 "\n",
1187                     eth_dev->data->port_id,
1188                     eth_dev->data->scattered_rx,
1189                     eth_dev->data->dev_conf.rxmode.offloads);
1190 #endif
1191 #endif
1192         bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
1193         return bnxt_recv_pkts;
1194 }
1195
1196 static eth_tx_burst_t
1197 bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
1198 {
1199 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
1200 #ifndef RTE_LIBRTE_IEEE1588
1201         uint64_t offloads = eth_dev->data->dev_conf.txmode.offloads;
1202         struct bnxt *bp = eth_dev->data->dev_private;
1203
1204         /*
1205          * Vector mode transmit can be enabled only if scattered rx is not
1206          * in use and no tx offloads other than mbuf fast free are enabled.
1207          */
1208         if (!eth_dev->data->scattered_rx &&
1209             !(offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
1210             !BNXT_TRUFLOW_EN(bp) &&
1211             rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
1212                 PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
1213                             eth_dev->data->port_id);
1214                 return bnxt_xmit_pkts_vec;
1215         }
1216         PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n",
1217                     eth_dev->data->port_id);
1218         PMD_DRV_LOG(INFO,
1219                     "Port %d scatter: %d tx offload: %" PRIX64 "\n",
1220                     eth_dev->data->port_id,
1221                     eth_dev->data->scattered_rx,
1222                     offloads);
1223 #endif
1224 #endif
1225         return bnxt_xmit_pkts;
1226 }
1227
1228 static int bnxt_handle_if_change_status(struct bnxt *bp)
1229 {
1230         int rc;
1231
1232         /* Since fw has undergone a reset and lost all contexts,
1233          * set fatal flag to not issue hwrm during cleanup
1234          */
1235         bp->flags |= BNXT_FLAG_FATAL_ERROR;
1236         bnxt_uninit_resources(bp, true);
1237
1238         /* clear fatal flag so that re-init happens */
1239         bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
1240         rc = bnxt_init_resources(bp, true);
1241
1242         bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
1243
1244         return rc;
1245 }
1246
1247 static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
1248 {
1249         struct bnxt *bp = eth_dev->data->dev_private;
1250         uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
1251         int vlan_mask = 0;
1252         int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT;
1253
1254         if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) {
1255                 PMD_DRV_LOG(ERR, "Queues are not configured yet!\n");
1256                 return -EINVAL;
1257         }
1258
1259         if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
1260                 PMD_DRV_LOG(ERR,
1261                         "RxQ cnt %d > RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
1262                         bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1263         }
1264
1265         do {
1266                 rc = bnxt_hwrm_if_change(bp, true);
1267                 if (rc == 0 || rc != -EAGAIN)
1268                         break;
1269
1270                 rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL);
1271         } while (retry_cnt--);
1272
1273         if (rc)
1274                 return rc;
1275
1276         if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) {
1277                 rc = bnxt_handle_if_change_status(bp);
1278                 if (rc)
1279                         return rc;
1280         }
1281
1282         bnxt_enable_int(bp);
1283
1284         rc = bnxt_init_chip(bp);
1285         if (rc)
1286                 goto error;
1287
1288         eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);
1289         eth_dev->data->dev_started = 1;
1290
1291         bnxt_link_update_op(eth_dev, 1);
1292
1293         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
1294                 vlan_mask |= ETH_VLAN_FILTER_MASK;
1295         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1296                 vlan_mask |= ETH_VLAN_STRIP_MASK;
1297         rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
1298         if (rc)
1299                 goto error;
1300
1301         /* Initialize bnxt ULP port details */
1302         rc = bnxt_ulp_port_init(bp);
1303         if (rc)
1304                 goto error;
1305
1306         eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
1307         eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);
1308
1309         bnxt_schedule_fw_health_check(bp);
1310
1311         return 0;
1312
1313 error:
1314         bnxt_shutdown_nic(bp);
1315         bnxt_free_tx_mbufs(bp);
1316         bnxt_free_rx_mbufs(bp);
1317         bnxt_hwrm_if_change(bp, false);
1318         eth_dev->data->dev_started = 0;
1319         return rc;
1320 }
1321
1322 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
1323 {
1324         struct bnxt *bp = eth_dev->data->dev_private;
1325         int rc = 0;
1326
1327         if (!bp->link_info->link_up)
1328                 rc = bnxt_set_hwrm_link_config(bp, true);
1329         if (!rc)
1330                 eth_dev->data->dev_link.link_status = 1;
1331
1332         bnxt_print_link_info(eth_dev);
1333         return rc;
1334 }
1335
1336 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
1337 {
1338         struct bnxt *bp = eth_dev->data->dev_private;
1339
1340         eth_dev->data->dev_link.link_status = 0;
1341         bnxt_set_hwrm_link_config(bp, false);
1342         bp->link_info->link_up = 0;
1343
1344         return 0;
1345 }
1346
1347 static void bnxt_free_switch_domain(struct bnxt *bp)
1348 {
1349         if (bp->switch_domain_id)
1350                 rte_eth_switch_domain_free(bp->switch_domain_id);
1351 }
1352
1353 /* Unload the driver, release resources */
1354 static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
1355 {
1356         struct bnxt *bp = eth_dev->data->dev_private;
1357         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1358         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1359         struct rte_eth_link link;
1360         int ret;
1361
1362         eth_dev->data->dev_started = 0;
1363         eth_dev->data->scattered_rx = 0;
1364
1365         /* Prevent crashes when queues are still in use */
1366         eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
1367         eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
1368
1369         bnxt_disable_int(bp);
1370
1371         /* disable uio/vfio intr/eventfd mapping */
1372         rte_intr_disable(intr_handle);
1373
1374         /* Stop the child representors for this device */
1375         ret = bnxt_rep_stop_all(bp);
1376         if (ret != 0)
1377                 return ret;
1378
1379         /* delete the bnxt ULP port details */
1380         bnxt_ulp_port_deinit(bp);
1381
1382         bnxt_cancel_fw_health_check(bp);
1383
1384         /* Do not bring link down during reset recovery */
1385         if (!is_bnxt_in_error(bp)) {
1386                 bnxt_dev_set_link_down_op(eth_dev);
1387                 /* Wait for link to be reset */
1388                 if (BNXT_SINGLE_PF(bp))
1389                         rte_delay_ms(500);
1390                 /* clear the recorded link status */
1391                 memset(&link, 0, sizeof(link));
1392                 rte_eth_linkstatus_set(eth_dev, &link);
1393         }
1394
1395         /* Clean queue intr-vector mapping */
1396         rte_intr_efd_disable(intr_handle);
1397         if (intr_handle->intr_vec != NULL) {
1398                 rte_free(intr_handle->intr_vec);
1399                 intr_handle->intr_vec = NULL;
1400         }
1401
1402         bnxt_hwrm_port_clr_stats(bp);
1403         bnxt_free_tx_mbufs(bp);
1404         bnxt_free_rx_mbufs(bp);
1405         /* Process any remaining notifications in default completion queue */
1406         bnxt_int_handler(eth_dev);
1407         bnxt_shutdown_nic(bp);
1408         bnxt_hwrm_if_change(bp, false);
1409
1410         rte_free(bp->mark_table);
1411         bp->mark_table = NULL;
1412
1413         bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
1414         bp->rx_cosq_cnt = 0;
1415         /* All filters are deleted on a port stop. */
1416         if (BNXT_FLOW_XSTATS_EN(bp))
1417                 bp->flow_stat->flow_count = 0;
1418
1419         return 0;
1420 }
1421
1422 static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
1423 {
1424         struct bnxt *bp = eth_dev->data->dev_private;
1425         int ret = 0;
1426
1427         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1428                 return 0;
1429
1430         /* cancel the recovery handler before removing the device */
1431         rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp);
1432         rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp);
1433         bnxt_cancel_fc_thread(bp);
1434
1435         if (eth_dev->data->dev_started)
1436                 ret = bnxt_dev_stop_op(eth_dev);
1437
1438         bnxt_free_switch_domain(bp);
1439
1440         bnxt_uninit_resources(bp, false);
1441
1442         bnxt_free_leds_info(bp);
1443         bnxt_free_cos_queues(bp);
1444         bnxt_free_link_info(bp);
1445         bnxt_free_pf_info(bp);
1446         bnxt_free_parent_info(bp);
1447
1448         rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
1449         bp->tx_mem_zone = NULL;
1450         rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
1451         bp->rx_mem_zone = NULL;
1452
1453         bnxt_hwrm_free_vf_info(bp);
1454
1455         rte_free(bp->grp_info);
1456         bp->grp_info = NULL;
1457
1458         return ret;
1459 }
1460
1461 static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
1462                                     uint32_t index)
1463 {
1464         struct bnxt *bp = eth_dev->data->dev_private;
1465         uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
1466         struct bnxt_vnic_info *vnic;
1467         struct bnxt_filter_info *filter, *temp_filter;
1468         uint32_t i;
1469
1470         if (is_bnxt_in_error(bp))
1471                 return;
1472
1473         /*
1474          * Loop through the VNICs of all pools selected for this MAC
1475          * index and remove the corresponding MAC address filter.
1476          */
1477         for (i = 0; i < bp->nr_vnics; i++) {
1478                 if (!(pool_mask & (1ULL << i)))
1479                         continue;
1480
1481                 vnic = &bp->vnic_info[i];
1482                 filter = STAILQ_FIRST(&vnic->filter);
1483                 while (filter) {
1484                         temp_filter = STAILQ_NEXT(filter, next);
1485                         if (filter->mac_index == index) {
1486                                 STAILQ_REMOVE(&vnic->filter, filter,
1487                                                 bnxt_filter_info, next);
1488                                 bnxt_hwrm_clear_l2_filter(bp, filter);
1489                                 bnxt_free_filter(bp, filter);
1490                         }
1491                         filter = temp_filter;
1492                 }
1493         }
1494 }
1495
1496 static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1497                                struct rte_ether_addr *mac_addr, uint32_t index,
1498                                uint32_t pool)
1499 {
1500         struct bnxt_filter_info *filter;
1501         int rc = 0;
1502
1503         /* Attach requested MAC address to the new l2_filter */
1504         STAILQ_FOREACH(filter, &vnic->filter, next) {
1505                 if (filter->mac_index == index) {
1506                         PMD_DRV_LOG(DEBUG,
1507                                     "MAC addr already exists for pool %d\n",
1508                                     pool);
1509                         return 0;
1510                 }
1511         }
1512
1513         filter = bnxt_alloc_filter(bp);
1514         if (!filter) {
1515                 PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
1516                 return -ENODEV;
1517         }
1518
1519         /* bnxt_alloc_filter copies the default MAC to filter->l2_addr. So,
1520          * if the MAC being programmed now is a different one, copy that
1521          * address to filter->l2_addr instead.
1522          */
1523         if (mac_addr)
1524                 memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);
1525         filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
1526
1527         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
1528         if (!rc) {
1529                 filter->mac_index = index;
1530                 if (filter->mac_index == 0)
1531                         STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
1532                 else
1533                         STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
1534         } else {
1535                 bnxt_free_filter(bp, filter);
1536         }
1537
1538         return rc;
1539 }
1540
1541 static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
1542                                 struct rte_ether_addr *mac_addr,
1543                                 uint32_t index, uint32_t pool)
1544 {
1545         struct bnxt *bp = eth_dev->data->dev_private;
1546         struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
1547         int rc = 0;
1548
1549         rc = is_bnxt_in_error(bp);
1550         if (rc)
1551                 return rc;
1552
1553         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1554                 PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
1555                 return -ENOTSUP;
1556         }
1557
1558         if (!vnic) {
1559                 PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
1560                 return -EINVAL;
1561         }
1562
1563         /* Filter settings will get applied when port is started */
1564         if (!eth_dev->data->dev_started)
1565                 return 0;
1566
1567         rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool);
1568
1569         return rc;
1570 }
1571
1572 int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
1573 {
1574         int rc = 0;
1575         struct bnxt *bp = eth_dev->data->dev_private;
1576         struct rte_eth_link new;
1577         int cnt = wait_to_complete ? BNXT_MAX_LINK_WAIT_CNT :
1578                         BNXT_MIN_LINK_WAIT_CNT;
1579
1580         rc = is_bnxt_in_error(bp);
1581         if (rc)
1582                 return rc;
1583
1584         memset(&new, 0, sizeof(new));
1585         do {
1586                 /* Retrieve link info from hardware */
1587                 rc = bnxt_get_hwrm_link_config(bp, &new);
1588                 if (rc) {
1589                         new.link_speed = ETH_LINK_SPEED_100M;
1590                         new.link_duplex = ETH_LINK_FULL_DUPLEX;
1591                         PMD_DRV_LOG(ERR,
1592                                 "Failed to retrieve link rc = 0x%x!\n", rc);
1593                         goto out;
1594                 }
1595
1596                 if (!wait_to_complete || new.link_status)
1597                         break;
1598
1599                 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
1600         } while (cnt--);
1601
1602         /* Only a single-function PF can bring the PHY down.
1603          * When the port is stopped, report link down for VF/MH/NPAR functions.
1604          */
1605         if (!BNXT_SINGLE_PF(bp) && !eth_dev->data->dev_started)
1606                 memset(&new, 0, sizeof(new));
1607
1608 out:
1609         /* Timed out or success */
1610         if (new.link_status != eth_dev->data->dev_link.link_status ||
1611             new.link_speed != eth_dev->data->dev_link.link_speed) {
1612                 rte_eth_linkstatus_set(eth_dev, &new);
1613
1614                 rte_eth_dev_callback_process(eth_dev,
1615                                              RTE_ETH_EVENT_INTR_LSC,
1616                                              NULL);
1617
1618                 bnxt_print_link_info(eth_dev);
1619         }
1620
1621         return rc;
1622 }
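
/*
 * Illustrative note (not part of the original sources): applications reach
 * bnxt_link_update_op() through the ethdev layer. rte_eth_link_get() maps to
 * wait_to_complete = 1 (poll up to BNXT_MAX_LINK_WAIT_CNT times), while
 * rte_eth_link_get_nowait() maps to wait_to_complete = 0 (a single query).
 * A minimal usage sketch, assuming a started port "port_id":
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(port_id, &link);
 *	if (link.link_status == ETH_LINK_UP)
 *		printf("link up at %u Mbps\n", link.link_speed);
 */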
1623
1624 static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
1625 {
1626         struct bnxt *bp = eth_dev->data->dev_private;
1627         struct bnxt_vnic_info *vnic;
1628         uint32_t old_flags;
1629         int rc;
1630
1631         rc = is_bnxt_in_error(bp);
1632         if (rc)
1633                 return rc;
1634
1635         /* Filter settings will get applied when port is started */
1636         if (!eth_dev->data->dev_started)
1637                 return 0;
1638
1639         if (bp->vnic_info == NULL)
1640                 return 0;
1641
1642         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1643
1644         old_flags = vnic->flags;
1645         vnic->flags |= BNXT_VNIC_INFO_PROMISC;
1646         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1647         if (rc != 0)
1648                 vnic->flags = old_flags;
1649
1650         return rc;
1651 }
1652
1653 static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
1654 {
1655         struct bnxt *bp = eth_dev->data->dev_private;
1656         struct bnxt_vnic_info *vnic;
1657         uint32_t old_flags;
1658         int rc;
1659
1660         rc = is_bnxt_in_error(bp);
1661         if (rc)
1662                 return rc;
1663
1664         /* Filter settings will get applied when port is started */
1665         if (!eth_dev->data->dev_started)
1666                 return 0;
1667
1668         if (bp->vnic_info == NULL)
1669                 return 0;
1670
1671         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1672
1673         old_flags = vnic->flags;
1674         vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
1675         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1676         if (rc != 0)
1677                 vnic->flags = old_flags;
1678
1679         return rc;
1680 }
1681
1682 static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
1683 {
1684         struct bnxt *bp = eth_dev->data->dev_private;
1685         struct bnxt_vnic_info *vnic;
1686         uint32_t old_flags;
1687         int rc;
1688
1689         rc = is_bnxt_in_error(bp);
1690         if (rc)
1691                 return rc;
1692
1693         /* Filter settings will get applied when port is started */
1694         if (!eth_dev->data->dev_started)
1695                 return 0;
1696
1697         if (bp->vnic_info == NULL)
1698                 return 0;
1699
1700         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1701
1702         old_flags = vnic->flags;
1703         vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
1704         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1705         if (rc != 0)
1706                 vnic->flags = old_flags;
1707
1708         return rc;
1709 }
1710
1711 static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
1712 {
1713         struct bnxt *bp = eth_dev->data->dev_private;
1714         struct bnxt_vnic_info *vnic;
1715         uint32_t old_flags;
1716         int rc;
1717
1718         rc = is_bnxt_in_error(bp);
1719         if (rc)
1720                 return rc;
1721
1722         /* Filter settings will get applied when port is started */
1723         if (!eth_dev->data->dev_started)
1724                 return 0;
1725
1726         if (bp->vnic_info == NULL)
1727                 return 0;
1728
1729         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1730
1731         old_flags = vnic->flags;
1732         vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
1733         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1734         if (rc != 0)
1735                 vnic->flags = old_flags;
1736
1737         return rc;
1738 }
1739
1740 /* Return bnxt_rx_queue pointer corresponding to a given rxq. */
1741 static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid)
1742 {
1743         if (qid >= bp->rx_nr_rings)
1744                 return NULL;
1745
1746         return bp->eth_dev->data->rx_queues[qid];
1747 }
1748
1749 /* Return rxq corresponding to a given rss table ring/group ID. */
1750 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr)
1751 {
1752         struct bnxt_rx_queue *rxq;
1753         unsigned int i;
1754
1755         if (!BNXT_HAS_RING_GRPS(bp)) {
1756                 for (i = 0; i < bp->rx_nr_rings; i++) {
1757                         rxq = bp->eth_dev->data->rx_queues[i];
1758                         if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr)
1759                                 return rxq->index;
1760                 }
1761         } else {
1762                 for (i = 0; i < bp->rx_nr_rings; i++) {
1763                         if (bp->grp_info[i].fw_grp_id == fwr)
1764                                 return i;
1765                 }
1766         }
1767
1768         return INVALID_HW_RING_ID;
1769 }
1770
1771 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
1772                             struct rte_eth_rss_reta_entry64 *reta_conf,
1773                             uint16_t reta_size)
1774 {
1775         struct bnxt *bp = eth_dev->data->dev_private;
1776         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1777         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1778         uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
1779         uint16_t idx, sft;
1780         int i, rc;
1781
1782         rc = is_bnxt_in_error(bp);
1783         if (rc)
1784                 return rc;
1785
1786         if (!vnic->rss_table)
1787                 return -EINVAL;
1788
1789         if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
1790                 return -EINVAL;
1791
1792         if (reta_size != tbl_size) {
1793                 PMD_DRV_LOG(ERR, "The configured hash table lookup size "
1794                         "(%d) must equal the size supported by the hardware "
1795                         "(%d)\n", reta_size, tbl_size);
1796                 return -EINVAL;
1797         }
1798
1799         for (i = 0; i < reta_size; i++) {
1800                 struct bnxt_rx_queue *rxq;
1801
1802                 idx = i / RTE_RETA_GROUP_SIZE;
1803                 sft = i % RTE_RETA_GROUP_SIZE;
1804
1805                 if (!(reta_conf[idx].mask & (1ULL << sft)))
1806                         continue;
1807
1808                 rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]);
1809                 if (!rxq) {
1810                         PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n");
1811                         return -EINVAL;
1812                 }
1813
1814                 if (BNXT_CHIP_THOR(bp)) {
1815                         vnic->rss_table[i * 2] =
1816                                 rxq->rx_ring->rx_ring_struct->fw_ring_id;
1817                         vnic->rss_table[i * 2 + 1] =
1818                                 rxq->cp_ring->cp_ring_struct->fw_ring_id;
1819                 } else {
1820                         vnic->rss_table[i] =
1821                             vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
1822                 }
1823         }
1824
1825         bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1826         return 0;
1827 }
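
/*
 * Illustrative note (not part of the original sources): on Thor-based
 * adapters the VNIC RSS table stores (Rx ring ID, completion ring ID) pairs,
 * so redirection entry i occupies rss_table[i * 2] and rss_table[i * 2 + 1];
 * on earlier chips each entry is a single ring group ID taken from
 * vnic->fw_grp_ids[]. bnxt_reta_query_op() below reverses the same mapping
 * through bnxt_rss_to_qid().
 */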
1828
1829 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
1830                               struct rte_eth_rss_reta_entry64 *reta_conf,
1831                               uint16_t reta_size)
1832 {
1833         struct bnxt *bp = eth_dev->data->dev_private;
1834         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1835         uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
1836         uint16_t idx, sft, i;
1837         int rc;
1838
1839         rc = is_bnxt_in_error(bp);
1840         if (rc)
1841                 return rc;
1842
1843         /* Retrieve from the default VNIC */
1844         if (!vnic)
1845                 return -EINVAL;
1846         if (!vnic->rss_table)
1847                 return -EINVAL;
1848
1849         if (reta_size != tbl_size) {
1850                 PMD_DRV_LOG(ERR, "The configured hash table lookup size "
1851                         "(%d) must equal the size supported by the hardware "
1852                         "(%d)\n", reta_size, tbl_size);
1853                 return -EINVAL;
1854         }
1855
1856         for (idx = 0, i = 0; i < reta_size; i++) {
1857                 idx = i / RTE_RETA_GROUP_SIZE;
1858                 sft = i % RTE_RETA_GROUP_SIZE;
1859
1860                 if (reta_conf[idx].mask & (1ULL << sft)) {
1861                         uint16_t qid;
1862
1863                         if (BNXT_CHIP_THOR(bp))
1864                                 qid = bnxt_rss_to_qid(bp,
1865                                                       vnic->rss_table[i * 2]);
1866                         else
1867                                 qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);
1868
1869                         if (qid == INVALID_HW_RING_ID) {
1870                                 PMD_DRV_LOG(ERR, "Invalid entry in RSS table.\n");
1871                                 return -EINVAL;
1872                         }
1873                         reta_conf[idx].reta[sft] = qid;
1874                 }
1875         }
1876
1877         return 0;
1878 }
1879
1880 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
1881                                    struct rte_eth_rss_conf *rss_conf)
1882 {
1883         struct bnxt *bp = eth_dev->data->dev_private;
1884         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1885         struct bnxt_vnic_info *vnic;
1886         int rc;
1887
1888         rc = is_bnxt_in_error(bp);
1889         if (rc)
1890                 return rc;
1891
1892         /*
1893          * If the requested RSS configuration conflicts with the RSS mode
1894          * selected at dev_configure time, return -EINVAL.
1895          */
1896         if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
1897                 if (!rss_conf->rss_hf)
1898                         PMD_DRV_LOG(ERR, "Hash type NONE\n");
1899         } else {
1900                 if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
1901                         return -EINVAL;
1902         }
1903
1904         bp->flags |= BNXT_FLAG_UPDATE_HASH;
1905         memcpy(&eth_dev->data->dev_conf.rx_adv_conf.rss_conf,
1906                rss_conf,
1907                sizeof(*rss_conf));
1908
1909         /* Update the default RSS VNIC(s) */
1910         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1911         vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
1912         vnic->hash_mode =
1913                 bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf,
1914                                             ETH_RSS_LEVEL(rss_conf->rss_hf));
1915
1916         /*
1917          * If hashkey is not specified, use the previously configured
1918          * hashkey
1919          */
1920         if (!rss_conf->rss_key)
1921                 goto rss_config;
1922
1923         if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) {
1924                 PMD_DRV_LOG(ERR, "Invalid hash key length, should be %d bytes\n",
1925                             HW_HASH_KEY_SIZE);
1926                 return -EINVAL;
1927         }
1928         memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len);
1929
1930 rss_config:
1931         bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1932         return 0;
1933 }
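
/*
 * Illustrative usage sketch (not part of the original sources), assuming a
 * port configured with mq_mode ETH_MQ_RX_RSS: select the hash fields at
 * run time and keep the previously programmed key by leaving rss_key unset.
 *
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = NULL,
 *		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
 *	};
 *
 *	rte_eth_dev_rss_hash_update(port_id, &conf);
 */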
1934
1935 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
1936                                      struct rte_eth_rss_conf *rss_conf)
1937 {
1938         struct bnxt *bp = eth_dev->data->dev_private;
1939         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1940         int len, rc;
1941         uint32_t hash_types;
1942
1943         rc = is_bnxt_in_error(bp);
1944         if (rc)
1945                 return rc;
1946
1947         /* RSS configuration is the same for all VNICs */
1948         if (vnic && vnic->rss_hash_key) {
1949                 if (rss_conf->rss_key) {
1950                         len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
1951                               rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
1952                         memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
1953                 }
1954
1955                 hash_types = vnic->hash_type;
1956                 rss_conf->rss_hf = 0;
1957                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
1958                         rss_conf->rss_hf |= ETH_RSS_IPV4;
1959                         hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
1960                 }
1961                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
1962                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1963                         hash_types &=
1964                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
1965                 }
1966                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
1967                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1968                         hash_types &=
1969                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
1970                 }
1971                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
1972                         rss_conf->rss_hf |= ETH_RSS_IPV6;
1973                         hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
1974                 }
1975                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
1976                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1977                         hash_types &=
1978                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
1979                 }
1980                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
1981                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1982                         hash_types &=
1983                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
1984                 }
1985
1986                 rss_conf->rss_hf |=
1987                         bnxt_hwrm_to_rte_rss_level(bp, vnic->hash_mode);
1988
1989                 if (hash_types) {
1990                         PMD_DRV_LOG(ERR,
1991                                 "Unknown RSS config from firmware (%08x), RSS disabled",
1992                                 vnic->hash_type);
1993                         return -ENOTSUP;
1994                 }
1995         } else {
1996                 rss_conf->rss_hf = 0;
1997         }
1998         return 0;
1999 }
2000
2001 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
2002                                struct rte_eth_fc_conf *fc_conf)
2003 {
2004         struct bnxt *bp = dev->data->dev_private;
2005         struct rte_eth_link link_info;
2006         int rc;
2007
2008         rc = is_bnxt_in_error(bp);
2009         if (rc)
2010                 return rc;
2011
2012         rc = bnxt_get_hwrm_link_config(bp, &link_info);
2013         if (rc)
2014                 return rc;
2015
2016         memset(fc_conf, 0, sizeof(*fc_conf));
2017         if (bp->link_info->auto_pause)
2018                 fc_conf->autoneg = 1;
2019         switch (bp->link_info->pause) {
2020         case 0:
2021                 fc_conf->mode = RTE_FC_NONE;
2022                 break;
2023         case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
2024                 fc_conf->mode = RTE_FC_TX_PAUSE;
2025                 break;
2026         case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
2027                 fc_conf->mode = RTE_FC_RX_PAUSE;
2028                 break;
2029         case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
2030                         HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
2031                 fc_conf->mode = RTE_FC_FULL;
2032                 break;
2033         }
2034         return 0;
2035 }
2036
2037 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
2038                                struct rte_eth_fc_conf *fc_conf)
2039 {
2040         struct bnxt *bp = dev->data->dev_private;
2041         int rc;
2042
2043         rc = is_bnxt_in_error(bp);
2044         if (rc)
2045                 return rc;
2046
2047         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
2048                 PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
2049                 return -ENOTSUP;
2050         }
2051
2052         switch (fc_conf->mode) {
2053         case RTE_FC_NONE:
2054                 bp->link_info->auto_pause = 0;
2055                 bp->link_info->force_pause = 0;
2056                 break;
2057         case RTE_FC_RX_PAUSE:
2058                 if (fc_conf->autoneg) {
2059                         bp->link_info->auto_pause =
2060                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
2061                         bp->link_info->force_pause = 0;
2062                 } else {
2063                         bp->link_info->auto_pause = 0;
2064                         bp->link_info->force_pause =
2065                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
2066                 }
2067                 break;
2068         case RTE_FC_TX_PAUSE:
2069                 if (fc_conf->autoneg) {
2070                         bp->link_info->auto_pause =
2071                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
2072                         bp->link_info->force_pause = 0;
2073                 } else {
2074                         bp->link_info->auto_pause = 0;
2075                         bp->link_info->force_pause =
2076                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
2077                 }
2078                 break;
2079         case RTE_FC_FULL:
2080                 if (fc_conf->autoneg) {
2081                         bp->link_info->auto_pause =
2082                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
2083                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
2084                         bp->link_info->force_pause = 0;
2085                 } else {
2086                         bp->link_info->auto_pause = 0;
2087                         bp->link_info->force_pause =
2088                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
2089                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
2090                 }
2091                 break;
2092         }
2093         return bnxt_set_hwrm_link_config(bp, true);
2094 }
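
/*
 * Illustrative usage sketch (not part of the original sources): only a
 * single-function PF may change the pause settings above. An application
 * would typically request symmetric flow control with autonegotiation via:
 *
 *	struct rte_eth_fc_conf fc_conf = {
 *		.mode = RTE_FC_FULL,
 *		.autoneg = 1,
 *	};
 *
 *	rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 */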
2095
2096 /* Add UDP tunneling port */
2097 static int
2098 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
2099                          struct rte_eth_udp_tunnel *udp_tunnel)
2100 {
2101         struct bnxt *bp = eth_dev->data->dev_private;
2102         uint16_t tunnel_type = 0;
2103         int rc = 0;
2104
2105         rc = is_bnxt_in_error(bp);
2106         if (rc)
2107                 return rc;
2108
2109         switch (udp_tunnel->prot_type) {
2110         case RTE_TUNNEL_TYPE_VXLAN:
2111                 if (bp->vxlan_port_cnt) {
2112                         PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
2113                                 udp_tunnel->udp_port);
2114                         if (bp->vxlan_port != udp_tunnel->udp_port) {
2115                                 PMD_DRV_LOG(ERR, "Only one port allowed\n");
2116                                 return -ENOSPC;
2117                         }
2118                         bp->vxlan_port_cnt++;
2119                         return 0;
2120                 }
2121                 tunnel_type =
2122                         HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
2123                 bp->vxlan_port_cnt++;
2124                 break;
2125         case RTE_TUNNEL_TYPE_GENEVE:
2126                 if (bp->geneve_port_cnt) {
2127                         PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
2128                                 udp_tunnel->udp_port);
2129                         if (bp->geneve_port != udp_tunnel->udp_port) {
2130                                 PMD_DRV_LOG(ERR, "Only one port allowed\n");
2131                                 return -ENOSPC;
2132                         }
2133                         bp->geneve_port_cnt++;
2134                         return 0;
2135                 }
2136                 tunnel_type =
2137                         HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
2138                 bp->geneve_port_cnt++;
2139                 break;
2140         default:
2141                 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
2142                 return -ENOTSUP;
2143         }
2144         rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
2145                                              tunnel_type);
2146         return rc;
2147 }
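
/*
 * Illustrative usage sketch (not part of the original sources): the driver
 * supports a single VXLAN and a single Geneve destination port at a time and
 * reference-counts repeated adds of the same port. For example, to program
 * the IANA-assigned VXLAN port:
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */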
2148
2149 static int
2150 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
2151                          struct rte_eth_udp_tunnel *udp_tunnel)
2152 {
2153         struct bnxt *bp = eth_dev->data->dev_private;
2154         uint16_t tunnel_type = 0;
2155         uint16_t port = 0;
2156         int rc = 0;
2157
2158         rc = is_bnxt_in_error(bp);
2159         if (rc)
2160                 return rc;
2161
2162         switch (udp_tunnel->prot_type) {
2163         case RTE_TUNNEL_TYPE_VXLAN:
2164                 if (!bp->vxlan_port_cnt) {
2165                         PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
2166                         return -EINVAL;
2167                 }
2168                 if (bp->vxlan_port != udp_tunnel->udp_port) {
2169                         PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
2170                                 udp_tunnel->udp_port, bp->vxlan_port);
2171                         return -EINVAL;
2172                 }
2173                 if (--bp->vxlan_port_cnt)
2174                         return 0;
2175
2176                 tunnel_type =
2177                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
2178                 port = bp->vxlan_fw_dst_port_id;
2179                 break;
2180         case RTE_TUNNEL_TYPE_GENEVE:
2181                 if (!bp->geneve_port_cnt) {
2182                         PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
2183                         return -EINVAL;
2184                 }
2185                 if (bp->geneve_port != udp_tunnel->udp_port) {
2186                         PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
2187                                 udp_tunnel->udp_port, bp->geneve_port);
2188                         return -EINVAL;
2189                 }
2190                 if (--bp->geneve_port_cnt)
2191                         return 0;
2192
2193                 tunnel_type =
2194                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
2195                 port = bp->geneve_fw_dst_port_id;
2196                 break;
2197         default:
2198                 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
2199                 return -ENOTSUP;
2200         }
2201
2202         rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
2203         return rc;
2204 }
2205
2206 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
2207 {
2208         struct bnxt_filter_info *filter;
2209         struct bnxt_vnic_info *vnic;
2210         int rc = 0;
2211         uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
2212
2213         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2214         filter = STAILQ_FIRST(&vnic->filter);
2215         while (filter) {
2216                 /* Search for this matching MAC+VLAN filter */
2217                 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) {
2218                         /* Delete the filter */
2219                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2220                         if (rc)
2221                                 return rc;
2222                         STAILQ_REMOVE(&vnic->filter, filter,
2223                                       bnxt_filter_info, next);
2224                         bnxt_free_filter(bp, filter);
2225                         PMD_DRV_LOG(INFO,
2226                                     "Deleted vlan filter for %d\n",
2227                                     vlan_id);
2228                         return 0;
2229                 }
2230                 filter = STAILQ_NEXT(filter, next);
2231         }
2232         return -ENOENT;
2233 }
2234
2235 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
2236 {
2237         struct bnxt_filter_info *filter;
2238         struct bnxt_vnic_info *vnic;
2239         int rc = 0;
2240         uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
2241                 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
2242         uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
2243
2244         /* Implementation notes on the use of VNIC in this command:
2245          *
2246          * By default, these filters belong to default vnic for the function.
2247          * Once these filters are set up, only destination VNIC can be modified.
2248          * If the destination VNIC is not specified in this command,
2249          * then the HWRM shall only create an l2 context id.
2250          */
2251
2252         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2253         filter = STAILQ_FIRST(&vnic->filter);
2254         /* Check if the VLAN has already been added */
2255         while (filter) {
2256                 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id))
2257                         return -EEXIST;
2258
2259                 filter = STAILQ_NEXT(filter, next);
2260         }
2261
2262         /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC
2263          * command to create MAC+VLAN filter with the right flags, enables set.
2264          */
2265         filter = bnxt_alloc_filter(bp);
2266         if (!filter) {
2267                 PMD_DRV_LOG(ERR,
2268                             "MAC/VLAN filter alloc failed\n");
2269                 return -ENOMEM;
2270         }
2271         /* MAC + VLAN ID filter */
2272         /* If l2_ivlan == 0 and l2_ivlan_mask != 0, only
2273          * untagged packets are received
2274          *
2275          * If l2_ivlan != 0 and l2_ivlan_mask != 0, both untagged
2276          * packets and packets tagged with the programmed VLAN are received
2277          */
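        /* Illustrative example (not part of the original sources): with
         * vlan_id = 100 the code below programs l2_ivlan = 100 and
         * l2_ivlan_mask = 0x0FFF, so the resulting filter accepts untagged
         * traffic plus traffic tagged with VLAN 100 on the default VNIC.
         */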
2278         filter->l2_ivlan = vlan_id;
2279         filter->l2_ivlan_mask = 0x0FFF;
2280         filter->enables |= en;
2281         filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
2282
2283         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
2284         if (rc) {
2285                 /* Free the newly allocated filter as we were
2286                  * not able to create the filter in hardware.
2287                  */
2288                 bnxt_free_filter(bp, filter);
2289                 return rc;
2290         }
2291
2292         filter->mac_index = 0;
2293         /* Add this new filter to the list */
2294         if (vlan_id == 0)
2295                 STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
2296         else
2297                 STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
2298
2299         PMD_DRV_LOG(INFO,
2300                     "Added Vlan filter for %d\n", vlan_id);
2301         return rc;
2302 }
2303
2304 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
2305                 uint16_t vlan_id, int on)
2306 {
2307         struct bnxt *bp = eth_dev->data->dev_private;
2308         int rc;
2309
2310         rc = is_bnxt_in_error(bp);
2311         if (rc)
2312                 return rc;
2313
2314         if (!eth_dev->data->dev_started) {
2315                 PMD_DRV_LOG(ERR, "port must be started before setting vlan\n");
2316                 return -EINVAL;
2317         }
2318
2319         /* These operations apply to ALL existing MAC/VLAN filters */
2320         if (on)
2321                 return bnxt_add_vlan_filter(bp, vlan_id);
2322         else
2323                 return bnxt_del_vlan_filter(bp, vlan_id);
2324 }
2325
2326 static int bnxt_del_dflt_mac_filter(struct bnxt *bp,
2327                                     struct bnxt_vnic_info *vnic)
2328 {
2329         struct bnxt_filter_info *filter;
2330         int rc;
2331
2332         filter = STAILQ_FIRST(&vnic->filter);
2333         while (filter) {
2334                 if (filter->mac_index == 0 &&
2335                     !memcmp(filter->l2_addr, bp->mac_addr,
2336                             RTE_ETHER_ADDR_LEN)) {
2337                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2338                         if (!rc) {
2339                                 STAILQ_REMOVE(&vnic->filter, filter,
2340                                               bnxt_filter_info, next);
2341                                 bnxt_free_filter(bp, filter);
2342                         }
2343                         return rc;
2344                 }
2345                 filter = STAILQ_NEXT(filter, next);
2346         }
2347         return 0;
2348 }
2349
2350 static int
2351 bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
2352 {
2353         struct bnxt_vnic_info *vnic;
2354         unsigned int i;
2355         int rc;
2356
2357         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2358         if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
2359                 /* Remove any VLAN filters programmed */
2360                 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
2361                         bnxt_del_vlan_filter(bp, i);
2362
2363                 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0);
2364                 if (rc)
2365                         return rc;
2366         } else {
2367                 /* The default filter allows packets that match the
2368                  * destination MAC. It has to be deleted, otherwise we
2369                  * will end up receiving VLAN packets for which no
2370                  * filter is programmed when the hw-vlan-filter
2371                  * configuration is ON.
2372                  */
2373                 bnxt_del_dflt_mac_filter(bp, vnic);
2374                 /* This filter will allow only untagged packets */
2375                 bnxt_add_vlan_filter(bp, 0);
2376         }
2377         PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
2378                     !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
2379
2380         return 0;
2381 }
2382
2383 static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id)
2384 {
2385         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2386         unsigned int i;
2387         int rc;
2388
2389         /* Destroy vnic filters and vnic */
2390         if (bp->eth_dev->data->dev_conf.rxmode.offloads &
2391             DEV_RX_OFFLOAD_VLAN_FILTER) {
2392                 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
2393                         bnxt_del_vlan_filter(bp, i);
2394         }
2395         bnxt_del_dflt_mac_filter(bp, vnic);
2396
2397         rc = bnxt_hwrm_vnic_free(bp, vnic);
2398         if (rc)
2399                 return rc;
2400
2401         rte_free(vnic->fw_grp_ids);
2402         vnic->fw_grp_ids = NULL;
2403
2404         vnic->rx_queue_cnt = 0;
2405
2406         return 0;
2407 }
2408
2409 static int
2410 bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
2411 {
2412         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
2413         int rc;
2414
2415         /* Destroy, recreate and reconfigure the default vnic */
2416         rc = bnxt_free_one_vnic(bp, 0);
2417         if (rc)
2418                 return rc;
2419
2420         /* default vnic 0 */
2421         rc = bnxt_setup_one_vnic(bp, 0);
2422         if (rc)
2423                 return rc;
2424
2425         if (bp->eth_dev->data->dev_conf.rxmode.offloads &
2426             DEV_RX_OFFLOAD_VLAN_FILTER) {
2427                 rc = bnxt_add_vlan_filter(bp, 0);
2428                 if (rc)
2429                         return rc;
2430                 rc = bnxt_restore_vlan_filters(bp);
2431                 if (rc)
2432                         return rc;
2433         } else {
2434                 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0);
2435                 if (rc)
2436                         return rc;
2437         }
2438
2439         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2440         if (rc)
2441                 return rc;
2442
2443         PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
2444                     !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
2445
2446         return rc;
2447 }
2448
2449 static int
2450 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
2451 {
2452         uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
2453         struct bnxt *bp = dev->data->dev_private;
2454         int rc;
2455
2456         rc = is_bnxt_in_error(bp);
2457         if (rc)
2458                 return rc;
2459
2460         /* Filter settings will get applied when port is started */
2461         if (!dev->data->dev_started)
2462                 return 0;
2463
2464         if (mask & ETH_VLAN_FILTER_MASK) {
2465                 /* Enable or disable VLAN filtering */
2466                 rc = bnxt_config_vlan_hw_filter(bp, rx_offloads);
2467                 if (rc)
2468                         return rc;
2469         }
2470
2471         if (mask & ETH_VLAN_STRIP_MASK) {
2472                 /* Enable or disable VLAN stripping */
2473                 rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads);
2474                 if (rc)
2475                         return rc;
2476         }
2477
2478         if (mask & ETH_VLAN_EXTEND_MASK) {
2479                 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2480                         PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n");
2481                 else
2482                         PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n");
2483         }
2484
2485         return 0;
2486 }
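
/*
 * Illustrative usage sketch (not part of the original sources): the mask
 * passed in selects which VLAN offloads to (re)apply from the current
 * rxmode.offloads. An application would typically toggle hardware VLAN
 * filtering at run time with:
 *
 *	int cur = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	rte_eth_dev_set_vlan_offload(port_id, cur | ETH_VLAN_FILTER_OFFLOAD);
 */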
2487
2488 static int
2489 bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
2490                       uint16_t tpid)
2491 {
2492         struct bnxt *bp = dev->data->dev_private;
2493         int qinq = dev->data->dev_conf.rxmode.offloads &
2494                    DEV_RX_OFFLOAD_VLAN_EXTEND;
2495
2496         if (vlan_type != ETH_VLAN_TYPE_INNER &&
2497             vlan_type != ETH_VLAN_TYPE_OUTER) {
2498                 PMD_DRV_LOG(ERR,
2499                             "Unsupported vlan type.\n");
2500                 return -EINVAL;
2501         }
2502         if (!qinq) {
2503                 PMD_DRV_LOG(ERR,
2504                             "QinQ not enabled. It must be enabled since only "
2505                             "the outer VLAN can be accelerated\n");
2506                 return -EINVAL;
2507         }
2508
2509         if (vlan_type == ETH_VLAN_TYPE_OUTER) {
2510                 switch (tpid) {
2511                 case RTE_ETHER_TYPE_QINQ:
2512                         bp->outer_tpid_bd =
2513                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8;
2514                                 break;
2515                 case RTE_ETHER_TYPE_VLAN:
2516                         bp->outer_tpid_bd =
2517                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
2518                                 break;
2519                 case RTE_ETHER_TYPE_QINQ1:
2520                         bp->outer_tpid_bd =
2521                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100;
2522                                 break;
2523                 case RTE_ETHER_TYPE_QINQ2:
2524                         bp->outer_tpid_bd =
2525                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200;
2526                                 break;
2527                 case RTE_ETHER_TYPE_QINQ3:
2528                         bp->outer_tpid_bd =
2529                                  TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300;
2530                                 break;
2531                 default:
2532                         PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid);
2533                         return -EINVAL;
2534                 }
2535                 bp->outer_tpid_bd |= tpid;
2536                 PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd);
2537         } else if (vlan_type == ETH_VLAN_TYPE_INNER) {
2538                 PMD_DRV_LOG(ERR,
2539                             "Can accelerate only outer vlan in QinQ\n");
2540                 return -EINVAL;
2541         }
2542
2543         return 0;
2544 }
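
/*
 * Illustrative usage sketch (not part of the original sources): changing the
 * outer TPID requires DEV_RX_OFFLOAD_VLAN_EXTEND (QinQ) to be enabled in the
 * port configuration. To use 802.1ad (0x88a8) outer tags:
 *
 *	rte_eth_dev_set_vlan_ether_type(port_id, ETH_VLAN_TYPE_OUTER,
 *					RTE_ETHER_TYPE_QINQ);
 */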
2545
2546 static int
2547 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
2548                              struct rte_ether_addr *addr)
2549 {
2550         struct bnxt *bp = dev->data->dev_private;
2551         /* Default Filter is tied to VNIC 0 */
2552         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
2553         int rc;
2554
2555         rc = is_bnxt_in_error(bp);
2556         if (rc)
2557                 return rc;
2558
2559         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
2560                 return -EPERM;
2561
2562         if (rte_is_zero_ether_addr(addr))
2563                 return -EINVAL;
2564
2565         /* Filter settings will get applied when port is started */
2566         if (!dev->data->dev_started)
2567                 return 0;
2568
2569         /* Check if the requested MAC is already added */
2570         if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0)
2571                 return 0;
2572
2573         /* Destroy filter and re-create it */
2574         bnxt_del_dflt_mac_filter(bp, vnic);
2575
2576         memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN);
2577         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
2578                 /* This filter will allow only untagged packets */
2579                 rc = bnxt_add_vlan_filter(bp, 0);
2580         } else {
2581                 rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0);
2582         }
2583
2584         PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
2585         return rc;
2586 }
2587
2588 static int
2589 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
2590                           struct rte_ether_addr *mc_addr_set,
2591                           uint32_t nb_mc_addr)
2592 {
2593         struct bnxt *bp = eth_dev->data->dev_private;
2594         char *mc_addr_list = (char *)mc_addr_set;
2595         struct bnxt_vnic_info *vnic;
2596         uint32_t off = 0, i = 0;
2597         int rc;
2598
2599         rc = is_bnxt_in_error(bp);
2600         if (rc)
2601                 return rc;
2602
2603         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2604
2605         if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
2606                 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
2607                 goto allmulti;
2608         }
2609
2610         /* TODO Check for Duplicate mcast addresses */
2611         vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
2612         for (i = 0; i < nb_mc_addr; i++) {
2613                 memcpy(vnic->mc_list + off, mc_addr_list + off,
2614                         RTE_ETHER_ADDR_LEN);
2615                 off += RTE_ETHER_ADDR_LEN;
2616         }
2617
2618         vnic->mc_addr_cnt = i;
2619         if (vnic->mc_addr_cnt)
2620                 vnic->flags |= BNXT_VNIC_INFO_MCAST;
2621         else
2622                 vnic->flags &= ~BNXT_VNIC_INFO_MCAST;
2623
2624 allmulti:
2625         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2626 }
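
/*
 * Illustrative usage sketch (not part of the original sources): requesting
 * more than BNXT_MAX_MC_ADDRS entries switches the default VNIC to
 * all-multicast instead of programming an exact list. A typical call to
 * receive the all-hosts group:
 *
 *	struct rte_ether_addr mc[] = {
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
 *	};
 *
 *	rte_eth_dev_set_mc_addr_list(port_id, mc, RTE_DIM(mc));
 */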
2627
2628 static int
2629 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2630 {
2631         struct bnxt *bp = dev->data->dev_private;
2632         uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
2633         uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
2634         uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
2635         uint8_t fw_rsvd = bp->fw_ver & 0xff;
2636         int ret;
2637
2638         ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d",
2639                         fw_major, fw_minor, fw_updt, fw_rsvd);
2640
2641         ret += 1; /* add the size of '\0' */
2642         if (fw_size < (uint32_t)ret)
2643                 return ret;
2644         else
2645                 return 0;
2646 }
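
/*
 * Illustrative example (not part of the original sources): bp->fw_ver packs
 * the firmware version as major << 24 | minor << 16 | update << 8 | reserved,
 * so a value of 0x14010203 is reported as "20.1.2.3". The value returned on
 * a short buffer includes the terminating '\0' (9 in this example), letting
 * callers retry with a larger buffer per the ethdev fw_version_get convention.
 */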
2647
2648 static void
2649 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
2650         struct rte_eth_rxq_info *qinfo)
2651 {
2652         struct bnxt *bp = dev->data->dev_private;
2653         struct bnxt_rx_queue *rxq;
2654
2655         if (is_bnxt_in_error(bp))
2656                 return;
2657
2658         rxq = dev->data->rx_queues[queue_id];
2659
2660         qinfo->mp = rxq->mb_pool;
2661         qinfo->scattered_rx = dev->data->scattered_rx;
2662         qinfo->nb_desc = rxq->nb_rx_desc;
2663
2664         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2665         qinfo->conf.rx_drop_en = rxq->drop_en;
2666         qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
2667         qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
2668 }
2669
2670 static void
2671 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
2672         struct rte_eth_txq_info *qinfo)
2673 {
2674         struct bnxt *bp = dev->data->dev_private;
2675         struct bnxt_tx_queue *txq;
2676
2677         if (is_bnxt_in_error(bp))
2678                 return;
2679
2680         txq = dev->data->tx_queues[queue_id];
2681
2682         qinfo->nb_desc = txq->nb_tx_desc;
2683
2684         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2685         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2686         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2687
2688         qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
2689         qinfo->conf.tx_rs_thresh = 0;
2690         qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
2691         qinfo->conf.offloads = txq->offloads;
2692 }
2693
2694 static const struct {
2695         eth_rx_burst_t pkt_burst;
2696         const char *info;
2697 } bnxt_rx_burst_info[] = {
2698         {bnxt_recv_pkts,        "Scalar"},
2699 #if defined(RTE_ARCH_X86)
2700         {bnxt_recv_pkts_vec,    "Vector SSE"},
2701 #elif defined(RTE_ARCH_ARM64)
2702         {bnxt_recv_pkts_vec,    "Vector Neon"},
2703 #endif
2704 };
2705
2706 static int
2707 bnxt_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
2708                        struct rte_eth_burst_mode *mode)
2709 {
2710         eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
2711         size_t i;
2712
2713         for (i = 0; i < RTE_DIM(bnxt_rx_burst_info); i++) {
2714                 if (pkt_burst == bnxt_rx_burst_info[i].pkt_burst) {
2715                         snprintf(mode->info, sizeof(mode->info), "%s",
2716                                  bnxt_rx_burst_info[i].info);
2717                         return 0;
2718                 }
2719         }
2720
2721         return -EINVAL;
2722 }
2723
2724 static const struct {
2725         eth_tx_burst_t pkt_burst;
2726         const char *info;
2727 } bnxt_tx_burst_info[] = {
2728         {bnxt_xmit_pkts,        "Scalar"},
2729 #if defined(RTE_ARCH_X86)
2730         {bnxt_xmit_pkts_vec,    "Vector SSE"},
2731 #elif defined(RTE_ARCH_ARM64)
2732         {bnxt_xmit_pkts_vec,    "Vector Neon"},
2733 #endif
2734 };
2735
2736 static int
2737 bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
2738                        struct rte_eth_burst_mode *mode)
2739 {
2740         eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
2741         size_t i;
2742
2743         for (i = 0; i < RTE_DIM(bnxt_tx_burst_info); i++) {
2744                 if (pkt_burst == bnxt_tx_burst_info[i].pkt_burst) {
2745                         snprintf(mode->info, sizeof(mode->info), "%s",
2746                                  bnxt_tx_burst_info[i].info);
2747                         return 0;
2748                 }
2749         }
2750
2751         return -EINVAL;
2752 }
2753
2754 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
2755 {
2756         struct bnxt *bp = eth_dev->data->dev_private;
2757         uint32_t new_pkt_size;
2758         int rc = 0;
2759         uint32_t i;
2760
2761         rc = is_bnxt_in_error(bp);
2762         if (rc)
2763                 return rc;
2764
2765         /* Exit if receive queues are not configured yet */
2766         if (!eth_dev->data->nb_rx_queues)
2767                 return rc;
2768
2769         new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
2770                        VLAN_TAG_SIZE * BNXT_NUM_VLANS;
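        /* Illustrative example (not part of the original sources), assuming
         * BNXT_NUM_VLANS is 2 and VLAN_TAG_SIZE is 4: a 1500 byte MTU gives
         * new_pkt_size = 1500 + 14 (Ethernet header) + 4 (CRC) + 8 (two VLAN
         * tags) = 1526 bytes.
         */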
2771
2772         /*
2773          * Disallow any MTU change that would require scattered receive support
2774          * if it is not already enabled.
2775          */
2776         if (eth_dev->data->dev_started &&
2777             !eth_dev->data->scattered_rx &&
2778             (new_pkt_size >
2779              eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2780                 PMD_DRV_LOG(ERR,
2781                             "MTU change would require scattered rx support.\n");
2782                 PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n");
2783                 return -EINVAL;
2784         }
2785
2786         if (new_mtu > RTE_ETHER_MTU) {
2787                 bp->flags |= BNXT_FLAG_JUMBO;
2788                 bp->eth_dev->data->dev_conf.rxmode.offloads |=
2789                         DEV_RX_OFFLOAD_JUMBO_FRAME;
2790         } else {
2791                 bp->eth_dev->data->dev_conf.rxmode.offloads &=
2792                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
2793                 bp->flags &= ~BNXT_FLAG_JUMBO;
2794         }
2795
2796         /* Is there a change in mtu setting? */
2797         if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len == new_pkt_size)
2798                 return rc;
2799
2800         for (i = 0; i < bp->nr_vnics; i++) {
2801                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2802                 uint16_t size = 0;
2803
2804                 vnic->mru = BNXT_VNIC_MRU(new_mtu);
2805                 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
2806                 if (rc)
2807                         break;
2808
2809                 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2810                 size -= RTE_PKTMBUF_HEADROOM;
2811
2812                 if (size < new_mtu) {
2813                         rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
2814                         if (rc)
2815                                 return rc;
2816                 }
2817         }
2818
2819         if (!rc)
2820                 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;
2821
2822         PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu);
2823
2824         return rc;
2825 }
2826
2827 static int
2828 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
2829 {
2830         struct bnxt *bp = dev->data->dev_private;
2831         uint16_t vlan = bp->vlan;
2832         int rc;
2833
2834         rc = is_bnxt_in_error(bp);
2835         if (rc)
2836                 return rc;
2837
2838         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
2839                 PMD_DRV_LOG(ERR,
2840                         "PVID cannot be modified for this function\n");
2841                 return -ENOTSUP;
2842         }
2843         bp->vlan = on ? pvid : 0;
2844
2845         rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
2846         if (rc)
2847                 bp->vlan = vlan;
2848         return rc;
2849 }
2850
2851 static int
2852 bnxt_dev_led_on_op(struct rte_eth_dev *dev)
2853 {
2854         struct bnxt *bp = dev->data->dev_private;
2855         int rc;
2856
2857         rc = is_bnxt_in_error(bp);
2858         if (rc)
2859                 return rc;
2860
2861         return bnxt_hwrm_port_led_cfg(bp, true);
2862 }
2863
2864 static int
2865 bnxt_dev_led_off_op(struct rte_eth_dev *dev)
2866 {
2867         struct bnxt *bp = dev->data->dev_private;
2868         int rc;
2869
2870         rc = is_bnxt_in_error(bp);
2871         if (rc)
2872                 return rc;
2873
2874         return bnxt_hwrm_port_led_cfg(bp, false);
2875 }
2876
2877 static uint32_t
2878 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2879 {
2880         struct bnxt *bp = dev->data->dev_private;
2881         uint32_t desc = 0, raw_cons = 0, cons;
2882         struct bnxt_cp_ring_info *cpr;
2883         struct bnxt_rx_queue *rxq;
2884         struct rx_pkt_cmpl *rxcmp;
2885         int rc;
2886
2887         rc = is_bnxt_in_error(bp);
2888         if (rc)
2889                 return rc;
2890
2891         rxq = dev->data->rx_queues[rx_queue_id];
2892         cpr = rxq->cp_ring;
2893         raw_cons = cpr->cp_raw_cons;
2894
2895         while (1) {
2896                 cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
2897                 rte_prefetch0(&cpr->cp_desc_ring[cons]);
2898                 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2899
2900                 if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct)) {
2901                         break;
2902                 } else {
2903                         raw_cons++;
2904                         desc++;
2905                 }
2906         }
2907
2908         return desc;
2909 }
2910
2911 static int
2912 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
2913 {
2914         struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
2915         struct bnxt_rx_ring_info *rxr;
2916         struct bnxt_cp_ring_info *cpr;
2917         struct rte_mbuf *rx_buf;
2918         struct rx_pkt_cmpl *rxcmp;
2919         uint32_t cons, cp_cons;
2920         int rc;
2921
2922         if (!rxq)
2923                 return -EINVAL;
2924
2925         rc = is_bnxt_in_error(rxq->bp);
2926         if (rc)
2927                 return rc;
2928
2929         cpr = rxq->cp_ring;
2930         rxr = rxq->rx_ring;
2931
2932         if (offset >= rxq->nb_rx_desc)
2933                 return -EINVAL;
2934
2935         cons = RING_CMP(cpr->cp_ring_struct, offset);
2936         cp_cons = cpr->cp_raw_cons;
2937         rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2938
2939         if (cons > cp_cons) {
2940                 if (CMPL_VALID(rxcmp, cpr->valid))
2941                         return RTE_ETH_RX_DESC_DONE;
2942         } else {
2943                 if (CMPL_VALID(rxcmp, !cpr->valid))
2944                         return RTE_ETH_RX_DESC_DONE;
2945         }
2946         rx_buf = rxr->rx_buf_ring[cons];
2947         if (rx_buf == NULL || rx_buf == &rxq->fake_mbuf)
2948                 return RTE_ETH_RX_DESC_UNAVAIL;
2949
2950
2951         return RTE_ETH_RX_DESC_AVAIL;
2952 }
2953
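/*
 * tx_descriptor_status: mirrors the Rx variant.  If the completion entry for
 * the requested offset is still valid relative to the current phase, the
 * slot is reported UNAVAIL; otherwise a slot whose mbuf has already been
 * released is DONE and a slot still holding an mbuf is FULL.
 */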
2954 static int
2955 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
2956 {
2957         struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
2958         struct bnxt_tx_ring_info *txr;
2959         struct bnxt_cp_ring_info *cpr;
2960         struct bnxt_sw_tx_bd *tx_buf;
2961         struct tx_pkt_cmpl *txcmp;
2962         uint32_t cons, cp_cons;
2963         int rc;
2964
2965         if (!txq)
2966                 return -EINVAL;
2967
2968         rc = is_bnxt_in_error(txq->bp);
2969         if (rc)
2970                 return rc;
2971
2972         cpr = txq->cp_ring;
2973         txr = txq->tx_ring;
2974
2975         if (offset >= txq->nb_tx_desc)
2976                 return -EINVAL;
2977
2978         cons = RING_CMP(cpr->cp_ring_struct, offset);
2979         txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2980         cp_cons = cpr->cp_raw_cons;
2981
2982         if (cons > cp_cons) {
2983                 if (CMPL_VALID(txcmp, cpr->valid))
2984                         return RTE_ETH_TX_DESC_UNAVAIL;
2985         } else {
2986                 if (CMPL_VALID(txcmp, !cpr->valid))
2987                         return RTE_ETH_TX_DESC_UNAVAIL;
2988         }
2989         tx_buf = &txr->tx_buf_ring[cons];
2990         if (tx_buf->mbuf == NULL)
2991                 return RTE_ETH_TX_DESC_DONE;
2992
2993         return RTE_ETH_TX_DESC_FULL;
2994 }
2995
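/*
 * filter_ctrl only services the generic GET operation, which is how the
 * ethdev layer retrieves the PMD's rte_flow ops table.  Illustrative,
 * application-side sketch of the call this entry point answers (hypothetical
 * usage; normally issued through the rte_flow API rather than directly):
 *
 *	const struct rte_flow_ops *ops = NULL;
 *
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *				RTE_ETH_FILTER_GET, &ops);
 *
 * TruFlow-enabled adapters get the ULP flow ops; all others get the legacy
 * bnxt_flow_ops table.
 */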
2996 int
2997 bnxt_filter_ctrl_op(struct rte_eth_dev *dev,
2998                     enum rte_filter_type filter_type,
2999                     enum rte_filter_op filter_op, void *arg)
3000 {
3001         struct bnxt *bp = dev->data->dev_private;
3002         int ret = 0;
3003
3004         if (!bp)
3005                 return -EIO;
3006
3007         if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
3008                 struct bnxt_representor *vfr = dev->data->dev_private;
3009                 bp = vfr->parent_dev->data->dev_private;
3010                 /* The parent may have been deleted while representors remain */
3011                 if (!bp) {
3012                         PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR Error %d:%d\n",
3013                                     dev->data->port_id,
3014                                     filter_type,
3015                                     filter_op);
3016                         return -EIO;
3017                 }
3018         }
3019
3020         ret = is_bnxt_in_error(bp);
3021         if (ret)
3022                 return ret;
3023
3024         switch (filter_type) {
3025         case RTE_ETH_FILTER_GENERIC:
3026                 if (filter_op != RTE_ETH_FILTER_GET)
3027                         return -EINVAL;
3028
3029                 /* PMD supports thread-safe flow operations, so rte_flow API
3030                  * callers can skip taking a mutex for multi-thread safety.
3031                  */
3032                 dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
3033
3034                 if (BNXT_TRUFLOW_EN(bp))
3035                         *(const void **)arg = &bnxt_ulp_rte_flow_ops;
3036                 else
3037                         *(const void **)arg = &bnxt_flow_ops;
3038                 break;
3039         default:
3040                 PMD_DRV_LOG(ERR,
3041                         "Filter type (%d) not supported\n", filter_type);
3042                 ret = -EINVAL;
3043                 break;
3044         }
3045         return ret;
3046 }
3047
3048 static const uint32_t *
3049 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
3050 {
3051         static const uint32_t ptypes[] = {
3052                 RTE_PTYPE_L2_ETHER_VLAN,
3053                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
3054                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
3055                 RTE_PTYPE_L4_ICMP,
3056                 RTE_PTYPE_L4_TCP,
3057                 RTE_PTYPE_L4_UDP,
3058                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
3059                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
3060                 RTE_PTYPE_INNER_L4_ICMP,
3061                 RTE_PTYPE_INNER_L4_TCP,
3062                 RTE_PTYPE_INNER_L4_UDP,
3063                 RTE_PTYPE_UNKNOWN
3064         };
3065
3066         if (!dev->rx_pkt_burst)
3067                 return NULL;
3068
3069         return ptypes;
3070 }
3071
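/*
 * bnxt_map_regs: all registers in reg_arr must fall within the same 4KB
 * page; the common page base is then programmed into the requested GRC
 * window so the individual registers can be accessed through that window.
 */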
3072 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
3073                          int reg_win)
3074 {
3075         uint32_t reg_base = *reg_arr & 0xfffff000;
3076         uint32_t win_off;
3077         int i;
3078
3079         for (i = 0; i < count; i++) {
3080                 if ((reg_arr[i] & 0xfffff000) != reg_base)
3081                         return -ERANGE;
3082         }
3083         win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
3084         rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off);
3085         return 0;
3086 }
3087
3088 static int bnxt_map_ptp_regs(struct bnxt *bp)
3089 {
3090         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3091         uint32_t *reg_arr;
3092         int rc, i;
3093
3094         reg_arr = ptp->rx_regs;
3095         rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5);
3096         if (rc)
3097                 return rc;
3098
3099         reg_arr = ptp->tx_regs;
3100         rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6);
3101         if (rc)
3102                 return rc;
3103
3104         for (i = 0; i < BNXT_PTP_RX_REGS; i++)
3105                 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff);
3106
3107         for (i = 0; i < BNXT_PTP_TX_REGS; i++)
3108                 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff);
3109
3110         return 0;
3111 }
3112
3113 static void bnxt_unmap_ptp_regs(struct bnxt *bp)
3114 {
3115         rte_write32(0, (uint8_t *)bp->bar0 +
3116                          BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16);
3117         rte_write32(0, (uint8_t *)bp->bar0 +
3118                          BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20);
3119 }
3120
3121 static uint64_t bnxt_cc_read(struct bnxt *bp)
3122 {
3123         uint64_t ns;
3124
3125         ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3126                               BNXT_GRCPF_REG_SYNC_TIME));
3127         ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3128                                           BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32;
3129         return ns;
3130 }
3131
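/*
 * bnxt_get_tx_ts: check that the Tx timestamp FIFO is not empty, then read
 * the low and high 32-bit halves through the mapped PTP registers and
 * assemble the 64-bit timestamp.
 */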
3132 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts)
3133 {
3134         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3135         uint32_t fifo;
3136
3137         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3138                                 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
3139         if (fifo & BNXT_PTP_TX_FIFO_EMPTY)
3140                 return -EAGAIN;
3141
3142         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3143                                 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
3144         *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3145                                 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L]));
3146         *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3147                                 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32;
3148
3149         return 0;
3150 }
3151
3152 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
3153 {
3154         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3155         struct bnxt_pf_info *pf = bp->pf;
3156         uint16_t port_id;
3157         uint32_t fifo;
3158
3159         if (!ptp)
3160                 return -ENODEV;
3161
3162         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3163                                 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
3164         if (!(fifo & BNXT_PTP_RX_FIFO_PENDING))
3165                 return -EAGAIN;
3166
3167         port_id = pf->port_id;
3168         rte_write32(1 << port_id, (uint8_t *)bp->bar0 +
3169                ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]);
3170
3171         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3172                                    ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
3173         if (fifo & BNXT_PTP_RX_FIFO_PENDING) {
3174 /*              bnxt_clr_rx_ts(bp);       TBD  */
3175                 return -EBUSY;
3176         }
3177
3178         *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3179                                 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]));
3180         *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3181                                 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32;
3182
3183         return 0;
3184 }
3185
3186 static int
3187 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
3188 {
3189         uint64_t ns;
3190         struct bnxt *bp = dev->data->dev_private;
3191         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3192
3193         if (!ptp)
3194                 return 0;
3195
3196         ns = rte_timespec_to_ns(ts);
3197         /* Set the timecounter to the new value. */
3198         ptp->tc.nsec = ns;
3199
3200         return 0;
3201 }
3202
3203 static int
3204 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
3205 {
3206         struct bnxt *bp = dev->data->dev_private;
3207         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3208         uint64_t ns, systime_cycles = 0;
3209         int rc = 0;
3210
3211         if (!ptp)
3212                 return 0;
3213
3214         if (BNXT_CHIP_THOR(bp))
3215                 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
3216                                              &systime_cycles);
3217         else
3218                 systime_cycles = bnxt_cc_read(bp);
3219
3220         ns = rte_timecounter_update(&ptp->tc, systime_cycles);
3221         *ts = rte_ns_to_timespec(ns);
3222
3223         return rc;
3224 }
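
/*
 * Timesync uses three rte_timecounter instances (free-running, Rx and Tx
 * timestamps).  With cc_shift fixed at 0 the nsec_mask below is 0, so cycle
 * deltas read from the HW (or queried over HWRM on Thor) are accumulated by
 * rte_timecounter_update() as nanoseconds without scaling.
 */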
3225 static int
3226 bnxt_timesync_enable(struct rte_eth_dev *dev)
3227 {
3228         struct bnxt *bp = dev->data->dev_private;
3229         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3230         uint32_t shift = 0;
3231         int rc;
3232
3233         if (!ptp)
3234                 return 0;
3235
3236         ptp->rx_filter = 1;
3237         ptp->tx_tstamp_en = 1;
3238         ptp->rxctl = BNXT_PTP_MSG_EVENTS;
3239
3240         rc = bnxt_hwrm_ptp_cfg(bp);
3241         if (rc)
3242                 return rc;
3243
3244         memset(&ptp->tc, 0, sizeof(struct rte_timecounter));
3245         memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
3246         memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
3247
3248         ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3249         ptp->tc.cc_shift = shift;
3250         ptp->tc.nsec_mask = (1ULL << shift) - 1;
3251
3252         ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3253         ptp->rx_tstamp_tc.cc_shift = shift;
3254         ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
3255
3256         ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3257         ptp->tx_tstamp_tc.cc_shift = shift;
3258         ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
3259
3260         if (!BNXT_CHIP_THOR(bp))
3261                 bnxt_map_ptp_regs(bp);
3262
3263         return 0;
3264 }
3265
3266 static int
3267 bnxt_timesync_disable(struct rte_eth_dev *dev)
3268 {
3269         struct bnxt *bp = dev->data->dev_private;
3270         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3271
3272         if (!ptp)
3273                 return 0;
3274
3275         ptp->rx_filter = 0;
3276         ptp->tx_tstamp_en = 0;
3277         ptp->rxctl = 0;
3278
3279         bnxt_hwrm_ptp_cfg(bp);
3280
3281         if (!BNXT_CHIP_THOR(bp))
3282                 bnxt_unmap_ptp_regs(bp);
3283
3284         return 0;
3285 }
3286
3287 static int
3288 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
3289                                  struct timespec *timestamp,
3290                                  uint32_t flags __rte_unused)
3291 {
3292         struct bnxt *bp = dev->data->dev_private;
3293         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3294         uint64_t rx_tstamp_cycles = 0;
3295         uint64_t ns;
3296
3297         if (!ptp)
3298                 return 0;
3299
3300         if (BNXT_CHIP_THOR(bp))
3301                 rx_tstamp_cycles = ptp->rx_timestamp;
3302         else
3303                 bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
3304
3305         ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles);
3306         *timestamp = rte_ns_to_timespec(ns);
3307         return  0;
3308 }
3309
3310 static int
3311 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
3312                                  struct timespec *timestamp)
3313 {
3314         struct bnxt *bp = dev->data->dev_private;
3315         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3316         uint64_t tx_tstamp_cycles = 0;
3317         uint64_t ns;
3318         int rc = 0;
3319
3320         if (!ptp)
3321                 return 0;
3322
3323         if (BNXT_CHIP_THOR(bp))
3324                 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX,
3325                                              &tx_tstamp_cycles);
3326         else
3327                 rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles);
3328
3329         ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles);
3330         *timestamp = rte_ns_to_timespec(ns);
3331
3332         return rc;
3333 }
3334
3335 static int
3336 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
3337 {
3338         struct bnxt *bp = dev->data->dev_private;
3339         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3340
3341         if (!ptp)
3342                 return 0;
3343
3344         ptp->tc.nsec += delta;
3345
3346         return 0;
3347 }
3348
3349 static int
3350 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
3351 {
3352         struct bnxt *bp = dev->data->dev_private;
3353         int rc;
3354         uint32_t dir_entries;
3355         uint32_t entry_length;
3356
3357         rc = is_bnxt_in_error(bp);
3358         if (rc)
3359                 return rc;
3360
3361         PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n",
3362                     bp->pdev->addr.domain, bp->pdev->addr.bus,
3363                     bp->pdev->addr.devid, bp->pdev->addr.function);
3364
3365         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3366         if (rc != 0)
3367                 return rc;
3368
3369         return dir_entries * entry_length;
3370 }
3371
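/*
 * get_eeprom: in_eeprom->offset encodes the request.  An offset of 0 returns
 * the NVM directory listing; otherwise the 1-based directory index is in the
 * top 8 bits and the byte offset within that item in the low 24 bits.
 */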
3372 static int
3373 bnxt_get_eeprom_op(struct rte_eth_dev *dev,
3374                 struct rte_dev_eeprom_info *in_eeprom)
3375 {
3376         struct bnxt *bp = dev->data->dev_private;
3377         uint32_t index;
3378         uint32_t offset;
3379         int rc;
3380
3381         rc = is_bnxt_in_error(bp);
3382         if (rc)
3383                 return rc;
3384
3385         PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n",
3386                     bp->pdev->addr.domain, bp->pdev->addr.bus,
3387                     bp->pdev->addr.devid, bp->pdev->addr.function,
3388                     in_eeprom->offset, in_eeprom->length);
3389
3390         if (in_eeprom->offset == 0) /* special offset value to get directory */
3391                 return bnxt_get_nvram_directory(bp, in_eeprom->length,
3392                                                 in_eeprom->data);
3393
3394         index = in_eeprom->offset >> 24;
3395         offset = in_eeprom->offset & 0xffffff;
3396
3397         if (index != 0)
3398                 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset,
3399                                            in_eeprom->length, in_eeprom->data);
3400
3401         return 0;
3402 }
3403
3404 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
3405 {
3406         switch (dir_type) {
3407         case BNX_DIR_TYPE_CHIMP_PATCH:
3408         case BNX_DIR_TYPE_BOOTCODE:
3409         case BNX_DIR_TYPE_BOOTCODE_2:
3410         case BNX_DIR_TYPE_APE_FW:
3411         case BNX_DIR_TYPE_APE_PATCH:
3412         case BNX_DIR_TYPE_KONG_FW:
3413         case BNX_DIR_TYPE_KONG_PATCH:
3414         case BNX_DIR_TYPE_BONO_FW:
3415         case BNX_DIR_TYPE_BONO_PATCH:
3416                 /* FALLTHROUGH */
3417                 return true;
3418         }
3419
3420         return false;
3421 }
3422
3423 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
3424 {
3425         switch (dir_type) {
3426         case BNX_DIR_TYPE_AVS:
3427         case BNX_DIR_TYPE_EXP_ROM_MBA:
3428         case BNX_DIR_TYPE_PCIE:
3429         case BNX_DIR_TYPE_TSCF_UCODE:
3430         case BNX_DIR_TYPE_EXT_PHY:
3431         case BNX_DIR_TYPE_CCM:
3432         case BNX_DIR_TYPE_ISCSI_BOOT:
3433         case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
3434         case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
3435                 /* FALLTHROUGH */
3436                 return true;
3437         }
3438
3439         return false;
3440 }
3441
3442 static bool bnxt_dir_type_is_executable(uint16_t dir_type)
3443 {
3444         return bnxt_dir_type_is_ape_bin_format(dir_type) ||
3445                 bnxt_dir_type_is_other_exec_format(dir_type);
3446 }
3447
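/*
 * set_eeprom: in_eeprom->magic selects the operation.  A type of 0xffff in
 * the upper 16 bits requests a directory operation, with the 1-based entry
 * index in bits 0-7 and the opcode in bits 8-15 (0x0e = erase).  Any other
 * magic describes an NVM item to flash: the extended type is in the low 16
 * bits of magic, the ordinal in offset[31:16] and the attributes in
 * offset[15:0].  Executable firmware item types are rejected here.
 */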
3448 static int
3449 bnxt_set_eeprom_op(struct rte_eth_dev *dev,
3450                 struct rte_dev_eeprom_info *in_eeprom)
3451 {
3452         struct bnxt *bp = dev->data->dev_private;
3453         uint8_t index, dir_op;
3454         uint16_t type, ext, ordinal, attr;
3455         int rc;
3456
3457         rc = is_bnxt_in_error(bp);
3458         if (rc)
3459                 return rc;
3460
3461         PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n",
3462                     bp->pdev->addr.domain, bp->pdev->addr.bus,
3463                     bp->pdev->addr.devid, bp->pdev->addr.function,
3464                     in_eeprom->offset, in_eeprom->length);
3465
3466         if (!BNXT_PF(bp)) {
3467                 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n");
3468                 return -EINVAL;
3469         }
3470
3471         type = in_eeprom->magic >> 16;
3472
3473         if (type == 0xffff) { /* special value for directory operations */
3474                 index = in_eeprom->magic & 0xff;
3475                 dir_op = in_eeprom->magic >> 8;
3476                 if (index == 0)
3477                         return -EINVAL;
3478                 switch (dir_op) {
3479                 case 0x0e: /* erase */
3480                         if (in_eeprom->offset != ~in_eeprom->magic)
3481                                 return -EINVAL;
3482                         return bnxt_hwrm_erase_nvram_directory(bp, index - 1);
3483                 default:
3484                         return -EINVAL;
3485                 }
3486         }
3487
3488         /* Create or re-write an NVM item: */
3489         if (bnxt_dir_type_is_executable(type))
3490                 return -EOPNOTSUPP;
3491         ext = in_eeprom->magic & 0xffff;
3492         ordinal = in_eeprom->offset >> 16;
3493         attr = in_eeprom->offset & 0xffff;
3494
3495         return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
3496                                      in_eeprom->data, in_eeprom->length);
3497 }
3498
3499 /*
3500  * Initialization
3501  */
3502
3503 static const struct eth_dev_ops bnxt_dev_ops = {
3504         .dev_infos_get = bnxt_dev_info_get_op,
3505         .dev_close = bnxt_dev_close_op,
3506         .dev_configure = bnxt_dev_configure_op,
3507         .dev_start = bnxt_dev_start_op,
3508         .dev_stop = bnxt_dev_stop_op,
3509         .dev_set_link_up = bnxt_dev_set_link_up_op,
3510         .dev_set_link_down = bnxt_dev_set_link_down_op,
3511         .stats_get = bnxt_stats_get_op,
3512         .stats_reset = bnxt_stats_reset_op,
3513         .rx_queue_setup = bnxt_rx_queue_setup_op,
3514         .rx_queue_release = bnxt_rx_queue_release_op,
3515         .tx_queue_setup = bnxt_tx_queue_setup_op,
3516         .tx_queue_release = bnxt_tx_queue_release_op,
3517         .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
3518         .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
3519         .reta_update = bnxt_reta_update_op,
3520         .reta_query = bnxt_reta_query_op,
3521         .rss_hash_update = bnxt_rss_hash_update_op,
3522         .rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
3523         .link_update = bnxt_link_update_op,
3524         .promiscuous_enable = bnxt_promiscuous_enable_op,
3525         .promiscuous_disable = bnxt_promiscuous_disable_op,
3526         .allmulticast_enable = bnxt_allmulticast_enable_op,
3527         .allmulticast_disable = bnxt_allmulticast_disable_op,
3528         .mac_addr_add = bnxt_mac_addr_add_op,
3529         .mac_addr_remove = bnxt_mac_addr_remove_op,
3530         .flow_ctrl_get = bnxt_flow_ctrl_get_op,
3531         .flow_ctrl_set = bnxt_flow_ctrl_set_op,
3532         .udp_tunnel_port_add  = bnxt_udp_tunnel_port_add_op,
3533         .udp_tunnel_port_del  = bnxt_udp_tunnel_port_del_op,
3534         .vlan_filter_set = bnxt_vlan_filter_set_op,
3535         .vlan_offload_set = bnxt_vlan_offload_set_op,
3536         .vlan_tpid_set = bnxt_vlan_tpid_set_op,
3537         .vlan_pvid_set = bnxt_vlan_pvid_set_op,
3538         .mtu_set = bnxt_mtu_set_op,
3539         .mac_addr_set = bnxt_set_default_mac_addr_op,
3540         .xstats_get = bnxt_dev_xstats_get_op,
3541         .xstats_get_names = bnxt_dev_xstats_get_names_op,
3542         .xstats_reset = bnxt_dev_xstats_reset_op,
3543         .fw_version_get = bnxt_fw_version_get,
3544         .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
3545         .rxq_info_get = bnxt_rxq_info_get_op,
3546         .txq_info_get = bnxt_txq_info_get_op,
3547         .rx_burst_mode_get = bnxt_rx_burst_mode_get,
3548         .tx_burst_mode_get = bnxt_tx_burst_mode_get,
3549         .dev_led_on = bnxt_dev_led_on_op,
3550         .dev_led_off = bnxt_dev_led_off_op,
3551         .rx_queue_start = bnxt_rx_queue_start,
3552         .rx_queue_stop = bnxt_rx_queue_stop,
3553         .tx_queue_start = bnxt_tx_queue_start,
3554         .tx_queue_stop = bnxt_tx_queue_stop,
3555         .filter_ctrl = bnxt_filter_ctrl_op,
3556         .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
3557         .get_eeprom_length    = bnxt_get_eeprom_length_op,
3558         .get_eeprom           = bnxt_get_eeprom_op,
3559         .set_eeprom           = bnxt_set_eeprom_op,
3560         .timesync_enable      = bnxt_timesync_enable,
3561         .timesync_disable     = bnxt_timesync_disable,
3562         .timesync_read_time   = bnxt_timesync_read_time,
3563         .timesync_write_time   = bnxt_timesync_write_time,
3564         .timesync_adjust_time = bnxt_timesync_adjust_time,
3565         .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp,
3566         .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp,
3567 };
3568
3569 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg)
3570 {
3571         uint32_t offset;
3572
3573         /* Only pre-map the reset GRC registers using window 3 */
3574         rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 +
3575                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8);
3576
3577         offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc);
3578
3579         return offset;
3580 }
3581
3582 int bnxt_map_fw_health_status_regs(struct bnxt *bp)
3583 {
3584         struct bnxt_error_recovery_info *info = bp->recovery_info;
3585         uint32_t reg_base = 0xffffffff;
3586         int i;
3587
3588         /* Only pre-map the monitoring GRC registers using window 2 */
3589         for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) {
3590                 uint32_t reg = info->status_regs[i];
3591
3592                 if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC)
3593                         continue;
3594
3595                 if (reg_base == 0xffffffff)
3596                         reg_base = reg & 0xfffff000;
3597                 if ((reg & 0xfffff000) != reg_base)
3598                         return -ERANGE;
3599
3600                 /* Use mask 0xffc since the lower 2 bits indicate the
3601                  * address space location.
3602                  */
3603                 info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE +
3604                                                 (reg & 0xffc);
3605         }
3606
3607         if (reg_base == 0xffffffff)
3608                 return 0;
3609
3610         rte_write32(reg_base, (uint8_t *)bp->bar0 +
3611                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
3612
3613         return 0;
3614 }
3615
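/*
 * bnxt_write_fw_reset_reg: each reset register descriptor encodes a register
 * space type and offset.  CFG registers are written through PCI config
 * space, GRC registers are remapped through reset window 3 before a BAR0
 * write, and BAR0 registers are written directly.  An optional FW-specified
 * delay follows each write.
 */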
3616 static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index)
3617 {
3618         struct bnxt_error_recovery_info *info = bp->recovery_info;
3619         uint32_t delay = info->delay_after_reset[index];
3620         uint32_t val = info->reset_reg_val[index];
3621         uint32_t reg = info->reset_reg[index];
3622         uint32_t type, offset;
3623
3624         type = BNXT_FW_STATUS_REG_TYPE(reg);
3625         offset = BNXT_FW_STATUS_REG_OFF(reg);
3626
3627         switch (type) {
3628         case BNXT_FW_STATUS_REG_TYPE_CFG:
3629                 rte_pci_write_config(bp->pdev, &val, sizeof(val), offset);
3630                 break;
3631         case BNXT_FW_STATUS_REG_TYPE_GRC:
3632                 offset = bnxt_map_reset_regs(bp, offset);
3633                 rte_write32(val, (uint8_t *)bp->bar0 + offset);
3634                 break;
3635         case BNXT_FW_STATUS_REG_TYPE_BAR0:
3636                 rte_write32(val, (uint8_t *)bp->bar0 + offset);
3637                 break;
3638         }
3639         /* Wait for the FW-specified delay until the core reset completes */
3640         if (delay)
3641                 rte_delay_ms(delay);
3642 }
3643
3644 static void bnxt_dev_cleanup(struct bnxt *bp)
3645 {
3646         bp->eth_dev->data->dev_link.link_status = 0;
3647         bp->link_info->link_up = 0;
3648         if (bp->eth_dev->data->dev_started)
3649                 bnxt_dev_stop_op(bp->eth_dev);
3650
3651         bnxt_uninit_resources(bp, true);
3652 }
3653
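/*
 * The ethdev VLAN filter configuration keeps one bit per VLAN ID in an array
 * of 64-bit words: vidx selects the word and vbit the bit within it.  Every
 * bit that is set is replayed to the HW filter table after recovery.
 */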
3654 static int bnxt_restore_vlan_filters(struct bnxt *bp)
3655 {
3656         struct rte_eth_dev *dev = bp->eth_dev;
3657         struct rte_vlan_filter_conf *vfc;
3658         int vidx, vbit, rc;
3659         uint16_t vlan_id;
3660
3661         for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) {
3662                 vfc = &dev->data->vlan_filter_conf;
3663                 vidx = vlan_id / 64;
3664                 vbit = vlan_id % 64;
3665
3666                 /* Each bit corresponds to a VLAN id */
3667                 if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) {
3668                         rc = bnxt_add_vlan_filter(bp, vlan_id);
3669                         if (rc)
3670                                 return rc;
3671                 }
3672         }
3673
3674         return 0;
3675 }
3676
3677 static int bnxt_restore_mac_filters(struct bnxt *bp)
3678 {
3679         struct rte_eth_dev *dev = bp->eth_dev;
3680         struct rte_eth_dev_info dev_info;
3681         struct rte_ether_addr *addr;
3682         uint64_t pool_mask;
3683         uint32_t pool = 0;
3684         uint16_t i;
3685         int rc;
3686
3687         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
3688                 return 0;
3689
3690         rc = bnxt_dev_info_get_op(dev, &dev_info);
3691         if (rc)
3692                 return rc;
3693
3694         /* replay MAC address configuration */
3695         for (i = 1; i < dev_info.max_mac_addrs; i++) {
3696                 addr = &dev->data->mac_addrs[i];
3697
3698                 /* skip zero address */
3699                 if (rte_is_zero_ether_addr(addr))
3700                         continue;
3701
3702                 pool = 0;
3703                 pool_mask = dev->data->mac_pool_sel[i];
3704
3705                 do {
3706                         if (pool_mask & 1ULL) {
3707                                 rc = bnxt_mac_addr_add_op(dev, addr, i, pool);
3708                                 if (rc)
3709                                         return rc;
3710                         }
3711                         pool_mask >>= 1;
3712                         pool++;
3713                 } while (pool_mask);
3714         }
3715
3716         return 0;
3717 }
3718
3719 static int bnxt_restore_filters(struct bnxt *bp)
3720 {
3721         struct rte_eth_dev *dev = bp->eth_dev;
3722         int ret = 0;
3723
3724         if (dev->data->all_multicast) {
3725                 ret = bnxt_allmulticast_enable_op(dev);
3726                 if (ret)
3727                         return ret;
3728         }
3729         if (dev->data->promiscuous) {
3730                 ret = bnxt_promiscuous_enable_op(dev);
3731                 if (ret)
3732                         return ret;
3733         }
3734
3735         ret = bnxt_restore_mac_filters(bp);
3736         if (ret)
3737                 return ret;
3738
3739         ret = bnxt_restore_vlan_filters(bp);
3740         /* TODO restore other filters as well */
3741         return ret;
3742 }
3743
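/*
 * FW reset recovery: poll HWRM_VER_GET until the FW responds (bounded by
 * fw_reset_max_msecs), re-initialize driver resources, restart the port and
 * replay the allmulticast/promiscuous, MAC and VLAN filter configuration.
 * Any failure leaves the device marked with a fatal error again.
 */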
3744 static void bnxt_dev_recover(void *arg)
3745 {
3746         struct bnxt *bp = arg;
3747         int timeout = bp->fw_reset_max_msecs;
3748         int rc = 0;
3749
3750         /* Clear the fatal error flag so that device re-init can proceed */
3751         bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
3752
3753         do {
3754                 rc = bnxt_hwrm_ver_get(bp, SHORT_HWRM_CMD_TIMEOUT);
3755                 if (rc == 0)
3756                         break;
3757                 rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL);
3758                 timeout -= BNXT_FW_READY_WAIT_INTERVAL;
3759         } while (rc && timeout);
3760
3761         if (rc) {
3762                 PMD_DRV_LOG(ERR, "FW is not Ready after reset\n");
3763                 goto err;
3764         }
3765
3766         rc = bnxt_init_resources(bp, true);
3767         if (rc) {
3768                 PMD_DRV_LOG(ERR,
3769                             "Failed to initialize resources after reset\n");
3770                 goto err;
3771         }
3772         /* clear reset flag as the device is initialized now */
3773         bp->flags &= ~BNXT_FLAG_FW_RESET;
3774
3775         rc = bnxt_dev_start_op(bp->eth_dev);
3776         if (rc) {
3777                 PMD_DRV_LOG(ERR, "Failed to start port after reset\n");
3778                 goto err_start;
3779         }
3780
3781         rc = bnxt_restore_filters(bp);
3782         if (rc)
3783                 goto err_start;
3784
3785         PMD_DRV_LOG(INFO, "Recovered from FW reset\n");
3786         return;
3787 err_start:
3788         bnxt_dev_stop_op(bp->eth_dev);
3789 err:
3790         bp->flags |= BNXT_FLAG_FATAL_ERROR;
3791         bnxt_uninit_resources(bp, false);
3792         PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n");
3793 }
3794
3795 void bnxt_dev_reset_and_resume(void *arg)
3796 {
3797         struct bnxt *bp = arg;
3798         int rc;
3799
3800         bnxt_dev_cleanup(bp);
3801
3802         bnxt_wait_for_device_shutdown(bp);
3803
3804         rc = rte_eal_alarm_set(US_PER_MS * bp->fw_reset_min_msecs,
3805                                bnxt_dev_recover, (void *)bp);
3806         if (rc)
3807                 PMD_DRV_LOG(ERR, "Error setting recovery alarm\n");
3808 }
3809
3810 uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index)
3811 {
3812         struct bnxt_error_recovery_info *info = bp->recovery_info;
3813         uint32_t reg = info->status_regs[index];
3814         uint32_t type, offset, val = 0;
3815
3816         type = BNXT_FW_STATUS_REG_TYPE(reg);
3817         offset = BNXT_FW_STATUS_REG_OFF(reg);
3818
3819         switch (type) {
3820         case BNXT_FW_STATUS_REG_TYPE_CFG:
3821                 rte_pci_read_config(bp->pdev, &val, sizeof(val), offset);
3822                 break;
3823         case BNXT_FW_STATUS_REG_TYPE_GRC:
3824                 offset = info->mapped_status_regs[index];
3825                 /* FALLTHROUGH */
3826         case BNXT_FW_STATUS_REG_TYPE_BAR0:
3827                 val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3828                                        offset));
3829                 break;
3830         }
3831
3832         return val;
3833 }
3834
3835 static int bnxt_fw_reset_all(struct bnxt *bp)
3836 {
3837         struct bnxt_error_recovery_info *info = bp->recovery_info;
3838         uint32_t i;
3839         int rc = 0;
3840
3841         if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
3842                 /* Reset through master function driver */
3843                 for (i = 0; i < info->reg_array_cnt; i++)
3844                         bnxt_write_fw_reset_reg(bp, i);
3845                 /* Wait for time specified by FW after triggering reset */
3846                 rte_delay_ms(info->master_func_wait_period_after_reset);
3847         } else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) {
3848                 /* Reset with the help of Kong processor */
3849                 rc = bnxt_hwrm_fw_reset(bp);
3850                 if (rc)
3851                         PMD_DRV_LOG(ERR, "Failed to reset FW\n");
3852         }
3853
3854         return rc;
3855 }
3856
3857 static void bnxt_fw_reset_cb(void *arg)
3858 {
3859         struct bnxt *bp = arg;
3860         struct bnxt_error_recovery_info *info = bp->recovery_info;
3861         int rc = 0;
3862
3863         /* Only the master function can trigger a FW reset */
3864         if (bnxt_is_master_func(bp) &&
3865             bnxt_is_recovery_enabled(bp)) {
3866                 rc = bnxt_fw_reset_all(bp);
3867                 if (rc) {
3868                         PMD_DRV_LOG(ERR, "Adapter recovery failed\n");
3869                         return;
3870                 }
3871         }
3872
3873         /* If the recovery method is ERROR_RECOVERY_CO_CPU, KONG will send an
3874          * EXCEPTION_FATAL_ASYNC event to all the functions
3875          * (including the MASTER FUNC). After receiving this async event, all
3876          * active drivers should treat this case as FW-initiated recovery.
3877          */
3878         if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
3879                 bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT;
3880                 bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT;
3881
3882                 /* To recover from error */
3883                 rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
3884                                   (void *)bp);
3885         }
3886 }
3887
3888 /* The driver should poll the FW heartbeat and reset_counter with the
3889  * frequency advertised by the FW in HWRM_ERROR_RECOVERY_QCFG.
3890  * When the driver detects a heartbeat stop or a change in reset_counter,
3891  * it has to trigger a reset to recover from the error condition.
3892  * A "master PF" is the function that has the privilege to
3893  * initiate the chimp reset. The master PF is elected by the
3894  * firmware and is notified through an async message.
3895  */
3896 static void bnxt_check_fw_health(void *arg)
3897 {
3898         struct bnxt *bp = arg;
3899         struct bnxt_error_recovery_info *info = bp->recovery_info;
3900         uint32_t val = 0, wait_msec;
3901
3902         if (!info || !bnxt_is_recovery_enabled(bp) ||
3903             is_bnxt_in_error(bp))
3904                 return;
3905
3906         val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG);
3907         if (val == info->last_heart_beat)
3908                 goto reset;
3909
3910         info->last_heart_beat = val;
3911
3912         val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG);
3913         if (val != info->last_reset_counter)
3914                 goto reset;
3915
3916         info->last_reset_counter = val;
3917
3918         rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq,
3919                           bnxt_check_fw_health, (void *)bp);
3920
3921         return;
3922 reset:
3923         /* Stop DMA to/from device */
3924         bp->flags |= BNXT_FLAG_FATAL_ERROR;
3925         bp->flags |= BNXT_FLAG_FW_RESET;
3926
3927         PMD_DRV_LOG(ERR, "Detected FW dead condition\n");
3928
3929         if (bnxt_is_master_func(bp))
3930                 wait_msec = info->master_func_wait_period;
3931         else
3932                 wait_msec = info->normal_func_wait_period;
3933
3934         rte_eal_alarm_set(US_PER_MS * wait_msec,
3935                           bnxt_fw_reset_cb, (void *)bp);
3936 }
3937
3938 void bnxt_schedule_fw_health_check(struct bnxt *bp)
3939 {
3940         uint32_t polling_freq;
3941
3942         pthread_mutex_lock(&bp->health_check_lock);
3943
3944         if (!bnxt_is_recovery_enabled(bp))
3945                 goto done;
3946
3947         if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED)
3948                 goto done;
3949
3950         polling_freq = bp->recovery_info->driver_polling_freq;
3951
3952         rte_eal_alarm_set(US_PER_MS * polling_freq,
3953                           bnxt_check_fw_health, (void *)bp);
3954         bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
3955
3956 done:
3957         pthread_mutex_unlock(&bp->health_check_lock);
3958 }
3959
3960 static void bnxt_cancel_fw_health_check(struct bnxt *bp)
3961 {
3962         if (!bnxt_is_recovery_enabled(bp))
3963                 return;
3964
3965         rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp);
3966         bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
3967 }
3968
3969 static bool bnxt_vf_pciid(uint16_t device_id)
3970 {
3971         switch (device_id) {
3972         case BROADCOM_DEV_ID_57304_VF:
3973         case BROADCOM_DEV_ID_57406_VF:
3974         case BROADCOM_DEV_ID_5731X_VF:
3975         case BROADCOM_DEV_ID_5741X_VF:
3976         case BROADCOM_DEV_ID_57414_VF:
3977         case BROADCOM_DEV_ID_STRATUS_NIC_VF1:
3978         case BROADCOM_DEV_ID_STRATUS_NIC_VF2:
3979         case BROADCOM_DEV_ID_58802_VF:
3980         case BROADCOM_DEV_ID_57500_VF1:
3981         case BROADCOM_DEV_ID_57500_VF2:
3982                 /* FALLTHROUGH */
3983                 return true;
3984         default:
3985                 return false;
3986         }
3987 }
3988
3989 static bool bnxt_thor_device(uint16_t device_id)
3990 {
3991         switch (device_id) {
3992         case BROADCOM_DEV_ID_57508:
3993         case BROADCOM_DEV_ID_57504:
3994         case BROADCOM_DEV_ID_57502:
3995         case BROADCOM_DEV_ID_57508_MF1:
3996         case BROADCOM_DEV_ID_57504_MF1:
3997         case BROADCOM_DEV_ID_57502_MF1:
3998         case BROADCOM_DEV_ID_57508_MF2:
3999         case BROADCOM_DEV_ID_57504_MF2:
4000         case BROADCOM_DEV_ID_57502_MF2:
4001         case BROADCOM_DEV_ID_57500_VF1:
4002         case BROADCOM_DEV_ID_57500_VF2:
4003                 /* FALLTHROUGH */
4004                 return true;
4005         default:
4006                 return false;
4007         }
4008 }
4009
4010 bool bnxt_stratus_device(struct bnxt *bp)
4011 {
4012         uint16_t device_id = bp->pdev->id.device_id;
4013
4014         switch (device_id) {
4015         case BROADCOM_DEV_ID_STRATUS_NIC:
4016         case BROADCOM_DEV_ID_STRATUS_NIC_VF1:
4017         case BROADCOM_DEV_ID_STRATUS_NIC_VF2:
4018                 /* FALLTHROUGH */
4019                 return true;
4020         default:
4021                 return false;
4022         }
4023 }
4024
4025 static int bnxt_init_board(struct rte_eth_dev *eth_dev)
4026 {
4027         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
4028         struct bnxt *bp = eth_dev->data->dev_private;
4029
4030         /* Map the register and doorbell BARs for device access */
4031         bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
4032         bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
4033         if (!bp->bar0 || !bp->doorbell_base) {
4034                 PMD_DRV_LOG(ERR, "Unable to access Hardware\n");
4035                 return -ENODEV;
4036         }
4037
4038         bp->eth_dev = eth_dev;
4039         bp->pdev = pci_dev;
4040
4041         return 0;
4042 }
4043
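/*
 * Backing-store context memory is reserved as an IOVA-contiguous memzone and
 * carved into BNXT_PAGE_SIZE pages.  When more than one page is required, an
 * additional memzone holds the page table and is filled with little-endian
 * PTEs carrying the VALID bit (plus NEXT_TO_LAST/LAST for ring PTEs).
 */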
4044 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
4045                                   struct bnxt_ctx_pg_info *ctx_pg,
4046                                   uint32_t mem_size,
4047                                   const char *suffix,
4048                                   uint16_t idx)
4049 {
4050         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
4051         const struct rte_memzone *mz = NULL;
4052         char mz_name[RTE_MEMZONE_NAMESIZE];
4053         rte_iova_t mz_phys_addr;
4054         uint64_t valid_bits = 0;
4055         uint32_t sz;
4056         int i;
4057
4058         if (!mem_size)
4059                 return 0;
4060
4061         rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) /
4062                          BNXT_PAGE_SIZE;
4063         rmem->page_size = BNXT_PAGE_SIZE;
4064         rmem->pg_arr = ctx_pg->ctx_pg_arr;
4065         rmem->dma_arr = ctx_pg->ctx_dma_arr;
4066         rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
4067
4068         valid_bits = PTU_PTE_VALID;
4069
4070         if (rmem->nr_pages > 1) {
4071                 snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4072                          "bnxt_ctx_pg_tbl%s_%x_%d",
4073                          suffix, idx, bp->eth_dev->data->port_id);
4074                 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4075                 mz = rte_memzone_lookup(mz_name);
4076                 if (!mz) {
4077                         mz = rte_memzone_reserve_aligned(mz_name,
4078                                                 rmem->nr_pages * 8,
4079                                                 SOCKET_ID_ANY,
4080                                                 RTE_MEMZONE_2MB |
4081                                                 RTE_MEMZONE_SIZE_HINT_ONLY |
4082                                                 RTE_MEMZONE_IOVA_CONTIG,
4083                                                 BNXT_PAGE_SIZE);
4084                         if (mz == NULL)
4085                                 return -ENOMEM;
4086                 }
4087
4088                 memset(mz->addr, 0, mz->len);
4089                 mz_phys_addr = mz->iova;
4090
4091                 rmem->pg_tbl = mz->addr;
4092                 rmem->pg_tbl_map = mz_phys_addr;
4093                 rmem->pg_tbl_mz = mz;
4094         }
4095
4096         snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d",
4097                  suffix, idx, bp->eth_dev->data->port_id);
4098         mz = rte_memzone_lookup(mz_name);
4099         if (!mz) {
4100                 mz = rte_memzone_reserve_aligned(mz_name,
4101                                                  mem_size,
4102                                                  SOCKET_ID_ANY,
4103                                                  RTE_MEMZONE_1GB |
4104                                                  RTE_MEMZONE_SIZE_HINT_ONLY |
4105                                                  RTE_MEMZONE_IOVA_CONTIG,
4106                                                  BNXT_PAGE_SIZE);
4107                 if (mz == NULL)
4108                         return -ENOMEM;
4109         }
4110
4111         memset(mz->addr, 0, mz->len);
4112         mz_phys_addr = mz->iova;
4113
4114         for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) {
4115                 rmem->pg_arr[i] = ((char *)mz->addr) + sz;
4116                 rmem->dma_arr[i] = mz_phys_addr + sz;
4117
4118                 if (rmem->nr_pages > 1) {
4119                         if (i == rmem->nr_pages - 2 &&
4120                             (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
4121                                 valid_bits |= PTU_PTE_NEXT_TO_LAST;
4122                         else if (i == rmem->nr_pages - 1 &&
4123                                  (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
4124                                 valid_bits |= PTU_PTE_LAST;
4125
4126                         rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] |
4127                                                            valid_bits);
4128                 }
4129         }
4130
4131         rmem->mz = mz;
4132         if (rmem->vmem_size)
4133                 rmem->vmem = (void **)mz->addr;
4134         rmem->dma_arr[0] = mz_phys_addr;
4135         return 0;
4136 }
4137
4138 static void bnxt_free_ctx_mem(struct bnxt *bp)
4139 {
4140         int i;
4141
4142         if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED))
4143                 return;
4144
4145         bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED;
4146         rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz);
4147         rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz);
4148         rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz);
4149         rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz);
4150         rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz);
4151         rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz);
4152         rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz);
4153         rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz);
4154         rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz);
4155         rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz);
4156
4157         for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) {
4158                 if (bp->ctx->tqm_mem[i])
4159                         rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz);
4160         }
4161
4162         rte_free(bp->ctx);
4163         bp->ctx = NULL;
4164 }
4165
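/*
 * Linux-style helpers: bnxt_roundup() rounds x up to the next multiple of y,
 * while min_t/max_t/clamp_t use GCC statement expressions to evaluate their
 * arguments once with an explicit type, e.g.
 * clamp_t(uint32_t, entries, min, max) as used below.
 */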
4166 #define bnxt_roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))
4167
4168 #define min_t(type, x, y) ({                    \
4169         type __min1 = (x);                      \
4170         type __min2 = (y);                      \
4171         __min1 < __min2 ? __min1 : __min2; })
4172
4173 #define max_t(type, x, y) ({                    \
4174         type __max1 = (x);                      \
4175         type __max2 = (y);                      \
4176         __max1 > __max2 ? __max1 : __max2; })
4177
4178 #define clamp_t(type, _x, min, max)     min_t(type, max_t(type, _x, min), max)
4179
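/*
 * bnxt_alloc_ctx_mem: query the FW backing-store requirements, size each
 * context type (QP, SRQ, CQ, VNIC, stats) from the reported entry counts,
 * then size the TQM rings: ring 0 (the SP ring) gets entries_sp and the
 * fast-path rings get a count rounded to tqm_entries_multiple and clamped to
 * the FW min/max.  The enables bitmap tells the FW which backing stores were
 * actually configured.
 */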
4180 int bnxt_alloc_ctx_mem(struct bnxt *bp)
4181 {
4182         struct bnxt_ctx_pg_info *ctx_pg;
4183         struct bnxt_ctx_mem_info *ctx;
4184         uint32_t mem_size, ena, entries;
4185         uint32_t entries_sp, min;
4186         int i, rc;
4187
4188         rc = bnxt_hwrm_func_backing_store_qcaps(bp);
4189         if (rc) {
4190                 PMD_DRV_LOG(ERR, "Query context mem capability failed\n");
4191                 return rc;
4192         }
4193         ctx = bp->ctx;
4194         if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
4195                 return 0;
4196
4197         ctx_pg = &ctx->qp_mem;
4198         ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
4199         mem_size = ctx->qp_entry_size * ctx_pg->entries;
4200         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0);
4201         if (rc)
4202                 return rc;
4203
4204         ctx_pg = &ctx->srq_mem;
4205         ctx_pg->entries = ctx->srq_max_l2_entries;
4206         mem_size = ctx->srq_entry_size * ctx_pg->entries;
4207         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0);
4208         if (rc)
4209                 return rc;
4210
4211         ctx_pg = &ctx->cq_mem;
4212         ctx_pg->entries = ctx->cq_max_l2_entries;
4213         mem_size = ctx->cq_entry_size * ctx_pg->entries;
4214         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0);
4215         if (rc)
4216                 return rc;
4217
4218         ctx_pg = &ctx->vnic_mem;
4219         ctx_pg->entries = ctx->vnic_max_vnic_entries +
4220                 ctx->vnic_max_ring_table_entries;
4221         mem_size = ctx->vnic_entry_size * ctx_pg->entries;
4222         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0);
4223         if (rc)
4224                 return rc;
4225
4226         ctx_pg = &ctx->stat_mem;
4227         ctx_pg->entries = ctx->stat_max_entries;
4228         mem_size = ctx->stat_entry_size * ctx_pg->entries;
4229         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0);
4230         if (rc)
4231                 return rc;
4232
4233         min = ctx->tqm_min_entries_per_ring;
4234
4235         entries_sp = ctx->qp_max_l2_entries +
4236                      ctx->vnic_max_vnic_entries +
4237                      2 * ctx->qp_min_qp1_entries + min;
4238         entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple);
4239
4240         entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries;
4241         entries = bnxt_roundup(entries, ctx->tqm_entries_multiple);
4242         entries = clamp_t(uint32_t, entries, min,
4243                           ctx->tqm_max_entries_per_ring);
4244         for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
4245                 ctx_pg = ctx->tqm_mem[i];
4246                 ctx_pg->entries = i ? entries : entries_sp;
4247                 mem_size = ctx->tqm_entry_size * ctx_pg->entries;
4248                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i);
4249                 if (rc)
4250                         return rc;
4251                 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
4252         }
4253
4254         ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
4255         rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
4256         if (rc)
4257                 PMD_DRV_LOG(ERR,
4258                             "Failed to configure context mem: rc = %d\n", rc);
4259         else
4260                 ctx->flags |= BNXT_CTX_FLAG_INITED;
4261
4262         return rc;
4263 }
4264
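/*
 * Port statistics DMA memory: one memzone per direction, sized to hold the
 * base rx/tx_port_stats structure followed by its _ext extension (plus
 * padding).  The extended blocks are only hooked up when the FW spec level
 * and capability flags checked below allow it.
 */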
4265 static int bnxt_alloc_stats_mem(struct bnxt *bp)
4266 {
4267         struct rte_pci_device *pci_dev = bp->pdev;
4268         char mz_name[RTE_MEMZONE_NAMESIZE];
4269         const struct rte_memzone *mz = NULL;
4270         uint32_t total_alloc_len;
4271         rte_iova_t mz_phys_addr;
4272
4273         if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2)
4274                 return 0;
4275
4276         snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4277                  "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
4278                  pci_dev->addr.bus, pci_dev->addr.devid,
4279                  pci_dev->addr.function, "rx_port_stats");
4280         mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4281         mz = rte_memzone_lookup(mz_name);
4282         total_alloc_len =
4283                 RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) +
4284                                        sizeof(struct rx_port_stats_ext) + 512);
4285         if (!mz) {
4286                 mz = rte_memzone_reserve(mz_name, total_alloc_len,
4287                                          SOCKET_ID_ANY,
4288                                          RTE_MEMZONE_2MB |
4289                                          RTE_MEMZONE_SIZE_HINT_ONLY |
4290                                          RTE_MEMZONE_IOVA_CONTIG);
4291                 if (mz == NULL)
4292                         return -ENOMEM;
4293         }
4294         memset(mz->addr, 0, mz->len);
4295         mz_phys_addr = mz->iova;
4296
4297         bp->rx_mem_zone = (const void *)mz;
4298         bp->hw_rx_port_stats = mz->addr;
4299         bp->hw_rx_port_stats_map = mz_phys_addr;
4300
4301         snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4302                  "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
4303                  pci_dev->addr.bus, pci_dev->addr.devid,
4304                  pci_dev->addr.function, "tx_port_stats");
4305         mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4306         mz = rte_memzone_lookup(mz_name);
4307         total_alloc_len =
4308                 RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) +
4309                                        sizeof(struct tx_port_stats_ext) + 512);
4310         if (!mz) {
4311                 mz = rte_memzone_reserve(mz_name,
4312                                          total_alloc_len,
4313                                          SOCKET_ID_ANY,
4314                                          RTE_MEMZONE_2MB |
4315                                          RTE_MEMZONE_SIZE_HINT_ONLY |
4316                                          RTE_MEMZONE_IOVA_CONTIG);
4317                 if (mz == NULL)
4318                         return -ENOMEM;
4319         }
4320         memset(mz->addr, 0, mz->len);
4321         mz_phys_addr = mz->iova;
4322
4323         bp->tx_mem_zone = (const void *)mz;
4324         bp->hw_tx_port_stats = mz->addr;
4325         bp->hw_tx_port_stats_map = mz_phys_addr;
4326         bp->flags |= BNXT_FLAG_PORT_STATS;
4327
4328         /* Set up extended statistics memory if the FW supports it */
4329         if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 ||
4330             bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 ||
4331             !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED))
4332                 return 0;
4333
4334         bp->hw_rx_port_stats_ext = (void *)
4335                 ((uint8_t *)bp->hw_rx_port_stats +
4336                  sizeof(struct rx_port_stats));
4337         bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
4338                 sizeof(struct rx_port_stats);
4339         bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;
4340
4341         if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 ||
4342             bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) {
4343                 bp->hw_tx_port_stats_ext = (void *)
4344                         ((uint8_t *)bp->hw_tx_port_stats +
4345                          sizeof(struct tx_port_stats));
4346                 bp->hw_tx_port_stats_ext_map =
4347                         bp->hw_tx_port_stats_map +
4348                         sizeof(struct tx_port_stats);
4349                 bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
4350         }
4351
4352         return 0;
4353 }
4354
4355 static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
4356 {
4357         struct bnxt *bp = eth_dev->data->dev_private;
4358         int rc = 0;
4359
4360         eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
4361                                                RTE_ETHER_ADDR_LEN *
4362                                                bp->max_l2_ctx,
4363                                                0);
4364         if (eth_dev->data->mac_addrs == NULL) {
4365                 PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
4366                 return -ENOMEM;
4367         }
4368
4369         if (!BNXT_HAS_DFLT_MAC_SET(bp)) {
4370                 if (BNXT_PF(bp))
4371                         return -EINVAL;
4372
4373                 /* Generate a random MAC address, if none was assigned by PF */
4374                 PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
4375                 bnxt_eth_hw_addr_random(bp->mac_addr);
4376                 PMD_DRV_LOG(INFO,
4377                             "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
4378                             bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
4379                             bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);
4380
4381                 rc = bnxt_hwrm_set_mac(bp);
4382                 if (rc)
4383                         return rc;
4384         }
4385
4386         /* Copy the permanent MAC from the FUNC_QCAPS response */
4387         memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
4388
4389         return rc;
4390 }
4391
4392 static int bnxt_restore_dflt_mac(struct bnxt *bp)
4393 {
4394         int rc = 0;
4395
4396         /* MAC is already configured in FW */
4397         if (BNXT_HAS_DFLT_MAC_SET(bp))
4398                 return 0;
4399
4400         /* Restore the old MAC configured */
4401         rc = bnxt_hwrm_set_mac(bp);
4402         if (rc)
4403                 PMD_DRV_LOG(ERR, "Failed to restore MAC address\n");
4404
4405         return rc;
4406 }
4407
4408 static void bnxt_config_vf_req_fwd(struct bnxt *bp)
4409 {
4410         if (!BNXT_PF(bp))
4411                 return;
4412
4413         memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));
4414
4415         if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN))
4416                 BNXT_HWRM_CMD_TO_FORWARD(HWRM_PORT_PHY_QCFG);
4417         BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_CFG);
4418         BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_VF_CFG);
4419         BNXT_HWRM_CMD_TO_FORWARD(HWRM_CFA_L2_FILTER_ALLOC);
4420         BNXT_HWRM_CMD_TO_FORWARD(HWRM_OEM_CMD);
4421 }
4422
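/*
 * The bnxt_get_*() helpers below share one pattern: for representor ports
 * the value cached in the bnxt_representor is returned where applicable
 * (BNXT_ULP_INTF_TYPE_VF_REP queries), otherwise the lookup is redirected to
 * the parent device's private data.
 */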
4423 uint16_t
4424 bnxt_get_svif(uint16_t port_id, bool func_svif,
4425               enum bnxt_ulp_intf_type type)
4426 {
4427         struct rte_eth_dev *eth_dev;
4428         struct bnxt *bp;
4429
4430         eth_dev = &rte_eth_devices[port_id];
4431         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
4432                 struct bnxt_representor *vfr = eth_dev->data->dev_private;
4433                 if (!vfr)
4434                         return 0;
4435
4436                 if (type == BNXT_ULP_INTF_TYPE_VF_REP)
4437                         return vfr->svif;
4438
4439                 eth_dev = vfr->parent_dev;
4440         }
4441
4442         bp = eth_dev->data->dev_private;
4443
4444         return func_svif ? bp->func_svif : bp->port_svif;
4445 }
4446
4447 uint16_t
4448 bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type)
4449 {
4450         struct rte_eth_dev *eth_dev;
4451         struct bnxt_vnic_info *vnic;
4452         struct bnxt *bp;
4453
4454         eth_dev = &rte_eth_devices[port];
4455         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
4456                 struct bnxt_representor *vfr = eth_dev->data->dev_private;
4457                 if (!vfr)
4458                         return 0;
4459
4460                 if (type == BNXT_ULP_INTF_TYPE_VF_REP)
4461                         return vfr->dflt_vnic_id;
4462
4463                 eth_dev = vfr->parent_dev;
4464         }
4465
4466         bp = eth_dev->data->dev_private;
4467
4468         vnic = BNXT_GET_DEFAULT_VNIC(bp);
4469
4470         return vnic->fw_vnic_id;
4471 }
4472
4473 uint16_t
4474 bnxt_get_fw_func_id(uint16_t port, enum bnxt_ulp_intf_type type)
4475 {
4476         struct rte_eth_dev *eth_dev;
4477         struct bnxt *bp;
4478
4479         eth_dev = &rte_eth_devices[port];
4480         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
4481                 struct bnxt_representor *vfr = eth_dev->data->dev_private;
4482                 if (!vfr)
4483                         return 0;
4484
4485                 if (type == BNXT_ULP_INTF_TYPE_VF_REP)
4486                         return vfr->fw_fid;
4487
4488                 eth_dev = vfr->parent_dev;
4489         }
4490
4491         bp = eth_dev->data->dev_private;
4492
4493         return bp->fw_fid;
4494 }
4495
4496 enum bnxt_ulp_intf_type
4497 bnxt_get_interface_type(uint16_t port)
4498 {
4499         struct rte_eth_dev *eth_dev;
4500         struct bnxt *bp;
4501
4502         eth_dev = &rte_eth_devices[port];
4503         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev))
4504                 return BNXT_ULP_INTF_TYPE_VF_REP;
4505
4506         bp = eth_dev->data->dev_private;
4507         if (BNXT_PF(bp))
4508                 return BNXT_ULP_INTF_TYPE_PF;
4509         else if (BNXT_VF_IS_TRUSTED(bp))
4510                 return BNXT_ULP_INTF_TYPE_TRUSTED_VF;
4511         else if (BNXT_VF(bp))
4512                 return BNXT_ULP_INTF_TYPE_VF;
4513
4514         return BNXT_ULP_INTF_TYPE_INVALID;
4515 }
4516
4517 uint16_t
4518 bnxt_get_phy_port_id(uint16_t port_id)
4519 {
4520         struct bnxt_representor *vfr;
4521         struct rte_eth_dev *eth_dev;
4522         struct bnxt *bp;
4523
4524         eth_dev = &rte_eth_devices[port_id];
4525         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
4526                 vfr = eth_dev->data->dev_private;
4527                 if (!vfr)
4528                         return 0;
4529
4530                 eth_dev = vfr->parent_dev;
4531         }
4532
4533         bp = eth_dev->data->dev_private;
4534
4535         return BNXT_PF(bp) ? bp->pf->port_id : bp->parent->port_id;
4536 }
4537
4538 uint16_t
4539 bnxt_get_parif(uint16_t port_id, enum bnxt_ulp_intf_type type)
4540 {
4541         struct rte_eth_dev *eth_dev;
4542         struct bnxt *bp;
4543
4544         eth_dev = &rte_eth_devices[port_id];
4545         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
4546                 struct bnxt_representor *vfr = eth_dev->data->dev_private;
4547                 if (!vfr)
4548                         return 0;
4549
4550                 if (type == BNXT_ULP_INTF_TYPE_VF_REP)
4551                         return vfr->fw_fid - 1;
4552
4553                 eth_dev = vfr->parent_dev;
4554         }
4555
4556         bp = eth_dev->data->dev_private;
4557
4558         return BNXT_PF(bp) ? bp->fw_fid - 1 : bp->parent->fid - 1;
4559 }
4560
4561 uint16_t
4562 bnxt_get_vport(uint16_t port_id)
4563 {
4564         return (1 << bnxt_get_phy_port_id(port_id));
4565 }
4566
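     /* Allocate (or reset) the error recovery info structure. An existing
      * structure is cleared unless it was set up through the HCOMM FW status
      * register; a new one is allocated only when firmware reports error
      * recovery support.
      */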
4567 static void bnxt_alloc_error_recovery_info(struct bnxt *bp)
4568 {
4569         struct bnxt_error_recovery_info *info = bp->recovery_info;
4570
4571         if (info) {
4572                 if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))
4573                         memset(info, 0, sizeof(*info));
4574                 return;
4575         }
4576
4577         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4578                 return;
4579
4580         info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
4581                            sizeof(*info), 0);
4582         if (!info)
4583                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
4584
4585         bp->recovery_info = info;
4586 }
4587
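     /* Read the firmware health status register, when available, and log an
      * error if firmware does not report a healthy state.
      */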
4588 static void bnxt_check_fw_status(struct bnxt *bp)
4589 {
4590         uint32_t fw_status;
4591
4592         if (!(bp->recovery_info &&
4593               (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS)))
4594                 return;
4595
4596         fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG);
4597         if (fw_status != BNXT_FW_STATUS_HEALTHY)
4598                 PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n",
4599                             fw_status);
4600 }
4601
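     /* Map GRC window 2 onto the hcomm_status structure advertised by
      * firmware. If the signature matches, record the firmware status register
      * location, pre-map it when it is a GRC register and flag HCOMM FW status
      * support.
      */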
4602 static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp)
4603 {
4604         struct bnxt_error_recovery_info *info = bp->recovery_info;
4605         uint32_t status_loc;
4606         uint32_t sig_ver;
4607
4608         rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 +
4609                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
4610         sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
4611                                    BNXT_GRCP_WINDOW_2_BASE +
4612                                    offsetof(struct hcomm_status,
4613                                             sig_ver)));
4614         /* If the signature is absent, then FW does not support this feature */
4615         if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) !=
4616             HCOMM_STATUS_SIGNATURE_VAL)
4617                 return 0;
4618
4619         if (!info) {
4620                 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
4621                                    sizeof(*info), 0);
4622                 if (!info)
4623                         return -ENOMEM;
4624                 bp->recovery_info = info;
4625         } else {
4626                 memset(info, 0, sizeof(*info));
4627         }
4628
4629         status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
4630                                       BNXT_GRCP_WINDOW_2_BASE +
4631                                       offsetof(struct hcomm_status,
4632                                                fw_status_loc)));
4633
4634         /* Only pre-map the FW health status GRC register */
4635         if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC)
4636                 return 0;
4637
4638         info->status_regs[BNXT_FW_STATUS_REG] = status_loc;
4639         info->mapped_status_regs[BNXT_FW_STATUS_REG] =
4640                 BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK);
4641
4642         rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 +
4643                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
4644
4645         bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS;
4646
4647         return 0;
4648 }
4649
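     /* Bring up the HWRM channel: query the firmware version, reset the
      * function and retrieve VNIC, queue, function, MAC, PHY, error recovery
      * and LED capabilities needed before resources are allocated.
      */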
4650 static int bnxt_init_fw(struct bnxt *bp)
4651 {
4652         uint16_t mtu;
4653         int rc = 0;
4654
4655         bp->fw_cap = 0;
4656
4657         rc = bnxt_map_hcomm_fw_status_reg(bp);
4658         if (rc)
4659                 return rc;
4660
4661         rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT);
4662         if (rc) {
4663                 bnxt_check_fw_status(bp);
4664                 return rc;
4665         }
4666
4667         rc = bnxt_hwrm_func_reset(bp);
4668         if (rc)
4669                 return -EIO;
4670
4671         rc = bnxt_hwrm_vnic_qcaps(bp);
4672         if (rc)
4673                 return rc;
4674
4675         rc = bnxt_hwrm_queue_qportcfg(bp);
4676         if (rc)
4677                 return rc;
4678
4679         /* Get the MAX capabilities for this function.
4680          * This function also allocates context memory for TQM rings and
4681          * informs the firmware about this allocated backing store memory.
4682          */
4683         rc = bnxt_hwrm_func_qcaps(bp);
4684         if (rc)
4685                 return rc;
4686
4687         rc = bnxt_hwrm_func_qcfg(bp, &mtu);
4688         if (rc)
4689                 return rc;
4690
4691         bnxt_hwrm_port_mac_qcfg(bp);
4692
4693         bnxt_hwrm_parent_pf_qcfg(bp);
4694
4695         bnxt_hwrm_port_phy_qcaps(bp);
4696
4697         bnxt_alloc_error_recovery_info(bp);
4698         /* Get the adapter error recovery support info */
4699         rc = bnxt_hwrm_error_recovery_qcfg(bp);
4700         if (rc)
4701                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
4702
4703         bnxt_hwrm_port_led_qcaps(bp);
4704
4705         return 0;
4706 }
4707
4708 static int
4709 bnxt_init_locks(struct bnxt *bp)
4710 {
4711         int err;
4712
4713         err = pthread_mutex_init(&bp->flow_lock, NULL);
4714         if (err) {
4715                 PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n");
4716                 return err;
4717         }
4718
4719         err = pthread_mutex_init(&bp->def_cp_lock, NULL);
4720         if (err) {
4721                 PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n");
                     return err;
             }
4722
4723         err = pthread_mutex_init(&bp->health_check_lock, NULL);
4724         if (err)
4725                 PMD_DRV_LOG(ERR, "Unable to initialize health_check_lock\n");
4726         return err;
4727 }
4728
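     /* Initialize firmware and per-port resources: firmware handshake, MAC
      * setup (or restore when reconfiguring), VF request forwarding, driver
      * registration, VF/PF resource allocation, memory, interrupts, context
      * memory and locks.
      */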
4729 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
4730 {
4731         int rc = 0;
4732
4733         rc = bnxt_init_fw(bp);
4734         if (rc)
4735                 return rc;
4736
4737         if (!reconfig_dev) {
4738                 rc = bnxt_setup_mac_addr(bp->eth_dev);
4739                 if (rc)
4740                         return rc;
4741         } else {
4742                 rc = bnxt_restore_dflt_mac(bp);
4743                 if (rc)
4744                         return rc;
4745         }
4746
4747         bnxt_config_vf_req_fwd(bp);
4748
4749         rc = bnxt_hwrm_func_driver_register(bp);
4750         if (rc) {
4751                 PMD_DRV_LOG(ERR, "Failed to register driver");
4752                 return -EBUSY;
4753         }
4754
4755         if (BNXT_PF(bp)) {
4756                 if (bp->pdev->max_vfs) {
4757                         rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
4758                         if (rc) {
4759                                 PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
4760                                 return rc;
4761                         }
4762                 } else {
4763                         rc = bnxt_hwrm_allocate_pf_only(bp);
4764                         if (rc) {
4765                                 PMD_DRV_LOG(ERR,
4766                                             "Failed to allocate PF resources");
4767                                 return rc;
4768                         }
4769                 }
4770         }
4771
4772         rc = bnxt_alloc_mem(bp, reconfig_dev);
4773         if (rc)
4774                 return rc;
4775
4776         rc = bnxt_setup_int(bp);
4777         if (rc)
4778                 return rc;
4779
4780         rc = bnxt_request_int(bp);
4781         if (rc)
4782                 return rc;
4783
4784         rc = bnxt_init_ctx_mem(bp);
4785         if (rc) {
4786                 PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n");
4787                 return rc;
4788         }
4789
4790         rc = bnxt_init_locks(bp);
4791         if (rc)
4792                 return rc;
4793
4794         return 0;
4795 }
4796
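     /* The devarg handlers below are invoked through rte_kvargs_process() from
      * bnxt_parse_dev_args() and bnxt_rep_port_probe(). Each one validates its
      * value and stores it in the bnxt or representor private data.
      */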
4797 static int
4798 bnxt_parse_devarg_truflow(__rte_unused const char *key,
4799                           const char *value, void *opaque_arg)
4800 {
4801         struct bnxt *bp = opaque_arg;
4802         unsigned long truflow;
4803         char *end = NULL;
4804
4805         if (!value || !opaque_arg) {
4806                 PMD_DRV_LOG(ERR,
4807                             "Invalid parameter passed to truflow devargs.\n");
4808                 return -EINVAL;
4809         }
4810
4811         truflow = strtoul(value, &end, 10);
4812         if (end == NULL || *end != '\0' ||
4813             (truflow == ULONG_MAX && errno == ERANGE)) {
4814                 PMD_DRV_LOG(ERR,
4815                             "Invalid parameter passed to truflow devargs.\n");
4816                 return -EINVAL;
4817         }
4818
4819         if (BNXT_DEVARG_TRUFLOW_INVALID(truflow)) {
4820                 PMD_DRV_LOG(ERR,
4821                             "Invalid value passed to truflow devargs.\n");
4822                 return -EINVAL;
4823         }
4824
4825         if (truflow) {
4826                 bp->flags |= BNXT_FLAG_TRUFLOW_EN;
4827                 PMD_DRV_LOG(INFO, "Host-based truflow feature enabled.\n");
4828         } else {
4829                 bp->flags &= ~BNXT_FLAG_TRUFLOW_EN;
4830                 PMD_DRV_LOG(INFO, "Host-based truflow feature disabled.\n");
4831         }
4832
4833         return 0;
4834 }
4835
4836 static int
4837 bnxt_parse_devarg_flow_xstat(__rte_unused const char *key,
4838                              const char *value, void *opaque_arg)
4839 {
4840         struct bnxt *bp = opaque_arg;
4841         unsigned long flow_xstat;
4842         char *end = NULL;
4843
4844         if (!value || !opaque_arg) {
4845                 PMD_DRV_LOG(ERR,
4846                             "Invalid parameter passed to flow_xstat devarg.\n");
4847                 return -EINVAL;
4848         }
4849
4850         flow_xstat = strtoul(value, &end, 10);
4851         if (end == NULL || *end != '\0' ||
4852             (flow_xstat == ULONG_MAX && errno == ERANGE)) {
4853                 PMD_DRV_LOG(ERR,
4854                             "Invalid parameter passed to flow_xstat devarg.\n");
4855                 return -EINVAL;
4856         }
4857
4858         if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) {
4859                 PMD_DRV_LOG(ERR,
4860                             "Invalid value passed to flow_xstat devarg.\n");
4861                 return -EINVAL;
4862         }
4863
4864         bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN;
4865         if (BNXT_FLOW_XSTATS_EN(bp))
4866                 PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n");
4867
4868         return 0;
4869 }
4870
4871 static int
4872 bnxt_parse_devarg_max_num_kflows(__rte_unused const char *key,
4873                                         const char *value, void *opaque_arg)
4874 {
4875         struct bnxt *bp = opaque_arg;
4876         unsigned long max_num_kflows;
4877         char *end = NULL;
4878
4879         if (!value || !opaque_arg) {
4880                 PMD_DRV_LOG(ERR,
4881                         "Invalid parameter passed to max_num_kflows devarg.\n");
4882                 return -EINVAL;
4883         }
4884
4885         max_num_kflows = strtoul(value, &end, 10);
4886         if (end == NULL || *end != '\0' ||
4887                 (max_num_kflows == ULONG_MAX && errno == ERANGE)) {
4888                 PMD_DRV_LOG(ERR,
4889                         "Invalid parameter passed to max_num_kflows devarg.\n");
4890                 return -EINVAL;
4891         }
4892
4893         if (bnxt_devarg_max_num_kflow_invalid(max_num_kflows)) {
4894                 PMD_DRV_LOG(ERR,
4895                         "Invalid value passed to max_num_kflows devarg.\n");
4896                 return -EINVAL;
4897         }
4898
4899         bp->max_num_kflows = max_num_kflows;
4900         if (bp->max_num_kflows)
4901                 PMD_DRV_LOG(INFO, "max_num_kflows set as %ldK.\n",
4902                                 max_num_kflows);
4903
4904         return 0;
4905 }
4906
4907 static int
4908 bnxt_parse_devarg_rep_is_pf(__rte_unused const char *key,
4909                             const char *value, void *opaque_arg)
4910 {
4911         struct bnxt_representor *vfr_bp = opaque_arg;
4912         unsigned long rep_is_pf;
4913         char *end = NULL;
4914
4915         if (!value || !opaque_arg) {
4916                 PMD_DRV_LOG(ERR,
4917                             "Invalid parameter passed to rep_is_pf devargs.\n");
4918                 return -EINVAL;
4919         }
4920
4921         rep_is_pf = strtoul(value, &end, 10);
4922         if (end == NULL || *end != '\0' ||
4923             (rep_is_pf == ULONG_MAX && errno == ERANGE)) {
4924                 PMD_DRV_LOG(ERR,
4925                             "Invalid parameter passed to rep_is_pf devargs.\n");
4926                 return -EINVAL;
4927         }
4928
4929         if (BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)) {
4930                 PMD_DRV_LOG(ERR,
4931                             "Invalid value passed to rep_is_pf devargs.\n");
4932                 return -EINVAL;
4933         }
4934
4935         vfr_bp->flags |= rep_is_pf;
4936         if (BNXT_REP_PF(vfr_bp))
4937                 PMD_DRV_LOG(INFO, "PF representor\n");
4938         else
4939                 PMD_DRV_LOG(INFO, "VF representor\n");
4940
4941         return 0;
4942 }
4943
4944 static int
4945 bnxt_parse_devarg_rep_based_pf(__rte_unused const char *key,
4946                                const char *value, void *opaque_arg)
4947 {
4948         struct bnxt_representor *vfr_bp = opaque_arg;
4949         unsigned long rep_based_pf;
4950         char *end = NULL;
4951
4952         if (!value || !opaque_arg) {
4953                 PMD_DRV_LOG(ERR,
4954                             "Invalid parameter passed to rep_based_pf "
4955                             "devargs.\n");
4956                 return -EINVAL;
4957         }
4958
4959         rep_based_pf = strtoul(value, &end, 10);
4960         if (end == NULL || *end != '\0' ||
4961             (rep_based_pf == ULONG_MAX && errno == ERANGE)) {
4962                 PMD_DRV_LOG(ERR,
4963                             "Invalid parameter passed to rep_based_pf "
4964                             "devargs.\n");
4965                 return -EINVAL;
4966         }
4967
4968         if (BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)) {
4969                 PMD_DRV_LOG(ERR,
4970                             "Invalid value passed to rep_based_pf devargs.\n");
4971                 return -EINVAL;
4972         }
4973
4974         vfr_bp->rep_based_pf = rep_based_pf;
4975         vfr_bp->flags |= BNXT_REP_BASED_PF_VALID;
4976
4977         PMD_DRV_LOG(INFO, "rep-based-pf = %d\n", vfr_bp->rep_based_pf);
4978
4979         return 0;
4980 }
4981
4982 static int
4983 bnxt_parse_devarg_rep_q_r2f(__rte_unused const char *key,
4984                             const char *value, void *opaque_arg)
4985 {
4986         struct bnxt_representor *vfr_bp = opaque_arg;
4987         unsigned long rep_q_r2f;
4988         char *end = NULL;
4989
4990         if (!value || !opaque_arg) {
4991                 PMD_DRV_LOG(ERR,
4992                             "Invalid parameter passed to rep_q_r2f "
4993                             "devargs.\n");
4994                 return -EINVAL;
4995         }
4996
4997         rep_q_r2f = strtoul(value, &end, 10);
4998         if (end == NULL || *end != '\0' ||
4999             (rep_q_r2f == ULONG_MAX && errno == ERANGE)) {
5000                 PMD_DRV_LOG(ERR,
5001                             "Invalid parameter passed to rep_q_r2f "
5002                             "devargs.\n");
5003                 return -EINVAL;
5004         }
5005
5006         if (BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)) {
5007                 PMD_DRV_LOG(ERR,
5008                             "Invalid value passed to rep_q_r2f devargs.\n");
5009                 return -EINVAL;
5010         }
5011
5012         vfr_bp->rep_q_r2f = rep_q_r2f;
5013         vfr_bp->flags |= BNXT_REP_Q_R2F_VALID;
5014         PMD_DRV_LOG(INFO, "rep-q-r2f = %d\n", vfr_bp->rep_q_r2f);
5015
5016         return 0;
5017 }
5018
5019 static int
5020 bnxt_parse_devarg_rep_q_f2r(__rte_unused const char *key,
5021                             const char *value, void *opaque_arg)
5022 {
5023         struct bnxt_representor *vfr_bp = opaque_arg;
5024         unsigned long rep_q_f2r;
5025         char *end = NULL;
5026
5027         if (!value || !opaque_arg) {
5028                 PMD_DRV_LOG(ERR,
5029                             "Invalid parameter passed to rep_q_f2r "
5030                             "devargs.\n");
5031                 return -EINVAL;
5032         }
5033
5034         rep_q_f2r = strtoul(value, &end, 10);
5035         if (end == NULL || *end != '\0' ||
5036             (rep_q_f2r == ULONG_MAX && errno == ERANGE)) {
5037                 PMD_DRV_LOG(ERR,
5038                             "Invalid parameter passed to rep_q_f2r "
5039                             "devargs.\n");
5040                 return -EINVAL;
5041         }
5042
5043         if (BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)) {
5044                 PMD_DRV_LOG(ERR,
5045                             "Invalid value passed to rep_q_f2r devargs.\n");
5046                 return -EINVAL;
5047         }
5048
5049         vfr_bp->rep_q_f2r = rep_q_f2r;
5050         vfr_bp->flags |= BNXT_REP_Q_F2R_VALID;
5051         PMD_DRV_LOG(INFO, "rep-q-f2r = %d\n", vfr_bp->rep_q_f2r);
5052
5053         return 0;
5054 }
5055
5056 static int
5057 bnxt_parse_devarg_rep_fc_r2f(__rte_unused const char *key,
5058                              const char *value, void *opaque_arg)
5059 {
5060         struct bnxt_representor *vfr_bp = opaque_arg;
5061         unsigned long rep_fc_r2f;
5062         char *end = NULL;
5063
5064         if (!value || !opaque_arg) {
5065                 PMD_DRV_LOG(ERR,
5066                             "Invalid parameter passed to rep_fc_r2f "
5067                             "devargs.\n");
5068                 return -EINVAL;
5069         }
5070
5071         rep_fc_r2f = strtoul(value, &end, 10);
5072         if (end == NULL || *end != '\0' ||
5073             (rep_fc_r2f == ULONG_MAX && errno == ERANGE)) {
5074                 PMD_DRV_LOG(ERR,
5075                             "Invalid parameter passed to rep_fc_r2f "
5076                             "devargs.\n");
5077                 return -EINVAL;
5078         }
5079
5080         if (BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)) {
5081                 PMD_DRV_LOG(ERR,
5082                             "Invalid value passed to rep_fc_r2f devargs.\n");
5083                 return -EINVAL;
5084         }
5085
5086         vfr_bp->flags |= BNXT_REP_FC_R2F_VALID;
5087         vfr_bp->rep_fc_r2f = rep_fc_r2f;
5088         PMD_DRV_LOG(INFO, "rep-fc-r2f = %lu\n", rep_fc_r2f);
5089
5090         return 0;
5091 }
5092
5093 static int
5094 bnxt_parse_devarg_rep_fc_f2r(__rte_unused const char *key,
5095                              const char *value, void *opaque_arg)
5096 {
5097         struct bnxt_representor *vfr_bp = opaque_arg;
5098         unsigned long rep_fc_f2r;
5099         char *end = NULL;
5100
5101         if (!value || !opaque_arg) {
5102                 PMD_DRV_LOG(ERR,
5103                             "Invalid parameter passed to rep_fc_f2r "
5104                             "devargs.\n");
5105                 return -EINVAL;
5106         }
5107
5108         rep_fc_f2r = strtoul(value, &end, 10);
5109         if (end == NULL || *end != '\0' ||
5110             (rep_fc_f2r == ULONG_MAX && errno == ERANGE)) {
5111                 PMD_DRV_LOG(ERR,
5112                             "Invalid parameter passed to rep_fc_f2r "
5113                             "devargs.\n");
5114                 return -EINVAL;
5115         }
5116
5117         if (BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)) {
5118                 PMD_DRV_LOG(ERR,
5119                             "Invalid value passed to rep_fc_f2r devargs.\n");
5120                 return -EINVAL;
5121         }
5122
5123         vfr_bp->flags |= BNXT_REP_FC_F2R_VALID;
5124         vfr_bp->rep_fc_f2r = rep_fc_f2r;
5125         PMD_DRV_LOG(INFO, "rep-fc-f2r = %lu\n", rep_fc_f2r);
5126
5127         return 0;
5128 }
5129
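     /* Parse the device arguments supplied on the application command line and
      * apply the bnxt-specific handlers (truflow, flow_xstat, max_num_kflows).
      */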
5130 static void
5131 bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs)
5132 {
5133         struct rte_kvargs *kvlist;
5134
5135         if (devargs == NULL)
5136                 return;
5137
5138         kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args);
5139         if (kvlist == NULL)
5140                 return;
5141
5142         /*
5143          * Handler for "host-based-truflow" devarg.
5144          * Invoked as for ex: "-w 0000:00:0d.0,host-based-truflow=1"
5145          */
5146         rte_kvargs_process(kvlist, BNXT_DEVARG_TRUFLOW,
5147                            bnxt_parse_devarg_truflow, bp);
5148
5149         /*
5150          * Handler for "flow_xstat" devarg.
5151          * Invoked as for ex: "-w 0000:00:0d.0,flow_xstat=1"
5152          */
5153         rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT,
5154                            bnxt_parse_devarg_flow_xstat, bp);
5155
5156         /*
5157          * Handler for "max_num_kflows" devarg.
5158          * Invoked as for ex: "-w 0000:00:0d.0,max_num_kflows=32"
5159          */
5160         rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS,
5161                            bnxt_parse_devarg_max_num_kflows, bp);
5162
5163         rte_kvargs_free(kvlist);
5164 }
5165
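     /* Allocate a switch domain ID for PFs and trusted VFs; representor ports
      * created later inherit this ID.
      */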
5166 static int bnxt_alloc_switch_domain(struct bnxt *bp)
5167 {
5168         int rc = 0;
5169
5170         if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
5171                 rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id);
5172                 if (rc)
5173                         PMD_DRV_LOG(ERR,
5174                                     "Failed to alloc switch domain: %d\n", rc);
5175                 else
5176                         PMD_DRV_LOG(INFO,
5177                                     "Switch domain allocated %d\n",
5178                                     bp->switch_domain_id);
5179         }
5180
5181         return rc;
5182 }
5183
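     /* ethdev init callback: install device ops and burst handlers, and, in
      * the primary process only, parse devargs, detect the device flavor,
      * initialize the board and allocate and initialize all per-port
      * resources.
      */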
5184 static int
5185 bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused)
5186 {
5187         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
5188         static int version_printed;
5189         struct bnxt *bp;
5190         int rc;
5191
5192         if (version_printed++ == 0)
5193                 PMD_DRV_LOG(INFO, "%s\n", bnxt_version);
5194
5195         eth_dev->dev_ops = &bnxt_dev_ops;
5196         eth_dev->rx_queue_count = bnxt_rx_queue_count_op;
5197         eth_dev->rx_descriptor_status = bnxt_rx_descriptor_status_op;
5198         eth_dev->tx_descriptor_status = bnxt_tx_descriptor_status_op;
5199         eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
5200         eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
5201
5202         /*
5203          * For secondary processes, we don't initialise any further
5204          * as primary has already done this work.
5205          */
5206         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
5207                 return 0;
5208
5209         rte_eth_copy_pci_info(eth_dev, pci_dev);
5210         eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
5211
5212         bp = eth_dev->data->dev_private;
5213
5214         /* Parse dev arguments passed on when starting the DPDK application. */
5215         bnxt_parse_dev_args(bp, pci_dev->device.devargs);
5216
5217         bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
5218
5219         if (bnxt_vf_pciid(pci_dev->id.device_id))
5220                 bp->flags |= BNXT_FLAG_VF;
5221
5222         if (bnxt_thor_device(pci_dev->id.device_id))
5223                 bp->flags |= BNXT_FLAG_THOR_CHIP;
5224
5225         if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 ||
5226             pci_dev->id.device_id == BROADCOM_DEV_ID_58804 ||
5227             pci_dev->id.device_id == BROADCOM_DEV_ID_58808 ||
5228             pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF)
5229                 bp->flags |= BNXT_FLAG_STINGRAY;
5230
5231         if (BNXT_TRUFLOW_EN(bp)) {
5232                 /* extra mbuf field is required to store CFA code from mark */
5233                 static const struct rte_mbuf_dynfield bnxt_cfa_code_dynfield_desc = {
5234                         .name = RTE_PMD_BNXT_CFA_CODE_DYNFIELD_NAME,
5235                         .size = sizeof(bnxt_cfa_code_dynfield_t),
5236                         .align = __alignof__(bnxt_cfa_code_dynfield_t),
5237                 };
5238                 bnxt_cfa_code_dynfield_offset =
5239                         rte_mbuf_dynfield_register(&bnxt_cfa_code_dynfield_desc);
5240                 if (bnxt_cfa_code_dynfield_offset < 0) {
5241                         PMD_DRV_LOG(ERR,
5242                             "Failed to register mbuf field for TruFlow mark\n");
5243                         return -rte_errno;
5244                 }
5245         }
5246
5247         rc = bnxt_init_board(eth_dev);
5248         if (rc) {
5249                 PMD_DRV_LOG(ERR,
5250                             "Failed to initialize board rc: %x\n", rc);
5251                 return rc;
5252         }
5253
5254         rc = bnxt_alloc_pf_info(bp);
5255         if (rc)
5256                 goto error_free;
5257
5258         rc = bnxt_alloc_link_info(bp);
5259         if (rc)
5260                 goto error_free;
5261
5262         rc = bnxt_alloc_parent_info(bp);
5263         if (rc)
5264                 goto error_free;
5265
5266         rc = bnxt_alloc_hwrm_resources(bp);
5267         if (rc) {
5268                 PMD_DRV_LOG(ERR,
5269                             "Failed to allocate hwrm resource rc: %x\n", rc);
5270                 goto error_free;
5271         }
5272         rc = bnxt_alloc_leds_info(bp);
5273         if (rc)
5274                 goto error_free;
5275
5276         rc = bnxt_alloc_cos_queues(bp);
5277         if (rc)
5278                 goto error_free;
5279
5280         rc = bnxt_init_resources(bp, false);
5281         if (rc)
5282                 goto error_free;
5283
5284         rc = bnxt_alloc_stats_mem(bp);
5285         if (rc)
5286                 goto error_free;
5287
5288         bnxt_alloc_switch_domain(bp);
5289
5290         PMD_DRV_LOG(INFO,
5291                     DRV_MODULE_NAME " found at mem %" PRIX64 ", node addr %pM\n",
5292                     pci_dev->mem_resource[0].phys_addr,
5293                     pci_dev->mem_resource[0].addr);
5294
5295         return 0;
5296
5297 error_free:
5298         bnxt_dev_uninit(eth_dev);
5299         return rc;
5300 }
5301
5302
5303 static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx)
5304 {
5305         if (!ctx)
5306                 return;
5307
5308         if (ctx->va)
5309                 rte_free(ctx->va);
5310
5311         ctx->va = NULL;
5312         ctx->dma = RTE_BAD_IOVA;
5313         ctx->ctx_id = BNXT_CTX_VAL_INVAL;
5314 }
5315
5316 static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp)
5317 {
5318         bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
5319                                   CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
5320                                   bp->flow_stat->rx_fc_out_tbl.ctx_id,
5321                                   bp->flow_stat->max_fc,
5322                                   false);
5323
5324         bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
5325                                   CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
5326                                   bp->flow_stat->tx_fc_out_tbl.ctx_id,
5327                                   bp->flow_stat->max_fc,
5328                                   false);
5329
5330         if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5331                 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id);
5332         bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5333
5334         if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5335                 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id);
5336         bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5337
5338         if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5339                 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id);
5340         bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5341
5342         if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5343                 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id);
5344         bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5345 }
5346
5347 static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp)
5348 {
5349         bnxt_unregister_fc_ctx_mem(bp);
5350
5351         bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl);
5352         bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl);
5353         bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl);
5354         bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl);
5355 }
5356
5357 static void bnxt_uninit_ctx_mem(struct bnxt *bp)
5358 {
5359         if (BNXT_FLOW_XSTATS_EN(bp))
5360                 bnxt_uninit_fc_ctx_mem(bp);
5361 }
5362
5363 static void
5364 bnxt_free_error_recovery_info(struct bnxt *bp)
5365 {
5366         rte_free(bp->recovery_info);
5367         bp->recovery_info = NULL;
5368         bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
5369 }
5370
5371 static void
5372 bnxt_uninit_locks(struct bnxt *bp)
5373 {
5374         pthread_mutex_destroy(&bp->flow_lock);
5375         pthread_mutex_destroy(&bp->def_cp_lock);
5376         pthread_mutex_destroy(&bp->health_check_lock);
5377         if (bp->rep_info) {
5378                 pthread_mutex_destroy(&bp->rep_info->vfr_lock);
5379                 pthread_mutex_destroy(&bp->rep_info->vfr_start_lock);
5380         }
5381 }
5382
5383 static int
5384 bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
5385 {
5386         int rc;
5387
5388         bnxt_free_int(bp);
5389         bnxt_free_mem(bp, reconfig_dev);
5390
5391         bnxt_hwrm_func_buf_unrgtr(bp);
5392         rte_free(bp->pf->vf_req_buf);
5393
5394         rc = bnxt_hwrm_func_driver_unregister(bp, 0);
5395         bp->flags &= ~BNXT_FLAG_REGISTERED;
5396         bnxt_free_ctx_mem(bp);
5397         if (!reconfig_dev) {
5398                 bnxt_free_hwrm_resources(bp);
5399                 bnxt_free_error_recovery_info(bp);
5400         }
5401
5402         bnxt_uninit_ctx_mem(bp);
5403
5404         bnxt_uninit_locks(bp);
5405         bnxt_free_flow_stats_info(bp);
5406         bnxt_free_rep_info(bp);
5407         rte_free(bp->ptp_cfg);
5408         bp->ptp_cfg = NULL;
5409         return rc;
5410 }
5411
5412 static int
5413 bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
5414 {
5415         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
5416                 return -EPERM;
5417
5418         PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");
5419
5420         if (eth_dev->state != RTE_ETH_DEV_UNUSED)
5421                 bnxt_dev_close_op(eth_dev);
5422
5423         return 0;
5424 }
5425
5426 static int bnxt_pci_remove_dev_with_reps(struct rte_eth_dev *eth_dev)
5427 {
5428         struct bnxt *bp = eth_dev->data->dev_private;
5429         struct rte_eth_dev *vf_rep_eth_dev;
5430         int ret = 0, i;
5431
5432         if (!bp)
5433                 return -EINVAL;
5434
5435         for (i = 0; i < bp->num_reps; i++) {
5436                 vf_rep_eth_dev = bp->rep_info[i].vfr_eth_dev;
5437                 if (!vf_rep_eth_dev)
5438                         continue;
5439                 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci remove\n",
5440                             vf_rep_eth_dev->data->port_id);
5441                 rte_eth_dev_destroy(vf_rep_eth_dev, bnxt_representor_uninit);
5442         }
5443         PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n",
5444                     eth_dev->data->port_id);
5445         ret = rte_eth_dev_destroy(eth_dev, bnxt_dev_uninit);
5446
5447         return ret;
5448 }
5449
5450 static void bnxt_free_rep_info(struct bnxt *bp)
5451 {
5452         rte_free(bp->rep_info);
5453         bp->rep_info = NULL;
5454         rte_free(bp->cfa_code_map);
5455         bp->cfa_code_map = NULL;
5456 }
5457
5458 static int bnxt_init_rep_info(struct bnxt *bp)
5459 {
5460         int i = 0, rc;
5461
5462         if (bp->rep_info)
5463                 return 0;
5464
5465         bp->rep_info = rte_zmalloc("bnxt_rep_info",
5466                                    sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS,
5467                                    0);
5468         if (!bp->rep_info) {
5469                 PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n");
5470                 return -ENOMEM;
5471         }
5472         bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map",
5473                                        sizeof(*bp->cfa_code_map) *
5474                                        BNXT_MAX_CFA_CODE, 0);
5475         if (!bp->cfa_code_map) {
5476                 PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n");
5477                 bnxt_free_rep_info(bp);
5478                 return -ENOMEM;
5479         }
5480
5481         for (i = 0; i < BNXT_MAX_CFA_CODE; i++)
5482                 bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID;
5483
5484         rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL);
5485         if (rc) {
5486                 PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n");
5487                 bnxt_free_rep_info(bp);
5488                 return rc;
5489         }
5490
5491         rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL);
5492         if (rc) {
5493                 PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n");
5494                 bnxt_free_rep_info(bp);
5495                 return rc;
5496         }
5497
5498         return rc;
5499 }
5500
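     /* Create one representor ethdev per requested representor port on top of
      * the backing PF or trusted VF, applying representor-specific devargs.
      * On failure, representor ports already created are rolled back.
      */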
5501 static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev,
5502                                struct rte_eth_devargs eth_da,
5503                                struct rte_eth_dev *backing_eth_dev,
5504                                const char *dev_args)
5505 {
5506         struct rte_eth_dev *vf_rep_eth_dev;
5507         char name[RTE_ETH_NAME_MAX_LEN];
5508         struct bnxt *backing_bp;
5509         uint16_t num_rep;
5510         int i, ret = 0;
5511         struct rte_kvargs *kvlist = NULL;
5512
5513         num_rep = eth_da.nb_representor_ports;
5514         if (num_rep > BNXT_MAX_VF_REPS) {
5515                 PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n",
5516                             num_rep, BNXT_MAX_VF_REPS);
5517                 return -EINVAL;
5518         }
5519
5520         if (num_rep >= RTE_MAX_ETHPORTS) {
5521                 PMD_DRV_LOG(ERR,
5522                             "nb_representor_ports = %d > %d MAX ETHPORTS\n",
5523                             num_rep, RTE_MAX_ETHPORTS);
5524                 return -EINVAL;
5525         }
5526
5527         backing_bp = backing_eth_dev->data->dev_private;
5528
5529         if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) {
5530                 PMD_DRV_LOG(ERR,
5531                             "Not a PF or trusted VF. No Representor support\n");
5532                 /* Returning an error is not an option.
5533                  * Applications are not handling this correctly
5534                  */
5535                 return 0;
5536         }
5537
5538         if (bnxt_init_rep_info(backing_bp))
5539                 return 0;
5540
5541         for (i = 0; i < num_rep; i++) {
5542                 struct bnxt_representor representor = {
5543                         .vf_id = eth_da.representor_ports[i],
5544                         .switch_domain_id = backing_bp->switch_domain_id,
5545                         .parent_dev = backing_eth_dev
5546                 };
5547
5548                 if (representor.vf_id >= BNXT_MAX_VF_REPS) {
5549                         PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n",
5550                                     representor.vf_id, BNXT_MAX_VF_REPS);
5551                         continue;
5552                 }
5553
5554                 /* representor port net_bdf_port */
5555                 snprintf(name, sizeof(name), "net_%s_representor_%d",
5556                          pci_dev->device.name, eth_da.representor_ports[i]);
5557
5558                 kvlist = rte_kvargs_parse(dev_args, bnxt_dev_args);
5559                 if (kvlist) {
5560                         /*
5561                          * Handler for "rep_is_pf" devarg.
5562                          * Invoked as for ex: "-w 000:00:0d.0,
5563                          * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>"
5564                          */
5565                         ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_IS_PF,
5566                                                  bnxt_parse_devarg_rep_is_pf,
5567                                                  (void *)&representor);
5568                         if (ret) {
5569                                 ret = -EINVAL;
5570                                 goto err;
5571                         }
5572                         /*
5573                          * Handler for "rep_based_pf" devarg.
5574                          * Invoked as for ex: "-w 000:00:0d.0,
5575                          * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>"
5576                          */
5577                         ret = rte_kvargs_process(kvlist,
5578                                                  BNXT_DEVARG_REP_BASED_PF,
5579                                                  bnxt_parse_devarg_rep_based_pf,
5580                                                  (void *)&representor);
5581                         if (ret) {
5582                                 ret = -EINVAL;
5583                                 goto err;
5584                         }
5585                         /*
5586                          * Handler for "rep_q_r2f" devarg.
5587                          * Invoked as for ex: "-w 0000:00:0d.0,
5588                          * rep-q-r2f=<value>"
5589                          */
5590                         ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_R2F,
5591                                                  bnxt_parse_devarg_rep_q_r2f,
5592                                                  (void *)&representor);
5593                         if (ret) {
5594                                 ret = -EINVAL;
5595                                 goto err;
5596                         }
5597                         /*
5598                          * Handler for "rep_q_f2r" devarg.
5599                          * Invoked as for ex: "-w 0000:00:0d.0,
5600                          * rep-q-f2r=<value>"
5601                          */
5602                         ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_F2R,
5603                                                  bnxt_parse_devarg_rep_q_f2r,
5604                                                  (void *)&representor);
5605                         if (ret) {
5606                                 ret = -EINVAL;
5607                                 goto err;
5608                         }
5609                         /*
5610                          * Handler for "rep_fc_r2f" devarg.
5611                          * Invoked as for ex: "-w 0000:00:0d.0,
5612                          * rep-fc-r2f=<value>"
5613                          */
5614                         ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_R2F,
5615                                                  bnxt_parse_devarg_rep_fc_r2f,
5616                                                  (void *)&representor);
5617                         if (ret) {
5618                                 ret = -EINVAL;
5619                                 goto err;
5620                         }
5621                         /*
5622                          * Handler for "rep_fc_f2r" devarg.
5623                          * Invoked as for ex: "-w 0000:00:0d.0,
5624                          * rep-fc-f2r=<value>"
5625                          */
5626                         ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_F2R,
5627                                                  bnxt_parse_devarg_rep_fc_f2r,
5628                                                  (void *)&representor);
5629                         if (ret) {
5630                                 ret = -EINVAL;
5631                                 goto err;
5632                         }
5633                 }
5634
5635                 ret = rte_eth_dev_create(&pci_dev->device, name,
5636                                          sizeof(struct bnxt_representor),
5637                                          NULL, NULL,
5638                                          bnxt_representor_init,
5639                                          &representor);
5640                 if (ret) {
5641                         PMD_DRV_LOG(ERR, "failed to create bnxt vf "
5642                                     "representor %s.", name);
5643                         goto err;
5644                 }
5645
5646                 vf_rep_eth_dev = rte_eth_dev_allocated(name);
5647                 if (!vf_rep_eth_dev) {
5648                         PMD_DRV_LOG(ERR, "Failed to find the eth_dev"
5649                                     " for VF-Rep: %s.", name);
5650                         ret = -ENODEV;
5651                         goto err;
5652                 }
5653
5654                 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci probe\n",
5655                             backing_eth_dev->data->port_id);
5656                 backing_bp->rep_info[representor.vf_id].vfr_eth_dev =
5657                                                          vf_rep_eth_dev;
5658                 backing_bp->num_reps++;
5659
5660         }
5661
5662         rte_kvargs_free(kvlist);
5663         return 0;
5664
5665 err:
5666         /* If num_rep > 1, then rollback already created
5667          * ports, since we'll be failing the probe anyway
5668          */
5669         if (num_rep > 1)
5670                 bnxt_pci_remove_dev_with_reps(backing_eth_dev);
5671         rte_errno = -ret;
5672         rte_kvargs_free(kvlist);
5673
5674         return ret;
5675 }
5676
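     /* PCI probe entry point: create the backing PF/trusted VF ethdev if it
      * does not exist yet, then probe any requested representor ports.
      */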
5677 static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
5678                           struct rte_pci_device *pci_dev)
5679 {
5680         struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
5681         struct rte_eth_dev *backing_eth_dev;
5682         uint16_t num_rep;
5683         int ret = 0;
5684
5685         if (pci_dev->device.devargs) {
5686                 ret = rte_eth_devargs_parse(pci_dev->device.devargs->args,
5687                                             &eth_da);
5688                 if (ret)
5689                         return ret;
5690         }
5691
5692         num_rep = eth_da.nb_representor_ports;
5693         PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n",
5694                     num_rep);
5695
5696         /* We could come here after first level of probe is already invoked
5697          * as part of an application bringup (OVS-DPDK vswitchd), so first check
5698          * for already allocated eth_dev for the backing device (PF/Trusted VF)
5699          */
5700         backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
5701         if (backing_eth_dev == NULL) {
5702                 ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
5703                                          sizeof(struct bnxt),
5704                                          eth_dev_pci_specific_init, pci_dev,
5705                                          bnxt_dev_init, NULL);
5706
5707                 if (ret || !num_rep)
5708                         return ret;
5709
5710                 backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
5711         }
5712         PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci probe\n",
5713                     backing_eth_dev->data->port_id);
5714
5715         if (!num_rep)
5716                 return ret;
5717
5718         /* probe representor ports now */
5719         ret = bnxt_rep_port_probe(pci_dev, eth_da, backing_eth_dev,
5720                                   pci_dev->device.devargs->args);
5721
5722         return ret;
5723 }
5724
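     /* PCI remove entry point: destroy the ethdev associated with the device,
      * using the representor or PF/VF uninit path as appropriate.
      */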
5725 static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
5726 {
5727         struct rte_eth_dev *eth_dev;
5728
5729         eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
5730         if (!eth_dev)
5731                 return 0; /* Invoked typically only by OVS-DPDK, by the
5732                            * time it comes here the eth_dev is already
5733                            * deleted by rte_eth_dev_close(), so returning
5734                            * success here lets the removal complete cleanly
5735                            */
5736
5737         PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", eth_dev->data->port_id);
5738         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
5739                 if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
5740                         return rte_eth_dev_destroy(eth_dev,
5741                                                    bnxt_representor_uninit);
5742                 else
5743                         return rte_eth_dev_destroy(eth_dev,
5744                                                    bnxt_dev_uninit);
5745         } else {
5746                 return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
5747         }
5748 }
5749
5750 static struct rte_pci_driver bnxt_rte_pmd = {
5751         .id_table = bnxt_pci_id_map,
5752         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
5753                         RTE_PCI_DRV_PROBE_AGAIN, /* Needed in case of VF-REPs
5754                                                   * and OVS-DPDK
5755                                                   */
5756         .probe = bnxt_pci_probe,
5757         .remove = bnxt_pci_remove,
5758 };
5759
5760 static bool
5761 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
5762 {
5763         if (strcmp(dev->device->driver->name, drv->driver.name))
5764                 return false;
5765
5766         return true;
5767 }
5768
5769 bool is_bnxt_supported(struct rte_eth_dev *dev)
5770 {
5771         return is_device_supported(dev, &bnxt_rte_pmd);
5772 }
5773
5774 RTE_LOG_REGISTER(bnxt_logtype_driver, pmd.net.bnxt.driver, NOTICE);
5775 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
5776 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
5777 RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");