bb6b25dff2b90225fc41f02bdddb328099da3ece
dpdk.git: drivers/net/bnxt/bnxt_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2018 Broadcom
3  * All rights reserved.
4  */
5
6 #include <inttypes.h>
7 #include <stdbool.h>
8
9 #include <rte_dev.h>
10 #include <rte_ethdev_driver.h>
11 #include <rte_ethdev_pci.h>
12 #include <rte_malloc.h>
13 #include <rte_cycles.h>
14 #include <rte_alarm.h>
15 #include <rte_kvargs.h>
16
17 #include "bnxt.h"
18 #include "bnxt_filter.h"
19 #include "bnxt_hwrm.h"
20 #include "bnxt_irq.h"
21 #include "bnxt_reps.h"
22 #include "bnxt_ring.h"
23 #include "bnxt_rxq.h"
24 #include "bnxt_rxr.h"
25 #include "bnxt_stats.h"
26 #include "bnxt_txq.h"
27 #include "bnxt_txr.h"
28 #include "bnxt_vnic.h"
29 #include "hsi_struct_def_dpdk.h"
30 #include "bnxt_nvm_defs.h"
31 #include "bnxt_tf_common.h"
32 #include "ulp_flow_db.h"
33
34 #define DRV_MODULE_NAME         "bnxt"
35 static const char bnxt_version[] =
36         "Broadcom NetXtreme driver " DRV_MODULE_NAME;
37
38 /*
39  * The set of PCI devices this driver supports
40  */
41 static const struct rte_pci_id bnxt_pci_id_map[] = {
42         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
43                          BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
44         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
45                          BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
46         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
47         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
48         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
49         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
50         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
51         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
52         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
53         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
54         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
55         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
56         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
57         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
58         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
59         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
60         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
61         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
62         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
63         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
64         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
65         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
66         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
67         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
68         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
69         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
70         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
71         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
72         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
73         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
74         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
75         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
76         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
77         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
78         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
79         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
80         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
81         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
82         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
83         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
84         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) },
85         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) },
86         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) },
87         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) },
88         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) },
89         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) },
90         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) },
91         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF1) },
92         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) },
93         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) },
94         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) },
95         { .vendor_id = 0, /* sentinel */ },
96 };
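/*
 * As with other PCI PMDs, this sentinel-terminated table is what the driver
 * typically exports via RTE_PMD_REGISTER_PCI_TABLE() so that the PCI bus can
 * match probed Broadcom devices against it.
 */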
97
98 #define BNXT_DEVARG_TRUFLOW     "host-based-truflow"
99 #define BNXT_DEVARG_FLOW_XSTAT  "flow-xstat"
100 #define BNXT_DEVARG_MAX_NUM_KFLOWS  "max-num-kflows"
101 #define BNXT_DEVARG_REPRESENTOR "representor"
102 #define BNXT_DEVARG_REP_BASED_PF  "rep-based-pf"
103 #define BNXT_DEVARG_REP_IS_PF  "rep-is-pf"
104 #define BNXT_DEVARG_REP_Q_R2F  "rep-q-r2f"
105 #define BNXT_DEVARG_REP_Q_F2R  "rep-q-f2r"
106 #define BNXT_DEVARG_REP_FC_R2F  "rep-fc-r2f"
107 #define BNXT_DEVARG_REP_FC_F2R  "rep-fc-f2r"
108
109 static const char *const bnxt_dev_args[] = {
110         BNXT_DEVARG_REPRESENTOR,
111         BNXT_DEVARG_TRUFLOW,
112         BNXT_DEVARG_FLOW_XSTAT,
113         BNXT_DEVARG_MAX_NUM_KFLOWS,
114         BNXT_DEVARG_REP_BASED_PF,
115         BNXT_DEVARG_REP_IS_PF,
116         BNXT_DEVARG_REP_Q_R2F,
117         BNXT_DEVARG_REP_Q_F2R,
118         BNXT_DEVARG_REP_FC_R2F,
119         BNXT_DEVARG_REP_FC_F2R,
120         NULL
121 };
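/*
 * Illustrative usage: these devargs are supplied per device with the EAL
 * device allow/whitelist option (the PCI address below is a placeholder),
 * e.g.
 *
 *	0000:0d:00.0,host-based-truflow=1,flow-xstat=1,max-num-kflows=64
 *
 * Each value is validated by the corresponding BNXT_DEVARG_*_INVALID()
 * macro or helper defined below.
 */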
122
123 /*
124  * truflow == false to disable the feature
125  * truflow == true to enable the feature
126  */
127 #define BNXT_DEVARG_TRUFLOW_INVALID(truflow)    ((truflow) > 1)
128
129 /*
130  * flow_xstat == false to disable the feature
131  * flow_xstat == true to enable the feature
132  */
133 #define BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)      ((flow_xstat) > 1)
134
135 /*
136  * rep_is_pf == false to indicate VF representor
137  * rep_is_pf == true to indicate PF representor
138  */
139 #define BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)        ((rep_is_pf) > 1)
140
141 /*
142  * rep_based_pf == Physical index of the PF
143  */
144 #define BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)  ((rep_based_pf) > 15)
145 /*
146  * rep_q_r2f == Logical COS Queue index for the rep to endpoint direction
147  */
148 #define BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)        ((rep_q_r2f) > 3)
149
150 /*
151  * rep_q_f2r == Logical COS Queue index for the endpoint to rep direction
152  */
153 #define BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)        ((rep_q_f2r) > 3)
154
155 /*
156  * rep_fc_r2f == Flow control for the representor to endpoint direction
157  */
158 #define BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)      ((rep_fc_r2f) > 1)
159
160 /*
161  * rep_fc_f2r == Flow control for the endpoint to representor direction
162  */
163 #define BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)      ((rep_fc_f2r) > 1)
164
165 /*
166  * max_num_kflows must be >= 32
167  * and must be a power-of-2 supported value
168  * return: 1 -> invalid
169  *         0 -> valid
170  */
171 static int bnxt_devarg_max_num_kflow_invalid(uint16_t max_num_kflows)
172 {
173         if (max_num_kflows < 32 || !rte_is_power_of_2(max_num_kflows))
174                 return 1;
175         return 0;
176 }
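/*
 * Example values (illustrative): 32, 64 and 128 pass this check;
 * 0, 16 and 48 are rejected.
 */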
177
178 static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
179 static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
180 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev);
181 static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev);
182 static void bnxt_cancel_fw_health_check(struct bnxt *bp);
183 static int bnxt_restore_vlan_filters(struct bnxt *bp);
184 static void bnxt_dev_recover(void *arg);
185 static void bnxt_free_error_recovery_info(struct bnxt *bp);
186 static void bnxt_free_rep_info(struct bnxt *bp);
187
188 int is_bnxt_in_error(struct bnxt *bp)
189 {
190         if (bp->flags & BNXT_FLAG_FATAL_ERROR)
191                 return -EIO;
192         if (bp->flags & BNXT_FLAG_FW_RESET)
193                 return -EBUSY;
194
195         return 0;
196 }
197
198 /***********************/
199
200 /*
201  * High level utility functions
202  */
203
204 static uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
205 {
206         if (!BNXT_CHIP_THOR(bp))
207                 return 1;
208
209         return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings,
210                                   BNXT_RSS_ENTRIES_PER_CTX_THOR) /
211                                     BNXT_RSS_ENTRIES_PER_CTX_THOR;
212 }
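/*
 * Worked example (assuming BNXT_RSS_ENTRIES_PER_CTX_THOR is 64): a Thor
 * port with 70 Rx rings needs RTE_ALIGN_MUL_CEIL(70, 64) / 64 = 128 / 64
 * = 2 RSS contexts.
 */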
213
214 uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
215 {
216         if (!BNXT_CHIP_THOR(bp))
217                 return HW_HASH_INDEX_SIZE;
218
219         return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR;
220 }
221
222 static void bnxt_free_parent_info(struct bnxt *bp)
223 {
224         rte_free(bp->parent);
225 }
226
227 static void bnxt_free_pf_info(struct bnxt *bp)
228 {
229         rte_free(bp->pf);
230 }
231
232 static void bnxt_free_link_info(struct bnxt *bp)
233 {
234         rte_free(bp->link_info);
235 }
236
237 static void bnxt_free_leds_info(struct bnxt *bp)
238 {
239         if (BNXT_VF(bp))
240                 return;
241
242         rte_free(bp->leds);
243         bp->leds = NULL;
244 }
245
246 static void bnxt_free_flow_stats_info(struct bnxt *bp)
247 {
248         rte_free(bp->flow_stat);
249         bp->flow_stat = NULL;
250 }
251
252 static void bnxt_free_cos_queues(struct bnxt *bp)
253 {
254         rte_free(bp->rx_cos_queue);
255         rte_free(bp->tx_cos_queue);
256 }
257
258 static void bnxt_free_mem(struct bnxt *bp, bool reconfig)
259 {
260         bnxt_free_filter_mem(bp);
261         bnxt_free_vnic_attributes(bp);
262         bnxt_free_vnic_mem(bp);
263
264         /* tx/rx rings are configured as part of *_queue_setup callbacks.
265          * If the number of rings changes across a FW update,
266          * we don't have much choice except to warn the user.
267          */
268         if (!reconfig) {
269                 bnxt_free_stats(bp);
270                 bnxt_free_tx_rings(bp);
271                 bnxt_free_rx_rings(bp);
272         }
273         bnxt_free_async_cp_ring(bp);
274         bnxt_free_rxtx_nq_ring(bp);
275
276         rte_free(bp->grp_info);
277         bp->grp_info = NULL;
278 }
279
280 static int bnxt_alloc_parent_info(struct bnxt *bp)
281 {
282         bp->parent = rte_zmalloc("bnxt_parent_info",
283                                  sizeof(struct bnxt_parent_info), 0);
284         if (bp->parent == NULL)
285                 return -ENOMEM;
286
287         return 0;
288 }
289
290 static int bnxt_alloc_pf_info(struct bnxt *bp)
291 {
292         bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0);
293         if (bp->pf == NULL)
294                 return -ENOMEM;
295
296         return 0;
297 }
298
299 static int bnxt_alloc_link_info(struct bnxt *bp)
300 {
301         bp->link_info =
302                 rte_zmalloc("bnxt_link_info", sizeof(struct bnxt_link_info), 0);
303         if (bp->link_info == NULL)
304                 return -ENOMEM;
305
306         return 0;
307 }
308
309 static int bnxt_alloc_leds_info(struct bnxt *bp)
310 {
311         if (BNXT_VF(bp))
312                 return 0;
313
314         bp->leds = rte_zmalloc("bnxt_leds",
315                                BNXT_MAX_LED * sizeof(struct bnxt_led_info),
316                                0);
317         if (bp->leds == NULL)
318                 return -ENOMEM;
319
320         return 0;
321 }
322
323 static int bnxt_alloc_cos_queues(struct bnxt *bp)
324 {
325         bp->rx_cos_queue =
326                 rte_zmalloc("bnxt_rx_cosq",
327                             BNXT_COS_QUEUE_COUNT *
328                             sizeof(struct bnxt_cos_queue_info),
329                             0);
330         if (bp->rx_cos_queue == NULL)
331                 return -ENOMEM;
332
333         bp->tx_cos_queue =
334                 rte_zmalloc("bnxt_tx_cosq",
335                             BNXT_COS_QUEUE_COUNT *
336                             sizeof(struct bnxt_cos_queue_info),
337                             0);
338         if (bp->tx_cos_queue == NULL)
339                 return -ENOMEM;
340
341         return 0;
342 }
343
344 static int bnxt_alloc_flow_stats_info(struct bnxt *bp)
345 {
346         bp->flow_stat = rte_zmalloc("bnxt_flow_xstat",
347                                     sizeof(struct bnxt_flow_stat_info), 0);
348         if (bp->flow_stat == NULL)
349                 return -ENOMEM;
350
351         return 0;
352 }
353
354 static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
355 {
356         int rc;
357
358         rc = bnxt_alloc_ring_grps(bp);
359         if (rc)
360                 goto alloc_mem_err;
361
362         rc = bnxt_alloc_async_ring_struct(bp);
363         if (rc)
364                 goto alloc_mem_err;
365
366         rc = bnxt_alloc_vnic_mem(bp);
367         if (rc)
368                 goto alloc_mem_err;
369
370         rc = bnxt_alloc_vnic_attributes(bp);
371         if (rc)
372                 goto alloc_mem_err;
373
374         rc = bnxt_alloc_filter_mem(bp);
375         if (rc)
376                 goto alloc_mem_err;
377
378         rc = bnxt_alloc_async_cp_ring(bp);
379         if (rc)
380                 goto alloc_mem_err;
381
382         rc = bnxt_alloc_rxtx_nq_ring(bp);
383         if (rc)
384                 goto alloc_mem_err;
385
386         if (BNXT_FLOW_XSTATS_EN(bp)) {
387                 rc = bnxt_alloc_flow_stats_info(bp);
388                 if (rc)
389                         goto alloc_mem_err;
390         }
391
392         return 0;
393
394 alloc_mem_err:
395         bnxt_free_mem(bp, reconfig);
396         return rc;
397 }
398
399 static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
400 {
401         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
402         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
403         uint64_t rx_offloads = dev_conf->rxmode.offloads;
404         struct bnxt_rx_queue *rxq;
405         unsigned int j;
406         int rc;
407
408         rc = bnxt_vnic_grp_alloc(bp, vnic);
409         if (rc)
410                 goto err_out;
411
412         PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
413                     vnic_id, vnic, vnic->fw_grp_ids);
414
415         rc = bnxt_hwrm_vnic_alloc(bp, vnic);
416         if (rc)
417                 goto err_out;
418
419         /* Alloc RSS context only if RSS mode is enabled */
420         if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
421                 int j, nr_ctxs = bnxt_rss_ctxts(bp);
422
423                 rc = 0;
424                 for (j = 0; j < nr_ctxs; j++) {
425                         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j);
426                         if (rc)
427                                 break;
428                 }
429                 if (rc) {
430                         PMD_DRV_LOG(ERR,
431                                     "HWRM vnic %d ctx %d alloc failure rc: %x\n",
432                                     vnic_id, j, rc);
433                         goto err_out;
434                 }
435                 vnic->num_lb_ctxts = nr_ctxs;
436         }
437
438         /*
439          * Firmware sets pf pair in default vnic cfg. If the VLAN strip
440          * setting is not available at this time, it will not be
441          * configured correctly in the CFA.
442          */
443         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
444                 vnic->vlan_strip = true;
445         else
446                 vnic->vlan_strip = false;
447
448         rc = bnxt_hwrm_vnic_cfg(bp, vnic);
449         if (rc)
450                 goto err_out;
451
452         rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
453         if (rc)
454                 goto err_out;
455
456         for (j = 0; j < bp->rx_num_qs_per_vnic; j++) {
457                 rxq = bp->eth_dev->data->rx_queues[j];
458
459                 PMD_DRV_LOG(DEBUG,
460                             "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n",
461                             j, rxq->vnic, rxq->vnic->fw_grp_ids);
462
463                 if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start)
464                         rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
465                 else
466                         vnic->rx_queue_cnt++;
467         }
468
469         PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt);
470
471         rc = bnxt_vnic_rss_configure(bp, vnic);
472         if (rc)
473                 goto err_out;
474
475         bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
476
477         if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
478                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
479         else
480                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
481
482         return 0;
483 err_out:
484         PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
485                     vnic_id, rc);
486         return rc;
487 }
488
489 static int bnxt_register_fc_ctx_mem(struct bnxt *bp)
490 {
491         int rc = 0;
492
493         rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma,
494                                 &bp->flow_stat->rx_fc_in_tbl.ctx_id);
495         if (rc)
496                 return rc;
497
498         PMD_DRV_LOG(DEBUG,
499                     "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p"
500                     " rx_fc_in_tbl.ctx_id = %d\n",
501                     bp->flow_stat->rx_fc_in_tbl.va,
502                     (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma),
503                     bp->flow_stat->rx_fc_in_tbl.ctx_id);
504
505         rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma,
506                                 &bp->flow_stat->rx_fc_out_tbl.ctx_id);
507         if (rc)
508                 return rc;
509
510         PMD_DRV_LOG(DEBUG,
511                     "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p"
512                     " rx_fc_out_tbl.ctx_id = %d\n",
513                     bp->flow_stat->rx_fc_out_tbl.va,
514                     (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma),
515                     bp->flow_stat->rx_fc_out_tbl.ctx_id);
516
517         rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma,
518                                 &bp->flow_stat->tx_fc_in_tbl.ctx_id);
519         if (rc)
520                 return rc;
521
522         PMD_DRV_LOG(DEBUG,
523                     "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p"
524                     " tx_fc_in_tbl.ctx_id = %d\n",
525                     bp->flow_stat->tx_fc_in_tbl.va,
526                     (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma),
527                     bp->flow_stat->tx_fc_in_tbl.ctx_id);
528
529         rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma,
530                                 &bp->flow_stat->tx_fc_out_tbl.ctx_id);
531         if (rc)
532                 return rc;
533
534         PMD_DRV_LOG(DEBUG,
535                     "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p"
536                     " tx_fc_out_tbl.ctx_id = %d\n",
537                     bp->flow_stat->tx_fc_out_tbl.va,
538                     (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma),
539                     bp->flow_stat->tx_fc_out_tbl.ctx_id);
540
541         memset(bp->flow_stat->rx_fc_out_tbl.va,
542                0,
543                bp->flow_stat->rx_fc_out_tbl.size);
544         rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
545                                        CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
546                                        bp->flow_stat->rx_fc_out_tbl.ctx_id,
547                                        bp->flow_stat->max_fc,
548                                        true);
549         if (rc)
550                 return rc;
551
552         memset(bp->flow_stat->tx_fc_out_tbl.va,
553                0,
554                bp->flow_stat->tx_fc_out_tbl.size);
555         rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
556                                        CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
557                                        bp->flow_stat->tx_fc_out_tbl.ctx_id,
558                                        bp->flow_stat->max_fc,
559                                        true);
560
561         return rc;
562 }
563
564 static int bnxt_alloc_ctx_mem_buf(char *type, size_t size,
565                                   struct bnxt_ctx_mem_buf_info *ctx)
566 {
567         if (!ctx)
568                 return -EINVAL;
569
570         ctx->va = rte_zmalloc(type, size, 0);
571         if (ctx->va == NULL)
572                 return -ENOMEM;
573         rte_mem_lock_page(ctx->va);
574         ctx->size = size;
575         ctx->dma = rte_mem_virt2iova(ctx->va);
576         if (ctx->dma == RTE_BAD_IOVA)
577                 return -ENOMEM;
578
579         return 0;
580 }
581
582 static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
583 {
584         struct rte_pci_device *pdev = bp->pdev;
585         char type[RTE_MEMZONE_NAMESIZE];
586         uint16_t max_fc;
587         int rc = 0;
588
589         max_fc = bp->flow_stat->max_fc;
590
591         sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
592                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
593         /* 4 bytes for each counter-id */
594         rc = bnxt_alloc_ctx_mem_buf(type,
595                                     max_fc * 4,
596                                     &bp->flow_stat->rx_fc_in_tbl);
597         if (rc)
598                 return rc;
599
600         sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
601                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
602         /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
603         rc = bnxt_alloc_ctx_mem_buf(type,
604                                     max_fc * 16,
605                                     &bp->flow_stat->rx_fc_out_tbl);
606         if (rc)
607                 return rc;
608
609         sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
610                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
611         /* 4 bytes for each counter-id */
612         rc = bnxt_alloc_ctx_mem_buf(type,
613                                     max_fc * 4,
614                                     &bp->flow_stat->tx_fc_in_tbl);
615         if (rc)
616                 return rc;
617
618         sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
619                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
620         /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
621         rc = bnxt_alloc_ctx_mem_buf(type,
622                                     max_fc * 16,
623                                     &bp->flow_stat->tx_fc_out_tbl);
624         if (rc)
625                 return rc;
626
627         rc = bnxt_register_fc_ctx_mem(bp);
628
629         return rc;
630 }
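/*
 * Sizing example (assuming max_fc is 4096 counters): each "in" table holds
 * 4-byte counter-ids (16 KiB) and each "out" table holds 16-byte
 * pkt_count/byte_count pairs (64 KiB), allocated once per direction.
 */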
631
632 static int bnxt_init_ctx_mem(struct bnxt *bp)
633 {
634         int rc = 0;
635
636         if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) ||
637             !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) ||
638             !BNXT_FLOW_XSTATS_EN(bp))
639                 return 0;
640
641         rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc);
642         if (rc)
643                 return rc;
644
645         rc = bnxt_init_fc_ctx_mem(bp);
646
647         return rc;
648 }
649
650 static int bnxt_update_phy_setting(struct bnxt *bp)
651 {
652         struct rte_eth_link new;
653         int rc;
654
655         rc = bnxt_get_hwrm_link_config(bp, &new);
656         if (rc) {
657                 PMD_DRV_LOG(ERR, "Failed to get link settings\n");
658                 return rc;
659         }
660
661         /*
662          * On BCM957508-N2100 adapters, FW will not allow any user other
663          * than BMC to shutdown the port. bnxt_get_hwrm_link_config() call
664          * always returns link up. Force phy update always in that case.
665          */
666         if (!new.link_status || IS_BNXT_DEV_957508_N2100(bp)) {
667                 rc = bnxt_set_hwrm_link_config(bp, true);
668                 if (rc) {
669                         PMD_DRV_LOG(ERR, "Failed to update PHY settings\n");
670                         return rc;
671                 }
672         }
673
674         return rc;
675 }
676
677 static int bnxt_init_chip(struct bnxt *bp)
678 {
679         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
680         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
681         uint32_t intr_vector = 0;
682         uint32_t queue_id, base = BNXT_MISC_VEC_ID;
683         uint32_t vec = BNXT_MISC_VEC_ID;
684         unsigned int i, j;
685         int rc;
686
687         if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
688                 bp->eth_dev->data->dev_conf.rxmode.offloads |=
689                         DEV_RX_OFFLOAD_JUMBO_FRAME;
690                 bp->flags |= BNXT_FLAG_JUMBO;
691         } else {
692                 bp->eth_dev->data->dev_conf.rxmode.offloads &=
693                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
694                 bp->flags &= ~BNXT_FLAG_JUMBO;
695         }
696
697         /* THOR does not support ring groups.
698          * But we will use the array to save RSS context IDs.
699          */
700         if (BNXT_CHIP_THOR(bp))
701                 bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;
702
703         rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
704         if (rc) {
705                 PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
706                 goto err_out;
707         }
708
709         rc = bnxt_alloc_hwrm_rings(bp);
710         if (rc) {
711                 PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
712                 goto err_out;
713         }
714
715         rc = bnxt_alloc_all_hwrm_ring_grps(bp);
716         if (rc) {
717                 PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
718                 goto err_out;
719         }
720
721         if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
722                 goto skip_cosq_cfg;
723
724         for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
725                 if (bp->rx_cos_queue[i].id != 0xff) {
726                         struct bnxt_vnic_info *vnic = &bp->vnic_info[j++];
727
728                         if (!vnic) {
729                                 PMD_DRV_LOG(ERR,
730                                             "Num pools more than FW profile\n");
731                                 rc = -EINVAL;
732                                 goto err_out;
733                         }
734                         vnic->cos_queue_id = bp->rx_cos_queue[i].id;
735                         bp->rx_cosq_cnt++;
736                 }
737         }
738
739 skip_cosq_cfg:
740         rc = bnxt_mq_rx_configure(bp);
741         if (rc) {
742                 PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
743                 goto err_out;
744         }
745
746         /* VNIC configuration */
747         for (i = 0; i < bp->nr_vnics; i++) {
748                 rc = bnxt_setup_one_vnic(bp, i);
749                 if (rc)
750                         goto err_out;
751         }
752
753         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
754         if (rc) {
755                 PMD_DRV_LOG(ERR,
756                         "HWRM cfa l2 rx mask failure rc: %x\n", rc);
757                 goto err_out;
758         }
759
760         /* check and configure queue intr-vector mapping */
761         if ((rte_intr_cap_multiple(intr_handle) ||
762              !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
763             bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
764                 intr_vector = bp->eth_dev->data->nb_rx_queues;
765                 PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
766                 if (intr_vector > bp->rx_cp_nr_rings) {
767                         PMD_DRV_LOG(ERR, "At most %d intr queues supported\n",
768                                         bp->rx_cp_nr_rings);
769                         return -ENOTSUP;
770                 }
771                 rc = rte_intr_efd_enable(intr_handle, intr_vector);
772                 if (rc)
773                         return rc;
774         }
775
776         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
777                 intr_handle->intr_vec =
778                         rte_zmalloc("intr_vec",
779                                     bp->eth_dev->data->nb_rx_queues *
780                                     sizeof(int), 0);
781                 if (intr_handle->intr_vec == NULL) {
782                         PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
783                                 " intr_vec\n", bp->eth_dev->data->nb_rx_queues);
784                         rc = -ENOMEM;
785                         goto err_disable;
786                 }
787                 PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
788                         "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
789                          intr_handle->intr_vec, intr_handle->nb_efd,
790                         intr_handle->max_intr);
791                 for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
792                      queue_id++) {
793                         intr_handle->intr_vec[queue_id] =
794                                                         vec + BNXT_RX_VEC_START;
795                         if (vec < base + intr_handle->nb_efd - 1)
796                                 vec++;
797                 }
798         }
799
800         /* enable uio/vfio intr/eventfd mapping */
801         rc = rte_intr_enable(intr_handle);
802 #ifndef RTE_EXEC_ENV_FREEBSD
803         /* In FreeBSD OS, nic_uio driver does not support interrupts */
804         if (rc)
805                 goto err_free;
806 #endif
807
808         rc = bnxt_update_phy_setting(bp);
809         if (rc)
810                 goto err_free;
811
812         bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0);
813         if (!bp->mark_table)
814                 PMD_DRV_LOG(ERR, "Allocation of mark table failed\n");
815
816         return 0;
817
818 err_free:
819         rte_free(intr_handle->intr_vec);
820 err_disable:
821         rte_intr_efd_disable(intr_handle);
822 err_out:
823         /* Some of the error status returned by FW may not be from errno.h */
824         if (rc > 0)
825                 rc = -EIO;
826
827         return rc;
828 }
829
830 static int bnxt_shutdown_nic(struct bnxt *bp)
831 {
832         bnxt_free_all_hwrm_resources(bp);
833         bnxt_free_all_filters(bp);
834         bnxt_free_all_vnics(bp);
835         return 0;
836 }
837
838 /*
839  * Device configuration and status function
840  */
841
842 uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
843 {
844         uint32_t link_speed = bp->link_info->support_speeds;
845         uint32_t speed_capa = 0;
846
847         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB)
848                 speed_capa |= ETH_LINK_SPEED_100M;
849         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD)
850                 speed_capa |= ETH_LINK_SPEED_100M_HD;
851         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB)
852                 speed_capa |= ETH_LINK_SPEED_1G;
853         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
854                 speed_capa |= ETH_LINK_SPEED_2_5G;
855         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB)
856                 speed_capa |= ETH_LINK_SPEED_10G;
857         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
858                 speed_capa |= ETH_LINK_SPEED_20G;
859         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
860                 speed_capa |= ETH_LINK_SPEED_25G;
861         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
862                 speed_capa |= ETH_LINK_SPEED_40G;
863         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
864                 speed_capa |= ETH_LINK_SPEED_50G;
865         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
866                 speed_capa |= ETH_LINK_SPEED_100G;
867         if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_200GB)
868                 speed_capa |= ETH_LINK_SPEED_200G;
869
870         if (bp->link_info->auto_mode ==
871             HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
872                 speed_capa |= ETH_LINK_SPEED_FIXED;
873         else
874                 speed_capa |= ETH_LINK_SPEED_AUTONEG;
875
876         return speed_capa;
877 }
878
879 static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
880                                 struct rte_eth_dev_info *dev_info)
881 {
882         struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
883         struct bnxt *bp = eth_dev->data->dev_private;
884         uint16_t max_vnics, i, j, vpool, vrxq;
885         unsigned int max_rx_rings;
886         int rc;
887
888         rc = is_bnxt_in_error(bp);
889         if (rc)
890                 return rc;
891
892         /* MAC Specifics */
893         dev_info->max_mac_addrs = bp->max_l2_ctx;
894         dev_info->max_hash_mac_addrs = 0;
895
896         /* PF/VF specifics */
897         if (BNXT_PF(bp))
898                 dev_info->max_vfs = pdev->max_vfs;
899
900         max_rx_rings = BNXT_MAX_RINGS(bp);
901         /* For the sake of symmetry, max_rx_queues = max_tx_queues */
902         dev_info->max_rx_queues = max_rx_rings;
903         dev_info->max_tx_queues = max_rx_rings;
904         dev_info->reta_size = bnxt_rss_hash_tbl_size(bp);
905         dev_info->hash_key_size = 40;
906         max_vnics = bp->max_vnics;
907
908         /* MTU specifics */
909         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
910         dev_info->max_mtu = BNXT_MAX_MTU;
911
912         /* Fast path specifics */
913         dev_info->min_rx_bufsize = 1;
914         dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;
915
916         dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
917         if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
918                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
919         dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
920         dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
921
922         dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
923
924         /* *INDENT-OFF* */
925         dev_info->default_rxconf = (struct rte_eth_rxconf) {
926                 .rx_thresh = {
927                         .pthresh = 8,
928                         .hthresh = 8,
929                         .wthresh = 0,
930                 },
931                 .rx_free_thresh = 32,
932                 .rx_drop_en = BNXT_DEFAULT_RX_DROP_EN,
933         };
934
935         dev_info->default_txconf = (struct rte_eth_txconf) {
936                 .tx_thresh = {
937                         .pthresh = 32,
938                         .hthresh = 0,
939                         .wthresh = 0,
940                 },
941                 .tx_free_thresh = 32,
942                 .tx_rs_thresh = 32,
943         };
944         eth_dev->data->dev_conf.intr_conf.lsc = 1;
945
946         eth_dev->data->dev_conf.intr_conf.rxq = 1;
947         dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
948         dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
949         dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
950         dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;
951
952         if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
953                 dev_info->switch_info.name = eth_dev->device->name;
954                 dev_info->switch_info.domain_id = bp->switch_domain_id;
955                 dev_info->switch_info.port_id =
956                                 BNXT_PF(bp) ? BNXT_SWITCH_PORT_ID_PF :
957                                     BNXT_SWITCH_PORT_ID_TRUSTED_VF;
958         }
959
960         /* *INDENT-ON* */
961
962         /*
963          * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
964          *       need further investigation.
965          */
966
967         /* VMDq resources */
968         vpool = 64; /* ETH_64_POOLS */
969         vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
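	/*
	 * Illustrative walk-through: with max_vnics = 128 and
	 * max_rx_queues = 128, the outer loop keeps vpool at 64, the inner
	 * loop halves vrxq until max_rx_queues exceeds it (vrxq = 64), and
	 * the port reports 64 VMDq pools with 64 VMDq queues.
	 */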
970         for (i = 0; i < 4; vpool >>= 1, i++) {
971                 if (max_vnics > vpool) {
972                         for (j = 0; j < 5; vrxq >>= 1, j++) {
973                                 if (dev_info->max_rx_queues > vrxq) {
974                                         if (vpool > vrxq)
975                                                 vpool = vrxq;
976                                         goto found;
977                                 }
978                         }
979                         /* Not enough resources to support VMDq */
980                         break;
981                 }
982         }
983         /* Not enough resources to support VMDq */
984         vpool = 0;
985         vrxq = 0;
986 found:
987         dev_info->max_vmdq_pools = vpool;
988         dev_info->vmdq_queue_num = vrxq;
989
990         dev_info->vmdq_pool_base = 0;
991         dev_info->vmdq_queue_base = 0;
992
993         return 0;
994 }
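/*
 * Illustrative application-side sketch of consuming the info reported above
 * through the public ethdev API (port id 0 is a placeholder):
 *
 *	struct rte_eth_dev_info info;
 *
 *	if (rte_eth_dev_info_get(0, &info) == 0 &&
 *	    (info.speed_capa & ETH_LINK_SPEED_25G))
 *		printf("port 0 supports 25 Gbps\n");
 */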
995
996 /* Configure the device based on the configuration provided */
997 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
998 {
999         struct bnxt *bp = eth_dev->data->dev_private;
1000         uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
1001         int rc;
1002
1003         bp->rx_queues = (void *)eth_dev->data->rx_queues;
1004         bp->tx_queues = (void *)eth_dev->data->tx_queues;
1005         bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
1006         bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
1007
1008         rc = is_bnxt_in_error(bp);
1009         if (rc)
1010                 return rc;
1011
1012         if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
1013                 rc = bnxt_hwrm_check_vf_rings(bp);
1014                 if (rc) {
1015                         PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
1016                         return -ENOSPC;
1017                 }
1018
1019                 /* If a resource has already been allocated (in this case
1020                  * the async completion ring), free it and reallocate it
1021                  * after resource reservation so that the resource counts
1022                  * are calculated correctly.
1023                  */
1024
1025                 pthread_mutex_lock(&bp->def_cp_lock);
1026
1027                 if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
1028                         bnxt_disable_int(bp);
1029                         bnxt_free_cp_ring(bp, bp->async_cp_ring);
1030                 }
1031
1032                 rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
1033                 if (rc) {
1034                         PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
1035                         pthread_mutex_unlock(&bp->def_cp_lock);
1036                         return -ENOSPC;
1037                 }
1038
1039                 if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
1040                         rc = bnxt_alloc_async_cp_ring(bp);
1041                         if (rc) {
1042                                 pthread_mutex_unlock(&bp->def_cp_lock);
1043                                 return rc;
1044                         }
1045                         bnxt_enable_int(bp);
1046                 }
1047
1048                 pthread_mutex_unlock(&bp->def_cp_lock);
1049         } else {
1050                 /* legacy driver needs to get updated values */
1051                 rc = bnxt_hwrm_func_qcaps(bp);
1052                 if (rc) {
1053                         PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
1054                         return rc;
1055                 }
1056         }
1057
1058         /* Inherit new configurations */
1059         if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
1060             eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
1061             eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues
1062                 + BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings ||
1063             eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
1064             bp->max_stat_ctx)
1065                 goto resource_error;
1066
1067         if (BNXT_HAS_RING_GRPS(bp) &&
1068             (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
1069                 goto resource_error;
1070
1071         if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
1072             bp->max_vnics < eth_dev->data->nb_rx_queues)
1073                 goto resource_error;
1074
1075         bp->rx_cp_nr_rings = bp->rx_nr_rings;
1076         bp->tx_cp_nr_rings = bp->tx_nr_rings;
1077
1078         if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1079                 rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1080         eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;
1081
1082         if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
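		/*
		 * Illustrative arithmetic: assuming RTE_ETHER_HDR_LEN is 14,
		 * RTE_ETHER_CRC_LEN is 4, VLAN_TAG_SIZE is 4 and
		 * BNXT_NUM_VLANS is 2, a max_rx_pkt_len of 9600 yields an
		 * MTU of 9600 - 14 - 4 - 8 = 9574.
		 */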
1083                 eth_dev->data->mtu =
1084                         eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
1085                         RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
1086                         BNXT_NUM_VLANS;
1087                 bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
1088         }
1089         return 0;
1090
1091 resource_error:
1092         PMD_DRV_LOG(ERR,
1093                     "Insufficient resources to support requested config\n");
1094         PMD_DRV_LOG(ERR,
1095                     "Num Queues Requested: Tx %d, Rx %d\n",
1096                     eth_dev->data->nb_tx_queues,
1097                     eth_dev->data->nb_rx_queues);
1098         PMD_DRV_LOG(ERR,
1099                     "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
1100                     bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
1101                     bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
1102         return -ENOSPC;
1103 }
1104
1105 void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
1106 {
1107         struct rte_eth_link *link = &eth_dev->data->dev_link;
1108
1109         if (link->link_status)
1110                 PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
1111                         eth_dev->data->port_id,
1112                         (uint32_t)link->link_speed,
1113                         (link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
1114                         ("full-duplex") : ("half-duplex"));
1115         else
1116                 PMD_DRV_LOG(INFO, "Port %d Link Down\n",
1117                         eth_dev->data->port_id);
1118 }
1119
1120 /*
1121  * Determine whether the current configuration requires support for scattered
1122  * receive; return 1 if scattered receive is required and 0 if not.
1123  */
1124 static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
1125 {
1126         uint16_t buf_size;
1127         int i;
1128
1129         if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
1130                 return 1;
1131
1132         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1133                 struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i];
1134
1135                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
1136                                       RTE_PKTMBUF_HEADROOM);
1137                 if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size)
1138                         return 1;
1139         }
1140         return 0;
1141 }
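/*
 * Illustrative example: with a 2176-byte mbuf data room and an assumed
 * RTE_PKTMBUF_HEADROOM of 128, buf_size is 2048, so any max_rx_pkt_len
 * larger than 2048 (e.g. a 9000-byte jumbo frame) forces scattered receive.
 */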
1142
1143 static eth_rx_burst_t
1144 bnxt_receive_function(struct rte_eth_dev *eth_dev)
1145 {
1146         struct bnxt *bp = eth_dev->data->dev_private;
1147
1148 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
1149 #ifndef RTE_LIBRTE_IEEE1588
1150         /*
1151          * Vector mode receive can be enabled only if scatter rx is not
1152          * in use and the requested rx offloads are limited to the set
1153          * handled by the vector path (checked against the mask below).
1154          */
1155         if (!eth_dev->data->scattered_rx &&
1156             !(eth_dev->data->dev_conf.rxmode.offloads &
1157               ~(DEV_RX_OFFLOAD_VLAN_STRIP |
1158                 DEV_RX_OFFLOAD_KEEP_CRC |
1159                 DEV_RX_OFFLOAD_JUMBO_FRAME |
1160                 DEV_RX_OFFLOAD_IPV4_CKSUM |
1161                 DEV_RX_OFFLOAD_UDP_CKSUM |
1162                 DEV_RX_OFFLOAD_TCP_CKSUM |
1163                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
1164                 DEV_RX_OFFLOAD_RSS_HASH |
1165                 DEV_RX_OFFLOAD_VLAN_FILTER)) &&
1166             !BNXT_TRUFLOW_EN(bp) && BNXT_NUM_ASYNC_CPR(bp)) {
1167                 PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n",
1168                             eth_dev->data->port_id);
1169                 bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE;
1170                 return bnxt_recv_pkts_vec;
1171         }
1172         PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n",
1173                     eth_dev->data->port_id);
1174         PMD_DRV_LOG(INFO,
1175                     "Port %d scatter: %d rx offload: %" PRIX64 "\n",
1176                     eth_dev->data->port_id,
1177                     eth_dev->data->scattered_rx,
1178                     eth_dev->data->dev_conf.rxmode.offloads);
1179 #endif
1180 #endif
1181         bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
1182         return bnxt_recv_pkts;
1183 }
1184
1185 static eth_tx_burst_t
1186 bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
1187 {
1188 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
1189 #ifndef RTE_LIBRTE_IEEE1588
1190         struct bnxt *bp = eth_dev->data->dev_private;
1191
1192         /*
1193          * Vector mode transmit can be enabled only if not using scatter rx
1194          * or tx offloads.
1195          */
1196         if (!eth_dev->data->scattered_rx &&
1197             !eth_dev->data->dev_conf.txmode.offloads &&
1198             !BNXT_TRUFLOW_EN(bp)) {
1199                 PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
1200                             eth_dev->data->port_id);
1201                 return bnxt_xmit_pkts_vec;
1202         }
1203         PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n",
1204                     eth_dev->data->port_id);
1205         PMD_DRV_LOG(INFO,
1206                     "Port %d scatter: %d tx offload: %" PRIX64 "\n",
1207                     eth_dev->data->port_id,
1208                     eth_dev->data->scattered_rx,
1209                     eth_dev->data->dev_conf.txmode.offloads);
1210 #endif
1211 #endif
1212         return bnxt_xmit_pkts;
1213 }
1214
1215 static int bnxt_handle_if_change_status(struct bnxt *bp)
1216 {
1217         int rc;
1218
1219         /* Since fw has undergone a reset and lost all contexts,
1220          * set fatal flag to not issue hwrm during cleanup
1221          */
1222         bp->flags |= BNXT_FLAG_FATAL_ERROR;
1223         bnxt_uninit_resources(bp, true);
1224
1225         /* clear fatal flag so that re-init happens */
1226         bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
1227         rc = bnxt_init_resources(bp, true);
1228
1229         bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
1230
1231         return rc;
1232 }
1233
1234 static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
1235 {
1236         struct bnxt *bp = eth_dev->data->dev_private;
1237         uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
1238         int vlan_mask = 0;
1239         int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT;
1240
1241         if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) {
1242                 PMD_DRV_LOG(ERR, "Queues are not configured yet!\n");
1243                 return -EINVAL;
1244         }
1245
1246         if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
1247                 PMD_DRV_LOG(ERR,
1248                         "RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
1249                         bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1250         }
1251
1252         do {
1253                 rc = bnxt_hwrm_if_change(bp, true);
1254                 if (rc == 0 || rc != -EAGAIN)
1255                         break;
1256
1257                 rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL);
1258         } while (retry_cnt--);
1259
1260         if (rc)
1261                 return rc;
1262
1263         if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) {
1264                 rc = bnxt_handle_if_change_status(bp);
1265                 if (rc)
1266                         return rc;
1267         }
1268
1269         bnxt_enable_int(bp);
1270
1271         rc = bnxt_init_chip(bp);
1272         if (rc)
1273                 goto error;
1274
1275         eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);
1276         eth_dev->data->dev_started = 1;
1277
1278         bnxt_link_update(eth_dev, 1, ETH_LINK_UP);
1279
1280         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
1281                 vlan_mask |= ETH_VLAN_FILTER_MASK;
1282         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1283                 vlan_mask |= ETH_VLAN_STRIP_MASK;
1284         rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
1285         if (rc)
1286                 goto error;
1287
1288         /* Initialize bnxt ULP port details */
1289         rc = bnxt_ulp_port_init(bp);
1290         if (rc)
1291                 goto error;
1292
1293         eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
1294         eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);
1295
1296         bnxt_schedule_fw_health_check(bp);
1297
1298         return 0;
1299
1300 error:
1301         bnxt_shutdown_nic(bp);
1302         bnxt_free_tx_mbufs(bp);
1303         bnxt_free_rx_mbufs(bp);
1304         bnxt_hwrm_if_change(bp, false);
1305         eth_dev->data->dev_started = 0;
1306         return rc;
1307 }
1308
1309 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
1310 {
1311         struct bnxt *bp = eth_dev->data->dev_private;
1312         int rc = 0;
1313
1314         if (!bp->link_info->link_up)
1315                 rc = bnxt_set_hwrm_link_config(bp, true);
1316         if (!rc)
1317                 eth_dev->data->dev_link.link_status = 1;
1318
1319         bnxt_print_link_info(eth_dev);
1320         return rc;
1321 }
1322
1323 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
1324 {
1325         struct bnxt *bp = eth_dev->data->dev_private;
1326
1327         eth_dev->data->dev_link.link_status = 0;
1328         bnxt_set_hwrm_link_config(bp, false);
1329         bp->link_info->link_up = 0;
1330
1331         return 0;
1332 }
1333
1334 static void bnxt_free_switch_domain(struct bnxt *bp)
1335 {
1336         if (bp->switch_domain_id)
1337                 rte_eth_switch_domain_free(bp->switch_domain_id);
1338 }
1339
1340 /* Unload the driver, release resources */
1341 static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
1342 {
1343         struct bnxt *bp = eth_dev->data->dev_private;
1344         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1345         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1346
1347         eth_dev->data->dev_started = 0;
1348         eth_dev->data->scattered_rx = 0;
1349
1350         /* Prevent crashes when queues are still in use */
1351         eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
1352         eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
1353
1354         bnxt_disable_int(bp);
1355
1356         /* disable uio/vfio intr/eventfd mapping */
1357         rte_intr_disable(intr_handle);
1358
1359         /* Stop the child representors for this device */
1360         bnxt_rep_stop_all(bp);
1361
1362         /* delete the bnxt ULP port details */
1363         bnxt_ulp_port_deinit(bp);
1364
1365         bnxt_cancel_fw_health_check(bp);
1366
1367         /* Do not bring link down during reset recovery */
1368         if (!is_bnxt_in_error(bp))
1369                 bnxt_dev_set_link_down_op(eth_dev);
1370
1371         /* Wait for link to be reset and the async notification to process.
1372          * During reset recovery, there is no need to wait and
1373          * VF/NPAR functions do not have privilege to change PHY config.
1374          */
1375         if (!is_bnxt_in_error(bp) && BNXT_SINGLE_PF(bp))
1376                 bnxt_link_update(eth_dev, 1, ETH_LINK_DOWN);
1377
1378         /* Clean queue intr-vector mapping */
1379         rte_intr_efd_disable(intr_handle);
1380         if (intr_handle->intr_vec != NULL) {
1381                 rte_free(intr_handle->intr_vec);
1382                 intr_handle->intr_vec = NULL;
1383         }
1384
1385         bnxt_hwrm_port_clr_stats(bp);
1386         bnxt_free_tx_mbufs(bp);
1387         bnxt_free_rx_mbufs(bp);
1388         /* Process any remaining notifications in default completion queue */
1389         bnxt_int_handler(eth_dev);
1390         bnxt_shutdown_nic(bp);
1391         bnxt_hwrm_if_change(bp, false);
1392
1393         rte_free(bp->mark_table);
1394         bp->mark_table = NULL;
1395
1396         bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
1397         bp->rx_cosq_cnt = 0;
1398         /* All filters are deleted on a port stop. */
1399         if (BNXT_FLOW_XSTATS_EN(bp))
1400                 bp->flow_stat->flow_count = 0;
1401 }
1402
1403 static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
1404 {
1405         struct bnxt *bp = eth_dev->data->dev_private;
1406
1407         /* cancel the recovery handler before remove dev */
1408         rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp);
1409         rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp);
1410         bnxt_cancel_fc_thread(bp);
1411
1412         if (eth_dev->data->dev_started)
1413                 bnxt_dev_stop_op(eth_dev);
1414
1415         bnxt_free_switch_domain(bp);
1416
1417         bnxt_uninit_resources(bp, false);
1418
1419         bnxt_free_leds_info(bp);
1420         bnxt_free_cos_queues(bp);
1421         bnxt_free_link_info(bp);
1422         bnxt_free_pf_info(bp);
1423         bnxt_free_parent_info(bp);
1424
1425         eth_dev->dev_ops = NULL;
1426         eth_dev->rx_pkt_burst = NULL;
1427         eth_dev->tx_pkt_burst = NULL;
1428
1429         rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
1430         bp->tx_mem_zone = NULL;
1431         rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
1432         bp->rx_mem_zone = NULL;
1433
1434         bnxt_hwrm_free_vf_info(bp);
1435
1436         rte_free(bp->grp_info);
1437         bp->grp_info = NULL;
1438 }
1439
1440 static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
1441                                     uint32_t index)
1442 {
1443         struct bnxt *bp = eth_dev->data->dev_private;
1444         uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
1445         struct bnxt_vnic_info *vnic;
1446         struct bnxt_filter_info *filter, *temp_filter;
1447         uint32_t i;
1448
1449         if (is_bnxt_in_error(bp))
1450                 return;
1451
1452         /*
1453          * Loop through all VNICs from the specified filter flow pools to
1454          * remove the corresponding MAC addr filter
1455          */
1456         for (i = 0; i < bp->nr_vnics; i++) {
1457                 if (!(pool_mask & (1ULL << i)))
1458                         continue;
1459
1460                 vnic = &bp->vnic_info[i];
1461                 filter = STAILQ_FIRST(&vnic->filter);
1462                 while (filter) {
1463                         temp_filter = STAILQ_NEXT(filter, next);
1464                         if (filter->mac_index == index) {
1465                                 STAILQ_REMOVE(&vnic->filter, filter,
1466                                                 bnxt_filter_info, next);
1467                                 bnxt_hwrm_clear_l2_filter(bp, filter);
1468                                 bnxt_free_filter(bp, filter);
1469                         }
1470                         filter = temp_filter;
1471                 }
1472         }
1473 }
1474
1475 static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1476                                struct rte_ether_addr *mac_addr, uint32_t index,
1477                                uint32_t pool)
1478 {
1479         struct bnxt_filter_info *filter;
1480         int rc = 0;
1481
1482         /* Attach requested MAC address to the new l2_filter */
1483         STAILQ_FOREACH(filter, &vnic->filter, next) {
1484                 if (filter->mac_index == index) {
1485                         PMD_DRV_LOG(DEBUG,
1486                                     "MAC addr already exists for pool %d\n",
1487                                     pool);
1488                         return 0;
1489                 }
1490         }
1491
1492         filter = bnxt_alloc_filter(bp);
1493         if (!filter) {
1494                 PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
1495                 return -ENODEV;
1496         }
1497
1498         /* bnxt_alloc_filter() copies the default MAC address into
1499          * filter->l2_addr. If a different MAC address is being programmed
1500          * now, overwrite filter->l2_addr with that address.
1501          */
1502         if (mac_addr)
1503                 memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);
1504         filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
1505
1506         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
1507         if (!rc) {
1508                 filter->mac_index = index;
1509                 if (filter->mac_index == 0)
1510                         STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
1511                 else
1512                         STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
1513         } else {
1514                 bnxt_free_filter(bp, filter);
1515         }
1516
1517         return rc;
1518 }
1519
1520 static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
1521                                 struct rte_ether_addr *mac_addr,
1522                                 uint32_t index, uint32_t pool)
1523 {
1524         struct bnxt *bp = eth_dev->data->dev_private;
1525         struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
1526         int rc = 0;
1527
1528         rc = is_bnxt_in_error(bp);
1529         if (rc)
1530                 return rc;
1531
1532         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1533                 PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
1534                 return -ENOTSUP;
1535         }
1536
1537         if (!vnic) {
1538                 PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
1539                 return -EINVAL;
1540         }
1541
1542         /* Filter settings will get applied when port is started */
1543         if (!eth_dev->data->dev_started)
1544                 return 0;
1545
1546         rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool);
1547
1548         return rc;
1549 }
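
/*
 * Usage sketch (illustrative only, not part of the driver): applications
 * reach bnxt_mac_addr_add_op()/bnxt_mac_addr_remove_op() through the generic
 * ethdev MAC filter API. The port id, address and pool below are hypothetical
 * example values; pool selects the bp->vnic_info[] entry used for the filter.
 */
static __rte_unused int example_add_unicast_mac(uint16_t port_id)
{
        struct rte_ether_addr mac = {
                .addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
        };
        int rc;

        /* The MAC index is chosen by the ethdev layer; pool 0 is the
         * default VNIC in this driver.
         */
        rc = rte_eth_dev_mac_addr_add(port_id, &mac, 0);
        if (rc != 0)
                return rc;

        /* Removing the address later frees the underlying L2 filter. */
        return rte_eth_dev_mac_addr_remove(port_id, &mac);
}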
1550
1551 int bnxt_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete,
1552                      bool exp_link_status)
1553 {
1554         int rc = 0;
1555         struct bnxt *bp = eth_dev->data->dev_private;
1556         struct rte_eth_link new;
1557         int cnt = exp_link_status ? BNXT_LINK_UP_WAIT_CNT :
1558                   BNXT_LINK_DOWN_WAIT_CNT;
1559
1560         rc = is_bnxt_in_error(bp);
1561         if (rc)
1562                 return rc;
1563
1564         memset(&new, 0, sizeof(new));
1565         do {
1566                 /* Retrieve link info from hardware */
1567                 rc = bnxt_get_hwrm_link_config(bp, &new);
1568                 if (rc) {
1569                         new.link_speed = ETH_SPEED_NUM_100M;
1570                         new.link_duplex = ETH_LINK_FULL_DUPLEX;
1571                         PMD_DRV_LOG(ERR,
1572                                 "Failed to retrieve link rc = 0x%x!\n", rc);
1573                         goto out;
1574                 }
1575
1576                 if (!wait_to_complete || new.link_status == exp_link_status)
1577                         break;
1578
1579                 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
1580         } while (cnt--);
1581
1582 out:
1583         /* Timed out or success */
1584         if (new.link_status != eth_dev->data->dev_link.link_status ||
1585             new.link_speed != eth_dev->data->dev_link.link_speed) {
1586                 rte_eth_linkstatus_set(eth_dev, &new);
1587
1588                 rte_eth_dev_callback_process(eth_dev,
1589                                              RTE_ETH_EVENT_INTR_LSC,
1590                                              NULL);
1591
1592                 bnxt_print_link_info(eth_dev);
1593         }
1594
1595         return rc;
1596 }
1597
1598 int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
1599                         int wait_to_complete)
1600 {
1601         return bnxt_link_update(eth_dev, wait_to_complete, ETH_LINK_UP);
1602 }
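
/*
 * Usage sketch (illustrative only): bnxt_link_update_op() is reached through
 * rte_eth_link_get() (wait_to_complete = 1) or rte_eth_link_get_nowait()
 * (wait_to_complete = 0). The helper below uses the blocking variant, which
 * lets the retry loop above run for up to
 * BNXT_LINK_UP_WAIT_CNT * BNXT_LINK_WAIT_INTERVAL milliseconds.
 */
static __rte_unused int example_wait_for_link_up(uint16_t port_id)
{
        struct rte_eth_link link;

        memset(&link, 0, sizeof(link));
        rte_eth_link_get(port_id, &link);

        return link.link_status == ETH_LINK_UP;
}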
1603
1604 static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
1605 {
1606         struct bnxt *bp = eth_dev->data->dev_private;
1607         struct bnxt_vnic_info *vnic;
1608         uint32_t old_flags;
1609         int rc;
1610
1611         rc = is_bnxt_in_error(bp);
1612         if (rc)
1613                 return rc;
1614
1615         /* Filter settings will get applied when port is started */
1616         if (!eth_dev->data->dev_started)
1617                 return 0;
1618
1619         if (bp->vnic_info == NULL)
1620                 return 0;
1621
1622         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1623
1624         old_flags = vnic->flags;
1625         vnic->flags |= BNXT_VNIC_INFO_PROMISC;
1626         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1627         if (rc != 0)
1628                 vnic->flags = old_flags;
1629
1630         return rc;
1631 }
1632
1633 static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
1634 {
1635         struct bnxt *bp = eth_dev->data->dev_private;
1636         struct bnxt_vnic_info *vnic;
1637         uint32_t old_flags;
1638         int rc;
1639
1640         rc = is_bnxt_in_error(bp);
1641         if (rc)
1642                 return rc;
1643
1644         /* Filter settings will get applied when port is started */
1645         if (!eth_dev->data->dev_started)
1646                 return 0;
1647
1648         if (bp->vnic_info == NULL)
1649                 return 0;
1650
1651         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1652
1653         old_flags = vnic->flags;
1654         vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
1655         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1656         if (rc != 0)
1657                 vnic->flags = old_flags;
1658
1659         return rc;
1660 }
1661
1662 static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
1663 {
1664         struct bnxt *bp = eth_dev->data->dev_private;
1665         struct bnxt_vnic_info *vnic;
1666         uint32_t old_flags;
1667         int rc;
1668
1669         rc = is_bnxt_in_error(bp);
1670         if (rc)
1671                 return rc;
1672
1673         /* Filter settings will get applied when port is started */
1674         if (!eth_dev->data->dev_started)
1675                 return 0;
1676
1677         if (bp->vnic_info == NULL)
1678                 return 0;
1679
1680         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1681
1682         old_flags = vnic->flags;
1683         vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
1684         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1685         if (rc != 0)
1686                 vnic->flags = old_flags;
1687
1688         return rc;
1689 }
1690
1691 static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
1692 {
1693         struct bnxt *bp = eth_dev->data->dev_private;
1694         struct bnxt_vnic_info *vnic;
1695         uint32_t old_flags;
1696         int rc;
1697
1698         rc = is_bnxt_in_error(bp);
1699         if (rc)
1700                 return rc;
1701
1702         /* Filter settings will get applied when port is started */
1703         if (!eth_dev->data->dev_started)
1704                 return 0;
1705
1706         if (bp->vnic_info == NULL)
1707                 return 0;
1708
1709         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1710
1711         old_flags = vnic->flags;
1712         vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
1713         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1714         if (rc != 0)
1715                 vnic->flags = old_flags;
1716
1717         return rc;
1718 }
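
/*
 * Usage sketch (illustrative only): the four rx-mode ops above share one
 * pattern - snapshot vnic->flags, toggle one bit, push the new mask with
 * bnxt_hwrm_cfa_l2_set_rx_mask() and roll the flags back on failure. An
 * application drives them through the standard ethdev calls:
 */
static __rte_unused int example_set_rx_mode(uint16_t port_id, bool promisc,
                                            bool allmulti)
{
        int rc;

        rc = promisc ? rte_eth_promiscuous_enable(port_id) :
                       rte_eth_promiscuous_disable(port_id);
        if (rc != 0)
                return rc;

        return allmulti ? rte_eth_allmulticast_enable(port_id) :
                          rte_eth_allmulticast_disable(port_id);
}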
1719
1720 /* Return bnxt_rx_queue pointer corresponding to a given rxq. */
1721 static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid)
1722 {
1723         if (qid >= bp->rx_nr_rings)
1724                 return NULL;
1725
1726         return bp->eth_dev->data->rx_queues[qid];
1727 }
1728
1729 /* Return rxq corresponding to a given rss table ring/group ID. */
1730 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr)
1731 {
1732         struct bnxt_rx_queue *rxq;
1733         unsigned int i;
1734
1735         if (!BNXT_HAS_RING_GRPS(bp)) {
1736                 for (i = 0; i < bp->rx_nr_rings; i++) {
1737                         rxq = bp->eth_dev->data->rx_queues[i];
1738                         if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr)
1739                                 return rxq->index;
1740                 }
1741         } else {
1742                 for (i = 0; i < bp->rx_nr_rings; i++) {
1743                         if (bp->grp_info[i].fw_grp_id == fwr)
1744                                 return i;
1745                 }
1746         }
1747
1748         return INVALID_HW_RING_ID;
1749 }
1750
1751 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
1752                             struct rte_eth_rss_reta_entry64 *reta_conf,
1753                             uint16_t reta_size)
1754 {
1755         struct bnxt *bp = eth_dev->data->dev_private;
1756         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1757         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1758         uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
1759         uint16_t idx, sft;
1760         int i, rc;
1761
1762         rc = is_bnxt_in_error(bp);
1763         if (rc)
1764                 return rc;
1765
1766         if (!vnic->rss_table)
1767                 return -EINVAL;
1768
1769         if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
1770                 return -EINVAL;
1771
1772         if (reta_size != tbl_size) {
1773                 PMD_DRV_LOG(ERR, "The configured hash table lookup size "
1774                         "(%d) must equal the size supported by the hardware "
1775                         "(%d)\n", reta_size, tbl_size);
1776                 return -EINVAL;
1777         }
1778
1779         for (i = 0; i < reta_size; i++) {
1780                 struct bnxt_rx_queue *rxq;
1781
1782                 idx = i / RTE_RETA_GROUP_SIZE;
1783                 sft = i % RTE_RETA_GROUP_SIZE;
1784
1785                 if (!(reta_conf[idx].mask & (1ULL << sft)))
1786                         continue;
1787
1788                 rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]);
1789                 if (!rxq) {
1790                         PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n");
1791                         return -EINVAL;
1792                 }
1793
1794                 if (BNXT_CHIP_THOR(bp)) {
1795                         vnic->rss_table[i * 2] =
1796                                 rxq->rx_ring->rx_ring_struct->fw_ring_id;
1797                         vnic->rss_table[i * 2 + 1] =
1798                                 rxq->cp_ring->cp_ring_struct->fw_ring_id;
1799                 } else {
1800                         vnic->rss_table[i] =
1801                             vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
1802                 }
1803         }
1804
1805         bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1806         return 0;
1807 }
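
/*
 * Usage sketch (illustrative only): building the reta_conf[] array consumed
 * by bnxt_reta_update_op(). Each rte_eth_rss_reta_entry64 covers
 * RTE_RETA_GROUP_SIZE (64) entries, and the idx/sft arithmetic below mirrors
 * the decoding done in the op. Spreading queues round-robin is only an
 * example policy.
 */
static __rte_unused int example_spread_reta(uint16_t port_id, uint16_t nb_rxq)
{
        struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
                                                  RTE_RETA_GROUP_SIZE];
        struct rte_eth_dev_info dev_info;
        uint16_t i, idx, sft;
        int rc;

        rc = rte_eth_dev_info_get(port_id, &dev_info);
        if (rc != 0)
                return rc;

        if (dev_info.reta_size > RTE_DIM(reta_conf) * RTE_RETA_GROUP_SIZE)
                return -EINVAL;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < dev_info.reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                sft = i % RTE_RETA_GROUP_SIZE;
                reta_conf[idx].mask |= 1ULL << sft;
                reta_conf[idx].reta[sft] = i % nb_rxq;
        }

        return rte_eth_dev_rss_reta_update(port_id, reta_conf,
                                           dev_info.reta_size);
}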
1808
1809 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
1810                               struct rte_eth_rss_reta_entry64 *reta_conf,
1811                               uint16_t reta_size)
1812 {
1813         struct bnxt *bp = eth_dev->data->dev_private;
1814         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1815         uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
1816         uint16_t idx, sft, i;
1817         int rc;
1818
1819         rc = is_bnxt_in_error(bp);
1820         if (rc)
1821                 return rc;
1822
1823         /* Retrieve from the default VNIC */
1824         if (!vnic)
1825                 return -EINVAL;
1826         if (!vnic->rss_table)
1827                 return -EINVAL;
1828
1829         if (reta_size != tbl_size) {
1830                 PMD_DRV_LOG(ERR, "The configured hash table lookup size "
1831                         "(%d) must equal the size supported by the hardware "
1832                         "(%d)\n", reta_size, tbl_size);
1833                 return -EINVAL;
1834         }
1835
1836         for (idx = 0, i = 0; i < reta_size; i++) {
1837                 idx = i / RTE_RETA_GROUP_SIZE;
1838                 sft = i % RTE_RETA_GROUP_SIZE;
1839
1840                 if (reta_conf[idx].mask & (1ULL << sft)) {
1841                         uint16_t qid;
1842
1843                         if (BNXT_CHIP_THOR(bp))
1844                                 qid = bnxt_rss_to_qid(bp,
1845                                                       vnic->rss_table[i * 2]);
1846                         else
1847                                 qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);
1848
1849                         if (qid == INVALID_HW_RING_ID) {
1850                                 PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n");
1851                                 return -EINVAL;
1852                         }
1853                         reta_conf[idx].reta[sft] = qid;
1854                 }
1855         }
1856
1857         return 0;
1858 }
1859
1860 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
1861                                    struct rte_eth_rss_conf *rss_conf)
1862 {
1863         struct bnxt *bp = eth_dev->data->dev_private;
1864         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1865         struct bnxt_vnic_info *vnic;
1866         int rc;
1867
1868         rc = is_bnxt_in_error(bp);
1869         if (rc)
1870                 return rc;
1871
1872         /*
1873          * If the RSS enablement requested here conflicts with the RSS mode
1874          * selected at dev_configure time, return -EINVAL.
1875          */
1876         if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
1877                 if (!rss_conf->rss_hf)
1878                         PMD_DRV_LOG(ERR, "Hash type NONE\n");
1879         } else {
1880                 if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
1881                         return -EINVAL;
1882         }
1883
1884         bp->flags |= BNXT_FLAG_UPDATE_HASH;
1885         memcpy(&eth_dev->data->dev_conf.rx_adv_conf.rss_conf,
1886                rss_conf,
1887                sizeof(*rss_conf));
1888
1889         /* Update the default RSS VNIC(s) */
1890         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1891         vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
1892
1893         /*
1894          * If hashkey is not specified, use the previously configured
1895          * hashkey
1896          */
1897         if (!rss_conf->rss_key)
1898                 goto rss_config;
1899
1900         if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) {
1901                 PMD_DRV_LOG(ERR, "Invalid hashkey length, should be %d bytes\n",
1902                             HW_HASH_KEY_SIZE);
1903                 return -EINVAL;
1904         }
1905         memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len);
1906
1907 rss_config:
1908         bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1909         return 0;
1910 }
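
/*
 * Usage sketch (illustrative only): programming the RSS hash types and a
 * HW_HASH_KEY_SIZE (40 byte) key through rte_eth_dev_rss_hash_update(), which
 * lands in bnxt_rss_hash_update_op() above. The key bytes are placeholders.
 */
static __rte_unused int example_set_rss_hash(uint16_t port_id)
{
        static uint8_t key[HW_HASH_KEY_SIZE] = { 0x6d, 0x5a }; /* rest zero */
        struct rte_eth_rss_conf rss_conf = {
                .rss_key = key,
                .rss_key_len = HW_HASH_KEY_SIZE,
                .rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP |
                          ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP,
        };

        return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}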
1911
1912 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
1913                                      struct rte_eth_rss_conf *rss_conf)
1914 {
1915         struct bnxt *bp = eth_dev->data->dev_private;
1916         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1917         int len, rc;
1918         uint32_t hash_types;
1919
1920         rc = is_bnxt_in_error(bp);
1921         if (rc)
1922                 return rc;
1923
1924         /* RSS configuration is the same for all VNICs */
1925         if (vnic && vnic->rss_hash_key) {
1926                 if (rss_conf->rss_key) {
1927                         len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
1928                               rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
1929                         memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
1930                 }
1931
1932                 hash_types = vnic->hash_type;
1933                 rss_conf->rss_hf = 0;
1934                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
1935                         rss_conf->rss_hf |= ETH_RSS_IPV4;
1936                         hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
1937                 }
1938                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
1939                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1940                         hash_types &=
1941                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
1942                 }
1943                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
1944                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1945                         hash_types &=
1946                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
1947                 }
1948                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
1949                         rss_conf->rss_hf |= ETH_RSS_IPV6;
1950                         hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
1951                 }
1952                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
1953                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1954                         hash_types &=
1955                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
1956                 }
1957                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
1958                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1959                         hash_types &=
1960                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
1961                 }
1962                 if (hash_types) {
1963                         PMD_DRV_LOG(ERR,
1964                                 "Unknown RSS config from firmware (%08x), RSS disabled",
1965                                 vnic->hash_type);
1966                         return -ENOTSUP;
1967                 }
1968         } else {
1969                 rss_conf->rss_hf = 0;
1970         }
1971         return 0;
1972 }
1973
1974 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
1975                                struct rte_eth_fc_conf *fc_conf)
1976 {
1977         struct bnxt *bp = dev->data->dev_private;
1978         struct rte_eth_link link_info;
1979         int rc;
1980
1981         rc = is_bnxt_in_error(bp);
1982         if (rc)
1983                 return rc;
1984
1985         rc = bnxt_get_hwrm_link_config(bp, &link_info);
1986         if (rc)
1987                 return rc;
1988
1989         memset(fc_conf, 0, sizeof(*fc_conf));
1990         if (bp->link_info->auto_pause)
1991                 fc_conf->autoneg = 1;
1992         switch (bp->link_info->pause) {
1993         case 0:
1994                 fc_conf->mode = RTE_FC_NONE;
1995                 break;
1996         case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
1997                 fc_conf->mode = RTE_FC_TX_PAUSE;
1998                 break;
1999         case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
2000                 fc_conf->mode = RTE_FC_RX_PAUSE;
2001                 break;
2002         case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
2003                         HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
2004                 fc_conf->mode = RTE_FC_FULL;
2005                 break;
2006         }
2007         return 0;
2008 }
2009
2010 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
2011                                struct rte_eth_fc_conf *fc_conf)
2012 {
2013         struct bnxt *bp = dev->data->dev_private;
2014         int rc;
2015
2016         rc = is_bnxt_in_error(bp);
2017         if (rc)
2018                 return rc;
2019
2020         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
2021                 PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
2022                 return -ENOTSUP;
2023         }
2024
2025         switch (fc_conf->mode) {
2026         case RTE_FC_NONE:
2027                 bp->link_info->auto_pause = 0;
2028                 bp->link_info->force_pause = 0;
2029                 break;
2030         case RTE_FC_RX_PAUSE:
2031                 if (fc_conf->autoneg) {
2032                         bp->link_info->auto_pause =
2033                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
2034                         bp->link_info->force_pause = 0;
2035                 } else {
2036                         bp->link_info->auto_pause = 0;
2037                         bp->link_info->force_pause =
2038                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
2039                 }
2040                 break;
2041         case RTE_FC_TX_PAUSE:
2042                 if (fc_conf->autoneg) {
2043                         bp->link_info->auto_pause =
2044                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
2045                         bp->link_info->force_pause = 0;
2046                 } else {
2047                         bp->link_info->auto_pause = 0;
2048                         bp->link_info->force_pause =
2049                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
2050                 }
2051                 break;
2052         case RTE_FC_FULL:
2053                 if (fc_conf->autoneg) {
2054                         bp->link_info->auto_pause =
2055                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
2056                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
2057                         bp->link_info->force_pause = 0;
2058                 } else {
2059                         bp->link_info->auto_pause = 0;
2060                         bp->link_info->force_pause =
2061                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
2062                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
2063                 }
2064                 break;
2065         }
2066         return bnxt_set_hwrm_link_config(bp, true);
2067 }
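
/*
 * Usage sketch (illustrative only): requesting symmetric pause with
 * autonegotiation. bnxt_flow_ctrl_set_op() above translates RTE_FC_FULL plus
 * autoneg into the AUTO_PAUSE_TX | AUTO_PAUSE_RX bits before calling
 * bnxt_set_hwrm_link_config(). Only a single-function PF may change this
 * (see the BNXT_SINGLE_PF() check).
 */
static __rte_unused int example_enable_pause(uint16_t port_id)
{
        struct rte_eth_fc_conf fc_conf;
        int rc;

        /* Start from the current settings so unrelated fields are kept. */
        rc = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
        if (rc != 0)
                return rc;

        fc_conf.mode = RTE_FC_FULL;
        fc_conf.autoneg = 1;

        return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}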
2068
2069 /* Add UDP tunneling port */
2070 static int
2071 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
2072                          struct rte_eth_udp_tunnel *udp_tunnel)
2073 {
2074         struct bnxt *bp = eth_dev->data->dev_private;
2075         uint16_t tunnel_type = 0;
2076         int rc = 0;
2077
2078         rc = is_bnxt_in_error(bp);
2079         if (rc)
2080                 return rc;
2081
2082         switch (udp_tunnel->prot_type) {
2083         case RTE_TUNNEL_TYPE_VXLAN:
2084                 if (bp->vxlan_port_cnt) {
2085                         PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
2086                                 udp_tunnel->udp_port);
2087                         if (bp->vxlan_port != udp_tunnel->udp_port) {
2088                                 PMD_DRV_LOG(ERR, "Only one port allowed\n");
2089                                 return -ENOSPC;
2090                         }
2091                         bp->vxlan_port_cnt++;
2092                         return 0;
2093                 }
2094                 tunnel_type =
2095                         HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
2096                 bp->vxlan_port_cnt++;
2097                 break;
2098         case RTE_TUNNEL_TYPE_GENEVE:
2099                 if (bp->geneve_port_cnt) {
2100                         PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
2101                                 udp_tunnel->udp_port);
2102                         if (bp->geneve_port != udp_tunnel->udp_port) {
2103                                 PMD_DRV_LOG(ERR, "Only one port allowed\n");
2104                                 return -ENOSPC;
2105                         }
2106                         bp->geneve_port_cnt++;
2107                         return 0;
2108                 }
2109                 tunnel_type =
2110                         HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
2111                 bp->geneve_port_cnt++;
2112                 break;
2113         default:
2114                 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
2115                 return -ENOTSUP;
2116         }
2117         rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
2118                                              tunnel_type);
2119         return rc;
2120 }
2121
2122 static int
2123 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
2124                          struct rte_eth_udp_tunnel *udp_tunnel)
2125 {
2126         struct bnxt *bp = eth_dev->data->dev_private;
2127         uint16_t tunnel_type = 0;
2128         uint16_t port = 0;
2129         int rc = 0;
2130
2131         rc = is_bnxt_in_error(bp);
2132         if (rc)
2133                 return rc;
2134
2135         switch (udp_tunnel->prot_type) {
2136         case RTE_TUNNEL_TYPE_VXLAN:
2137                 if (!bp->vxlan_port_cnt) {
2138                         PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
2139                         return -EINVAL;
2140                 }
2141                 if (bp->vxlan_port != udp_tunnel->udp_port) {
2142                         PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
2143                                 udp_tunnel->udp_port, bp->vxlan_port);
2144                         return -EINVAL;
2145                 }
2146                 if (--bp->vxlan_port_cnt)
2147                         return 0;
2148
2149                 tunnel_type =
2150                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
2151                 port = bp->vxlan_fw_dst_port_id;
2152                 break;
2153         case RTE_TUNNEL_TYPE_GENEVE:
2154                 if (!bp->geneve_port_cnt) {
2155                         PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
2156                         return -EINVAL;
2157                 }
2158                 if (bp->geneve_port != udp_tunnel->udp_port) {
2159                         PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
2160                                 udp_tunnel->udp_port, bp->geneve_port);
2161                         return -EINVAL;
2162                 }
2163                 if (--bp->geneve_port_cnt)
2164                         return 0;
2165
2166                 tunnel_type =
2167                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
2168                 port = bp->geneve_fw_dst_port_id;
2169                 break;
2170         default:
2171                 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
2172                 return -ENOTSUP;
2173         }
2174
2175         rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
2176         if (!rc) {
2177                 if (tunnel_type ==
2178                     HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
2179                         bp->vxlan_port = 0;
2180                 if (tunnel_type ==
2181                     HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
2182                         bp->geneve_port = 0;
2183         }
2184         return rc;
2185 }
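
/*
 * Usage sketch (illustrative only): registering the IANA-assigned VXLAN UDP
 * port so the NIC parses the tunnel header. The add/del ops above keep a
 * reference count and accept exactly one VXLAN and one Geneve port at a time.
 */
static __rte_unused int example_add_vxlan_port(uint16_t port_id)
{
        struct rte_eth_udp_tunnel tunnel = {
                .udp_port = 4789,
                .prot_type = RTE_TUNNEL_TYPE_VXLAN,
        };

        return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}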
2186
2187 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
2188 {
2189         struct bnxt_filter_info *filter;
2190         struct bnxt_vnic_info *vnic;
2191         int rc = 0;
2192         uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
2193
2194         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2195         filter = STAILQ_FIRST(&vnic->filter);
2196         while (filter) {
2197                 /* Search for this matching MAC+VLAN filter */
2198                 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) {
2199                         /* Delete the filter */
2200                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2201                         if (rc)
2202                                 return rc;
2203                         STAILQ_REMOVE(&vnic->filter, filter,
2204                                       bnxt_filter_info, next);
2205                         bnxt_free_filter(bp, filter);
2206                         PMD_DRV_LOG(INFO,
2207                                     "Deleted vlan filter for %d\n",
2208                                     vlan_id);
2209                         return 0;
2210                 }
2211                 filter = STAILQ_NEXT(filter, next);
2212         }
2213         return -ENOENT;
2214 }
2215
2216 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
2217 {
2218         struct bnxt_filter_info *filter;
2219         struct bnxt_vnic_info *vnic;
2220         int rc = 0;
2221         uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
2222                 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
2223         uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
2224
2225         /* Implementation notes on the use of VNIC in this command:
2226          *
2227          * By default, these filters belong to default vnic for the function.
2228          * Once these filters are set up, only destination VNIC can be modified.
2229          * If the destination VNIC is not specified in this command,
2230          * then the HWRM shall only create an l2 context id.
2231          */
2232
2233         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2234         filter = STAILQ_FIRST(&vnic->filter);
2235         /* Check if the VLAN has already been added */
2236         while (filter) {
2237                 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id))
2238                         return -EEXIST;
2239
2240                 filter = STAILQ_NEXT(filter, next);
2241         }
2242
2243         /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC
2244          * command to create MAC+VLAN filter with the right flags, enables set.
2245          */
2246         filter = bnxt_alloc_filter(bp);
2247         if (!filter) {
2248                 PMD_DRV_LOG(ERR,
2249                             "MAC/VLAN filter alloc failed\n");
2250                 return -ENOMEM;
2251         }
2252         /* MAC + VLAN ID filter */
2253         /* If l2_ivlan == 0 and l2_ivlan_mask != 0, only
2254          * untagged packets are received
2255          *
2256          * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged
2257          * packets and only the programmed vlan's packets are received
2258          */
2259         filter->l2_ivlan = vlan_id;
2260         filter->l2_ivlan_mask = 0x0FFF;
2261         filter->enables |= en;
2262         filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
2263
2264         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
2265         if (rc) {
2266                 /* Free the newly allocated filter as we were
2267                  * not able to create the filter in hardware.
2268                  */
2269                 bnxt_free_filter(bp, filter);
2270                 return rc;
2271         }
2272
2273         filter->mac_index = 0;
2274         /* Add this new filter to the list */
2275         if (vlan_id == 0)
2276                 STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
2277         else
2278                 STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
2279
2280         PMD_DRV_LOG(INFO,
2281                     "Added Vlan filter for %d\n", vlan_id);
2282         return rc;
2283 }
2284
2285 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
2286                 uint16_t vlan_id, int on)
2287 {
2288         struct bnxt *bp = eth_dev->data->dev_private;
2289         int rc;
2290
2291         rc = is_bnxt_in_error(bp);
2292         if (rc)
2293                 return rc;
2294
2295         if (!eth_dev->data->dev_started) {
2296                 PMD_DRV_LOG(ERR, "port must be started before setting vlan\n");
2297                 return -EINVAL;
2298         }
2299
2300         /* These operations apply to ALL existing MAC/VLAN filters */
2301         if (on)
2302                 return bnxt_add_vlan_filter(bp, vlan_id);
2303         else
2304                 return bnxt_del_vlan_filter(bp, vlan_id);
2305 }
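
/*
 * Usage sketch (illustrative only): bnxt_vlan_filter_set_op() is reached via
 * rte_eth_dev_vlan_filter(). The port must be started, and the ethdev layer
 * additionally requires DEV_RX_OFFLOAD_VLAN_FILTER to be enabled. VLAN 100
 * is an example value.
 */
static __rte_unused int example_allow_vlan(uint16_t port_id)
{
        /* Accept traffic tagged with VLAN 100 on the default MAC. */
        return rte_eth_dev_vlan_filter(port_id, 100, 1);
}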
2306
2307 static int bnxt_del_dflt_mac_filter(struct bnxt *bp,
2308                                     struct bnxt_vnic_info *vnic)
2309 {
2310         struct bnxt_filter_info *filter;
2311         int rc;
2312
2313         filter = STAILQ_FIRST(&vnic->filter);
2314         while (filter) {
2315                 if (filter->mac_index == 0 &&
2316                     !memcmp(filter->l2_addr, bp->mac_addr,
2317                             RTE_ETHER_ADDR_LEN)) {
2318                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2319                         if (!rc) {
2320                                 STAILQ_REMOVE(&vnic->filter, filter,
2321                                               bnxt_filter_info, next);
2322                                 bnxt_free_filter(bp, filter);
2323                         }
2324                         return rc;
2325                 }
2326                 filter = STAILQ_NEXT(filter, next);
2327         }
2328         return 0;
2329 }
2330
2331 static int
2332 bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
2333 {
2334         struct bnxt_vnic_info *vnic;
2335         unsigned int i;
2336         int rc;
2337
2338         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2339         if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
2340                 /* Remove any VLAN filters programmed */
2341                 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
2342                         bnxt_del_vlan_filter(bp, i);
2343
2344                 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0);
2345                 if (rc)
2346                         return rc;
2347         } else {
2348                 /* The default filter accepts any packet that matches the
2349                  * destination MAC. It has to be deleted here; otherwise,
2350                  * when the hw-vlan-filter configuration is ON, we would
2351                  * end up receiving VLAN packets for which no filter has
2352                  * been programmed.
2353                  */
2354                 bnxt_del_dflt_mac_filter(bp, vnic);
2355                 /* This filter will allow only untagged packets */
2356                 bnxt_add_vlan_filter(bp, 0);
2357         }
2358         PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
2359                     !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
2360
2361         return 0;
2362 }
2363
2364 static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id)
2365 {
2366         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2367         unsigned int i;
2368         int rc;
2369
2370         /* Destroy vnic filters and vnic */
2371         if (bp->eth_dev->data->dev_conf.rxmode.offloads &
2372             DEV_RX_OFFLOAD_VLAN_FILTER) {
2373                 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
2374                         bnxt_del_vlan_filter(bp, i);
2375         }
2376         bnxt_del_dflt_mac_filter(bp, vnic);
2377
2378         rc = bnxt_hwrm_vnic_free(bp, vnic);
2379         if (rc)
2380                 return rc;
2381
2382         rte_free(vnic->fw_grp_ids);
2383         vnic->fw_grp_ids = NULL;
2384
2385         vnic->rx_queue_cnt = 0;
2386
2387         return 0;
2388 }
2389
2390 static int
2391 bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
2392 {
2393         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
2394         int rc;
2395
2396         /* Destroy, recreate and reconfigure the default vnic */
2397         rc = bnxt_free_one_vnic(bp, 0);
2398         if (rc)
2399                 return rc;
2400
2401         /* default vnic 0 */
2402         rc = bnxt_setup_one_vnic(bp, 0);
2403         if (rc)
2404                 return rc;
2405
2406         if (bp->eth_dev->data->dev_conf.rxmode.offloads &
2407             DEV_RX_OFFLOAD_VLAN_FILTER) {
2408                 rc = bnxt_add_vlan_filter(bp, 0);
2409                 if (rc)
2410                         return rc;
2411                 rc = bnxt_restore_vlan_filters(bp);
2412                 if (rc)
2413                         return rc;
2414         } else {
2415                 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0);
2416                 if (rc)
2417                         return rc;
2418         }
2419
2420         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2421         if (rc)
2422                 return rc;
2423
2424         PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
2425                     !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
2426
2427         return rc;
2428 }
2429
2430 static int
2431 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
2432 {
2433         uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
2434         struct bnxt *bp = dev->data->dev_private;
2435         int rc;
2436
2437         rc = is_bnxt_in_error(bp);
2438         if (rc)
2439                 return rc;
2440
2441         /* Filter settings will get applied when port is started */
2442         if (!dev->data->dev_started)
2443                 return 0;
2444
2445         if (mask & ETH_VLAN_FILTER_MASK) {
2446                 /* Enable or disable VLAN filtering */
2447                 rc = bnxt_config_vlan_hw_filter(bp, rx_offloads);
2448                 if (rc)
2449                         return rc;
2450         }
2451
2452         if (mask & ETH_VLAN_STRIP_MASK) {
2453                 /* Enable or disable VLAN stripping */
2454                 rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads);
2455                 if (rc)
2456                         return rc;
2457         }
2458
2459         if (mask & ETH_VLAN_EXTEND_MASK) {
2460                 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2461                         PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n");
2462                 else
2463                         PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n");
2464         }
2465
2466         return 0;
2467 }
2468
2469 static int
2470 bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
2471                       uint16_t tpid)
2472 {
2473         struct bnxt *bp = dev->data->dev_private;
2474         int qinq = dev->data->dev_conf.rxmode.offloads &
2475                    DEV_RX_OFFLOAD_VLAN_EXTEND;
2476
2477         if (vlan_type != ETH_VLAN_TYPE_INNER &&
2478             vlan_type != ETH_VLAN_TYPE_OUTER) {
2479                 PMD_DRV_LOG(ERR,
2480                             "Unsupported vlan type.");
2481                 return -EINVAL;
2482         }
2483         if (!qinq) {
2484                 PMD_DRV_LOG(ERR,
2485                             "QinQ not enabled. Needs to be ON as we can "
2486                             "accelerate only outer vlan\n");
2487                 return -EINVAL;
2488         }
2489
2490         if (vlan_type == ETH_VLAN_TYPE_OUTER) {
2491                 switch (tpid) {
2492                 case RTE_ETHER_TYPE_QINQ:
2493                         bp->outer_tpid_bd =
2494                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8;
2495                         break;
2496                 case RTE_ETHER_TYPE_VLAN:
2497                         bp->outer_tpid_bd =
2498                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
2499                         break;
2500                 case RTE_ETHER_TYPE_QINQ1:
2501                         bp->outer_tpid_bd =
2502                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100;
2503                         break;
2504                 case RTE_ETHER_TYPE_QINQ2:
2505                         bp->outer_tpid_bd =
2506                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200;
2507                         break;
2508                 case RTE_ETHER_TYPE_QINQ3:
2509                         bp->outer_tpid_bd =
2510                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300;
2511                         break;
2512                 default:
2513                         PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid);
2514                         return -EINVAL;
2515                 }
2516                 bp->outer_tpid_bd |= tpid;
2517                 PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd);
2518         } else if (vlan_type == ETH_VLAN_TYPE_INNER) {
2519                 PMD_DRV_LOG(ERR,
2520                             "Can accelerate only outer vlan in QinQ\n");
2521                 return -EINVAL;
2522         }
2523
2524         return 0;
2525 }
2526
2527 static int
2528 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
2529                              struct rte_ether_addr *addr)
2530 {
2531         struct bnxt *bp = dev->data->dev_private;
2532         /* Default Filter is tied to VNIC 0 */
2533         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
2534         int rc;
2535
2536         rc = is_bnxt_in_error(bp);
2537         if (rc)
2538                 return rc;
2539
2540         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
2541                 return -EPERM;
2542
2543         if (rte_is_zero_ether_addr(addr))
2544                 return -EINVAL;
2545
2546         /* Filter settings will get applied when port is started */
2547         if (!dev->data->dev_started)
2548                 return 0;
2549
2550         /* Check if the requested MAC is already added */
2551         if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0)
2552                 return 0;
2553
2554         /* Destroy filter and re-create it */
2555         bnxt_del_dflt_mac_filter(bp, vnic);
2556
2557         memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN);
2558         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
2559                 /* This filter will allow only untagged packets */
2560                 rc = bnxt_add_vlan_filter(bp, 0);
2561         } else {
2562                 rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0);
2563         }
2564
2565         PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
2566         return rc;
2567 }
2568
2569 static int
2570 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
2571                           struct rte_ether_addr *mc_addr_set,
2572                           uint32_t nb_mc_addr)
2573 {
2574         struct bnxt *bp = eth_dev->data->dev_private;
2575         char *mc_addr_list = (char *)mc_addr_set;
2576         struct bnxt_vnic_info *vnic;
2577         uint32_t off = 0, i = 0;
2578         int rc;
2579
2580         rc = is_bnxt_in_error(bp);
2581         if (rc)
2582                 return rc;
2583
2584         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2585
2586         if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
2587                 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
2588                 goto allmulti;
2589         }
2590
2591         /* TODO Check for Duplicate mcast addresses */
2592         vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
2593         for (i = 0; i < nb_mc_addr; i++) {
2594                 memcpy(vnic->mc_list + off, &mc_addr_list[i],
2595                         RTE_ETHER_ADDR_LEN);
2596                 off += RTE_ETHER_ADDR_LEN;
2597         }
2598
2599         vnic->mc_addr_cnt = i;
2600         if (vnic->mc_addr_cnt)
2601                 vnic->flags |= BNXT_VNIC_INFO_MCAST;
2602         else
2603                 vnic->flags &= ~BNXT_VNIC_INFO_MCAST;
2604
2605 allmulti:
2606         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2607 }
2608
2609 static int
2610 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2611 {
2612         struct bnxt *bp = dev->data->dev_private;
2613         uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
2614         uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
2615         uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
2616         uint8_t fw_rsvd = bp->fw_ver & 0xff;
2617         int ret;
2618
2619         ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d",
2620                         fw_major, fw_minor, fw_updt, fw_rsvd);
2621
2622         ret += 1; /* add the size of '\0' */
2623         if (fw_size < (uint32_t)ret)
2624                 return ret;
2625         else
2626                 return 0;
2627 }
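
/*
 * Usage sketch (illustrative only): rte_eth_dev_fw_version_get() returns 0 on
 * success and, when the buffer is too small, the number of bytes (including
 * the terminating '\0') that bnxt_fw_version_get() needed - the "ret += 1"
 * accounting above.
 */
static __rte_unused int example_print_fw_version(uint16_t port_id)
{
        char fw_version[32];
        int rc;

        rc = rte_eth_dev_fw_version_get(port_id, fw_version,
                                        sizeof(fw_version));
        if (rc > 0)
                return -ENOSPC; /* rc is the required buffer size */
        if (rc < 0)
                return rc;

        PMD_DRV_LOG(INFO, "firmware version: %s\n", fw_version);
        return 0;
}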
2628
2629 static void
2630 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
2631         struct rte_eth_rxq_info *qinfo)
2632 {
2633         struct bnxt *bp = dev->data->dev_private;
2634         struct bnxt_rx_queue *rxq;
2635
2636         if (is_bnxt_in_error(bp))
2637                 return;
2638
2639         rxq = dev->data->rx_queues[queue_id];
2640
2641         qinfo->mp = rxq->mb_pool;
2642         qinfo->scattered_rx = dev->data->scattered_rx;
2643         qinfo->nb_desc = rxq->nb_rx_desc;
2644
2645         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2646         qinfo->conf.rx_drop_en = rxq->drop_en;
2647         qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
2648         qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
2649 }
2650
2651 static void
2652 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
2653         struct rte_eth_txq_info *qinfo)
2654 {
2655         struct bnxt *bp = dev->data->dev_private;
2656         struct bnxt_tx_queue *txq;
2657
2658         if (is_bnxt_in_error(bp))
2659                 return;
2660
2661         txq = dev->data->tx_queues[queue_id];
2662
2663         qinfo->nb_desc = txq->nb_tx_desc;
2664
2665         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2666         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2667         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2668
2669         qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
2670         qinfo->conf.tx_rs_thresh = 0;
2671         qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
2672         qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
2673 }
2674
2675 static const struct {
2676         eth_rx_burst_t pkt_burst;
2677         const char *info;
2678 } bnxt_rx_burst_info[] = {
2679         {bnxt_recv_pkts,        "Scalar"},
2680 #if defined(RTE_ARCH_X86)
2681         {bnxt_recv_pkts_vec,    "Vector SSE"},
2682 #elif defined(RTE_ARCH_ARM64)
2683         {bnxt_recv_pkts_vec,    "Vector Neon"},
2684 #endif
2685 };
2686
2687 static int
2688 bnxt_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
2689                        struct rte_eth_burst_mode *mode)
2690 {
2691         eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
2692         size_t i;
2693
2694         for (i = 0; i < RTE_DIM(bnxt_rx_burst_info); i++) {
2695                 if (pkt_burst == bnxt_rx_burst_info[i].pkt_burst) {
2696                         snprintf(mode->info, sizeof(mode->info), "%s",
2697                                  bnxt_rx_burst_info[i].info);
2698                         return 0;
2699                 }
2700         }
2701
2702         return -EINVAL;
2703 }
2704
2705 static const struct {
2706         eth_tx_burst_t pkt_burst;
2707         const char *info;
2708 } bnxt_tx_burst_info[] = {
2709         {bnxt_xmit_pkts,        "Scalar"},
2710 #if defined(RTE_ARCH_X86)
2711         {bnxt_xmit_pkts_vec,    "Vector SSE"},
2712 #elif defined(RTE_ARCH_ARM64)
2713         {bnxt_xmit_pkts_vec,    "Vector Neon"},
2714 #endif
2715 };
2716
2717 static int
2718 bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
2719                        struct rte_eth_burst_mode *mode)
2720 {
2721         eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
2722         size_t i;
2723
2724         for (i = 0; i < RTE_DIM(bnxt_tx_burst_info); i++) {
2725                 if (pkt_burst == bnxt_tx_burst_info[i].pkt_burst) {
2726                         snprintf(mode->info, sizeof(mode->info), "%s",
2727                                  bnxt_tx_burst_info[i].info);
2728                         return 0;
2729                 }
2730         }
2731
2732         return -EINVAL;
2733 }
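
/*
 * Usage sketch (illustrative only): querying which Rx/Tx burst implementation
 * (scalar or vector) was selected, using the lookup tables above. Queue 0 is
 * an example; all bnxt queues share the same burst function.
 */
static __rte_unused void example_log_burst_modes(uint16_t port_id)
{
        struct rte_eth_burst_mode mode;

        if (rte_eth_rx_burst_mode_get(port_id, 0, &mode) == 0)
                PMD_DRV_LOG(INFO, "Rx burst mode: %s\n", mode.info);
        if (rte_eth_tx_burst_mode_get(port_id, 0, &mode) == 0)
                PMD_DRV_LOG(INFO, "Tx burst mode: %s\n", mode.info);
}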
2734
2735 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
2736 {
2737         struct bnxt *bp = eth_dev->data->dev_private;
2738         uint32_t new_pkt_size;
2739         int rc = 0;
2740         uint32_t i;
2741
2742         rc = is_bnxt_in_error(bp);
2743         if (rc)
2744                 return rc;
2745
2746         /* Exit if receive queues are not configured yet */
2747         if (!eth_dev->data->nb_rx_queues)
2748                 return rc;
2749
2750         new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
2751                        VLAN_TAG_SIZE * BNXT_NUM_VLANS;
2752
2753         /*
2754          * Disallow any MTU change that would require scattered receive support
2755          * if it is not already enabled.
2756          */
2757         if (eth_dev->data->dev_started &&
2758             !eth_dev->data->scattered_rx &&
2759             (new_pkt_size >
2760              eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2761                 PMD_DRV_LOG(ERR,
2762                             "MTU change would require scattered rx support. ");
2763                 PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n");
2764                 return -EINVAL;
2765         }
2766
2767         if (new_mtu > RTE_ETHER_MTU) {
2768                 bp->flags |= BNXT_FLAG_JUMBO;
2769                 bp->eth_dev->data->dev_conf.rxmode.offloads |=
2770                         DEV_RX_OFFLOAD_JUMBO_FRAME;
2771         } else {
2772                 bp->eth_dev->data->dev_conf.rxmode.offloads &=
2773                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
2774                 bp->flags &= ~BNXT_FLAG_JUMBO;
2775         }
2776
2777         /* Is there a change in mtu setting? */
2778         if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len == new_pkt_size)
2779                 return rc;
2780
2781         for (i = 0; i < bp->nr_vnics; i++) {
2782                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2783                 uint16_t size = 0;
2784
2785                 vnic->mru = BNXT_VNIC_MRU(new_mtu);
2786                 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
2787                 if (rc)
2788                         break;
2789
2790                 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2791                 size -= RTE_PKTMBUF_HEADROOM;
2792
2793                 if (size < new_mtu) {
2794                         rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
2795                         if (rc)
2796                                 return rc;
2797                 }
2798         }
2799
2800         if (!rc)
2801                 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;
2802
2803         PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu);
2804
2805         return rc;
2806 }
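
/*
 * Illustrative sketch (not part of the driver): the Rx mbuf data room an
 * application must provision for a given MTU, matching the new_pkt_size
 * arithmetic in bnxt_mtu_set_op() above (Ethernet header, CRC and two VLAN
 * tags on top of the MTU) plus mbuf headroom. The 9000 byte MTU below is an
 * example value.
 */
static __rte_unused uint16_t example_mbuf_data_room_for_mtu(uint16_t mtu)
{
        return mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
               VLAN_TAG_SIZE * BNXT_NUM_VLANS + RTE_PKTMBUF_HEADROOM;
}

/*
 * e.g. create the mempool with
 *     rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *                             example_mbuf_data_room_for_mtu(9000), socket);
 * and then call rte_eth_dev_set_mtu(port_id, 9000) once the Rx queues exist.
 */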
2807
2808 static int
2809 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
2810 {
2811         struct bnxt *bp = dev->data->dev_private;
2812         uint16_t vlan = bp->vlan;
2813         int rc;
2814
2815         rc = is_bnxt_in_error(bp);
2816         if (rc)
2817                 return rc;
2818
2819         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
2820                 PMD_DRV_LOG(ERR,
2821                         "PVID cannot be modified for this function\n");
2822                 return -ENOTSUP;
2823         }
2824         bp->vlan = on ? pvid : 0;
2825
2826         rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
2827         if (rc)
2828                 bp->vlan = vlan;
2829         return rc;
2830 }
2831
2832 static int
2833 bnxt_dev_led_on_op(struct rte_eth_dev *dev)
2834 {
2835         struct bnxt *bp = dev->data->dev_private;
2836         int rc;
2837
2838         rc = is_bnxt_in_error(bp);
2839         if (rc)
2840                 return rc;
2841
2842         return bnxt_hwrm_port_led_cfg(bp, true);
2843 }
2844
2845 static int
2846 bnxt_dev_led_off_op(struct rte_eth_dev *dev)
2847 {
2848         struct bnxt *bp = dev->data->dev_private;
2849         int rc;
2850
2851         rc = is_bnxt_in_error(bp);
2852         if (rc)
2853                 return rc;
2854
2855         return bnxt_hwrm_port_led_cfg(bp, false);
2856 }
2857
2858 static uint32_t
2859 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2860 {
2861         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2862         uint32_t desc = 0, raw_cons = 0, cons;
2863         struct bnxt_cp_ring_info *cpr;
2864         struct bnxt_rx_queue *rxq;
2865         struct rx_pkt_cmpl *rxcmp;
2866         int rc;
2867
2868         rc = is_bnxt_in_error(bp);
2869         if (rc)
2870                 return rc;
2871
2872         rxq = dev->data->rx_queues[rx_queue_id];
2873         cpr = rxq->cp_ring;
2874         raw_cons = cpr->cp_raw_cons;
2875
2876         while (1) {
2877                 cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
2878                 rte_prefetch0(&cpr->cp_desc_ring[cons]);
2879                 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2880
2881                 if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct)) {
2882                         break;
2883                 } else {
2884                         raw_cons++;
2885                         desc++;
2886                 }
2887         }
2888
2889         return desc;
2890 }
2891
2892 static int
2893 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
2894 {
2895         struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
2896         struct bnxt_rx_ring_info *rxr;
2897         struct bnxt_cp_ring_info *cpr;
2898         struct rte_mbuf *rx_buf;
2899         struct rx_pkt_cmpl *rxcmp;
2900         uint32_t cons, cp_cons;
2901         int rc;
2902
2903         if (!rxq)
2904                 return -EINVAL;
2905
2906         rc = is_bnxt_in_error(rxq->bp);
2907         if (rc)
2908                 return rc;
2909
2910         cpr = rxq->cp_ring;
2911         rxr = rxq->rx_ring;
2912
2913         if (offset >= rxq->nb_rx_desc)
2914                 return -EINVAL;
2915
2916         cons = RING_CMP(cpr->cp_ring_struct, offset);
2917         cp_cons = cpr->cp_raw_cons;
2918         rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2919
2920         if (cons > cp_cons) {
2921                 if (CMPL_VALID(rxcmp, cpr->valid))
2922                         return RTE_ETH_RX_DESC_DONE;
2923         } else {
2924                 if (CMPL_VALID(rxcmp, !cpr->valid))
2925                         return RTE_ETH_RX_DESC_DONE;
2926         }
2927         rx_buf = rxr->rx_buf_ring[cons];
2928         if (rx_buf == NULL || rx_buf == &rxq->fake_mbuf)
2929                 return RTE_ETH_RX_DESC_UNAVAIL;
2930
2931
2932         return RTE_ETH_RX_DESC_AVAIL;
2933 }
2934
2935 static int
2936 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
2937 {
2938         struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
2939         struct bnxt_tx_ring_info *txr;
2940         struct bnxt_cp_ring_info *cpr;
2941         struct bnxt_sw_tx_bd *tx_buf;
2942         struct tx_pkt_cmpl *txcmp;
2943         uint32_t cons, cp_cons;
2944         int rc;
2945
2946         if (!txq)
2947                 return -EINVAL;
2948
2949         rc = is_bnxt_in_error(txq->bp);
2950         if (rc)
2951                 return rc;
2952
2953         cpr = txq->cp_ring;
2954         txr = txq->tx_ring;
2955
2956         if (offset >= txq->nb_tx_desc)
2957                 return -EINVAL;
2958
2959         cons = RING_CMP(cpr->cp_ring_struct, offset);
2960         txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2961         cp_cons = cpr->cp_raw_cons;
2962
2963         if (cons > cp_cons) {
2964                 if (CMPL_VALID(txcmp, cpr->valid))
2965                         return RTE_ETH_TX_DESC_UNAVAIL;
2966         } else {
2967                 if (CMPL_VALID(txcmp, !cpr->valid))
2968                         return RTE_ETH_TX_DESC_UNAVAIL;
2969         }
2970         tx_buf = &txr->tx_buf_ring[cons];
2971         if (tx_buf->mbuf == NULL)
2972                 return RTE_ETH_TX_DESC_DONE;
2973
2974         return RTE_ETH_TX_DESC_FULL;
2975 }
2976
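/*
 * Validate an ethertype filter request and look for an existing match in the
 * relevant VNIC filter list. *ret is set to -EINVAL for an invalid request,
 * -EEXIST when a matching filter already exists, and 0 otherwise.
 */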
2977 static struct bnxt_filter_info *
2978 bnxt_match_and_validate_ether_filter(struct bnxt *bp,
2979                                 struct rte_eth_ethertype_filter *efilter,
2980                                 struct bnxt_vnic_info *vnic0,
2981                                 struct bnxt_vnic_info *vnic,
2982                                 int *ret)
2983 {
2984         struct bnxt_filter_info *mfilter = NULL;
2985         int match = 0;
2986         *ret = 0;
2987
2988         if (efilter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2989                 efilter->ether_type == RTE_ETHER_TYPE_IPV6) {
2990                 PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in"
2991                         " ethertype filter.", efilter->ether_type);
2992                 *ret = -EINVAL;
2993                 goto exit;
2994         }
2995         if (efilter->queue >= bp->rx_nr_rings) {
2996                 PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
2997                 *ret = -EINVAL;
2998                 goto exit;
2999         }
3000
3001         vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
3002         vnic = &bp->vnic_info[efilter->queue];
3003         if (vnic == NULL) {
3004                 PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
3005                 *ret = -EINVAL;
3006                 goto exit;
3007         }
3008
3009         if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
3010                 STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
3011                         if ((!memcmp(efilter->mac_addr.addr_bytes,
3012                                      mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
3013                              mfilter->flags ==
3014                              HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
3015                              mfilter->ethertype == efilter->ether_type)) {
3016                                 match = 1;
3017                                 break;
3018                         }
3019                 }
3020         } else {
3021                 STAILQ_FOREACH(mfilter, &vnic->filter, next)
3022                         if ((!memcmp(efilter->mac_addr.addr_bytes,
3023                                      mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
3024                              mfilter->ethertype == efilter->ether_type &&
3025                              mfilter->flags ==
3026                              HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
3027                                 match = 1;
3028                                 break;
3029                         }
3030         }
3031
3032         if (match)
3033                 *ret = -EEXIST;
3034
3035 exit:
3036         return mfilter;
3037 }
3038
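/*
 * Add or delete an ethertype filter. Adds are programmed as HWRM ntuple
 * filters keyed on destination MAC and ethertype; deletes clear the matching
 * HWRM filter and release it back to the free list.
 */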
3039 static int
3040 bnxt_ethertype_filter(struct rte_eth_dev *dev,
3041                         enum rte_filter_op filter_op,
3042                         void *arg)
3043 {
3044         struct bnxt *bp = dev->data->dev_private;
3045         struct rte_eth_ethertype_filter *efilter =
3046                         (struct rte_eth_ethertype_filter *)arg;
3047         struct bnxt_filter_info *bfilter, *filter1;
3048         struct bnxt_vnic_info *vnic, *vnic0;
3049         int ret;
3050
3051         if (filter_op == RTE_ETH_FILTER_NOP)
3052                 return 0;
3053
3054         if (arg == NULL) {
3055                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
3056                             filter_op);
3057                 return -EINVAL;
3058         }
3059
3060         vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
3061         vnic = &bp->vnic_info[efilter->queue];
3062
3063         switch (filter_op) {
3064         case RTE_ETH_FILTER_ADD:
3065                 bnxt_match_and_validate_ether_filter(bp, efilter,
3066                                                         vnic0, vnic, &ret);
3067                 if (ret < 0)
3068                         return ret;
3069
3070                 bfilter = bnxt_get_unused_filter(bp);
3071                 if (bfilter == NULL) {
3072                         PMD_DRV_LOG(ERR,
3073                                 "Not enough resources for a new filter.\n");
3074                         return -ENOMEM;
3075                 }
3076                 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
3077                 memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
3078                        RTE_ETHER_ADDR_LEN);
3079                 memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
3080                        RTE_ETHER_ADDR_LEN);
3081                 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
3082                 bfilter->ethertype = efilter->ether_type;
3083                 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3084
3085                 filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0);
3086                 if (filter1 == NULL) {
3087                         ret = -EINVAL;
3088                         goto cleanup;
3089                 }
3090                 bfilter->enables |=
3091                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
3092                 bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
3093
3094                 bfilter->dst_id = vnic->fw_vnic_id;
3095
3096                 if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
3097                         bfilter->flags =
3098                                 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
3099                 }
3100
3101                 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
3102                 if (ret)
3103                         goto cleanup;
3104                 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
3105                 break;
3106         case RTE_ETH_FILTER_DELETE:
3107                 filter1 = bnxt_match_and_validate_ether_filter(bp, efilter,
3108                                                         vnic0, vnic, &ret);
3109                 if (ret == -EEXIST) {
3110                         ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1);
3111
3112                         STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info,
3113                                       next);
3114                         bnxt_free_filter(bp, filter1);
3115                 } else if (ret == 0) {
3116                         PMD_DRV_LOG(ERR, "No matching filter found\n");
3117                 }
3118                 break;
3119         default:
3120                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
3121                 ret = -EINVAL;
3122                 goto error;
3123         }
3124         return ret;
3125 cleanup:
3126         bnxt_free_filter(bp, bfilter);
3127 error:
3128         return ret;
3129 }
3130
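/*
 * Translate an rte_eth_ntuple_filter into a bnxt_filter_info. Only exact
 * (all-ones) field masks are supported; any other mask value is rejected.
 */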
3131 static inline int
3132 parse_ntuple_filter(struct bnxt *bp,
3133                     struct rte_eth_ntuple_filter *nfilter,
3134                     struct bnxt_filter_info *bfilter)
3135 {
3136         uint32_t en = 0;
3137
3138         if (nfilter->queue >= bp->rx_nr_rings) {
3139                 PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue);
3140                 return -EINVAL;
3141         }
3142
3143         switch (nfilter->dst_port_mask) {
3144         case UINT16_MAX:
3145                 bfilter->dst_port_mask = -1;
3146                 bfilter->dst_port = nfilter->dst_port;
3147                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT |
3148                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3149                 break;
3150         default:
3151                 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
3152                 return -EINVAL;
3153         }
3154
3155         bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
3156         en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3157
3158         switch (nfilter->proto_mask) {
3159         case UINT8_MAX:
3160                 if (nfilter->proto == 17) /* IPPROTO_UDP */
3161                         bfilter->ip_protocol = 17;
3162                 else if (nfilter->proto == 6) /* IPPROTO_TCP */
3163                         bfilter->ip_protocol = 6;
3164                 else
3165                         return -EINVAL;
3166                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3167                 break;
3168         default:
3169                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
3170                 return -EINVAL;
3171         }
3172
3173         switch (nfilter->dst_ip_mask) {
3174         case UINT32_MAX:
3175                 bfilter->dst_ipaddr_mask[0] = -1;
3176                 bfilter->dst_ipaddr[0] = nfilter->dst_ip;
3177                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR |
3178                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3179                 break;
3180         default:
3181                 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
3182                 return -EINVAL;
3183         }
3184
3185         switch (nfilter->src_ip_mask) {
3186         case UINT32_MAX:
3187                 bfilter->src_ipaddr_mask[0] = -1;
3188                 bfilter->src_ipaddr[0] = nfilter->src_ip;
3189                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
3190                         NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3191                 break;
3192         default:
3193                 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
3194                 return -EINVAL;
3195         }
3196
3197         switch (nfilter->src_port_mask) {
3198         case UINT16_MAX:
3199                 bfilter->src_port_mask = -1;
3200                 bfilter->src_port = nfilter->src_port;
3201                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
3202                         NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3203                 break;
3204         default:
3205                 PMD_DRV_LOG(ERR, "invalid src_port mask.");
3206                 return -EINVAL;
3207         }
3208
3209         bfilter->enables = en;
3210         return 0;
3211 }
3212
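/*
 * Search every VNIC filter list for an ntuple filter with the same tuple,
 * flags and enables; return the match and, optionally, its owning VNIC.
 */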
3213 static struct bnxt_filter_info*
3214 bnxt_match_ntuple_filter(struct bnxt *bp,
3215                          struct bnxt_filter_info *bfilter,
3216                          struct bnxt_vnic_info **mvnic)
3217 {
3218         struct bnxt_filter_info *mfilter = NULL;
3219         int i;
3220
3221         for (i = bp->nr_vnics - 1; i >= 0; i--) {
3222                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3223                 STAILQ_FOREACH(mfilter, &vnic->filter, next) {
3224                         if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
3225                             bfilter->src_ipaddr_mask[0] ==
3226                             mfilter->src_ipaddr_mask[0] &&
3227                             bfilter->src_port == mfilter->src_port &&
3228                             bfilter->src_port_mask == mfilter->src_port_mask &&
3229                             bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
3230                             bfilter->dst_ipaddr_mask[0] ==
3231                             mfilter->dst_ipaddr_mask[0] &&
3232                             bfilter->dst_port == mfilter->dst_port &&
3233                             bfilter->dst_port_mask == mfilter->dst_port_mask &&
3234                             bfilter->flags == mfilter->flags &&
3235                             bfilter->enables == mfilter->enables) {
3236                                 if (mvnic)
3237                                         *mvnic = vnic;
3238                                 return mfilter;
3239                         }
3240                 }
3241         }
3242         return NULL;
3243 }
3244
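/*
 * Common add/delete handler for 5-tuple filters. An add that matches an
 * existing pattern on a different destination queue updates that filter in
 * place rather than creating a duplicate.
 */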
3245 static int
3246 bnxt_cfg_ntuple_filter(struct bnxt *bp,
3247                        struct rte_eth_ntuple_filter *nfilter,
3248                        enum rte_filter_op filter_op)
3249 {
3250         struct bnxt_filter_info *bfilter, *mfilter, *filter1;
3251         struct bnxt_vnic_info *vnic, *vnic0, *mvnic;
3252         int ret;
3253
3254         if (nfilter->flags != RTE_5TUPLE_FLAGS) {
3255                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
3256                 return -EINVAL;
3257         }
3258
3259         if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
3260                 PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n");
3261                 return -EINVAL;
3262         }
3263
3264         bfilter = bnxt_get_unused_filter(bp);
3265         if (bfilter == NULL) {
3266                 PMD_DRV_LOG(ERR,
3267                         "Not enough resources for a new filter.\n");
3268                 return -ENOMEM;
3269         }
3270         ret = parse_ntuple_filter(bp, nfilter, bfilter);
3271         if (ret < 0)
3272                 goto free_filter;
3273
3274         vnic = &bp->vnic_info[nfilter->queue];
3275         vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
3276         filter1 = STAILQ_FIRST(&vnic0->filter);
3277         if (filter1 == NULL) {
3278                 ret = -EINVAL;
3279                 goto free_filter;
3280         }
3281
3282         bfilter->dst_id = vnic->fw_vnic_id;
3283         bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
3284         bfilter->enables |=
3285                 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
3286         bfilter->ethertype = 0x800;
3287         bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3288
3289         mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic);
3290
3291         if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
3292             bfilter->dst_id == mfilter->dst_id) {
3293                 PMD_DRV_LOG(ERR, "filter exists.\n");
3294                 ret = -EEXIST;
3295                 goto free_filter;
3296         } else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
3297                    bfilter->dst_id != mfilter->dst_id) {
3298                 mfilter->dst_id = vnic->fw_vnic_id;
3299                 ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter);
3300                 STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next);
3301                 STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next);
3302                 PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n");
3303                 PMD_DRV_LOG(ERR, "Updated it to the new destination queue\n");
3304                 goto free_filter;
3305         }
3306         if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
3307                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3308                 ret = -ENOENT;
3309                 goto free_filter;
3310         }
3311
3312         if (filter_op == RTE_ETH_FILTER_ADD) {
3313                 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
3314                 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
3315                 if (ret)
3316                         goto free_filter;
3317                 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
3318         } else {
3319                 if (mfilter == NULL) {
3320                         /* This should not happen. But for Coverity! */
3321                         ret = -ENOENT;
3322                         goto free_filter;
3323                 }
3324                 ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);
3325
3326                 STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next);
3327                 bnxt_free_filter(bp, mfilter);
3328                 bnxt_free_filter(bp, bfilter);
3329         }
3330
3331         return 0;
3332 free_filter:
3333         bnxt_free_filter(bp, bfilter);
3334         return ret;
3335 }
3336
3337 static int
3338 bnxt_ntuple_filter(struct rte_eth_dev *dev,
3339                         enum rte_filter_op filter_op,
3340                         void *arg)
3341 {
3342         struct bnxt *bp = dev->data->dev_private;
3343         int ret;
3344
3345         if (filter_op == RTE_ETH_FILTER_NOP)
3346                 return 0;
3347
3348         if (arg == NULL) {
3349                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
3350                             filter_op);
3351                 return -EINVAL;
3352         }
3353
3354         switch (filter_op) {
3355         case RTE_ETH_FILTER_ADD:
3356                 ret = bnxt_cfg_ntuple_filter(bp,
3357                         (struct rte_eth_ntuple_filter *)arg,
3358                         filter_op);
3359                 break;
3360         case RTE_ETH_FILTER_DELETE:
3361                 ret = bnxt_cfg_ntuple_filter(bp,
3362                         (struct rte_eth_ntuple_filter *)arg,
3363                         filter_op);
3364                 break;
3365         default:
3366                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
3367                 ret = -EINVAL;
3368                 break;
3369         }
3370         return ret;
3371 }
3372
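/*
 * Convert an rte_eth_fdir_filter into a bnxt filter, setting the HWRM
 * enables bitmap according to the flow type. Unsupported flow types and
 * tunnel modes are rejected with -EINVAL.
 */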
3373 static int
3374 bnxt_parse_fdir_filter(struct bnxt *bp,
3375                        struct rte_eth_fdir_filter *fdir,
3376                        struct bnxt_filter_info *filter)
3377 {
3378         enum rte_fdir_mode fdir_mode =
3379                 bp->eth_dev->data->dev_conf.fdir_conf.mode;
3380         struct bnxt_vnic_info *vnic0, *vnic;
3381         struct bnxt_filter_info *filter1;
3382         uint32_t en = 0;
3383         int i;
3384
3385         if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3386                 return -EINVAL;
3387
3388         filter->l2_ovlan = fdir->input.flow_ext.vlan_tci;
3389         en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
3390
3391         switch (fdir->input.flow_type) {
3392         case RTE_ETH_FLOW_IPV4:
3393         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
3394                 /* FALLTHROUGH */
3395                 filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip;
3396                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3397                 filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip;
3398                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3399                 filter->ip_protocol = fdir->input.flow.ip4_flow.proto;
3400                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3401                 filter->ip_addr_type =
3402                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
3403                 filter->src_ipaddr_mask[0] = 0xffffffff;
3404                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3405                 filter->dst_ipaddr_mask[0] = 0xffffffff;
3406                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3407                 filter->ethertype = 0x800;
3408                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3409                 break;
3410         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
3411                 filter->src_port = fdir->input.flow.tcp4_flow.src_port;
3412                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3413                 filter->dst_port = fdir->input.flow.tcp4_flow.dst_port;
3414                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3415                 filter->dst_port_mask = 0xffff;
3416                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3417                 filter->src_port_mask = 0xffff;
3418                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3419                 filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip;
3420                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3421                 filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip;
3422                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3423                 filter->ip_protocol = 6;
3424                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3425                 filter->ip_addr_type =
3426                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
3427                 filter->src_ipaddr_mask[0] = 0xffffffff;
3428                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3429                 filter->dst_ipaddr_mask[0] = 0xffffffff;
3430                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3431                 filter->ethertype = 0x800;
3432                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3433                 break;
3434         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
3435                 filter->src_port = fdir->input.flow.udp4_flow.src_port;
3436                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3437                 filter->dst_port = fdir->input.flow.udp4_flow.dst_port;
3438                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3439                 filter->dst_port_mask = 0xffff;
3440                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3441                 filter->src_port_mask = 0xffff;
3442                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3443                 filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip;
3444                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3445                 filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip;
3446                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3447                 filter->ip_protocol = 17;
3448                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3449                 filter->ip_addr_type =
3450                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
3451                 filter->src_ipaddr_mask[0] = 0xffffffff;
3452                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3453                 filter->dst_ipaddr_mask[0] = 0xffffffff;
3454                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3455                 filter->ethertype = 0x800;
3456                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3457                 break;
3458         case RTE_ETH_FLOW_IPV6:
3459         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
3460                 /* FALLTHROUGH */
3461                 filter->ip_addr_type =
3462                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
3463                 filter->ip_protocol = fdir->input.flow.ipv6_flow.proto;
3464                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3465                 rte_memcpy(filter->src_ipaddr,
3466                            fdir->input.flow.ipv6_flow.src_ip, 16);
3467                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3468                 rte_memcpy(filter->dst_ipaddr,
3469                            fdir->input.flow.ipv6_flow.dst_ip, 16);
3470                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3471                 memset(filter->dst_ipaddr_mask, 0xff, 16);
3472                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3473                 memset(filter->src_ipaddr_mask, 0xff, 16);
3474                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3475                 filter->ethertype = 0x86dd;
3476                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3477                 break;
3478         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
3479                 filter->src_port = fdir->input.flow.tcp6_flow.src_port;
3480                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3481                 filter->dst_port = fdir->input.flow.tcp6_flow.dst_port;
3482                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3483                 filter->dst_port_mask = 0xffff;
3484                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3485                 filter->src_port_mask = 0xffff;
3486                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3487                 filter->ip_addr_type =
3488                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
3489                 filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto;
3490                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3491                 rte_memcpy(filter->src_ipaddr,
3492                            fdir->input.flow.tcp6_flow.ip.src_ip, 16);
3493                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3494                 rte_memcpy(filter->dst_ipaddr,
3495                            fdir->input.flow.tcp6_flow.ip.dst_ip, 16);
3496                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3497                 memset(filter->dst_ipaddr_mask, 0xff, 16);
3498                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3499                 memset(filter->src_ipaddr_mask, 0xff, 16);
3500                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3501                 filter->ethertype = 0x86dd;
3502                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3503                 break;
3504         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
3505                 filter->src_port = fdir->input.flow.udp6_flow.src_port;
3506                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3507                 filter->dst_port = fdir->input.flow.udp6_flow.dst_port;
3508                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3509                 filter->dst_port_mask = 0xffff;
3510                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3511                 filter->src_port_mask = 0xffff;
3512                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3513                 filter->ip_addr_type =
3514                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
3515                 filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto;
3516                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3517                 rte_memcpy(filter->src_ipaddr,
3518                            fdir->input.flow.udp6_flow.ip.src_ip, 16);
3519                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3520                 rte_memcpy(filter->dst_ipaddr,
3521                            fdir->input.flow.udp6_flow.ip.dst_ip, 16);
3522                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3523                 memset(filter->dst_ipaddr_mask, 0xff, 16);
3524                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3525                 memset(filter->src_ipaddr_mask, 0xff, 16);
3526                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3527                 filter->ethertype = 0x86dd;
3528                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3529                 break;
3530         case RTE_ETH_FLOW_L2_PAYLOAD:
3531                 filter->ethertype = fdir->input.flow.l2_flow.ether_type;
3532                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3533                 break;
3534         case RTE_ETH_FLOW_VXLAN:
3535                 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3536                         return -EINVAL;
3537                 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
3538                 filter->tunnel_type =
3539                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
3540                 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
3541                 break;
3542         case RTE_ETH_FLOW_NVGRE:
3543                 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3544                         return -EINVAL;
3545                 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
3546                 filter->tunnel_type =
3547                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
3548                 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
3549                 break;
3550         case RTE_ETH_FLOW_UNKNOWN:
3551         case RTE_ETH_FLOW_RAW:
3552         case RTE_ETH_FLOW_FRAG_IPV4:
3553         case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
3554         case RTE_ETH_FLOW_FRAG_IPV6:
3555         case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
3556         case RTE_ETH_FLOW_IPV6_EX:
3557         case RTE_ETH_FLOW_IPV6_TCP_EX:
3558         case RTE_ETH_FLOW_IPV6_UDP_EX:
3559         case RTE_ETH_FLOW_GENEVE:
3560                 /* FALLTHROUGH */
3561         default:
3562                 return -EINVAL;
3563         }
3564
3565         vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
3566         vnic = &bp->vnic_info[fdir->action.rx_queue];
3567         if (vnic == NULL) {
3568                 PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue);
3569                 return -EINVAL;
3570         }
3571
3572         if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
3573                 rte_memcpy(filter->dst_macaddr,
3574                         fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6);
3575                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
3576         }
3577
3578         if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) {
3579                 filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
3580                 filter1 = STAILQ_FIRST(&vnic0->filter);
3581                 /* filter1 = bnxt_get_l2_filter(bp, filter, vnic0); */
3582         } else {
3583                 filter->dst_id = vnic->fw_vnic_id;
3584                 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
3585                         if (filter->dst_macaddr[i] == 0x00)
3586                                 filter1 = STAILQ_FIRST(&vnic0->filter);
3587                         else
3588                                 filter1 = bnxt_get_l2_filter(bp, filter, vnic);
3589         }
3590
3591         if (filter1 == NULL)
3592                 return -EINVAL;
3593
3594         en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
3595         filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
3596
3597         filter->enables = en;
3598
3599         return 0;
3600 }
3601
3602 static struct bnxt_filter_info *
3603 bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf,
3604                 struct bnxt_vnic_info **mvnic)
3605 {
3606         struct bnxt_filter_info *mf = NULL;
3607         int i;
3608
3609         for (i = bp->nr_vnics - 1; i >= 0; i--) {
3610                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3611
3612                 STAILQ_FOREACH(mf, &vnic->filter, next) {
3613                         if (mf->filter_type == nf->filter_type &&
3614                             mf->flags == nf->flags &&
3615                             mf->src_port == nf->src_port &&
3616                             mf->src_port_mask == nf->src_port_mask &&
3617                             mf->dst_port == nf->dst_port &&
3618                             mf->dst_port_mask == nf->dst_port_mask &&
3619                             mf->ip_protocol == nf->ip_protocol &&
3620                             mf->ip_addr_type == nf->ip_addr_type &&
3621                             mf->ethertype == nf->ethertype &&
3622                             mf->vni == nf->vni &&
3623                             mf->tunnel_type == nf->tunnel_type &&
3624                             mf->l2_ovlan == nf->l2_ovlan &&
3625                             mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
3626                             mf->l2_ivlan == nf->l2_ivlan &&
3627                             mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
3628                             !memcmp(mf->l2_addr, nf->l2_addr,
3629                                     RTE_ETHER_ADDR_LEN) &&
3630                             !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
3631                                     RTE_ETHER_ADDR_LEN) &&
3632                             !memcmp(mf->src_macaddr, nf->src_macaddr,
3633                                     RTE_ETHER_ADDR_LEN) &&
3634                             !memcmp(mf->dst_macaddr, nf->dst_macaddr,
3635                                     RTE_ETHER_ADDR_LEN) &&
3636                             !memcmp(mf->src_ipaddr, nf->src_ipaddr,
3637                                     sizeof(nf->src_ipaddr)) &&
3638                             !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
3639                                     sizeof(nf->src_ipaddr_mask)) &&
3640                             !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
3641                                     sizeof(nf->dst_ipaddr)) &&
3642                             !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
3643                                     sizeof(nf->dst_ipaddr_mask))) {
3644                                 if (mvnic)
3645                                         *mvnic = vnic;
3646                                 return mf;
3647                         }
3648                 }
3649         }
3650         return NULL;
3651 }
3652
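/*
 * Handle flow director add, delete and flush requests. Flows are programmed
 * through the same HWRM ntuple filter interface used by the ntuple API.
 */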
3653 static int
3654 bnxt_fdir_filter(struct rte_eth_dev *dev,
3655                  enum rte_filter_op filter_op,
3656                  void *arg)
3657 {
3658         struct bnxt *bp = dev->data->dev_private;
3659         struct rte_eth_fdir_filter *fdir  = (struct rte_eth_fdir_filter *)arg;
3660         struct bnxt_filter_info *filter, *match;
3661         struct bnxt_vnic_info *vnic, *mvnic;
3662         int ret = 0, i;
3663
3664         if (filter_op == RTE_ETH_FILTER_NOP)
3665                 return 0;
3666
3667         if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
3668                 return -EINVAL;
3669
3670         switch (filter_op) {
3671         case RTE_ETH_FILTER_ADD:
3672         case RTE_ETH_FILTER_DELETE:
3673                 /* FALLTHROUGH */
3674                 filter = bnxt_get_unused_filter(bp);
3675                 if (filter == NULL) {
3676                         PMD_DRV_LOG(ERR,
3677                                 "Not enough resources for a new flow.\n");
3678                         return -ENOMEM;
3679                 }
3680
3681                 ret = bnxt_parse_fdir_filter(bp, fdir, filter);
3682                 if (ret != 0)
3683                         goto free_filter;
3684                 filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
3685
3686                 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3687                         vnic = &bp->vnic_info[0];
3688                 else
3689                         vnic = &bp->vnic_info[fdir->action.rx_queue];
3690
3691                 match = bnxt_match_fdir(bp, filter, &mvnic);
3692                 if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
3693                         if (match->dst_id == vnic->fw_vnic_id) {
3694                                 PMD_DRV_LOG(ERR, "Flow already exists.\n");
3695                                 ret = -EEXIST;
3696                                 goto free_filter;
3697                         } else {
3698                                 match->dst_id = vnic->fw_vnic_id;
3699                                 ret = bnxt_hwrm_set_ntuple_filter(bp,
3700                                                                   match->dst_id,
3701                                                                   match);
3702                                 STAILQ_REMOVE(&mvnic->filter, match,
3703                                               bnxt_filter_info, next);
3704                                 STAILQ_INSERT_TAIL(&vnic->filter, match, next);
3705                                 PMD_DRV_LOG(ERR,
3706                                         "Filter with matching pattern exists\n");
3707                                 PMD_DRV_LOG(ERR,
3708                                         "Updated it to the new destination queue\n");
3709                                 goto free_filter;
3710                         }
3711                 }
3712                 if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
3713                         PMD_DRV_LOG(ERR, "Flow does not exist.\n");
3714                         ret = -ENOENT;
3715                         goto free_filter;
3716                 }
3717
3718                 if (filter_op == RTE_ETH_FILTER_ADD) {
3719                         ret = bnxt_hwrm_set_ntuple_filter(bp,
3720                                                           filter->dst_id,
3721                                                           filter);
3722                         if (ret)
3723                                 goto free_filter;
3724                         STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
3725                 } else {
3726                         ret = bnxt_hwrm_clear_ntuple_filter(bp, match);
3727                         STAILQ_REMOVE(&vnic->filter, match,
3728                                       bnxt_filter_info, next);
3729                         bnxt_free_filter(bp, match);
3730                         bnxt_free_filter(bp, filter);
3731                 }
3732                 break;
3733         case RTE_ETH_FILTER_FLUSH:
3734                 for (i = bp->nr_vnics - 1; i >= 0; i--) {
3735                         struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3736
3737                         STAILQ_FOREACH(filter, &vnic->filter, next) {
3738                                 if (filter->filter_type ==
3739                                     HWRM_CFA_NTUPLE_FILTER) {
3740                                         ret =
3741                                         bnxt_hwrm_clear_ntuple_filter(bp,
3742                                                                       filter);
3743                                         STAILQ_REMOVE(&vnic->filter, filter,
3744                                                       bnxt_filter_info, next);
3745                                 }
3746                         }
3747                 }
3748                 return ret;
3749         case RTE_ETH_FILTER_UPDATE:
3750         case RTE_ETH_FILTER_STATS:
3751         case RTE_ETH_FILTER_INFO:
3752                 PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op);
3753                 break;
3754         default:
3755                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
3756                 ret = -EINVAL;
3757                 break;
3758         }
3759         return ret;
3760
3761 free_filter:
3762         bnxt_free_filter(bp, filter);
3763         return ret;
3764 }
3765
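/*
 * Dispatch rte_eth_dev filter_ctrl requests to the per-type handlers. For
 * representor ports the parent device's private data is used, and an error
 * is returned if the parent has already been removed.
 */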
3766 int
3767 bnxt_filter_ctrl_op(struct rte_eth_dev *dev,
3768                     enum rte_filter_type filter_type,
3769                     enum rte_filter_op filter_op, void *arg)
3770 {
3771         struct bnxt *bp = dev->data->dev_private;
3772         int ret = 0;
3773
3774         if (!bp)
3775                 return -EIO;
3776
3777         if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
3778                 struct bnxt_representor *vfr = dev->data->dev_private;
3779                 bp = vfr->parent_dev->data->dev_private;
3780                 /* parent is deleted while children are still valid */
3781                 if (!bp) {
3782                         PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR Error %d:%d\n",
3783                                     dev->data->port_id,
3784                                     filter_type,
3785                                     filter_op);
3786                         return -EIO;
3787                 }
3788         }
3789
3790         ret = is_bnxt_in_error(bp);
3791         if (ret)
3792                 return ret;
3793
3794         switch (filter_type) {
3795         case RTE_ETH_FILTER_TUNNEL:
3796                 PMD_DRV_LOG(ERR,
3797                         "filter type: %d: To be implemented\n", filter_type);
3798                 break;
3799         case RTE_ETH_FILTER_FDIR:
3800                 ret = bnxt_fdir_filter(dev, filter_op, arg);
3801                 break;
3802         case RTE_ETH_FILTER_NTUPLE:
3803                 ret = bnxt_ntuple_filter(dev, filter_op, arg);
3804                 break;
3805         case RTE_ETH_FILTER_ETHERTYPE:
3806                 ret = bnxt_ethertype_filter(dev, filter_op, arg);
3807                 break;
3808         case RTE_ETH_FILTER_GENERIC:
3809                 if (filter_op != RTE_ETH_FILTER_GET)
3810                         return -EINVAL;
3811                 if (BNXT_TRUFLOW_EN(bp))
3812                         *(const void **)arg = &bnxt_ulp_rte_flow_ops;
3813                 else
3814                         *(const void **)arg = &bnxt_flow_ops;
3815                 break;
3816         default:
3817                 PMD_DRV_LOG(ERR,
3818                         "Filter type (%d) not supported", filter_type);
3819                 ret = -EINVAL;
3820                 break;
3821         }
3822         return ret;
3823 }
3824
3825 static const uint32_t *
3826 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
3827 {
3828         static const uint32_t ptypes[] = {
3829                 RTE_PTYPE_L2_ETHER_VLAN,
3830                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
3831                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
3832                 RTE_PTYPE_L4_ICMP,
3833                 RTE_PTYPE_L4_TCP,
3834                 RTE_PTYPE_L4_UDP,
3835                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
3836                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
3837                 RTE_PTYPE_INNER_L4_ICMP,
3838                 RTE_PTYPE_INNER_L4_TCP,
3839                 RTE_PTYPE_INNER_L4_UDP,
3840                 RTE_PTYPE_UNKNOWN
3841         };
3842
3843         if (!dev->rx_pkt_burst)
3844                 return NULL;
3845
3846         return ptypes;
3847 }
3848
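/*
 * Map a group of GRC registers into the given register window. All registers
 * in the array must fall within the same 4KB page; otherwise -ERANGE is
 * returned.
 */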
3849 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
3850                          int reg_win)
3851 {
3852         uint32_t reg_base = *reg_arr & 0xfffff000;
3853         uint32_t win_off;
3854         int i;
3855
3856         for (i = 0; i < count; i++) {
3857                 if ((reg_arr[i] & 0xfffff000) != reg_base)
3858                         return -ERANGE;
3859         }
3860         win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
3861         rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off);
3862         return 0;
3863 }
3864
3865 static int bnxt_map_ptp_regs(struct bnxt *bp)
3866 {
3867         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3868         uint32_t *reg_arr;
3869         int rc, i;
3870
3871         reg_arr = ptp->rx_regs;
3872         rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5);
3873         if (rc)
3874                 return rc;
3875
3876         reg_arr = ptp->tx_regs;
3877         rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6);
3878         if (rc)
3879                 return rc;
3880
3881         for (i = 0; i < BNXT_PTP_RX_REGS; i++)
3882                 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff);
3883
3884         for (i = 0; i < BNXT_PTP_TX_REGS; i++)
3885                 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff);
3886
3887         return 0;
3888 }
3889
3890 static void bnxt_unmap_ptp_regs(struct bnxt *bp)
3891 {
3892         rte_write32(0, (uint8_t *)bp->bar0 +
3893                          BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16);
3894         rte_write32(0, (uint8_t *)bp->bar0 +
3895                          BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20);
3896 }
3897
3898 static uint64_t bnxt_cc_read(struct bnxt *bp)
3899 {
3900         uint64_t ns;
3901
3902         ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3903                               BNXT_GRCPF_REG_SYNC_TIME));
3904         ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3905                                           BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32;
3906         return ns;
3907 }
3908
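/*
 * Read the latched Tx PTP timestamp from the mapped FIFO registers. Returns
 * -EAGAIN when the Tx timestamp FIFO is empty.
 */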
3909 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts)
3910 {
3911         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3912         uint32_t fifo;
3913
3914         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3915                                 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
3916         if (fifo & BNXT_PTP_TX_FIFO_EMPTY)
3917                 return -EAGAIN;
3918
3919         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3920                                 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
3921         *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3922                                 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L]));
3923         *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3924                                 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32;
3925
3926         return 0;
3927 }
3928
3929 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
3930 {
3931         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3932         struct bnxt_pf_info *pf = bp->pf;
3933         uint16_t port_id;
3934         uint32_t fifo;
3935
3936         if (!ptp)
3937                 return -ENODEV;
3938
3939         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3940                                 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
3941         if (!(fifo & BNXT_PTP_RX_FIFO_PENDING))
3942                 return -EAGAIN;
3943
3944         port_id = pf->port_id;
3945         rte_write32(1 << port_id, (uint8_t *)bp->bar0 +
3946                ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]);
3947
3948         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3949                                    ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
3950         if (fifo & BNXT_PTP_RX_FIFO_PENDING) {
3951                 /* bnxt_clr_rx_ts(bp);  TBD */
3952                 return -EBUSY;
3953         }
3954
3955         *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3956                                 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]));
3957         *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3958                                 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32;
3959
3960         return 0;
3961 }
3962
3963 static int
3964 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
3965 {
3966         uint64_t ns;
3967         struct bnxt *bp = dev->data->dev_private;
3968         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3969
3970         if (!ptp)
3971                 return 0;
3972
3973         ns = rte_timespec_to_ns(ts);
3974         /* Set the timecounters to a new value. */
3975         ptp->tc.nsec = ns;
3976
3977         return 0;
3978 }
3979
3980 static int
3981 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
3982 {
3983         struct bnxt *bp = dev->data->dev_private;
3984         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3985         uint64_t ns, systime_cycles = 0;
3986         int rc = 0;
3987
3988         if (!ptp)
3989                 return 0;
3990
3991         if (BNXT_CHIP_THOR(bp))
3992                 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
3993                                              &systime_cycles);
3994         else
3995                 systime_cycles = bnxt_cc_read(bp);
3996
3997         ns = rte_timecounter_update(&ptp->tc, systime_cycles);
3998         *ts = rte_ns_to_timespec(ns);
3999
4000         return rc;
4001 }
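
/*
 * Enable PTP timestamping: program Rx/Tx timestamp capture through HWRM,
 * reset the timecounters, and map the PTP registers on non-Thor chips.
 */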
4002 static int
4003 bnxt_timesync_enable(struct rte_eth_dev *dev)
4004 {
4005         struct bnxt *bp = dev->data->dev_private;
4006         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
4007         uint32_t shift = 0;
4008         int rc;
4009
4010         if (!ptp)
4011                 return 0;
4012
4013         ptp->rx_filter = 1;
4014         ptp->tx_tstamp_en = 1;
4015         ptp->rxctl = BNXT_PTP_MSG_EVENTS;
4016
4017         rc = bnxt_hwrm_ptp_cfg(bp);
4018         if (rc)
4019                 return rc;
4020
4021         memset(&ptp->tc, 0, sizeof(struct rte_timecounter));
4022         memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4023         memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4024
4025         ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
4026         ptp->tc.cc_shift = shift;
4027         ptp->tc.nsec_mask = (1ULL << shift) - 1;
4028
4029         ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
4030         ptp->rx_tstamp_tc.cc_shift = shift;
4031         ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4032
4033         ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
4034         ptp->tx_tstamp_tc.cc_shift = shift;
4035         ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4036
4037         if (!BNXT_CHIP_THOR(bp))
4038                 bnxt_map_ptp_regs(bp);
4039
4040         return 0;
4041 }
4042
4043 static int
4044 bnxt_timesync_disable(struct rte_eth_dev *dev)
4045 {
4046         struct bnxt *bp = dev->data->dev_private;
4047         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
4048
4049         if (!ptp)
4050                 return 0;
4051
4052         ptp->rx_filter = 0;
4053         ptp->tx_tstamp_en = 0;
4054         ptp->rxctl = 0;
4055
4056         bnxt_hwrm_ptp_cfg(bp);
4057
4058         if (!BNXT_CHIP_THOR(bp))
4059                 bnxt_unmap_ptp_regs(bp);
4060
4061         return 0;
4062 }
4063
4064 static int
4065 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
4066                                  struct timespec *timestamp,
4067                                  uint32_t flags __rte_unused)
4068 {
4069         struct bnxt *bp = dev->data->dev_private;
4070         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
4071         uint64_t rx_tstamp_cycles = 0;
4072         uint64_t ns;
4073
4074         if (!ptp)
4075                 return 0;
4076
4077         if (BNXT_CHIP_THOR(bp))
4078                 rx_tstamp_cycles = ptp->rx_timestamp;
4079         else
4080                 bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
4081
4082         ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles);
4083         *timestamp = rte_ns_to_timespec(ns);
4084         return  0;
4085 }
4086
4087 static int
4088 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
4089                                  struct timespec *timestamp)
4090 {
4091         struct bnxt *bp = dev->data->dev_private;
4092         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
4093         uint64_t tx_tstamp_cycles = 0;
4094         uint64_t ns;
4095         int rc = 0;
4096
4097         if (!ptp)
4098                 return 0;
4099
4100         if (BNXT_CHIP_THOR(bp))
4101                 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX,
4102                                              &tx_tstamp_cycles);
4103         else
4104                 rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles);
4105
4106         ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles);
4107         *timestamp = rte_ns_to_timespec(ns);
4108
4109         return rc;
4110 }
4111
4112 static int
4113 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
4114 {
4115         struct bnxt *bp = dev->data->dev_private;
4116         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
4117
4118         if (!ptp)
4119                 return 0;
4120
4121         ptp->tc.nsec += delta;
4122
4123         return 0;
4124 }
4125
4126 static int
4127 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
4128 {
4129         struct bnxt *bp = dev->data->dev_private;
4130         int rc;
4131         uint32_t dir_entries;
4132         uint32_t entry_length;
4133
4134         rc = is_bnxt_in_error(bp);
4135         if (rc)
4136                 return rc;
4137
4138         PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n",
4139                     bp->pdev->addr.domain, bp->pdev->addr.bus,
4140                     bp->pdev->addr.devid, bp->pdev->addr.function);
4141
4142         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
4143         if (rc != 0)
4144                 return rc;
4145
4146         return dir_entries * entry_length;
4147 }
4148
4149 static int
4150 bnxt_get_eeprom_op(struct rte_eth_dev *dev,
4151                 struct rte_dev_eeprom_info *in_eeprom)
4152 {
4153         struct bnxt *bp = dev->data->dev_private;
4154         uint32_t index;
4155         uint32_t offset;
4156         int rc;
4157
4158         rc = is_bnxt_in_error(bp);
4159         if (rc)
4160                 return rc;
4161
4162         PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n",
4163                     bp->pdev->addr.domain, bp->pdev->addr.bus,
4164                     bp->pdev->addr.devid, bp->pdev->addr.function,
4165                     in_eeprom->offset, in_eeprom->length);
4166
4167         if (in_eeprom->offset == 0) /* special offset value to get directory */
4168                 return bnxt_get_nvram_directory(bp, in_eeprom->length,
4169                                                 in_eeprom->data);
4170
4171         index = in_eeprom->offset >> 24;
4172         offset = in_eeprom->offset & 0xffffff;
4173
4174         if (index != 0)
4175                 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset,
4176                                            in_eeprom->length, in_eeprom->data);
4177
4178         return 0;
4179 }
4180
4181 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
4182 {
4183         switch (dir_type) {
4184         case BNX_DIR_TYPE_CHIMP_PATCH:
4185         case BNX_DIR_TYPE_BOOTCODE:
4186         case BNX_DIR_TYPE_BOOTCODE_2:
4187         case BNX_DIR_TYPE_APE_FW:
4188         case BNX_DIR_TYPE_APE_PATCH:
4189         case BNX_DIR_TYPE_KONG_FW:
4190         case BNX_DIR_TYPE_KONG_PATCH:
4191         case BNX_DIR_TYPE_BONO_FW:
4192         case BNX_DIR_TYPE_BONO_PATCH:
4193                 /* FALLTHROUGH */
4194                 return true;
4195         }
4196
4197         return false;
4198 }
4199
4200 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
4201 {
4202         switch (dir_type) {
4203         case BNX_DIR_TYPE_AVS:
4204         case BNX_DIR_TYPE_EXP_ROM_MBA:
4205         case BNX_DIR_TYPE_PCIE:
4206         case BNX_DIR_TYPE_TSCF_UCODE:
4207         case BNX_DIR_TYPE_EXT_PHY:
4208         case BNX_DIR_TYPE_CCM:
4209         case BNX_DIR_TYPE_ISCSI_BOOT:
4210         case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
4211         case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
4212                 /* FALLTHROUGH */
4213                 return true;
4214         }
4215
4216         return false;
4217 }
4218
4219 static bool bnxt_dir_type_is_executable(uint16_t dir_type)
4220 {
4221         return bnxt_dir_type_is_ape_bin_format(dir_type) ||
4222                 bnxt_dir_type_is_other_exec_format(dir_type);
4223 }
4224
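/*
 * NVM write entry point. A magic with 0xffff in the upper 16 bits selects a
 * directory operation (currently only erase); otherwise magic and offset
 * encode the item type, extension, ordinal and attributes passed to
 * bnxt_hwrm_flash_nvram(). Writes are only permitted on a PF.
 */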
4225 static int
4226 bnxt_set_eeprom_op(struct rte_eth_dev *dev,
4227                 struct rte_dev_eeprom_info *in_eeprom)
4228 {
4229         struct bnxt *bp = dev->data->dev_private;
4230         uint8_t index, dir_op;
4231         uint16_t type, ext, ordinal, attr;
4232         int rc;
4233
4234         rc = is_bnxt_in_error(bp);
4235         if (rc)
4236                 return rc;
4237
4238         PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n",
4239                     bp->pdev->addr.domain, bp->pdev->addr.bus,
4240                     bp->pdev->addr.devid, bp->pdev->addr.function,
4241                     in_eeprom->offset, in_eeprom->length);
4242
4243         if (!BNXT_PF(bp)) {
4244                 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n");
4245                 return -EINVAL;
4246         }
4247
4248         type = in_eeprom->magic >> 16;
4249
4250         if (type == 0xffff) { /* special value for directory operations */
4251                 index = in_eeprom->magic & 0xff;
4252                 dir_op = in_eeprom->magic >> 8;
4253                 if (index == 0)
4254                         return -EINVAL;
4255                 switch (dir_op) {
4256                 case 0x0e: /* erase */
4257                         if (in_eeprom->offset != ~in_eeprom->magic)
4258                                 return -EINVAL;
4259                         return bnxt_hwrm_erase_nvram_directory(bp, index - 1);
4260                 default:
4261                         return -EINVAL;
4262                 }
4263         }
4264
4265         /* Create or re-write an NVM item: */
4266         if (bnxt_dir_type_is_executable(type))
4267                 return -EOPNOTSUPP;
4268         ext = in_eeprom->magic & 0xffff;
4269         ordinal = in_eeprom->offset >> 16;
4270         attr = in_eeprom->offset & 0xffff;
4271
4272         return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
4273                                      in_eeprom->data, in_eeprom->length);
4274 }
4275
4276 /*
4277  * Initialization
4278  */
4279
4280 static const struct eth_dev_ops bnxt_dev_ops = {
4281         .dev_infos_get = bnxt_dev_info_get_op,
4282         .dev_close = bnxt_dev_close_op,
4283         .dev_configure = bnxt_dev_configure_op,
4284         .dev_start = bnxt_dev_start_op,
4285         .dev_stop = bnxt_dev_stop_op,
4286         .dev_set_link_up = bnxt_dev_set_link_up_op,
4287         .dev_set_link_down = bnxt_dev_set_link_down_op,
4288         .stats_get = bnxt_stats_get_op,
4289         .stats_reset = bnxt_stats_reset_op,
4290         .rx_queue_setup = bnxt_rx_queue_setup_op,
4291         .rx_queue_release = bnxt_rx_queue_release_op,
4292         .tx_queue_setup = bnxt_tx_queue_setup_op,
4293         .tx_queue_release = bnxt_tx_queue_release_op,
4294         .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
4295         .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
4296         .reta_update = bnxt_reta_update_op,
4297         .reta_query = bnxt_reta_query_op,
4298         .rss_hash_update = bnxt_rss_hash_update_op,
4299         .rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
4300         .link_update = bnxt_link_update_op,
4301         .promiscuous_enable = bnxt_promiscuous_enable_op,
4302         .promiscuous_disable = bnxt_promiscuous_disable_op,
4303         .allmulticast_enable = bnxt_allmulticast_enable_op,
4304         .allmulticast_disable = bnxt_allmulticast_disable_op,
4305         .mac_addr_add = bnxt_mac_addr_add_op,
4306         .mac_addr_remove = bnxt_mac_addr_remove_op,
4307         .flow_ctrl_get = bnxt_flow_ctrl_get_op,
4308         .flow_ctrl_set = bnxt_flow_ctrl_set_op,
4309         .udp_tunnel_port_add  = bnxt_udp_tunnel_port_add_op,
4310         .udp_tunnel_port_del  = bnxt_udp_tunnel_port_del_op,
4311         .vlan_filter_set = bnxt_vlan_filter_set_op,
4312         .vlan_offload_set = bnxt_vlan_offload_set_op,
4313         .vlan_tpid_set = bnxt_vlan_tpid_set_op,
4314         .vlan_pvid_set = bnxt_vlan_pvid_set_op,
4315         .mtu_set = bnxt_mtu_set_op,
4316         .mac_addr_set = bnxt_set_default_mac_addr_op,
4317         .xstats_get = bnxt_dev_xstats_get_op,
4318         .xstats_get_names = bnxt_dev_xstats_get_names_op,
4319         .xstats_reset = bnxt_dev_xstats_reset_op,
4320         .fw_version_get = bnxt_fw_version_get,
4321         .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
4322         .rxq_info_get = bnxt_rxq_info_get_op,
4323         .txq_info_get = bnxt_txq_info_get_op,
4324         .rx_burst_mode_get = bnxt_rx_burst_mode_get,
4325         .tx_burst_mode_get = bnxt_tx_burst_mode_get,
4326         .dev_led_on = bnxt_dev_led_on_op,
4327         .dev_led_off = bnxt_dev_led_off_op,
4328         .xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
4329         .xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
4330         .rx_queue_start = bnxt_rx_queue_start,
4331         .rx_queue_stop = bnxt_rx_queue_stop,
4332         .tx_queue_start = bnxt_tx_queue_start,
4333         .tx_queue_stop = bnxt_tx_queue_stop,
4334         .filter_ctrl = bnxt_filter_ctrl_op,
4335         .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
4336         .get_eeprom_length    = bnxt_get_eeprom_length_op,
4337         .get_eeprom           = bnxt_get_eeprom_op,
4338         .set_eeprom           = bnxt_set_eeprom_op,
4339         .timesync_enable      = bnxt_timesync_enable,
4340         .timesync_disable     = bnxt_timesync_disable,
4341         .timesync_read_time   = bnxt_timesync_read_time,
4342         .timesync_write_time   = bnxt_timesync_write_time,
4343         .timesync_adjust_time = bnxt_timesync_adjust_time,
4344         .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp,
4345         .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp,
4346 };
4347
4348 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg)
4349 {
4350         uint32_t offset;
4351
4352         /* Only pre-map the reset GRC registers using window 3 */
4353         rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 +
4354                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8);
4355
4356         offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc);
4357
4358         return offset;
4359 }
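
/*
 * Illustrative decomposition of a GRC address as handled by
 * bnxt_map_reset_regs(): for reg = 0x0012345c, the 4 KB-aligned base
 * 0x00123000 is programmed into window 3 and the returned value is
 * BNXT_GRCP_WINDOW_3_BASE + 0x45c.  The low 2 bits are masked off because
 * they encode the address space location rather than a byte offset.
 */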
4360
4361 int bnxt_map_fw_health_status_regs(struct bnxt *bp)
4362 {
4363         struct bnxt_error_recovery_info *info = bp->recovery_info;
4364         uint32_t reg_base = 0xffffffff;
4365         int i;
4366
4367         /* Only pre-map the monitoring GRC registers using window 2 */
4368         for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) {
4369                 uint32_t reg = info->status_regs[i];
4370
4371                 if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC)
4372                         continue;
4373
4374                 if (reg_base == 0xffffffff)
4375                         reg_base = reg & 0xfffff000;
4376                 if ((reg & 0xfffff000) != reg_base)
4377                         return -ERANGE;
4378
4379                 /* Use mask 0xffc, as the lower 2 bits indicate the
4380                  * address space location.
4381                  */
4382                 info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE +
4383                                                 (reg & 0xffc);
4384         }
4385
4386         if (reg_base == 0xffffffff)
4387                 return 0;
4388
4389         rte_write32(reg_base, (uint8_t *)bp->bar0 +
4390                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
4391
4392         return 0;
4393 }
4394
4395 static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index)
4396 {
4397         struct bnxt_error_recovery_info *info = bp->recovery_info;
4398         uint32_t delay = info->delay_after_reset[index];
4399         uint32_t val = info->reset_reg_val[index];
4400         uint32_t reg = info->reset_reg[index];
4401         uint32_t type, offset;
4402
4403         type = BNXT_FW_STATUS_REG_TYPE(reg);
4404         offset = BNXT_FW_STATUS_REG_OFF(reg);
4405
4406         switch (type) {
4407         case BNXT_FW_STATUS_REG_TYPE_CFG:
4408                 rte_pci_write_config(bp->pdev, &val, sizeof(val), offset);
4409                 break;
4410         case BNXT_FW_STATUS_REG_TYPE_GRC:
4411                 offset = bnxt_map_reset_regs(bp, offset);
4412                 rte_write32(val, (uint8_t *)bp->bar0 + offset);
4413                 break;
4414         case BNXT_FW_STATUS_REG_TYPE_BAR0:
4415                 rte_write32(val, (uint8_t *)bp->bar0 + offset);
4416                 break;
4417         }
4418         /* Wait for the FW-specified delay for the core reset to complete */
4419         if (delay)
4420                 rte_delay_ms(delay);
4421 }
4422
4423 static void bnxt_dev_cleanup(struct bnxt *bp)
4424 {
4425         bp->eth_dev->data->dev_link.link_status = 0;
4426         bp->link_info->link_up = 0;
4427         if (bp->eth_dev->data->dev_started)
4428                 bnxt_dev_stop_op(bp->eth_dev);
4429
4430         bnxt_uninit_resources(bp, true);
4431 }
4432
4433 static int bnxt_restore_vlan_filters(struct bnxt *bp)
4434 {
4435         struct rte_eth_dev *dev = bp->eth_dev;
4436         struct rte_vlan_filter_conf *vfc;
4437         int vidx, vbit, rc;
4438         uint16_t vlan_id;
4439
4440         for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) {
4441                 vfc = &dev->data->vlan_filter_conf;
4442                 vidx = vlan_id / 64;
4443                 vbit = vlan_id % 64;
4444
4445                 /* Each bit corresponds to a VLAN id */
4446                 if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) {
4447                         rc = bnxt_add_vlan_filter(bp, vlan_id);
4448                         if (rc)
4449                                 return rc;
4450                 }
4451         }
4452
4453         return 0;
4454 }
4455
4456 static int bnxt_restore_mac_filters(struct bnxt *bp)
4457 {
4458         struct rte_eth_dev *dev = bp->eth_dev;
4459         struct rte_eth_dev_info dev_info;
4460         struct rte_ether_addr *addr;
4461         uint64_t pool_mask;
4462         uint32_t pool = 0;
4463         uint16_t i;
4464         int rc;
4465
4466         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
4467                 return 0;
4468
4469         rc = bnxt_dev_info_get_op(dev, &dev_info);
4470         if (rc)
4471                 return rc;
4472
4473         /* replay MAC address configuration */
4474         for (i = 1; i < dev_info.max_mac_addrs; i++) {
4475                 addr = &dev->data->mac_addrs[i];
4476
4477                 /* skip zero address */
4478                 if (rte_is_zero_ether_addr(addr))
4479                         continue;
4480
4481                 pool = 0;
4482                 pool_mask = dev->data->mac_pool_sel[i];
4483
4484                 do {
4485                         if (pool_mask & 1ULL) {
4486                                 rc = bnxt_mac_addr_add_op(dev, addr, i, pool);
4487                                 if (rc)
4488                                         return rc;
4489                         }
4490                         pool_mask >>= 1;
4491                         pool++;
4492                 } while (pool_mask);
4493         }
4494
4495         return 0;
4496 }
4497
4498 static int bnxt_restore_filters(struct bnxt *bp)
4499 {
4500         struct rte_eth_dev *dev = bp->eth_dev;
4501         int ret = 0;
4502
4503         if (dev->data->all_multicast) {
4504                 ret = bnxt_allmulticast_enable_op(dev);
4505                 if (ret)
4506                         return ret;
4507         }
4508         if (dev->data->promiscuous) {
4509                 ret = bnxt_promiscuous_enable_op(dev);
4510                 if (ret)
4511                         return ret;
4512         }
4513
4514         ret = bnxt_restore_mac_filters(bp);
4515         if (ret)
4516                 return ret;
4517
4518         ret = bnxt_restore_vlan_filters(bp);
4519         /* TODO restore other filters as well */
4520         return ret;
4521 }
4522
4523 static void bnxt_dev_recover(void *arg)
4524 {
4525         struct bnxt *bp = arg;
4526         int timeout = bp->fw_reset_max_msecs;
4527         int rc = 0;
4528
4529         /* Clear the error flag so that device re-init can proceed */
4530         bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
4531
4532         do {
4533                 rc = bnxt_hwrm_ver_get(bp, SHORT_HWRM_CMD_TIMEOUT);
4534                 if (rc == 0)
4535                         break;
4536                 rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL);
4537                 timeout -= BNXT_FW_READY_WAIT_INTERVAL;
4538         } while (rc && timeout);
4539
4540         if (rc) {
4541                 PMD_DRV_LOG(ERR, "FW is not ready after reset\n");
4542                 goto err;
4543         }
4544
4545         rc = bnxt_init_resources(bp, true);
4546         if (rc) {
4547                 PMD_DRV_LOG(ERR,
4548                             "Failed to initialize resources after reset\n");
4549                 goto err;
4550         }
4551         /* Clear the reset flag now that the device has been re-initialized */
4552         bp->flags &= ~BNXT_FLAG_FW_RESET;
4553
4554         rc = bnxt_dev_start_op(bp->eth_dev);
4555         if (rc) {
4556                 PMD_DRV_LOG(ERR, "Failed to start port after reset\n");
4557                 goto err_start;
4558         }
4559
4560         rc = bnxt_restore_filters(bp);
4561         if (rc)
4562                 goto err_start;
4563
4564         PMD_DRV_LOG(INFO, "Recovered from FW reset\n");
4565         return;
4566 err_start:
4567         bnxt_dev_stop_op(bp->eth_dev);
4568 err:
4569         bp->flags |= BNXT_FLAG_FATAL_ERROR;
4570         bnxt_uninit_resources(bp, false);
4571         PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n");
4572 }
4573
4574 void bnxt_dev_reset_and_resume(void *arg)
4575 {
4576         struct bnxt *bp = arg;
4577         int rc;
4578
4579         bnxt_dev_cleanup(bp);
4580
4581         bnxt_wait_for_device_shutdown(bp);
4582
4583         rc = rte_eal_alarm_set(US_PER_MS * bp->fw_reset_min_msecs,
4584                                bnxt_dev_recover, (void *)bp);
4585         if (rc)
4586                 PMD_DRV_LOG(ERR, "Error setting recovery alarm\n");
4587 }
4588
4589 uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index)
4590 {
4591         struct bnxt_error_recovery_info *info = bp->recovery_info;
4592         uint32_t reg = info->status_regs[index];
4593         uint32_t type, offset, val = 0;
4594
4595         type = BNXT_FW_STATUS_REG_TYPE(reg);
4596         offset = BNXT_FW_STATUS_REG_OFF(reg);
4597
4598         switch (type) {
4599         case BNXT_FW_STATUS_REG_TYPE_CFG:
4600                 rte_pci_read_config(bp->pdev, &val, sizeof(val), offset);
4601                 break;
4602         case BNXT_FW_STATUS_REG_TYPE_GRC:
4603                 offset = info->mapped_status_regs[index];
4604                 /* FALLTHROUGH */
4605         case BNXT_FW_STATUS_REG_TYPE_BAR0:
4606                 val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
4607                                        offset));
4608                 break;
4609         }
4610
4611         return val;
4612 }
4613
4614 static int bnxt_fw_reset_all(struct bnxt *bp)
4615 {
4616         struct bnxt_error_recovery_info *info = bp->recovery_info;
4617         uint32_t i;
4618         int rc = 0;
4619
4620         if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
4621                 /* Reset through master function driver */
4622                 for (i = 0; i < info->reg_array_cnt; i++)
4623                         bnxt_write_fw_reset_reg(bp, i);
4624                 /* Wait for time specified by FW after triggering reset */
4625                 rte_delay_ms(info->master_func_wait_period_after_reset);
4626         } else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) {
4627                 /* Reset with the help of Kong processor */
4628                 rc = bnxt_hwrm_fw_reset(bp);
4629                 if (rc)
4630                         PMD_DRV_LOG(ERR, "Failed to reset FW\n");
4631         }
4632
4633         return rc;
4634 }
4635
4636 static void bnxt_fw_reset_cb(void *arg)
4637 {
4638         struct bnxt *bp = arg;
4639         struct bnxt_error_recovery_info *info = bp->recovery_info;
4640         int rc = 0;
4641
4642         /* Only the master function can initiate a FW reset */
4643         if (bnxt_is_master_func(bp) &&
4644             bnxt_is_recovery_enabled(bp)) {
4645                 rc = bnxt_fw_reset_all(bp);
4646                 if (rc) {
4647                         PMD_DRV_LOG(ERR, "Adapter recovery failed\n");
4648                         return;
4649                 }
4650         }
4651
4652         /* If the recovery method is ERROR_RECOVERY_CO_CPU, KONG will send
4653          * an EXCEPTION_FATAL_ASYNC event to all functions (including the
4654          * master function). After receiving this async event, all active
4655          * drivers should treat this case as FW-initiated recovery.
4656          */
4657         if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
4658                 bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT;
4659                 bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT;
4660
4661                 /* To recover from error */
4662                 rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
4663                                   (void *)bp);
4664         }
4665 }
4666
4667 /* The driver should poll the FW heartbeat and reset_counter registers at
4668  * the frequency advertised by FW in HWRM_ERROR_RECOVERY_QCFG.
4669  * When the driver detects that the heartbeat has stopped or that the
4670  * reset_counter has changed, it has to trigger a reset to recover from the
4671  * error condition.  A "master PF" is the function that has the privilege to
4672  * initiate the chimp reset.  The master PF is elected by the firmware and
4673  * notified through an async message.
4674  */
4675 static void bnxt_check_fw_health(void *arg)
4676 {
4677         struct bnxt *bp = arg;
4678         struct bnxt_error_recovery_info *info = bp->recovery_info;
4679         uint32_t val = 0, wait_msec;
4680
4681         if (!info || !bnxt_is_recovery_enabled(bp) ||
4682             is_bnxt_in_error(bp))
4683                 return;
4684
4685         val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG);
4686         if (val == info->last_heart_beat)
4687                 goto reset;
4688
4689         info->last_heart_beat = val;
4690
4691         val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG);
4692         if (val != info->last_reset_counter)
4693                 goto reset;
4694
4695         info->last_reset_counter = val;
4696
4697         rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq,
4698                           bnxt_check_fw_health, (void *)bp);
4699
4700         return;
4701 reset:
4702         /* Stop DMA to/from device */
4703         bp->flags |= BNXT_FLAG_FATAL_ERROR;
4704         bp->flags |= BNXT_FLAG_FW_RESET;
4705
4706         PMD_DRV_LOG(ERR, "Detected FW dead condition\n");
4707
4708         if (bnxt_is_master_func(bp))
4709                 wait_msec = info->master_func_wait_period;
4710         else
4711                 wait_msec = info->normal_func_wait_period;
4712
4713         rte_eal_alarm_set(US_PER_MS * wait_msec,
4714                           bnxt_fw_reset_cb, (void *)bp);
4715 }
4716
4717 void bnxt_schedule_fw_health_check(struct bnxt *bp)
4718 {
4719         uint32_t polling_freq;
4720
4721         pthread_mutex_lock(&bp->health_check_lock);
4722
4723         if (!bnxt_is_recovery_enabled(bp))
4724                 goto done;
4725
4726         if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED)
4727                 goto done;
4728
4729         polling_freq = bp->recovery_info->driver_polling_freq;
4730
4731         rte_eal_alarm_set(US_PER_MS * polling_freq,
4732                           bnxt_check_fw_health, (void *)bp);
4733         bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
4734
4735 done:
4736         pthread_mutex_unlock(&bp->health_check_lock);
4737 }
4738
4739 static void bnxt_cancel_fw_health_check(struct bnxt *bp)
4740 {
4741         if (!bnxt_is_recovery_enabled(bp))
4742                 return;
4743
4744         rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp);
4745         bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
4746 }
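
/*
 * Recovery flow as wired up above: bnxt_check_fw_health() polls the heartbeat
 * and reset counters at the FW-advertised frequency; when either stalls it
 * arms bnxt_fw_reset_cb().  That callback lets the master function trigger
 * bnxt_fw_reset_all() and, for host-based recovery, schedules
 * bnxt_dev_reset_and_resume(), which cleans up, waits for FW shutdown and then
 * runs bnxt_dev_recover() to re-initialize resources, restart the port and
 * replay the MAC/VLAN/multicast filter configuration.
 */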
4747
4748 static bool bnxt_vf_pciid(uint16_t device_id)
4749 {
4750         switch (device_id) {
4751         case BROADCOM_DEV_ID_57304_VF:
4752         case BROADCOM_DEV_ID_57406_VF:
4753         case BROADCOM_DEV_ID_5731X_VF:
4754         case BROADCOM_DEV_ID_5741X_VF:
4755         case BROADCOM_DEV_ID_57414_VF:
4756         case BROADCOM_DEV_ID_STRATUS_NIC_VF1:
4757         case BROADCOM_DEV_ID_STRATUS_NIC_VF2:
4758         case BROADCOM_DEV_ID_58802_VF:
4759         case BROADCOM_DEV_ID_57500_VF1:
4760         case BROADCOM_DEV_ID_57500_VF2:
4761                 /* FALLTHROUGH */
4762                 return true;
4763         default:
4764                 return false;
4765         }
4766 }
4767
4768 static bool bnxt_thor_device(uint16_t device_id)
4769 {
4770         switch (device_id) {
4771         case BROADCOM_DEV_ID_57508:
4772         case BROADCOM_DEV_ID_57504:
4773         case BROADCOM_DEV_ID_57502:
4774         case BROADCOM_DEV_ID_57508_MF1:
4775         case BROADCOM_DEV_ID_57504_MF1:
4776         case BROADCOM_DEV_ID_57502_MF1:
4777         case BROADCOM_DEV_ID_57508_MF2:
4778         case BROADCOM_DEV_ID_57504_MF2:
4779         case BROADCOM_DEV_ID_57502_MF2:
4780         case BROADCOM_DEV_ID_57500_VF1:
4781         case BROADCOM_DEV_ID_57500_VF2:
4782                 /* FALLTHROUGH */
4783                 return true;
4784         default:
4785                 return false;
4786         }
4787 }
4788
4789 bool bnxt_stratus_device(struct bnxt *bp)
4790 {
4791         uint16_t device_id = bp->pdev->id.device_id;
4792
4793         switch (device_id) {
4794         case BROADCOM_DEV_ID_STRATUS_NIC:
4795         case BROADCOM_DEV_ID_STRATUS_NIC_VF1:
4796         case BROADCOM_DEV_ID_STRATUS_NIC_VF2:
4797                 /* FALLTHROUGH */
4798                 return true;
4799         default:
4800                 return false;
4801         }
4802 }
4803
4804 static int bnxt_init_board(struct rte_eth_dev *eth_dev)
4805 {
4806         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
4807         struct bnxt *bp = eth_dev->data->dev_private;
4808
4809         /* Map the device register BAR (BAR0) and the doorbell BAR */
4810         bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
4811         bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
4812         if (!bp->bar0 || !bp->doorbell_base) {
4813                 PMD_DRV_LOG(ERR, "Unable to access Hardware\n");
4814                 return -ENODEV;
4815         }
4816
4817         bp->eth_dev = eth_dev;
4818         bp->pdev = pci_dev;
4819
4820         return 0;
4821 }
4822
4823 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
4824                                   struct bnxt_ctx_pg_info *ctx_pg,
4825                                   uint32_t mem_size,
4826                                   const char *suffix,
4827                                   uint16_t idx)
4828 {
4829         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
4830         const struct rte_memzone *mz = NULL;
4831         char mz_name[RTE_MEMZONE_NAMESIZE];
4832         rte_iova_t mz_phys_addr;
4833         uint64_t valid_bits = 0;
4834         uint32_t sz;
4835         int i;
4836
4837         if (!mem_size)
4838                 return 0;
4839
4840         rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) /
4841                          BNXT_PAGE_SIZE;
4842         rmem->page_size = BNXT_PAGE_SIZE;
4843         rmem->pg_arr = ctx_pg->ctx_pg_arr;
4844         rmem->dma_arr = ctx_pg->ctx_dma_arr;
4845         rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
4846
4847         valid_bits = PTU_PTE_VALID;
4848
4849         if (rmem->nr_pages > 1) {
4850                 snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4851                          "bnxt_ctx_pg_tbl%s_%x_%d",
4852                          suffix, idx, bp->eth_dev->data->port_id);
4853                 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4854                 mz = rte_memzone_lookup(mz_name);
4855                 if (!mz) {
4856                         mz = rte_memzone_reserve_aligned(mz_name,
4857                                                 rmem->nr_pages * 8,
4858                                                 SOCKET_ID_ANY,
4859                                                 RTE_MEMZONE_2MB |
4860                                                 RTE_MEMZONE_SIZE_HINT_ONLY |
4861                                                 RTE_MEMZONE_IOVA_CONTIG,
4862                                                 BNXT_PAGE_SIZE);
4863                         if (mz == NULL)
4864                                 return -ENOMEM;
4865                 }
4866
4867                 memset(mz->addr, 0, mz->len);
4868                 mz_phys_addr = mz->iova;
4869
4870                 rmem->pg_tbl = mz->addr;
4871                 rmem->pg_tbl_map = mz_phys_addr;
4872                 rmem->pg_tbl_mz = mz;
4873         }
4874
4875         snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d",
4876                  suffix, idx, bp->eth_dev->data->port_id);
4877         mz = rte_memzone_lookup(mz_name);
4878         if (!mz) {
4879                 mz = rte_memzone_reserve_aligned(mz_name,
4880                                                  mem_size,
4881                                                  SOCKET_ID_ANY,
4882                                                  RTE_MEMZONE_1GB |
4883                                                  RTE_MEMZONE_SIZE_HINT_ONLY |
4884                                                  RTE_MEMZONE_IOVA_CONTIG,
4885                                                  BNXT_PAGE_SIZE);
4886                 if (mz == NULL)
4887                         return -ENOMEM;
4888         }
4889
4890         memset(mz->addr, 0, mz->len);
4891         mz_phys_addr = mz->iova;
4892
4893         for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) {
4894                 rmem->pg_arr[i] = ((char *)mz->addr) + sz;
4895                 rmem->dma_arr[i] = mz_phys_addr + sz;
4896
4897                 if (rmem->nr_pages > 1) {
4898                         if (i == rmem->nr_pages - 2 &&
4899                             (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
4900                                 valid_bits |= PTU_PTE_NEXT_TO_LAST;
4901                         else if (i == rmem->nr_pages - 1 &&
4902                                  (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
4903                                 valid_bits |= PTU_PTE_LAST;
4904
4905                         rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] |
4906                                                            valid_bits);
4907                 }
4908         }
4909
4910         rmem->mz = mz;
4911         if (rmem->vmem_size)
4912                 rmem->vmem = (void **)mz->addr;
4913         rmem->dma_arr[0] = mz_phys_addr;
4914         return 0;
4915 }
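
/*
 * In the helper above, blocks spanning more than one BNXT_PAGE_SIZE page get
 * an extra IOVA-contiguous memzone acting as a page table: one 64-bit PTE per
 * data page, each carrying the page DMA address OR'ed with PTU_PTE_VALID
 * (and, for ring PTEs, the NEXT_TO_LAST/LAST markers on the final two
 * entries).  Single-page blocks skip the page table entirely.
 */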
4916
4917 static void bnxt_free_ctx_mem(struct bnxt *bp)
4918 {
4919         int i;
4920
4921         if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED))
4922                 return;
4923
4924         bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED;
4925         rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz);
4926         rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz);
4927         rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz);
4928         rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz);
4929         rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz);
4930         rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz);
4931         rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz);
4932         rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz);
4933         rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz);
4934         rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz);
4935
4936         for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) {
4937                 if (bp->ctx->tqm_mem[i])
4938                         rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz);
4939         }
4940
4941         rte_free(bp->ctx);
4942         bp->ctx = NULL;
4943 }
4944
4945 #define bnxt_roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))
4946
4947 #define min_t(type, x, y) ({                    \
4948         type __min1 = (x);                      \
4949         type __min2 = (y);                      \
4950         __min1 < __min2 ? __min1 : __min2; })
4951
4952 #define max_t(type, x, y) ({                    \
4953         type __max1 = (x);                      \
4954         type __max2 = (y);                      \
4955         __max1 > __max2 ? __max1 : __max2; })
4956
4957 #define clamp_t(type, _x, min, max)     min_t(type, max_t(type, _x, min), max)
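
/*
 * Worked example of the helpers above: bnxt_roundup(1000, 64) == 1024,
 * min_t(uint32_t, 7, 9) == 7, max_t(uint32_t, 7, 9) == 9 and
 * clamp_t(uint32_t, 200000, 64, 16384) == 16384.  bnxt_alloc_ctx_mem() below
 * uses clamp_t() to bound the per-ring TQM entry count to the FW-advertised
 * [min, max] range.
 */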
4958
4959 int bnxt_alloc_ctx_mem(struct bnxt *bp)
4960 {
4961         struct bnxt_ctx_pg_info *ctx_pg;
4962         struct bnxt_ctx_mem_info *ctx;
4963         uint32_t mem_size, ena, entries;
4964         uint32_t entries_sp, min;
4965         int i, rc;
4966
4967         rc = bnxt_hwrm_func_backing_store_qcaps(bp);
4968         if (rc) {
4969                 PMD_DRV_LOG(ERR, "Query context mem capability failed\n");
4970                 return rc;
4971         }
4972         ctx = bp->ctx;
4973         if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
4974                 return 0;
4975
4976         ctx_pg = &ctx->qp_mem;
4977         ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
4978         mem_size = ctx->qp_entry_size * ctx_pg->entries;
4979         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0);
4980         if (rc)
4981                 return rc;
4982
4983         ctx_pg = &ctx->srq_mem;
4984         ctx_pg->entries = ctx->srq_max_l2_entries;
4985         mem_size = ctx->srq_entry_size * ctx_pg->entries;
4986         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0);
4987         if (rc)
4988                 return rc;
4989
4990         ctx_pg = &ctx->cq_mem;
4991         ctx_pg->entries = ctx->cq_max_l2_entries;
4992         mem_size = ctx->cq_entry_size * ctx_pg->entries;
4993         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0);
4994         if (rc)
4995                 return rc;
4996
4997         ctx_pg = &ctx->vnic_mem;
4998         ctx_pg->entries = ctx->vnic_max_vnic_entries +
4999                 ctx->vnic_max_ring_table_entries;
5000         mem_size = ctx->vnic_entry_size * ctx_pg->entries;
5001         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0);
5002         if (rc)
5003                 return rc;
5004
5005         ctx_pg = &ctx->stat_mem;
5006         ctx_pg->entries = ctx->stat_max_entries;
5007         mem_size = ctx->stat_entry_size * ctx_pg->entries;
5008         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0);
5009         if (rc)
5010                 return rc;
5011
5012         min = ctx->tqm_min_entries_per_ring;
5013
5014         entries_sp = ctx->qp_max_l2_entries +
5015                      ctx->vnic_max_vnic_entries +
5016                      2 * ctx->qp_min_qp1_entries + min;
5017         entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple);
5018
5019         entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries;
5020         entries = bnxt_roundup(entries, ctx->tqm_entries_multiple);
5021         entries = clamp_t(uint32_t, entries, min,
5022                           ctx->tqm_max_entries_per_ring);
5023         for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
5024                 ctx_pg = ctx->tqm_mem[i];
5025                 ctx_pg->entries = i ? entries : entries_sp;
5026                 mem_size = ctx->tqm_entry_size * ctx_pg->entries;
5027                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i);
5028                 if (rc)
5029                         return rc;
5030                 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
5031         }
5032
5033         ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
5034         rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
5035         if (rc)
5036                 PMD_DRV_LOG(ERR,
5037                             "Failed to configure context mem: rc = %d\n", rc);
5038         else
5039                 ctx->flags |= BNXT_CTX_FLAG_INITED;
5040
5041         return rc;
5042 }
5043
5044 static int bnxt_alloc_stats_mem(struct bnxt *bp)
5045 {
5046         struct rte_pci_device *pci_dev = bp->pdev;
5047         char mz_name[RTE_MEMZONE_NAMESIZE];
5048         const struct rte_memzone *mz = NULL;
5049         uint32_t total_alloc_len;
5050         rte_iova_t mz_phys_addr;
5051
5052         if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2)
5053                 return 0;
5054
5055         snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
5056                  "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
5057                  pci_dev->addr.bus, pci_dev->addr.devid,
5058                  pci_dev->addr.function, "rx_port_stats");
5059         mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
5060         mz = rte_memzone_lookup(mz_name);
5061         total_alloc_len =
5062                 RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) +
5063                                        sizeof(struct rx_port_stats_ext) + 512);
5064         if (!mz) {
5065                 mz = rte_memzone_reserve(mz_name, total_alloc_len,
5066                                          SOCKET_ID_ANY,
5067                                          RTE_MEMZONE_2MB |
5068                                          RTE_MEMZONE_SIZE_HINT_ONLY |
5069                                          RTE_MEMZONE_IOVA_CONTIG);
5070                 if (mz == NULL)
5071                         return -ENOMEM;
5072         }
5073         memset(mz->addr, 0, mz->len);
5074         mz_phys_addr = mz->iova;
5075
5076         bp->rx_mem_zone = (const void *)mz;
5077         bp->hw_rx_port_stats = mz->addr;
5078         bp->hw_rx_port_stats_map = mz_phys_addr;
5079
5080         snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
5081                  "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
5082                  pci_dev->addr.bus, pci_dev->addr.devid,
5083                  pci_dev->addr.function, "tx_port_stats");
5084         mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
5085         mz = rte_memzone_lookup(mz_name);
5086         total_alloc_len =
5087                 RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) +
5088                                        sizeof(struct tx_port_stats_ext) + 512);
5089         if (!mz) {
5090                 mz = rte_memzone_reserve(mz_name,
5091                                          total_alloc_len,
5092                                          SOCKET_ID_ANY,
5093                                          RTE_MEMZONE_2MB |
5094                                          RTE_MEMZONE_SIZE_HINT_ONLY |
5095                                          RTE_MEMZONE_IOVA_CONTIG);
5096                 if (mz == NULL)
5097                         return -ENOMEM;
5098         }
5099         memset(mz->addr, 0, mz->len);
5100         mz_phys_addr = mz->iova;
5101
5102         bp->tx_mem_zone = (const void *)mz;
5103         bp->hw_tx_port_stats = mz->addr;
5104         bp->hw_tx_port_stats_map = mz_phys_addr;
5105         bp->flags |= BNXT_FLAG_PORT_STATS;
5106
5107         /* Set up extended port statistics only if the FW supports them */
5108         if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 ||
5109             bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 ||
5110             !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED))
5111                 return 0;
5112
5113         bp->hw_rx_port_stats_ext = (void *)
5114                 ((uint8_t *)bp->hw_rx_port_stats +
5115                  sizeof(struct rx_port_stats));
5116         bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
5117                 sizeof(struct rx_port_stats);
5118         bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;
5119
5120         if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 ||
5121             bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) {
5122                 bp->hw_tx_port_stats_ext = (void *)
5123                         ((uint8_t *)bp->hw_tx_port_stats +
5124                          sizeof(struct tx_port_stats));
5125                 bp->hw_tx_port_stats_ext_map =
5126                         bp->hw_tx_port_stats_map +
5127                         sizeof(struct tx_port_stats);
5128                 bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
5129         }
5130
5131         return 0;
5132 }
5133
5134 static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
5135 {
5136         struct bnxt *bp = eth_dev->data->dev_private;
5137         int rc = 0;
5138
5139         eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
5140                                                RTE_ETHER_ADDR_LEN *
5141                                                bp->max_l2_ctx,
5142                                                0);
5143         if (eth_dev->data->mac_addrs == NULL) {
5144                 PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
5145                 return -ENOMEM;
5146         }
5147
5148         if (!BNXT_HAS_DFLT_MAC_SET(bp)) {
5149                 if (BNXT_PF(bp))
5150                         return -EINVAL;
5151
5152                 /* Generate a random MAC address, if none was assigned by PF */
5153                 PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
5154                 bnxt_eth_hw_addr_random(bp->mac_addr);
5155                 PMD_DRV_LOG(INFO,
5156                             "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
5157                             bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
5158                             bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);
5159
5160                 rc = bnxt_hwrm_set_mac(bp);
5161                 if (rc)
5162                         return rc;
5163         }
5164
5165         /* Copy the permanent MAC from the FUNC_QCAPS response */
5166         memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
5167
5168         return rc;
5169 }
5170
5171 static int bnxt_restore_dflt_mac(struct bnxt *bp)
5172 {
5173         int rc = 0;
5174
5175         /* MAC is already configured in FW */
5176         if (BNXT_HAS_DFLT_MAC_SET(bp))
5177                 return 0;
5178
5179         /* Restore the previously configured MAC address */
5180         rc = bnxt_hwrm_set_mac(bp);
5181         if (rc)
5182                 PMD_DRV_LOG(ERR, "Failed to restore MAC address\n");
5183
5184         return rc;
5185 }
5186
5187 static void bnxt_config_vf_req_fwd(struct bnxt *bp)
5188 {
5189         if (!BNXT_PF(bp))
5190                 return;
5191
5192 #define ALLOW_FUNC(x)   \
5193         { \
5194                 uint32_t arg = (x); \
5195                 bp->pf->vf_req_fwd[((arg) >> 5)] &= \
5196                 ~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
5197         }
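
/*
 * ALLOW_FUNC(x) clears bit (x & 0x1f) of 32-bit word (x >> 5) in the
 * vf_req_fwd bitmap, i.e. the corresponding HWRM command from a VF is handled
 * by the firmware directly instead of being forwarded to this PF driver.
 * As an arithmetic illustration, a command ID of 0x41 would land in word 2,
 * bit 1.
 */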
5198
5199         /* Forward all requests if firmware is new enough */
5200         if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
5201              (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
5202             ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
5203                 memset(bp->pf->vf_req_fwd, 0xff, sizeof(bp->pf->vf_req_fwd));
5204         } else {
5205                 PMD_DRV_LOG(WARNING,
5206                             "Firmware too old for VF mailbox functionality\n");
5207                 memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));
5208         }
5209
5210         /*
5211          * The following are used for driver cleanup. If we disallow these,
5212          * VF drivers can't clean up cleanly.
5213          */
5214         ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
5215         ALLOW_FUNC(HWRM_VNIC_FREE);
5216         ALLOW_FUNC(HWRM_RING_FREE);
5217         ALLOW_FUNC(HWRM_RING_GRP_FREE);
5218         ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
5219         ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
5220         ALLOW_FUNC(HWRM_STAT_CTX_FREE);
5221         ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
5222         ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
5223 }
5224
5225 uint16_t
5226 bnxt_get_svif(uint16_t port_id, bool func_svif,
5227               enum bnxt_ulp_intf_type type)
5228 {
5229         struct rte_eth_dev *eth_dev;
5230         struct bnxt *bp;
5231
5232         eth_dev = &rte_eth_devices[port_id];
5233         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
5234                 struct bnxt_representor *vfr = eth_dev->data->dev_private;
5235                 if (!vfr)
5236                         return 0;
5237
5238                 if (type == BNXT_ULP_INTF_TYPE_VF_REP)
5239                         return vfr->svif;
5240
5241                 eth_dev = vfr->parent_dev;
5242         }
5243
5244         bp = eth_dev->data->dev_private;
5245
5246         return func_svif ? bp->func_svif : bp->port_svif;
5247 }
5248
5249 uint16_t
5250 bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type)
5251 {
5252         struct rte_eth_dev *eth_dev;
5253         struct bnxt_vnic_info *vnic;
5254         struct bnxt *bp;
5255
5256         eth_dev = &rte_eth_devices[port];
5257         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
5258                 struct bnxt_representor *vfr = eth_dev->data->dev_private;
5259                 if (!vfr)
5260                         return 0;
5261
5262                 if (type == BNXT_ULP_INTF_TYPE_VF_REP)
5263                         return vfr->dflt_vnic_id;
5264
5265                 eth_dev = vfr->parent_dev;
5266         }
5267
5268         bp = eth_dev->data->dev_private;
5269
5270         vnic = BNXT_GET_DEFAULT_VNIC(bp);
5271
5272         return vnic->fw_vnic_id;
5273 }
5274
5275 uint16_t
5276 bnxt_get_fw_func_id(uint16_t port, enum bnxt_ulp_intf_type type)
5277 {
5278         struct rte_eth_dev *eth_dev;
5279         struct bnxt *bp;
5280
5281         eth_dev = &rte_eth_devices[port];
5282         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
5283                 struct bnxt_representor *vfr = eth_dev->data->dev_private;
5284                 if (!vfr)
5285                         return 0;
5286
5287                 if (type == BNXT_ULP_INTF_TYPE_VF_REP)
5288                         return vfr->fw_fid;
5289
5290                 eth_dev = vfr->parent_dev;
5291         }
5292
5293         bp = eth_dev->data->dev_private;
5294
5295         return bp->fw_fid;
5296 }
5297
5298 enum bnxt_ulp_intf_type
5299 bnxt_get_interface_type(uint16_t port)
5300 {
5301         struct rte_eth_dev *eth_dev;
5302         struct bnxt *bp;
5303
5304         eth_dev = &rte_eth_devices[port];
5305         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev))
5306                 return BNXT_ULP_INTF_TYPE_VF_REP;
5307
5308         bp = eth_dev->data->dev_private;
5309         if (BNXT_PF(bp))
5310                 return BNXT_ULP_INTF_TYPE_PF;
5311         else if (BNXT_VF_IS_TRUSTED(bp))
5312                 return BNXT_ULP_INTF_TYPE_TRUSTED_VF;
5313         else if (BNXT_VF(bp))
5314                 return BNXT_ULP_INTF_TYPE_VF;
5315
5316         return BNXT_ULP_INTF_TYPE_INVALID;
5317 }
5318
5319 uint16_t
5320 bnxt_get_phy_port_id(uint16_t port_id)
5321 {
5322         struct bnxt_representor *vfr;
5323         struct rte_eth_dev *eth_dev;
5324         struct bnxt *bp;
5325
5326         eth_dev = &rte_eth_devices[port_id];
5327         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
5328                 vfr = eth_dev->data->dev_private;
5329                 if (!vfr)
5330                         return 0;
5331
5332                 eth_dev = vfr->parent_dev;
5333         }
5334
5335         bp = eth_dev->data->dev_private;
5336
5337         return BNXT_PF(bp) ? bp->pf->port_id : bp->parent->port_id;
5338 }
5339
5340 uint16_t
5341 bnxt_get_parif(uint16_t port_id, enum bnxt_ulp_intf_type type)
5342 {
5343         struct rte_eth_dev *eth_dev;
5344         struct bnxt *bp;
5345
5346         eth_dev = &rte_eth_devices[port_id];
5347         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
5348                 struct bnxt_representor *vfr = eth_dev->data->dev_private;
5349                 if (!vfr)
5350                         return 0;
5351
5352                 if (type == BNXT_ULP_INTF_TYPE_VF_REP)
5353                         return vfr->fw_fid - 1;
5354
5355                 eth_dev = vfr->parent_dev;
5356         }
5357
5358         bp = eth_dev->data->dev_private;
5359
5360         return BNXT_PF(bp) ? bp->fw_fid - 1 : bp->parent->fid - 1;
5361 }
5362
5363 uint16_t
5364 bnxt_get_vport(uint16_t port_id)
5365 {
5366         return (1 << bnxt_get_phy_port_id(port_id));
5367 }
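
/*
 * The vport value above is a one-hot bitmask of the physical port:
 * phy port 0 -> 0x1, phy port 1 -> 0x2, and so on.
 */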
5368
5369 static void bnxt_alloc_error_recovery_info(struct bnxt *bp)
5370 {
5371         struct bnxt_error_recovery_info *info = bp->recovery_info;
5372
5373         if (info) {
5374                 if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))
5375                         memset(info, 0, sizeof(*info));
5376                 return;
5377         }
5378
5379         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5380                 return;
5381
5382         info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
5383                            sizeof(*info), 0);
5384         if (!info)
5385                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
5386
5387         bp->recovery_info = info;
5388 }
5389
5390 static void bnxt_check_fw_status(struct bnxt *bp)
5391 {
5392         uint32_t fw_status;
5393
5394         if (!(bp->recovery_info &&
5395               (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS)))
5396                 return;
5397
5398         fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG);
5399         if (fw_status != BNXT_FW_STATUS_HEALTHY)
5400                 PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n",
5401                             fw_status);
5402 }
5403
5404 static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp)
5405 {
5406         struct bnxt_error_recovery_info *info = bp->recovery_info;
5407         uint32_t status_loc;
5408         uint32_t sig_ver;
5409
5410         rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 +
5411                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
5412         sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
5413                                    BNXT_GRCP_WINDOW_2_BASE +
5414                                    offsetof(struct hcomm_status,
5415                                             sig_ver)));
5416         /* If the signature is absent, then FW does not support this feature */
5417         if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) !=
5418             HCOMM_STATUS_SIGNATURE_VAL)
5419                 return 0;
5420
5421         if (!info) {
5422                 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
5423                                    sizeof(*info), 0);
5424                 if (!info)
5425                         return -ENOMEM;
5426                 bp->recovery_info = info;
5427         } else {
5428                 memset(info, 0, sizeof(*info));
5429         }
5430
5431         status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
5432                                       BNXT_GRCP_WINDOW_2_BASE +
5433                                       offsetof(struct hcomm_status,
5434                                                fw_status_loc)));
5435
5436         /* Only pre-map the FW health status GRC register */
5437         if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC)
5438                 return 0;
5439
5440         info->status_regs[BNXT_FW_STATUS_REG] = status_loc;
5441         info->mapped_status_regs[BNXT_FW_STATUS_REG] =
5442                 BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK);
5443
5444         rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 +
5445                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
5446
5447         bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS;
5448
5449         return 0;
5450 }
5451
5452 static int bnxt_init_fw(struct bnxt *bp)
5453 {
5454         uint16_t mtu;
5455         int rc = 0;
5456
5457         bp->fw_cap = 0;
5458
5459         rc = bnxt_map_hcomm_fw_status_reg(bp);
5460         if (rc)
5461                 return rc;
5462
5463         rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT);
5464         if (rc) {
5465                 bnxt_check_fw_status(bp);
5466                 return rc;
5467         }
5468
5469         rc = bnxt_hwrm_func_reset(bp);
5470         if (rc)
5471                 return -EIO;
5472
5473         rc = bnxt_hwrm_vnic_qcaps(bp);
5474         if (rc)
5475                 return rc;
5476
5477         rc = bnxt_hwrm_queue_qportcfg(bp);
5478         if (rc)
5479                 return rc;
5480
5481         /* Get the MAX capabilities for this function.
5482          * This call also allocates context memory for the TQM rings and
5483          * informs the firmware about the allocated backing store memory.
5484          */
5485         rc = bnxt_hwrm_func_qcaps(bp);
5486         if (rc)
5487                 return rc;
5488
5489         rc = bnxt_hwrm_func_qcfg(bp, &mtu);
5490         if (rc)
5491                 return rc;
5492
5493         bnxt_hwrm_port_mac_qcfg(bp);
5494
5495         bnxt_hwrm_parent_pf_qcfg(bp);
5496
5497         bnxt_hwrm_port_phy_qcaps(bp);
5498
5499         bnxt_alloc_error_recovery_info(bp);
5500         /* Get the adapter error recovery support info */
5501         rc = bnxt_hwrm_error_recovery_qcfg(bp);
5502         if (rc)
5503                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
5504
5505         bnxt_hwrm_port_led_qcaps(bp);
5506
5507         return 0;
5508 }
5509
5510 static int
5511 bnxt_init_locks(struct bnxt *bp)
5512 {
5513         int err;
5514
5515         err = pthread_mutex_init(&bp->flow_lock, NULL);
5516         if (err) {
5517                 PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n");
5518                 return err;
5519         }
5520
5521         err = pthread_mutex_init(&bp->def_cp_lock, NULL);
5522         if (err)
5523                 PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n");
5524
5525         err = pthread_mutex_init(&bp->health_check_lock, NULL);
5526         if (err)
5527                 PMD_DRV_LOG(ERR, "Unable to initialize health_check_lock\n");
5528         return err;
5529 }
5530
5531 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
5532 {
5533         int rc = 0;
5534
5535         rc = bnxt_init_fw(bp);
5536         if (rc)
5537                 return rc;
5538
5539         if (!reconfig_dev) {
5540                 rc = bnxt_setup_mac_addr(bp->eth_dev);
5541                 if (rc)
5542                         return rc;
5543         } else {
5544                 rc = bnxt_restore_dflt_mac(bp);
5545                 if (rc)
5546                         return rc;
5547         }
5548
5549         bnxt_config_vf_req_fwd(bp);
5550
5551         rc = bnxt_hwrm_func_driver_register(bp);
5552         if (rc) {
5553                 PMD_DRV_LOG(ERR, "Failed to register driver\n");
5554                 return -EBUSY;
5555         }
5556
5557         if (BNXT_PF(bp)) {
5558                 if (bp->pdev->max_vfs) {
5559                         rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
5560                         if (rc) {
5561                                 PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
5562                                 return rc;
5563                         }
5564                 } else {
5565                         rc = bnxt_hwrm_allocate_pf_only(bp);
5566                         if (rc) {
5567                                 PMD_DRV_LOG(ERR,
5568                                             "Failed to allocate PF resources\n");
5569                                 return rc;
5570                         }
5571                 }
5572         }
5573
5574         rc = bnxt_alloc_mem(bp, reconfig_dev);
5575         if (rc)
5576                 return rc;
5577
5578         rc = bnxt_setup_int(bp);
5579         if (rc)
5580                 return rc;
5581
5582         rc = bnxt_request_int(bp);
5583         if (rc)
5584                 return rc;
5585
5586         rc = bnxt_init_ctx_mem(bp);
5587         if (rc) {
5588                 PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n");
5589                 return rc;
5590         }
5591
5592         rc = bnxt_init_locks(bp);
5593         if (rc)
5594                 return rc;
5595
5596         return 0;
5597 }
5598
5599 static int
5600 bnxt_parse_devarg_truflow(__rte_unused const char *key,
5601                           const char *value, void *opaque_arg)
5602 {
5603         struct bnxt *bp = opaque_arg;
5604         unsigned long truflow;
5605         char *end = NULL;
5606
5607         if (!value || !opaque_arg) {
5608                 PMD_DRV_LOG(ERR,
5609                             "Invalid parameter passed to truflow devargs.\n");
5610                 return -EINVAL;
5611         }
5612
5613         truflow = strtoul(value, &end, 10);
5614         if (end == NULL || *end != '\0' ||
5615             (truflow == ULONG_MAX && errno == ERANGE)) {
5616                 PMD_DRV_LOG(ERR,
5617                             "Invalid parameter passed to truflow devargs.\n");
5618                 return -EINVAL;
5619         }
5620
5621         if (BNXT_DEVARG_TRUFLOW_INVALID(truflow)) {
5622                 PMD_DRV_LOG(ERR,
5623                             "Invalid value passed to truflow devargs.\n");
5624                 return -EINVAL;
5625         }
5626
5627         if (truflow) {
5628                 bp->flags |= BNXT_FLAG_TRUFLOW_EN;
5629                 PMD_DRV_LOG(INFO, "Host-based truflow feature enabled.\n");
5630         } else {
5631                 bp->flags &= ~BNXT_FLAG_TRUFLOW_EN;
5632                 PMD_DRV_LOG(INFO, "Host-based truflow feature disabled.\n");
5633         }
5634
5635         return 0;
5636 }
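
/*
 * Parsers such as bnxt_parse_devarg_truflow() above consume key=value pairs
 * from the PCI device's devargs string; for example, assuming the key
 * registered for this parser is "truflow", a devargs string along the lines
 * of "0000:0d:00.0,truflow=1" would enable the host-based TruFlow feature on
 * that port.
 */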
5637
5638 static int
5639 bnxt_parse_devarg_flow_xstat(__rte_unused const char *key,
5640                              const char *value, void *opaque_arg)
5641 {
5642         struct bnxt *bp = opaque_arg;
5643         unsigned long flow_xstat;
5644         char *end = NULL;
5645
5646         if (!value || !opaque_arg) {
5647                 PMD_DRV_LOG(ERR,
5648                             "Invalid parameter passed to flow_xstat devarg.\n");
5649                 return -EINVAL;
5650         }
5651
5652         flow_xstat = strtoul(value, &end, 10);
5653         if (end == NULL || *end != '\0' ||
5654             (flow_xstat == ULONG_MAX && errno == ERANGE)) {
5655                 PMD_DRV_LOG(ERR,
5656                             "Invalid parameter passed to flow_xstat devarg.\n");
5657                 return -EINVAL;
5658         }
5659
5660         if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) {
5661                 PMD_DRV_LOG(ERR,
5662                             "Invalid value passed to flow_xstat devarg.\n");
5663                 return -EINVAL;
5664         }
5665
5666         bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN;
5667         if (BNXT_FLOW_XSTATS_EN(bp))
5668                 PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n");
5669
5670         return 0;
5671 }
5672
5673 static int
5674 bnxt_parse_devarg_max_num_kflows(__rte_unused const char *key,
5675                                         const char *value, void *opaque_arg)
5676 {
5677         struct bnxt *bp = opaque_arg;
5678         unsigned long max_num_kflows;
5679         char *end = NULL;
5680
5681         if (!value || !opaque_arg) {
5682                 PMD_DRV_LOG(ERR,
5683                         "Invalid parameter passed to max_num_kflows devarg.\n");
5684                 return -EINVAL;
5685         }
5686
5687         max_num_kflows = strtoul(value, &end, 10);
5688         if (end == NULL || *end != '\0' ||
5689                 (max_num_kflows == ULONG_MAX && errno == ERANGE)) {
5690                 PMD_DRV_LOG(ERR,
5691                         "Invalid parameter passed to max_num_kflows devarg.\n");
5692                 return -EINVAL;
5693         }
5694
5695         if (bnxt_devarg_max_num_kflow_invalid(max_num_kflows)) {
5696                 PMD_DRV_LOG(ERR,
5697                         "Invalid value passed to max_num_kflows devarg.\n");
5698                 return -EINVAL;
5699         }
5700
5701         bp->max_num_kflows = max_num_kflows;
5702         if (bp->max_num_kflows)
5703                 PMD_DRV_LOG(INFO, "max_num_kflows set as %ldK.\n",
5704                                 max_num_kflows);
5705
5706         return 0;
5707 }
5708
5709 static int
5710 bnxt_parse_devarg_rep_is_pf(__rte_unused const char *key,
5711                             const char *value, void *opaque_arg)
5712 {
5713         struct bnxt_representor *vfr_bp = opaque_arg;
5714         unsigned long rep_is_pf;
5715         char *end = NULL;
5716
5717         if (!value || !opaque_arg) {
5718                 PMD_DRV_LOG(ERR,
5719                             "Invalid parameter passed to rep_is_pf devargs.\n");
5720                 return -EINVAL;
5721         }
5722
5723         rep_is_pf = strtoul(value, &end, 10);
5724         if (end == NULL || *end != '\0' ||
5725             (rep_is_pf == ULONG_MAX && errno == ERANGE)) {
5726                 PMD_DRV_LOG(ERR,
5727                             "Invalid parameter passed to rep_is_pf devargs.\n");
5728                 return -EINVAL;
5729         }
5730
5731         if (BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)) {
5732                 PMD_DRV_LOG(ERR,
5733                             "Invalid value passed to rep_is_pf devargs.\n");
5734                 return -EINVAL;
5735         }
5736
5737         vfr_bp->flags |= rep_is_pf;
5738         if (BNXT_REP_PF(vfr_bp))
5739                 PMD_DRV_LOG(INFO, "PF representor\n");
5740         else
5741                 PMD_DRV_LOG(INFO, "VF representor\n");
5742
5743         return 0;
5744 }
5745
5746 static int
5747 bnxt_parse_devarg_rep_based_pf(__rte_unused const char *key,
5748                                const char *value, void *opaque_arg)
5749 {
5750         struct bnxt_representor *vfr_bp = opaque_arg;
5751         unsigned long rep_based_pf;
5752         char *end = NULL;
5753
5754         if (!value || !opaque_arg) {
5755                 PMD_DRV_LOG(ERR,
5756                             "Invalid parameter passed to rep_based_pf "
5757                             "devargs.\n");
5758                 return -EINVAL;
5759         }
5760
5761         rep_based_pf = strtoul(value, &end, 10);
5762         if (end == NULL || *end != '\0' ||
5763             (rep_based_pf == ULONG_MAX && errno == ERANGE)) {
5764                 PMD_DRV_LOG(ERR,
5765                             "Invalid parameter passed to rep_based_pf "
5766                             "devargs.\n");
5767                 return -EINVAL;
5768         }
5769
5770         if (BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)) {
5771                 PMD_DRV_LOG(ERR,
5772                             "Invalid value passed to rep_based_pf devargs.\n");
5773                 return -EINVAL;
5774         }
5775
5776         vfr_bp->rep_based_pf = rep_based_pf;
5777         PMD_DRV_LOG(INFO, "rep-based-pf = %d\n", vfr_bp->rep_based_pf);
5778
5779         return 0;
5780 }
5781
5782 static int
5783 bnxt_parse_devarg_rep_q_r2f(__rte_unused const char *key,
5784                             const char *value, void *opaque_arg)
5785 {
5786         struct bnxt_representor *vfr_bp = opaque_arg;
5787         unsigned long rep_q_r2f;
5788         char *end = NULL;
5789
5790         if (!value || !opaque_arg) {
5791                 PMD_DRV_LOG(ERR,
5792                             "Invalid parameter passed to rep_q_r2f "
5793                             "devargs.\n");
5794                 return -EINVAL;
5795         }
5796
5797         rep_q_r2f = strtoul(value, &end, 10);
5798         if (end == NULL || *end != '\0' ||
5799             (rep_q_r2f == ULONG_MAX && errno == ERANGE)) {
5800                 PMD_DRV_LOG(ERR,
5801                             "Invalid parameter passed to rep_q_r2f "
5802                             "devargs.\n");
5803                 return -EINVAL;
5804         }
5805
5806         if (BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)) {
5807                 PMD_DRV_LOG(ERR,
5808                             "Invalid value passed to rep_q_r2f devargs.\n");
5809                 return -EINVAL;
5810         }
5811
5812         vfr_bp->rep_q_r2f = rep_q_r2f;
5813         vfr_bp->flags |= BNXT_REP_Q_R2F_VALID;
5814         PMD_DRV_LOG(INFO, "rep-q-r2f = %d\n", vfr_bp->rep_q_r2f);
5815
5816         return 0;
5817 }
5818
5819 static int
5820 bnxt_parse_devarg_rep_q_f2r(__rte_unused const char *key,
5821                             const char *value, void *opaque_arg)
5822 {
5823         struct bnxt_representor *vfr_bp = opaque_arg;
5824         unsigned long rep_q_f2r;
5825         char *end = NULL;
5826
5827         if (!value || !opaque_arg) {
5828                 PMD_DRV_LOG(ERR,
5829                             "Invalid parameter passed to rep_q_f2r "
5830                             "devargs.\n");
5831                 return -EINVAL;
5832         }
5833
5834         rep_q_f2r = strtoul(value, &end, 10);
5835         if (end == NULL || *end != '\0' ||
5836             (rep_q_f2r == ULONG_MAX && errno == ERANGE)) {
5837                 PMD_DRV_LOG(ERR,
5838                             "Invalid parameter passed to rep_q_f2r "
5839                             "devargs.\n");
5840                 return -EINVAL;
5841         }
5842
5843         if (BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)) {
5844                 PMD_DRV_LOG(ERR,
5845                             "Invalid value passed to rep_q_f2r devargs.\n");
5846                 return -EINVAL;
5847         }
5848
5849         vfr_bp->rep_q_f2r = rep_q_f2r;
5850         vfr_bp->flags |= BNXT_REP_Q_F2R_VALID;
5851         PMD_DRV_LOG(INFO, "rep-q-f2r = %d\n", vfr_bp->rep_q_f2r);
5852
5853         return 0;
5854 }
5855
5856 static int
5857 bnxt_parse_devarg_rep_fc_r2f(__rte_unused const char *key,
5858                              const char *value, void *opaque_arg)
5859 {
5860         struct bnxt_representor *vfr_bp = opaque_arg;
5861         unsigned long rep_fc_r2f;
5862         char *end = NULL;
5863
5864         if (!value || !opaque_arg) {
5865                 PMD_DRV_LOG(ERR,
5866                             "Invalid parameter passed to rep_fc_r2f "
5867                             "devargs.\n");
5868                 return -EINVAL;
5869         }
5870
5871         rep_fc_r2f = strtoul(value, &end, 10);
5872         if (end == NULL || *end != '\0' ||
5873             (rep_fc_r2f == ULONG_MAX && errno == ERANGE)) {
5874                 PMD_DRV_LOG(ERR,
5875                             "Invalid parameter passed to rep_fc_r2f "
5876                             "devargs.\n");
5877                 return -EINVAL;
5878         }
5879
5880         if (BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)) {
5881                 PMD_DRV_LOG(ERR,
5882                             "Invalid value passed to rep_fc_r2f devargs.\n");
5883                 return -EINVAL;
5884         }
5885
5886         vfr_bp->flags |= BNXT_REP_FC_R2F_VALID;
5887         vfr_bp->rep_fc_r2f = rep_fc_r2f;
5888         PMD_DRV_LOG(INFO, "rep-fc-r2f = %lu\n", rep_fc_r2f);
5889
5890         return 0;
5891 }
5892
5893 static int
5894 bnxt_parse_devarg_rep_fc_f2r(__rte_unused const char *key,
5895                              const char *value, void *opaque_arg)
5896 {
5897         struct bnxt_representor *vfr_bp = opaque_arg;
5898         unsigned long rep_fc_f2r;
5899         char *end = NULL;
5900
5901         if (!value || !opaque_arg) {
5902                 PMD_DRV_LOG(ERR,
5903                             "Invalid parameter passed to rep_fc_f2r "
5904                             "devargs.\n");
5905                 return -EINVAL;
5906         }
5907
5908         rep_fc_f2r = strtoul(value, &end, 10);
5909         if (end == NULL || *end != '\0' ||
5910             (rep_fc_f2r == ULONG_MAX && errno == ERANGE)) {
5911                 PMD_DRV_LOG(ERR,
5912                             "Invalid parameter passed to rep_fc_f2r "
5913                             "devargs.\n");
5914                 return -EINVAL;
5915         }
5916
5917         if (BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)) {
5918                 PMD_DRV_LOG(ERR,
5919                             "Invalid value passed to rep_fc_f2r devargs.\n");
5920                 return -EINVAL;
5921         }
5922
5923         vfr_bp->flags |= BNXT_REP_FC_F2R_VALID;
5924         vfr_bp->rep_fc_f2r = rep_fc_f2r;
5925         PMD_DRV_LOG(INFO, "rep-fc-f2r = %lu\n", rep_fc_f2r);
5926
5927         return 0;
5928 }
5929
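/*
 * Parse the driver-specific devargs of the backing (PF/trusted VF) port.
 * The accepted keys are listed in bnxt_dev_args. As a hedged example
 * (exact key spelling depends on the DPDK version in use), several options
 * can be combined on one device:
 *   -w 0000:00:0d.0,host-based-truflow=1,flow_xstat=1,max_num_kflows=32
 */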
5930 static void
5931 bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs)
5932 {
5933         struct rte_kvargs *kvlist;
5934
5935         if (devargs == NULL)
5936                 return;
5937
5938         kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args);
5939         if (kvlist == NULL)
5940                 return;
5941
5942         /*
5943          * Handler for "truflow" devarg.
5944          * Invoked, for example, as: "-w 0000:00:0d.0,host-based-truflow=1"
5945          */
5946         rte_kvargs_process(kvlist, BNXT_DEVARG_TRUFLOW,
5947                            bnxt_parse_devarg_truflow, bp);
5948
5949         /*
5950          * Handler for "flow_xstat" devarg.
5951          * Invoked, for example, as: "-w 0000:00:0d.0,flow_xstat=1"
5952          */
5953         rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT,
5954                            bnxt_parse_devarg_flow_xstat, bp);
5955
5956         /*
5957          * Handler for "max_num_kflows" devarg.
5958          * Invoked, for example, as: "-w 0000:00:0d.0,max_num_kflows=32"
5959          */
5960         rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS,
5961                            bnxt_parse_devarg_max_num_kflows, bp);
5962
5963         rte_kvargs_free(kvlist);
5964 }
5965
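/*
 * Allocate an rte_eth switch domain for this device. This is only done for
 * PFs and trusted VFs, since only those can back representor ports; the
 * resulting switch_domain_id is later copied into each representor.
 */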
5966 static int bnxt_alloc_switch_domain(struct bnxt *bp)
5967 {
5968         int rc = 0;
5969
5970         if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
5971                 rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id);
5972                 if (rc)
5973                         PMD_DRV_LOG(ERR,
5974                                     "Failed to alloc switch domain: %d\n", rc);
5975                 else
5976                         PMD_DRV_LOG(INFO,
5977                                     "Switch domain allocated %d\n",
5978                                     bp->switch_domain_id);
5979         }
5980
5981         return rc;
5982 }
5983
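/*
 * Main ethdev init callback, invoked through rte_eth_dev_create() from
 * bnxt_pci_probe(). It sets up the burst and ops function pointers for all
 * processes, then performs the board/HWRM initialization in the primary
 * process only.
 */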
5984 static int
5985 bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused)
5986 {
5987         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
5988         static int version_printed;
5989         struct bnxt *bp;
5990         int rc;
5991
5992         if (version_printed++ == 0)
5993                 PMD_DRV_LOG(INFO, "%s\n", bnxt_version);
5994
5995         eth_dev->dev_ops = &bnxt_dev_ops;
5996         eth_dev->rx_queue_count = bnxt_rx_queue_count_op;
5997         eth_dev->rx_descriptor_status = bnxt_rx_descriptor_status_op;
5998         eth_dev->tx_descriptor_status = bnxt_tx_descriptor_status_op;
5999         eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
6000         eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
6001
6002         /*
6003          * For secondary processes, we don't initialise any further
6004          * as primary has already done this work.
6005          */
6006         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
6007                 return 0;
6008
6009         rte_eth_copy_pci_info(eth_dev, pci_dev);
6010
6011         bp = eth_dev->data->dev_private;
6012
6013         /* Parse dev arguments passed on when starting the DPDK application. */
6014         bnxt_parse_dev_args(bp, pci_dev->device.devargs);
6015
6016         bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
6017
6018         if (bnxt_vf_pciid(pci_dev->id.device_id))
6019                 bp->flags |= BNXT_FLAG_VF;
6020
6021         if (bnxt_thor_device(pci_dev->id.device_id))
6022                 bp->flags |= BNXT_FLAG_THOR_CHIP;
6023
6024         if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 ||
6025             pci_dev->id.device_id == BROADCOM_DEV_ID_58804 ||
6026             pci_dev->id.device_id == BROADCOM_DEV_ID_58808 ||
6027             pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF)
6028                 bp->flags |= BNXT_FLAG_STINGRAY;
6029
6030         rc = bnxt_init_board(eth_dev);
6031         if (rc) {
6032                 PMD_DRV_LOG(ERR,
6033                             "Failed to initialize board rc: %x\n", rc);
6034                 return rc;
6035         }
6036
6037         rc = bnxt_alloc_pf_info(bp);
6038         if (rc)
6039                 goto error_free;
6040
6041         rc = bnxt_alloc_link_info(bp);
6042         if (rc)
6043                 goto error_free;
6044
6045         rc = bnxt_alloc_parent_info(bp);
6046         if (rc)
6047                 goto error_free;
6048
6049         rc = bnxt_alloc_hwrm_resources(bp);
6050         if (rc) {
6051                 PMD_DRV_LOG(ERR,
6052                             "Failed to allocate hwrm resource rc: %x\n", rc);
6053                 goto error_free;
6054         }
6055         rc = bnxt_alloc_leds_info(bp);
6056         if (rc)
6057                 goto error_free;
6058
6059         rc = bnxt_alloc_cos_queues(bp);
6060         if (rc)
6061                 goto error_free;
6062
6063         rc = bnxt_init_resources(bp, false);
6064         if (rc)
6065                 goto error_free;
6066
6067         rc = bnxt_alloc_stats_mem(bp);
6068         if (rc)
6069                 goto error_free;
6070
6071         bnxt_alloc_switch_domain(bp);
6072
6073         /* Indicate to rte_eth_dev_close() that it should also release
6074          * the private port resources.
6075          */
6076         eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
6077
6078         PMD_DRV_LOG(INFO,
6079                     DRV_MODULE_NAME " found at mem %" PRIX64 ", node addr %p\n",
6080                     pci_dev->mem_resource[0].phys_addr,
6081                     pci_dev->mem_resource[0].addr);
6082
6083         return 0;
6084
6085 error_free:
6086         bnxt_dev_uninit(eth_dev);
6087         return rc;
6088 }
6089
6090
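/* Release a flow-counter DMA buffer and invalidate its HWRM context id. */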
6091 static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx)
6092 {
6093         if (!ctx)
6094                 return;
6095
6096         if (ctx->va)
6097                 rte_free(ctx->va);
6098
6099         ctx->va = NULL;
6100         ctx->dma = RTE_BAD_IOVA;
6101         ctx->ctx_id = BNXT_CTX_VAL_INVAL;
6102 }
6103
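/*
 * Disable CFA flow-counter DMA in both directions and unregister the
 * rx/tx "in"/"out" counter table contexts with the firmware.
 */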
6104 static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp)
6105 {
6106         bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
6107                                   CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
6108                                   bp->flow_stat->rx_fc_out_tbl.ctx_id,
6109                                   bp->flow_stat->max_fc,
6110                                   false);
6111
6112         bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
6113                                   CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
6114                                   bp->flow_stat->tx_fc_out_tbl.ctx_id,
6115                                   bp->flow_stat->max_fc,
6116                                   false);
6117
6118         if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
6119                 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id);
6120         bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
6121
6122         if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
6123                 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id);
6124         bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
6125
6126         if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
6127                 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id);
6128         bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
6129
6130         if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
6131                 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id);
6132         bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
6133 }
6134
6135 static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp)
6136 {
6137         bnxt_unregister_fc_ctx_mem(bp);
6138
6139         bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl);
6140         bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl);
6141         bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl);
6142         bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl);
6143 }
6144
6145 static void bnxt_uninit_ctx_mem(struct bnxt *bp)
6146 {
6147         if (BNXT_FLOW_XSTATS_EN(bp))
6148                 bnxt_uninit_fc_ctx_mem(bp);
6149 }
6150
6151 static void
6152 bnxt_free_error_recovery_info(struct bnxt *bp)
6153 {
6154         rte_free(bp->recovery_info);
6155         bp->recovery_info = NULL;
6156         bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
6157 }
6158
6159 static void
6160 bnxt_uninit_locks(struct bnxt *bp)
6161 {
6162         pthread_mutex_destroy(&bp->flow_lock);
6163         pthread_mutex_destroy(&bp->def_cp_lock);
6164         pthread_mutex_destroy(&bp->health_check_lock);
6165         if (bp->rep_info) {
6166                 pthread_mutex_destroy(&bp->rep_info->vfr_lock);
6167                 pthread_mutex_destroy(&bp->rep_info->vfr_start_lock);
6168         }
6169 }
6170
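/*
 * Tear down everything set up by bnxt_init_resources(): interrupts, rings
 * and other memory, HWRM driver registration, context memory and locks.
 * When reconfig_dev is true, the HWRM channel and error-recovery info are
 * kept so the device can be re-initialized without a full re-probe.
 */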
6171 static int
6172 bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
6173 {
6174         int rc;
6175
6176         bnxt_free_int(bp);
6177         bnxt_free_mem(bp, reconfig_dev);
6178         bnxt_hwrm_func_buf_unrgtr(bp);
6179         rc = bnxt_hwrm_func_driver_unregister(bp, 0);
6180         bp->flags &= ~BNXT_FLAG_REGISTERED;
6181         bnxt_free_ctx_mem(bp);
6182         if (!reconfig_dev) {
6183                 bnxt_free_hwrm_resources(bp);
6184                 bnxt_free_error_recovery_info(bp);
6185         }
6186
6187         bnxt_uninit_ctx_mem(bp);
6188
6189         bnxt_uninit_locks(bp);
6190         bnxt_free_flow_stats_info(bp);
6191         bnxt_free_rep_info(bp);
6192         rte_free(bp->ptp_cfg);
6193         bp->ptp_cfg = NULL;
6194         return rc;
6195 }
6196
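/*
 * ethdev uninit callback: allowed in the primary process only. Closes the
 * port if it is still in use, which releases the driver-private resources.
 */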
6197 static int
6198 bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
6199 {
6200         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
6201                 return -EPERM;
6202
6203         PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");
6204
6205         if (eth_dev->state != RTE_ETH_DEV_UNUSED)
6206                 bnxt_dev_close_op(eth_dev);
6207
6208         return 0;
6209 }
6210
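/*
 * Destroy the backing ethdev together with any representor ports created
 * on top of it; used to roll back a partially failed representor probe.
 */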
6211 static int bnxt_pci_remove_dev_with_reps(struct rte_eth_dev *eth_dev)
6212 {
6213         struct bnxt *bp = eth_dev->data->dev_private;
6214         struct rte_eth_dev *vf_rep_eth_dev;
6215         int ret = 0, i;
6216
6217         if (!bp)
6218                 return -EINVAL;
6219
6220         for (i = 0; i < bp->num_reps; i++) {
6221                 vf_rep_eth_dev = bp->rep_info[i].vfr_eth_dev;
6222                 if (!vf_rep_eth_dev)
6223                         continue;
6224                 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci remove\n",
6225                             vf_rep_eth_dev->data->port_id);
6226                 rte_eth_dev_destroy(vf_rep_eth_dev, bnxt_representor_uninit);
6227         }
6228         PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n",
6229                     eth_dev->data->port_id);
6230         ret = rte_eth_dev_destroy(eth_dev, bnxt_dev_uninit);
6231
6232         return ret;
6233 }
6234
6235 static void bnxt_free_rep_info(struct bnxt *bp)
6236 {
6237         rte_free(bp->rep_info);
6238         bp->rep_info = NULL;
6239         rte_free(bp->cfa_code_map);
6240         bp->cfa_code_map = NULL;
6241 }
6242
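/*
 * Allocate the per-VF representor tracking array and the CFA-code-to-VF-id
 * map, and initialize the representor locks. Safe to call more than once:
 * it returns early if rep_info is already allocated.
 */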
6243 static int bnxt_init_rep_info(struct bnxt *bp)
6244 {
6245         int i = 0, rc;
6246
6247         if (bp->rep_info)
6248                 return 0;
6249
6250         bp->rep_info = rte_zmalloc("bnxt_rep_info",
6251                                    sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS,
6252                                    0);
6253         if (!bp->rep_info) {
6254                 PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n");
6255                 return -ENOMEM;
6256         }
6257         bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map",
6258                                        sizeof(*bp->cfa_code_map) *
6259                                        BNXT_MAX_CFA_CODE, 0);
6260         if (!bp->cfa_code_map) {
6261                 PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n");
6262                 bnxt_free_rep_info(bp);
6263                 return -ENOMEM;
6264         }
6265
6266         for (i = 0; i < BNXT_MAX_CFA_CODE; i++)
6267                 bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID;
6268
6269         rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL);
6270         if (rc) {
6271                 PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n");
6272                 bnxt_free_rep_info(bp);
6273                 return rc;
6274         }
6275
6276         rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL);
6277         if (rc) {
6278                 PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n");
6279                 bnxt_free_rep_info(bp);
6280                 return rc;
6281         }
6282
6283         return rc;
6284 }
6285
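/*
 * Create one ethdev per requested representor port. The backing device must
 * be a PF or trusted VF; each representor inherits the backing port's switch
 * domain and is configured from the representor-specific devargs parsed by
 * the handlers above.
 */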
6286 static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev,
6287                                struct rte_eth_devargs eth_da,
6288                                struct rte_eth_dev *backing_eth_dev,
6289                                const char *dev_args)
6290 {
6291         struct rte_eth_dev *vf_rep_eth_dev;
6292         char name[RTE_ETH_NAME_MAX_LEN];
6293         struct bnxt *backing_bp;
6294         uint16_t num_rep;
6295         int i, ret = 0;
6296         struct rte_kvargs *kvlist;
6297
6298         num_rep = eth_da.nb_representor_ports;
6299         if (num_rep > BNXT_MAX_VF_REPS) {
6300                 PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n",
6301                             num_rep, BNXT_MAX_VF_REPS);
6302                 return -EINVAL;
6303         }
6304
6305         if (num_rep >= RTE_MAX_ETHPORTS) {
6306                 PMD_DRV_LOG(ERR,
6307                             "nb_representor_ports = %d > %d MAX ETHPORTS\n",
6308                             num_rep, RTE_MAX_ETHPORTS);
6309                 return -EINVAL;
6310         }
6311
6312         backing_bp = backing_eth_dev->data->dev_private;
6313
6314         if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) {
6315                 PMD_DRV_LOG(ERR,
6316                             "Not a PF or trusted VF. No Representor support\n");
6317                 /* Returning an error here is not an option, as
6318                  * applications do not handle a probe failure correctly.
6319                  */
6320                 return 0;
6321         }
6322
6323         if (bnxt_init_rep_info(backing_bp))
6324                 return 0;
6325
6326         for (i = 0; i < num_rep; i++) {
6327                 struct bnxt_representor representor = {
6328                         .vf_id = eth_da.representor_ports[i],
6329                         .switch_domain_id = backing_bp->switch_domain_id,
6330                         .parent_dev = backing_eth_dev
6331                 };
6332
6333                 if (representor.vf_id >= BNXT_MAX_VF_REPS) {
6334                         PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n",
6335                                     representor.vf_id, BNXT_MAX_VF_REPS);
6336                         continue;
6337                 }
6338
6339                 /* representor port net_bdf_port */
6340                 snprintf(name, sizeof(name), "net_%s_representor_%d",
6341                          pci_dev->device.name, eth_da.representor_ports[i]);
6342
6343                 kvlist = rte_kvargs_parse(dev_args, bnxt_dev_args);
6344                 if (kvlist) {
6345                         /*
6346                          * Handler for "rep_is_pf" devarg.
6347                          * Invoked, for example, as: "-w 0000:00:0d.0,
6348                          * rep-based-pf=<pf index>,rep-is-pf=<VF=0 or PF=1>"
6349                          */
6350                         rte_kvargs_process(kvlist, BNXT_DEVARG_REP_IS_PF,
6351                                            bnxt_parse_devarg_rep_is_pf,
6352                                            (void *)&representor);
6353                         /*
6354                          * Handler for "rep_based_pf" devarg.
6355                          * Invoked, for example, as: "-w 0000:00:0d.0,
6356                          * rep-based-pf=<pf index>,rep-is-pf=<VF=0 or PF=1>"
6357                          */
6358                         rte_kvargs_process(kvlist, BNXT_DEVARG_REP_BASED_PF,
6359                                            bnxt_parse_devarg_rep_based_pf,
6360                                            (void *)&representor);
6361                         /*
6362                          * Handler for "rep_q_r2f" devarg.
6363                          * Invoked, for example, as: "-w 0000:00:0d.0,
6364                          * rep-q-r2f=<queue index>"
6365                          */
6366                         rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_R2F,
6367                                            bnxt_parse_devarg_rep_q_r2f,
6368                                            (void *)&representor);
6369                         /*
6370                          * Handler for "rep_q_f2r" devarg.
6371                          * Invoked, for example, as: "-w 0000:00:0d.0,
6372                          * rep-q-f2r=<queue index>"
6373                          */
6374                         rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_F2R,
6375                                            bnxt_parse_devarg_rep_q_f2r,
6376                                            (void *)&representor);
6377                         /*
6378                          * Handler for "rep_fc_r2f" devarg.
6379                          * Invoked, for example, as: "-w 0000:00:0d.0,
6380                          * rep-fc-r2f=<0 or 1>"
6381                          */
6382                         rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_R2F,
6383                                            bnxt_parse_devarg_rep_fc_r2f,
6384                                            (void *)&representor);
6385                         /*
6386                          * Handler for "rep_fc_f2r" devarg.
6387                          * Invoked, for example, as: "-w 0000:00:0d.0,
6388                          * rep-fc-f2r=<0 or 1>"
6389                          */
6390                         rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_F2R,
6391                                            bnxt_parse_devarg_rep_fc_f2r,
6392                                            (void *)&representor);
6393                 }
6394
6395                 ret = rte_eth_dev_create(&pci_dev->device, name,
6396                                          sizeof(struct bnxt_representor),
6397                                          NULL, NULL,
6398                                          bnxt_representor_init,
6399                                          &representor);
6400                 if (ret) {
6401                         PMD_DRV_LOG(ERR, "failed to create bnxt vf "
6402                                     "representor %s.\n", name);
6403                         goto err;
6404                 }
6405
6406                 vf_rep_eth_dev = rte_eth_dev_allocated(name);
6407                 if (!vf_rep_eth_dev) {
6408                         PMD_DRV_LOG(ERR, "Failed to find the eth_dev"
6409                                     " for VF-Rep: %s.\n", name);
6410                         ret = -ENODEV;
6411                         goto err;
6412                 }
6413
6414                 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci probe\n",
6415                             backing_eth_dev->data->port_id);
6416                 backing_bp->rep_info[representor.vf_id].vfr_eth_dev =
6417                                                          vf_rep_eth_dev;
6418                 backing_bp->num_reps++;
6419
6420         }
6421
6422         return 0;
6423
6424 err:
6425         /* If num_rep > 1, then rollback already created
6426          * ports, since we'll be failing the probe anyway
6427          */
6428         if (num_rep > 1)
6429                 bnxt_pci_remove_dev_with_reps(backing_eth_dev);
6430
6431         return ret;
6432 }
6433
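/*
 * PCI probe entry point. Creates the backing ethdev on first probe and, when
 * representor devargs are present, the representor ports as well. With
 * RTE_PCI_DRV_PROBE_AGAIN the function can be re-entered for an already
 * probed device (e.g. by OVS-DPDK), in which case only the representors are
 * (re)probed. As a hedged example, representors are typically requested as
 * "-w 0000:00:0d.0,representor=[0-3]".
 */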
6434 static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
6435                           struct rte_pci_device *pci_dev)
6436 {
6437         struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
6438         struct rte_eth_dev *backing_eth_dev;
6439         uint16_t num_rep;
6440         int ret = 0;
6441
6442         if (pci_dev->device.devargs) {
6443                 ret = rte_eth_devargs_parse(pci_dev->device.devargs->args,
6444                                             &eth_da);
6445                 if (ret)
6446                         return ret;
6447         }
6448
6449         num_rep = eth_da.nb_representor_ports;
6450         PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n",
6451                     num_rep);
6452
6453         /* We could come here after first level of probe is already invoked
6454          * as part of an application bringup(OVS-DPDK vswitchd), so first check
6455          * for already allocated eth_dev for the backing device (PF/Trusted VF)
6456          */
6457         backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
6458         if (backing_eth_dev == NULL) {
6459                 ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
6460                                          sizeof(struct bnxt),
6461                                          eth_dev_pci_specific_init, pci_dev,
6462                                          bnxt_dev_init, NULL);
6463
6464                 if (ret || !num_rep)
6465                         return ret;
6466
6467                 backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
6468         }
6469         PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci probe\n",
6470                     backing_eth_dev->data->port_id);
6471         /* probe representor ports now */
6472         ret = bnxt_rep_port_probe(pci_dev, eth_da, backing_eth_dev,
6473         ret = bnxt_rep_port_probe(pci_dev, eth_da, backing_eth_dev,
6474                                   pci_dev->device.devargs ? pci_dev->device.devargs->args : NULL);
6475         return ret;
6476 }
6477
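/*
 * PCI remove entry point. In the primary process the ethdev (representor or
 * backing port) is destroyed through its uninit callback; secondary
 * processes fall back to the generic PCI remove path.
 */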
6478 static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
6479 {
6480         struct rte_eth_dev *eth_dev;
6481
6482         eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
6483         if (!eth_dev)
6484                 return 0; /* Invoked typically only by OVS-DPDK, by the
6485                            * time it comes here the eth_dev is already
6486                            * deleted by rte_eth_dev_close(), so returning
6487                            * 0 here still lets the PCI remove complete cleanly
6488                            */
6489
6490         PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", eth_dev->data->port_id);
6491         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
6492                 if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
6493                         return rte_eth_dev_destroy(eth_dev,
6494                                                    bnxt_representor_uninit);
6495                 else
6496                         return rte_eth_dev_destroy(eth_dev,
6497                                                    bnxt_dev_uninit);
6498         } else {
6499                 return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
6500         }
6501 }
6502
6503 static struct rte_pci_driver bnxt_rte_pmd = {
6504         .id_table = bnxt_pci_id_map,
6505         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
6506                         RTE_PCI_DRV_PROBE_AGAIN, /* Needed in case of VF-REPs
6507                                                   * and OVS-DPDK
6508                                                   */
6509         .probe = bnxt_pci_probe,
6510         .remove = bnxt_pci_remove,
6511 };
6512
6513 static bool
6514 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
6515 {
6516         if (strcmp(dev->device->driver->name, drv->driver.name))
6517                 return false;
6518
6519         return true;
6520 }
6521
6522 bool is_bnxt_supported(struct rte_eth_dev *dev)
6523 {
6524         return is_device_supported(dev, &bnxt_rte_pmd);
6525 }
6526
6527 RTE_LOG_REGISTER(bnxt_logtype_driver, pmd.net.bnxt.driver, NOTICE);
6528 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
6529 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
6530 RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");