net/bnxt: parse representors along with other devargs
drivers/net/bnxt/bnxt_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_dev.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_kvargs.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_reps.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"
#include "bnxt_tf_common.h"

#define DRV_MODULE_NAME         "bnxt"
static const char bnxt_version[] =
        "Broadcom NetXtreme driver " DRV_MODULE_NAME;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id bnxt_pci_id_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
                         BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
                         BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) },
        { .vendor_id = 0, /* sentinel */ },
};

#define BNXT_DEVARG_TRUFLOW     "host-based-truflow"
#define BNXT_DEVARG_FLOW_XSTAT  "flow-xstat"
#define BNXT_DEVARG_MAX_NUM_KFLOWS  "max-num-kflows"
#define BNXT_DEVARG_REPRESENTOR "representor"

static const char *const bnxt_dev_args[] = {
        BNXT_DEVARG_REPRESENTOR,
        BNXT_DEVARG_TRUFLOW,
        BNXT_DEVARG_FLOW_XSTAT,
        BNXT_DEVARG_MAX_NUM_KFLOWS,
        NULL
};
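
/*
 * Illustrative devargs usage (example values only; the PCI address and
 * EAL whitelist option spelling depend on the deployment):
 *
 *   -w 0000:0d:00.0,host-based-truflow=1,flow-xstat=1,max-num-kflows=32,representor=[0]
 *
 * Each key is matched against the list above via rte_kvargs at probe time.
 */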

/*
 * truflow == false to disable the feature
 * truflow == true to enable the feature
 */
#define BNXT_DEVARG_TRUFLOW_INVALID(truflow)    ((truflow) > 1)

/*
 * flow_xstat == false to disable the feature
 * flow_xstat == true to enable the feature
 */
#define BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)      ((flow_xstat) > 1)

/*
 * max_num_kflows must be >= 32
 * and must be a power-of-2 supported value
 * return: 1 -> invalid
 *         0 -> valid
 */
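/* e.g. 32, 64, 128, ... are accepted; 0, 16 and 33 are rejected. */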
static int bnxt_devarg_max_num_kflow_invalid(uint16_t max_num_kflows)
{
        if (max_num_kflows < 32 || !rte_is_power_of_2(max_num_kflows))
                return 1;
        return 0;
}

static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev);
static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev);
static void bnxt_cancel_fw_health_check(struct bnxt *bp);
static int bnxt_restore_vlan_filters(struct bnxt *bp);
static void bnxt_dev_recover(void *arg);
static void bnxt_free_error_recovery_info(struct bnxt *bp);
static void bnxt_free_rep_info(struct bnxt *bp);

int is_bnxt_in_error(struct bnxt *bp)
{
        if (bp->flags & BNXT_FLAG_FATAL_ERROR)
                return -EIO;
        if (bp->flags & BNXT_FLAG_FW_RESET)
                return -EBUSY;

        return 0;
}
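
/*
 * Typical caller pattern, used by most of the ops below:
 *
 *      rc = is_bnxt_in_error(bp);
 *      if (rc)
 *              return rc;
 */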

/***********************/

/*
 * High level utility functions
 */

static uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
{
        if (!BNXT_CHIP_THOR(bp))
                return 1;

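        /*
         * Round rx_nr_rings up to a multiple of the per-context entry
         * count, then divide: i.e. the number of RSS contexts needed to
         * cover all RX rings on Thor.
         */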
        return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings,
                                  BNXT_RSS_ENTRIES_PER_CTX_THOR) /
               BNXT_RSS_ENTRIES_PER_CTX_THOR;
}

uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
{
        if (!BNXT_CHIP_THOR(bp))
                return HW_HASH_INDEX_SIZE;

        return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR;
}

static void bnxt_free_parent_info(struct bnxt *bp)
{
        rte_free(bp->parent);
}

static void bnxt_free_pf_info(struct bnxt *bp)
{
        rte_free(bp->pf);
}

static void bnxt_free_link_info(struct bnxt *bp)
{
        rte_free(bp->link_info);
}

static void bnxt_free_leds_info(struct bnxt *bp)
{
        rte_free(bp->leds);
        bp->leds = NULL;
}

static void bnxt_free_flow_stats_info(struct bnxt *bp)
{
        rte_free(bp->flow_stat);
        bp->flow_stat = NULL;
}

static void bnxt_free_cos_queues(struct bnxt *bp)
{
        rte_free(bp->rx_cos_queue);
        rte_free(bp->tx_cos_queue);
}

static void bnxt_free_mem(struct bnxt *bp, bool reconfig)
{
        bnxt_free_filter_mem(bp);
        bnxt_free_vnic_attributes(bp);
        bnxt_free_vnic_mem(bp);

        /* tx/rx rings are configured as part of *_queue_setup callbacks.
         * If the number of rings changes across an FW update,
         * there is little we can do other than warn the user.
         */
        if (!reconfig) {
                bnxt_free_stats(bp);
                bnxt_free_tx_rings(bp);
                bnxt_free_rx_rings(bp);
        }
        bnxt_free_async_cp_ring(bp);
        bnxt_free_rxtx_nq_ring(bp);

        rte_free(bp->grp_info);
        bp->grp_info = NULL;
}

static int bnxt_alloc_parent_info(struct bnxt *bp)
{
        bp->parent = rte_zmalloc("bnxt_parent_info",
                                 sizeof(struct bnxt_parent_info), 0);
        if (bp->parent == NULL)
                return -ENOMEM;

        return 0;
}

static int bnxt_alloc_pf_info(struct bnxt *bp)
{
        bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0);
        if (bp->pf == NULL)
                return -ENOMEM;

        return 0;
}

static int bnxt_alloc_link_info(struct bnxt *bp)
{
        bp->link_info =
                rte_zmalloc("bnxt_link_info", sizeof(struct bnxt_link_info), 0);
        if (bp->link_info == NULL)
                return -ENOMEM;

        return 0;
}

static int bnxt_alloc_leds_info(struct bnxt *bp)
{
        bp->leds = rte_zmalloc("bnxt_leds",
                               BNXT_MAX_LED * sizeof(struct bnxt_led_info),
                               0);
        if (bp->leds == NULL)
                return -ENOMEM;

        return 0;
}

static int bnxt_alloc_cos_queues(struct bnxt *bp)
{
        bp->rx_cos_queue =
                rte_zmalloc("bnxt_rx_cosq",
                            BNXT_COS_QUEUE_COUNT *
                            sizeof(struct bnxt_cos_queue_info),
                            0);
        if (bp->rx_cos_queue == NULL)
                return -ENOMEM;

        bp->tx_cos_queue =
                rte_zmalloc("bnxt_tx_cosq",
                            BNXT_COS_QUEUE_COUNT *
                            sizeof(struct bnxt_cos_queue_info),
                            0);
        if (bp->tx_cos_queue == NULL)
                return -ENOMEM;

        return 0;
}

static int bnxt_alloc_flow_stats_info(struct bnxt *bp)
{
        bp->flow_stat = rte_zmalloc("bnxt_flow_xstat",
                                    sizeof(struct bnxt_flow_stat_info), 0);
        if (bp->flow_stat == NULL)
                return -ENOMEM;

        return 0;
}

static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
{
        int rc;

        rc = bnxt_alloc_ring_grps(bp);
        if (rc)
                goto alloc_mem_err;

        rc = bnxt_alloc_async_ring_struct(bp);
        if (rc)
                goto alloc_mem_err;

        rc = bnxt_alloc_vnic_mem(bp);
        if (rc)
                goto alloc_mem_err;

        rc = bnxt_alloc_vnic_attributes(bp);
        if (rc)
                goto alloc_mem_err;

        rc = bnxt_alloc_filter_mem(bp);
        if (rc)
                goto alloc_mem_err;

        rc = bnxt_alloc_async_cp_ring(bp);
        if (rc)
                goto alloc_mem_err;

        rc = bnxt_alloc_rxtx_nq_ring(bp);
        if (rc)
                goto alloc_mem_err;

        if (BNXT_FLOW_XSTATS_EN(bp)) {
                rc = bnxt_alloc_flow_stats_info(bp);
                if (rc)
                        goto alloc_mem_err;
        }

        return 0;

alloc_mem_err:
        bnxt_free_mem(bp, reconfig);
        return rc;
}

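/*
 * Bring up one VNIC: allocate its ring-group array and the FW VNIC,
 * allocate RSS contexts when RSS is enabled, program VLAN stripping and
 * L2 filters, attach the RX queues, then configure RSS, placement mode
 * and TPA (LRO).
 */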
static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
{
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
        uint64_t rx_offloads = dev_conf->rxmode.offloads;
        struct bnxt_rx_queue *rxq;
        unsigned int j;
        int rc;

        rc = bnxt_vnic_grp_alloc(bp, vnic);
        if (rc)
                goto err_out;

        PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
                    vnic_id, vnic, vnic->fw_grp_ids);

        rc = bnxt_hwrm_vnic_alloc(bp, vnic);
        if (rc)
                goto err_out;

        /* Alloc RSS context only if RSS mode is enabled */
        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
                int j, nr_ctxs = bnxt_rss_ctxts(bp);

                rc = 0;
                for (j = 0; j < nr_ctxs; j++) {
                        rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j);
                        if (rc)
                                break;
                }
                if (rc) {
                        PMD_DRV_LOG(ERR,
                                    "HWRM vnic %d ctx %d alloc failure rc: %x\n",
                                    vnic_id, j, rc);
                        goto err_out;
                }
                vnic->num_lb_ctxts = nr_ctxs;
        }

        /*
         * Firmware sets pf pair in default vnic cfg. If the VLAN strip
         * setting is not available at this time, it will not be
         * configured correctly in the CFA.
         */
        if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                vnic->vlan_strip = true;
        else
                vnic->vlan_strip = false;

        rc = bnxt_hwrm_vnic_cfg(bp, vnic);
        if (rc)
                goto err_out;

        rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
        if (rc)
                goto err_out;

        for (j = 0; j < bp->rx_num_qs_per_vnic; j++) {
                rxq = bp->eth_dev->data->rx_queues[j];

                PMD_DRV_LOG(DEBUG,
                            "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n",
                            j, rxq->vnic, rxq->vnic->fw_grp_ids);

                if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start)
                        rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
                else
                        vnic->rx_queue_cnt++;
        }

        PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt);

        rc = bnxt_vnic_rss_configure(bp, vnic);
        if (rc)
                goto err_out;

        bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

        if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
                bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
        else
                bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);

        return 0;
err_out:
        PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
                    vnic_id, rc);
        return rc;
}

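/*
 * Register the four flow-counter DMA tables with the FW, then point the
 * RX and TX CFA counter engines at the zeroed "out" tables.
 */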
static int bnxt_register_fc_ctx_mem(struct bnxt *bp)
{
        int rc = 0;

        rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma,
                                &bp->flow_stat->rx_fc_in_tbl.ctx_id);
        if (rc)
                return rc;

        PMD_DRV_LOG(DEBUG,
                    "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p"
                    " rx_fc_in_tbl.ctx_id = %d\n",
                    bp->flow_stat->rx_fc_in_tbl.va,
                    (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma),
                    bp->flow_stat->rx_fc_in_tbl.ctx_id);

        rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma,
                                &bp->flow_stat->rx_fc_out_tbl.ctx_id);
        if (rc)
                return rc;

        PMD_DRV_LOG(DEBUG,
                    "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p"
                    " rx_fc_out_tbl.ctx_id = %d\n",
                    bp->flow_stat->rx_fc_out_tbl.va,
                    (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma),
                    bp->flow_stat->rx_fc_out_tbl.ctx_id);

        rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma,
                                &bp->flow_stat->tx_fc_in_tbl.ctx_id);
        if (rc)
                return rc;

        PMD_DRV_LOG(DEBUG,
                    "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p"
                    " tx_fc_in_tbl.ctx_id = %d\n",
                    bp->flow_stat->tx_fc_in_tbl.va,
                    (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma),
                    bp->flow_stat->tx_fc_in_tbl.ctx_id);

        rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma,
                                &bp->flow_stat->tx_fc_out_tbl.ctx_id);
        if (rc)
                return rc;

        PMD_DRV_LOG(DEBUG,
                    "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p"
                    " tx_fc_out_tbl.ctx_id = %d\n",
                    bp->flow_stat->tx_fc_out_tbl.va,
                    (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma),
                    bp->flow_stat->tx_fc_out_tbl.ctx_id);

        memset(bp->flow_stat->rx_fc_out_tbl.va,
               0,
               bp->flow_stat->rx_fc_out_tbl.size);
        rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
                                       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
                                       bp->flow_stat->rx_fc_out_tbl.ctx_id,
                                       bp->flow_stat->max_fc,
                                       true);
        if (rc)
                return rc;

        memset(bp->flow_stat->tx_fc_out_tbl.va,
               0,
               bp->flow_stat->tx_fc_out_tbl.size);
        rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
                                       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
                                       bp->flow_stat->tx_fc_out_tbl.ctx_id,
                                       bp->flow_stat->max_fc,
                                       true);

        return rc;
}

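/*
 * Allocate a zeroed, page-locked buffer and resolve its IOVA so that the
 * FW can DMA counter data into it.
 */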
static int bnxt_alloc_ctx_mem_buf(char *type, size_t size,
                                  struct bnxt_ctx_mem_buf_info *ctx)
{
        if (!ctx)
                return -EINVAL;

        ctx->va = rte_zmalloc(type, size, 0);
        if (ctx->va == NULL)
                return -ENOMEM;
        rte_mem_lock_page(ctx->va);
        ctx->size = size;
        ctx->dma = rte_mem_virt2iova(ctx->va);
        if (ctx->dma == RTE_BAD_IOVA)
                return -ENOMEM;

        return 0;
}

static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
{
        struct rte_pci_device *pdev = bp->pdev;
        char type[RTE_MEMZONE_NAMESIZE];
        uint16_t max_fc;
        int rc = 0;

        max_fc = bp->flow_stat->max_fc;

        sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        /* 4 bytes for each counter-id */
        rc = bnxt_alloc_ctx_mem_buf(type,
                                    max_fc * 4,
                                    &bp->flow_stat->rx_fc_in_tbl);
        if (rc)
                return rc;

        sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
        rc = bnxt_alloc_ctx_mem_buf(type,
                                    max_fc * 16,
                                    &bp->flow_stat->rx_fc_out_tbl);
        if (rc)
                return rc;

        sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        /* 4 bytes for each counter-id */
        rc = bnxt_alloc_ctx_mem_buf(type,
                                    max_fc * 4,
                                    &bp->flow_stat->tx_fc_in_tbl);
        if (rc)
                return rc;

        sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
        rc = bnxt_alloc_ctx_mem_buf(type,
                                    max_fc * 16,
                                    &bp->flow_stat->tx_fc_out_tbl);
        if (rc)
                return rc;

        rc = bnxt_register_fc_ctx_mem(bp);

        return rc;
}

static int bnxt_init_ctx_mem(struct bnxt *bp)
{
        int rc = 0;

        if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) ||
            !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) ||
            !BNXT_FLOW_XSTATS_EN(bp))
                return 0;

        rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc);
        if (rc)
                return rc;

        rc = bnxt_init_fc_ctx_mem(bp);

        return rc;
}

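/*
 * One-time datapath bring-up: derive jumbo/MTU flags, allocate HWRM stat
 * contexts, rings and ring groups, map CoS queues, configure each VNIC,
 * program the L2 RX mask, wire up per-queue interrupts, and apply the
 * initial link configuration.
 */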
static int bnxt_init_chip(struct bnxt *bp)
{
        struct rte_eth_link new;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        uint32_t intr_vector = 0;
        uint32_t queue_id, base = BNXT_MISC_VEC_ID;
        uint32_t vec = BNXT_MISC_VEC_ID;
        unsigned int i, j;
        int rc;

        if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
                bp->eth_dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
                bp->flags |= BNXT_FLAG_JUMBO;
        } else {
                bp->eth_dev->data->dev_conf.rxmode.offloads &=
                        ~DEV_RX_OFFLOAD_JUMBO_FRAME;
                bp->flags &= ~BNXT_FLAG_JUMBO;
        }

        /* THOR does not support ring groups.
         * But we will use the array to save RSS context IDs.
         */
        if (BNXT_CHIP_THOR(bp))
                bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;

        rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
        if (rc) {
                PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
                goto err_out;
        }

        rc = bnxt_alloc_hwrm_rings(bp);
        if (rc) {
                PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
                goto err_out;
        }

        rc = bnxt_alloc_all_hwrm_ring_grps(bp);
        if (rc) {
                PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
                goto err_out;
        }

        if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
                goto skip_cosq_cfg;

        for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
                if (bp->rx_cos_queue[i].id != 0xff) {
                        struct bnxt_vnic_info *vnic = &bp->vnic_info[j++];

                        if (!vnic) {
                                PMD_DRV_LOG(ERR,
                                            "Num pools more than FW profile\n");
                                rc = -EINVAL;
                                goto err_out;
                        }
                        vnic->cos_queue_id = bp->rx_cos_queue[i].id;
                        bp->rx_cosq_cnt++;
                }
        }

skip_cosq_cfg:
        rc = bnxt_mq_rx_configure(bp);
        if (rc) {
                PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
                goto err_out;
        }

        /* VNIC configuration */
        for (i = 0; i < bp->nr_vnics; i++) {
                rc = bnxt_setup_one_vnic(bp, i);
                if (rc)
                        goto err_out;
        }

        rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
        if (rc) {
                PMD_DRV_LOG(ERR,
                        "HWRM cfa l2 rx mask failure rc: %x\n", rc);
                goto err_out;
        }

        /* check and configure queue intr-vector mapping */
        if ((rte_intr_cap_multiple(intr_handle) ||
             !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
            bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
                intr_vector = bp->eth_dev->data->nb_rx_queues;
                PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
                if (intr_vector > bp->rx_cp_nr_rings) {
                        PMD_DRV_LOG(ERR, "At most %d intr queues supported\n",
                                    bp->rx_cp_nr_rings);
                        return -ENOTSUP;
                }
                rc = rte_intr_efd_enable(intr_handle, intr_vector);
                if (rc)
                        return rc;
        }

        if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
                intr_handle->intr_vec =
                        rte_zmalloc("intr_vec",
                                    bp->eth_dev->data->nb_rx_queues *
                                    sizeof(int), 0);
                if (intr_handle->intr_vec == NULL) {
                        PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
                                    " intr_vec\n",
                                    bp->eth_dev->data->nb_rx_queues);
                        rc = -ENOMEM;
                        goto err_disable;
                }
                PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
                            "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
                            intr_handle->intr_vec, intr_handle->nb_efd,
                            intr_handle->max_intr);
                for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
                     queue_id++) {
                        intr_handle->intr_vec[queue_id] =
                                                        vec + BNXT_RX_VEC_START;
                        if (vec < base + intr_handle->nb_efd - 1)
                                vec++;
                }
        }

        /* enable uio/vfio intr/eventfd mapping */
        rc = rte_intr_enable(intr_handle);
#ifndef RTE_EXEC_ENV_FREEBSD
        /* In FreeBSD OS, nic_uio driver does not support interrupts */
        if (rc)
                goto err_free;
#endif

        rc = bnxt_get_hwrm_link_config(bp, &new);
        if (rc) {
                PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
                goto err_free;
        }

        if (!bp->link_info->link_up) {
                rc = bnxt_set_hwrm_link_config(bp, true);
                if (rc) {
                        PMD_DRV_LOG(ERR,
                                "HWRM link config failure rc: %x\n", rc);
                        goto err_free;
                }
        }
        bnxt_print_link_info(bp->eth_dev);

        bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0);
        if (!bp->mark_table)
                PMD_DRV_LOG(ERR, "Allocation of mark table failed\n");

        return 0;

err_free:
        rte_free(intr_handle->intr_vec);
err_disable:
        rte_intr_efd_disable(intr_handle);
err_out:
        /* Some of the error status returned by FW may not be from errno.h */
        if (rc > 0)
                rc = -EIO;

        return rc;
}

static int bnxt_shutdown_nic(struct bnxt *bp)
{
        bnxt_free_all_hwrm_resources(bp);
        bnxt_free_all_filters(bp);
        bnxt_free_all_vnics(bp);
        return 0;
}

/*
 * Device configuration and status function
 */

uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
{
        uint32_t link_speed = bp->link_info->support_speeds;
        uint32_t speed_capa = 0;

        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB)
                speed_capa |= ETH_LINK_SPEED_100M;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD)
                speed_capa |= ETH_LINK_SPEED_100M_HD;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB)
                speed_capa |= ETH_LINK_SPEED_1G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
                speed_capa |= ETH_LINK_SPEED_2_5G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB)
                speed_capa |= ETH_LINK_SPEED_10G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
                speed_capa |= ETH_LINK_SPEED_20G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
                speed_capa |= ETH_LINK_SPEED_25G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
                speed_capa |= ETH_LINK_SPEED_40G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
                speed_capa |= ETH_LINK_SPEED_50G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
                speed_capa |= ETH_LINK_SPEED_100G;
        if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_200GB)
                speed_capa |= ETH_LINK_SPEED_200G;

        if (bp->link_info->auto_mode ==
            HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
                speed_capa |= ETH_LINK_SPEED_FIXED;
        else
                speed_capa |= ETH_LINK_SPEED_AUTONEG;

        return speed_capa;
}

static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
                                struct rte_eth_dev_info *dev_info)
{
        struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
        struct bnxt *bp = eth_dev->data->dev_private;
        uint16_t max_vnics, i, j, vpool, vrxq;
        unsigned int max_rx_rings;
        int rc;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

        /* MAC Specifics */
        dev_info->max_mac_addrs = bp->max_l2_ctx;
        dev_info->max_hash_mac_addrs = 0;

        /* PF/VF specifics */
        if (BNXT_PF(bp))
                dev_info->max_vfs = pdev->max_vfs;

        max_rx_rings = BNXT_MAX_RINGS(bp);
        /* For the sake of symmetry, max_rx_queues = max_tx_queues */
        dev_info->max_rx_queues = max_rx_rings;
        dev_info->max_tx_queues = max_rx_rings;
        dev_info->reta_size = bnxt_rss_hash_tbl_size(bp);
        dev_info->hash_key_size = 40;
        max_vnics = bp->max_vnics;

        /* MTU specifics */
        dev_info->min_mtu = RTE_ETHER_MIN_MTU;
        dev_info->max_mtu = BNXT_MAX_MTU;

        /* Fast path specifics */
        dev_info->min_rx_bufsize = 1;
        dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;

        dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
        if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
                dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
        dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
        dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;

        dev_info->speed_capa = bnxt_get_speed_capabilities(bp);

        /* *INDENT-OFF* */
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = 8,
                        .hthresh = 8,
                        .wthresh = 0,
                },
                .rx_free_thresh = 32,
                /* If no descriptors available, pkts are dropped by default */
                .rx_drop_en = 1,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = 32,
                        .hthresh = 0,
                        .wthresh = 0,
                },
                .tx_free_thresh = 32,
                .tx_rs_thresh = 32,
        };
        eth_dev->data->dev_conf.intr_conf.lsc = 1;

        eth_dev->data->dev_conf.intr_conf.rxq = 1;
        dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
        dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
        dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
        dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;

        /* *INDENT-ON* */

        /*
         * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
         *       need further investigation.
         */

        /* VMDq resources */
        vpool = 64; /* ETH_64_POOLS */
        vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
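        /*
         * Search for the largest pool count (64, 32, 16, 8) that fits in
         * max_vnics, then the largest per-pool queue count (128 down to 8)
         * that fits in max_rx_queues; clamp pools to queues. If nothing
         * fits, VMDq is reported as unsupported (0 pools, 0 queues).
         */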
        for (i = 0; i < 4; vpool >>= 1, i++) {
                if (max_vnics > vpool) {
                        for (j = 0; j < 5; vrxq >>= 1, j++) {
                                if (dev_info->max_rx_queues > vrxq) {
                                        if (vpool > vrxq)
                                                vpool = vrxq;
                                        goto found;
                                }
                        }
                        /* Not enough resources to support VMDq */
                        break;
                }
        }
        /* Not enough resources to support VMDq */
        vpool = 0;
        vrxq = 0;
found:
        dev_info->max_vmdq_pools = vpool;
        dev_info->vmdq_queue_num = vrxq;

        dev_info->vmdq_pool_base = 0;
        dev_info->vmdq_queue_base = 0;

        return 0;
}

/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
        int rc;

        bp->rx_queues = (void *)eth_dev->data->rx_queues;
        bp->tx_queues = (void *)eth_dev->data->tx_queues;
        bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
        bp->rx_nr_rings = eth_dev->data->nb_rx_queues;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

        if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
                rc = bnxt_hwrm_check_vf_rings(bp);
                if (rc) {
                        PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
                        return -ENOSPC;
                }

                /* If a resource has already been allocated - in this case
                 * it is the async completion ring, free it. Reallocate it after
                 * resource reservation. This will ensure the resource counts
                 * are calculated correctly.
                 */

                pthread_mutex_lock(&bp->def_cp_lock);

                if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
                        bnxt_disable_int(bp);
                        bnxt_free_cp_ring(bp, bp->async_cp_ring);
                }

                rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
                if (rc) {
                        PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
                        pthread_mutex_unlock(&bp->def_cp_lock);
                        return -ENOSPC;
                }

                if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
                        rc = bnxt_alloc_async_cp_ring(bp);
                        if (rc) {
                                pthread_mutex_unlock(&bp->def_cp_lock);
                                return rc;
                        }
                        bnxt_enable_int(bp);
                }

                pthread_mutex_unlock(&bp->def_cp_lock);
        } else {
                /* legacy driver needs to get updated values */
                rc = bnxt_hwrm_func_qcaps(bp);
                if (rc) {
                        PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
                        return rc;
                }
        }

        /* Validate the requested queue counts against FW-reported limits */
        if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
            eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
            eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues
                + BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings ||
            eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
            bp->max_stat_ctx)
                goto resource_error;

        if (BNXT_HAS_RING_GRPS(bp) &&
            (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
                goto resource_error;

        if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
            bp->max_vnics < eth_dev->data->nb_rx_queues)
                goto resource_error;

        bp->rx_cp_nr_rings = bp->rx_nr_rings;
        bp->tx_cp_nr_rings = bp->tx_nr_rings;

        if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
                rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
        eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;

        if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
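                /*
                 * Derive the MTU from the maximum frame length: strip the
                 * L2 header, CRC, and two VLAN tags (QinQ).
                 */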
                eth_dev->data->mtu =
                        eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
                        RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
                        BNXT_NUM_VLANS;
                bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
        }
        return 0;

resource_error:
        PMD_DRV_LOG(ERR,
                    "Insufficient resources to support requested config\n");
        PMD_DRV_LOG(ERR,
                    "Num Queues Requested: Tx %d, Rx %d\n",
                    eth_dev->data->nb_tx_queues,
                    eth_dev->data->nb_rx_queues);
        PMD_DRV_LOG(ERR,
                    "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
                    bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
                    bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
        return -ENOSPC;
}

void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
        struct rte_eth_link *link = &eth_dev->data->dev_link;

        if (link->link_status)
                PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
                        eth_dev->data->port_id,
                        (uint32_t)link->link_speed,
                        (link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
                        ("full-duplex") : ("half-duplex"));
        else
                PMD_DRV_LOG(INFO, "Port %d Link Down\n",
                        eth_dev->data->port_id);
}

/*
 * Determine whether the current configuration requires support for scattered
 * receive; return 1 if scattered receive is required and 0 if not.
 */
static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
{
        uint16_t buf_size;
        int i;

        if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
                return 1;

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i];

                buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
                                      RTE_PKTMBUF_HEADROOM);
                if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size)
                        return 1;
        }
        return 0;
}

static eth_rx_burst_t
bnxt_receive_function(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = eth_dev->data->dev_private;

#ifdef RTE_ARCH_X86
#ifndef RTE_LIBRTE_IEEE1588
        /*
         * Vector mode receive can be enabled only if scattered rx is not
         * in use and rx offloads are limited to the subset the vector
         * path supports (VLAN strip/filter, CRC keep, L3/L4 checksums,
         * jumbo frames and RSS hash), and TRUFLOW is not enabled.
         */
        if (!eth_dev->data->scattered_rx &&
            !(eth_dev->data->dev_conf.rxmode.offloads &
              ~(DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_KEEP_CRC |
                DEV_RX_OFFLOAD_JUMBO_FRAME |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM |
                DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                DEV_RX_OFFLOAD_RSS_HASH |
                DEV_RX_OFFLOAD_VLAN_FILTER)) &&
            !BNXT_TRUFLOW_EN(bp)) {
                PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n",
                            eth_dev->data->port_id);
                bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE;
                return bnxt_recv_pkts_vec;
        }
        PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n",
                    eth_dev->data->port_id);
        PMD_DRV_LOG(INFO,
                    "Port %d scatter: %d rx offload: %" PRIX64 "\n",
                    eth_dev->data->port_id,
                    eth_dev->data->scattered_rx,
                    eth_dev->data->dev_conf.rxmode.offloads);
#endif
#endif
        bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
        return bnxt_recv_pkts;
}

static eth_tx_burst_t
bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
{
#ifdef RTE_ARCH_X86
#ifndef RTE_LIBRTE_IEEE1588
        struct bnxt *bp = eth_dev->data->dev_private;

        /*
         * Vector mode transmit can be enabled only if not using scatter rx
         * or tx offloads.
         */
        if (!eth_dev->data->scattered_rx &&
            !eth_dev->data->dev_conf.txmode.offloads &&
            !BNXT_TRUFLOW_EN(bp)) {
                PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
                            eth_dev->data->port_id);
                return bnxt_xmit_pkts_vec;
        }
        PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n",
                    eth_dev->data->port_id);
        PMD_DRV_LOG(INFO,
                    "Port %d scatter: %d tx offload: %" PRIX64 "\n",
                    eth_dev->data->port_id,
                    eth_dev->data->scattered_rx,
                    eth_dev->data->dev_conf.txmode.offloads);
#endif
#endif
        return bnxt_xmit_pkts;
}

static int bnxt_handle_if_change_status(struct bnxt *bp)
{
        int rc;

        /* Since fw has undergone a reset and lost all contexts,
         * set fatal flag to not issue hwrm during cleanup
         */
        bp->flags |= BNXT_FLAG_FATAL_ERROR;
        bnxt_uninit_resources(bp, true);

        /* clear fatal flag so that re-init happens */
        bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
        rc = bnxt_init_resources(bp, true);

        bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;

        return rc;
}

static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
        int vlan_mask = 0;
        int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT;

        if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) {
                PMD_DRV_LOG(ERR, "Queues are not configured yet!\n");
                return -EINVAL;
        }

        if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
                PMD_DRV_LOG(ERR,
                        "RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
                        bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
        }

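        /* bnxt_hwrm_if_change() returns -EAGAIN, e.g. while a FW reset is
         * still in progress; retry a bounded number of times with a delay.
         */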
        do {
                rc = bnxt_hwrm_if_change(bp, true);
                if (rc == 0 || rc != -EAGAIN)
                        break;

                rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL);
        } while (retry_cnt--);

        if (rc)
                return rc;

        if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) {
                rc = bnxt_handle_if_change_status(bp);
                if (rc)
                        return rc;
        }

        bnxt_enable_int(bp);

        rc = bnxt_init_chip(bp);
        if (rc)
                goto error;

        eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);
        eth_dev->data->dev_started = 1;

        bnxt_link_update(eth_dev, 1, ETH_LINK_UP);

        if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
                vlan_mask |= ETH_VLAN_FILTER_MASK;
        if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                vlan_mask |= ETH_VLAN_STRIP_MASK;
        rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
        if (rc)
                goto error;

        eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
        eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);

        pthread_mutex_lock(&bp->def_cp_lock);
        bnxt_schedule_fw_health_check(bp);
        pthread_mutex_unlock(&bp->def_cp_lock);

        if (BNXT_TRUFLOW_EN(bp))
                bnxt_ulp_init(bp);

        return 0;

error:
        bnxt_shutdown_nic(bp);
        bnxt_free_tx_mbufs(bp);
        bnxt_free_rx_mbufs(bp);
        bnxt_hwrm_if_change(bp, false);
        eth_dev->data->dev_started = 0;
        return rc;
}

static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        int rc = 0;

        if (!bp->link_info->link_up)
                rc = bnxt_set_hwrm_link_config(bp, true);
        if (!rc)
                eth_dev->data->dev_link.link_status = 1;

        bnxt_print_link_info(eth_dev);
        return rc;
}

static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = eth_dev->data->dev_private;

        eth_dev->data->dev_link.link_status = 0;
        bnxt_set_hwrm_link_config(bp, false);
        bp->link_info->link_up = 0;

        return 0;
}

static void bnxt_free_switch_domain(struct bnxt *bp)
{
        if (bp->switch_domain_id)
                rte_eth_switch_domain_free(bp->switch_domain_id);
}

/* Unload the driver, release resources */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

        if (BNXT_TRUFLOW_EN(bp))
                bnxt_ulp_deinit(bp);

        eth_dev->data->dev_started = 0;
        /* Prevent crashes when queues are still in use */
        eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
        eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;

        bnxt_disable_int(bp);

        /* disable uio/vfio intr/eventfd mapping */
        rte_intr_disable(intr_handle);

        bnxt_cancel_fw_health_check(bp);

        bnxt_dev_set_link_down_op(eth_dev);

        /* Wait for link to be reset and the async notification to process.
         * During reset recovery, there is no need to wait and
         * VF/NPAR functions do not have privilege to change PHY config.
         */
        if (!is_bnxt_in_error(bp) && BNXT_SINGLE_PF(bp))
                bnxt_link_update(eth_dev, 1, ETH_LINK_DOWN);

        /* Clean queue intr-vector mapping */
        rte_intr_efd_disable(intr_handle);
        if (intr_handle->intr_vec != NULL) {
                rte_free(intr_handle->intr_vec);
                intr_handle->intr_vec = NULL;
        }

        bnxt_hwrm_port_clr_stats(bp);
        bnxt_free_tx_mbufs(bp);
        bnxt_free_rx_mbufs(bp);
        /* Process any remaining notifications in default completion queue */
        bnxt_int_handler(eth_dev);
        bnxt_shutdown_nic(bp);
        bnxt_hwrm_if_change(bp, false);

        rte_free(bp->mark_table);
        bp->mark_table = NULL;

        bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
        bp->rx_cosq_cnt = 0;
        /* All filters are deleted on a port stop. */
        if (BNXT_FLOW_XSTATS_EN(bp))
                bp->flow_stat->flow_count = 0;
}

static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = eth_dev->data->dev_private;

        /* cancel the recovery handler before remove dev */
        rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp);
        rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp);
        bnxt_cancel_fc_thread(bp);

        if (eth_dev->data->dev_started)
                bnxt_dev_stop_op(eth_dev);

        bnxt_free_switch_domain(bp);

        bnxt_uninit_resources(bp, false);

        bnxt_free_leds_info(bp);
        bnxt_free_cos_queues(bp);
        bnxt_free_link_info(bp);
        bnxt_free_pf_info(bp);
        bnxt_free_parent_info(bp);

        eth_dev->dev_ops = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;

        rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
        bp->tx_mem_zone = NULL;
        rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
        bp->rx_mem_zone = NULL;

        rte_free(bp->pf->vf_info);
        bp->pf->vf_info = NULL;

        rte_free(bp->grp_info);
        bp->grp_info = NULL;
}

static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
                                    uint32_t index)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
        struct bnxt_vnic_info *vnic;
        struct bnxt_filter_info *filter, *temp_filter;
        uint32_t i;

        if (is_bnxt_in_error(bp))
                return;

        /*
         * Loop through all VNICs from the specified filter flow pools to
         * remove the corresponding MAC addr filter
         */
        for (i = 0; i < bp->nr_vnics; i++) {
                if (!(pool_mask & (1ULL << i)))
                        continue;

                vnic = &bp->vnic_info[i];
                filter = STAILQ_FIRST(&vnic->filter);
                while (filter) {
                        temp_filter = STAILQ_NEXT(filter, next);
                        if (filter->mac_index == index) {
                                STAILQ_REMOVE(&vnic->filter, filter,
                                                bnxt_filter_info, next);
                                bnxt_hwrm_clear_l2_filter(bp, filter);
                                bnxt_free_filter(bp, filter);
                        }
                        filter = temp_filter;
                }
        }
}

static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic,
                               struct rte_ether_addr *mac_addr, uint32_t index,
                               uint32_t pool)
{
        struct bnxt_filter_info *filter;
        int rc = 0;

        /* Attach requested MAC address to the new l2_filter */
        STAILQ_FOREACH(filter, &vnic->filter, next) {
                if (filter->mac_index == index) {
                        PMD_DRV_LOG(DEBUG,
                                    "MAC addr already existed for pool %d\n",
                                    pool);
                        return 0;
                }
        }

        filter = bnxt_alloc_filter(bp);
        if (!filter) {
                PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
                return -ENODEV;
        }

        /* bnxt_alloc_filter copies default MAC to filter->l2_addr. So,
         * if the MAC that's been programmed now is a different one, then,
         * copy that addr to filter->l2_addr
         */
        if (mac_addr)
                memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);
        filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;

        rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
        if (!rc) {
                filter->mac_index = index;
                if (filter->mac_index == 0)
                        STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
                else
                        STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
        } else {
                bnxt_free_filter(bp, filter);
        }

        return rc;
}

static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
                                struct rte_ether_addr *mac_addr,
                                uint32_t index, uint32_t pool)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
        int rc = 0;

        rc = is_bnxt_in_error(bp);
        if (rc)
                return rc;

1456         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1457                 PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
1458                 return -ENOTSUP;
1459         }
1460
1461         if (!vnic) {
1462                 PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
1463                 return -EINVAL;
1464         }
1465
1466         /* Filter settings will get applied when port is started */
1467         if (!eth_dev->data->dev_started)
1468                 return 0;
1469
1470         rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool);
1471
1472         return rc;
1473 }
1474
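/*
 * Poll firmware for the link state. With wait_to_complete set, retry up
 * to BNXT_LINK_UP_WAIT_CNT (or BNXT_LINK_DOWN_WAIT_CNT) times, sleeping
 * BNXT_LINK_WAIT_INTERVAL ms between HWRM queries, until the link
 * reaches the expected state. Any change versus the cached link state is
 * published via rte_eth_linkstatus_set() and an LSC event callback.
 */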
1475 int bnxt_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete,
1476                      bool exp_link_status)
1477 {
1478         int rc = 0;
1479         struct bnxt *bp = eth_dev->data->dev_private;
1480         struct rte_eth_link new;
1481         int cnt = exp_link_status ? BNXT_LINK_UP_WAIT_CNT :
1482                   BNXT_LINK_DOWN_WAIT_CNT;
1483
1484         rc = is_bnxt_in_error(bp);
1485         if (rc)
1486                 return rc;
1487
1488         memset(&new, 0, sizeof(new));
1489         do {
1490                 /* Retrieve link info from hardware */
1491                 rc = bnxt_get_hwrm_link_config(bp, &new);
1492                 if (rc) {
1493                         new.link_speed = ETH_SPEED_NUM_100M;
1494                         new.link_duplex = ETH_LINK_FULL_DUPLEX;
1495                         PMD_DRV_LOG(ERR,
1496                                 "Failed to retrieve link config, rc = 0x%x!\n", rc);
1497                         goto out;
1498                 }
1499
1500                 if (!wait_to_complete || new.link_status == exp_link_status)
1501                         break;
1502
1503                 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
1504         } while (cnt--);
1505
1506 out:
1507         /* Timed out or success */
1508         if (new.link_status != eth_dev->data->dev_link.link_status ||
1509             new.link_speed != eth_dev->data->dev_link.link_speed) {
1510                 rte_eth_linkstatus_set(eth_dev, &new);
1511
1512                 _rte_eth_dev_callback_process(eth_dev,
1513                                               RTE_ETH_EVENT_INTR_LSC,
1514                                               NULL);
1515
1516                 bnxt_print_link_info(eth_dev);
1517         }
1518
1519         return rc;
1520 }
1521
1522 int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
1523                         int wait_to_complete)
1524 {
1525         return bnxt_link_update(eth_dev, wait_to_complete, ETH_LINK_UP);
1526 }
1527
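/*
 * Rx-mode ops (promiscuous/allmulticast enable/disable) share a common
 * pattern: snapshot the VNIC flags, toggle the requested bit, push the
 * new Rx mask to firmware, and restore the old flags if the HWRM call
 * fails so the cached state never diverges from hardware.
 */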
1528 static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
1529 {
1530         struct bnxt *bp = eth_dev->data->dev_private;
1531         struct bnxt_vnic_info *vnic;
1532         uint32_t old_flags;
1533         int rc;
1534
1535         rc = is_bnxt_in_error(bp);
1536         if (rc)
1537                 return rc;
1538
1539         /* Filter settings will get applied when port is started */
1540         if (!eth_dev->data->dev_started)
1541                 return 0;
1542
1543         if (bp->vnic_info == NULL)
1544                 return 0;
1545
1546         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1547
1548         old_flags = vnic->flags;
1549         vnic->flags |= BNXT_VNIC_INFO_PROMISC;
1550         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1551         if (rc != 0)
1552                 vnic->flags = old_flags;
1553
1554         return rc;
1555 }
1556
1557 static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
1558 {
1559         struct bnxt *bp = eth_dev->data->dev_private;
1560         struct bnxt_vnic_info *vnic;
1561         uint32_t old_flags;
1562         int rc;
1563
1564         rc = is_bnxt_in_error(bp);
1565         if (rc)
1566                 return rc;
1567
1568         /* Filter settings will get applied when port is started */
1569         if (!eth_dev->data->dev_started)
1570                 return 0;
1571
1572         if (bp->vnic_info == NULL)
1573                 return 0;
1574
1575         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1576
1577         old_flags = vnic->flags;
1578         vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
1579         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1580         if (rc != 0)
1581                 vnic->flags = old_flags;
1582
1583         return rc;
1584 }
1585
1586 static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
1587 {
1588         struct bnxt *bp = eth_dev->data->dev_private;
1589         struct bnxt_vnic_info *vnic;
1590         uint32_t old_flags;
1591         int rc;
1592
1593         rc = is_bnxt_in_error(bp);
1594         if (rc)
1595                 return rc;
1596
1597         /* Filter settings will get applied when port is started */
1598         if (!eth_dev->data->dev_started)
1599                 return 0;
1600
1601         if (bp->vnic_info == NULL)
1602                 return 0;
1603
1604         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1605
1606         old_flags = vnic->flags;
1607         vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
1608         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1609         if (rc != 0)
1610                 vnic->flags = old_flags;
1611
1612         return rc;
1613 }
1614
1615 static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
1616 {
1617         struct bnxt *bp = eth_dev->data->dev_private;
1618         struct bnxt_vnic_info *vnic;
1619         uint32_t old_flags;
1620         int rc;
1621
1622         rc = is_bnxt_in_error(bp);
1623         if (rc)
1624                 return rc;
1625
1626         /* Filter settings will get applied when port is started */
1627         if (!eth_dev->data->dev_started)
1628                 return 0;
1629
1630         if (bp->vnic_info == NULL)
1631                 return 0;
1632
1633         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1634
1635         old_flags = vnic->flags;
1636         vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
1637         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1638         if (rc != 0)
1639                 vnic->flags = old_flags;
1640
1641         return rc;
1642 }
1643
1644 /* Return the bnxt_rx_queue pointer corresponding to a given queue ID. */
1645 static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid)
1646 {
1647         if (qid >= bp->rx_nr_rings)
1648                 return NULL;
1649
1650         return bp->eth_dev->data->rx_queues[qid];
1651 }
1652
1653 /* Return the Rx queue ID corresponding to a given rss table ring/group ID. */
1654 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr)
1655 {
1656         struct bnxt_rx_queue *rxq;
1657         unsigned int i;
1658
1659         if (!BNXT_HAS_RING_GRPS(bp)) {
1660                 for (i = 0; i < bp->rx_nr_rings; i++) {
1661                         rxq = bp->eth_dev->data->rx_queues[i];
1662                         if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr)
1663                                 return rxq->index;
1664                 }
1665         } else {
1666                 for (i = 0; i < bp->rx_nr_rings; i++) {
1667                         if (bp->grp_info[i].fw_grp_id == fwr)
1668                                 return i;
1669                 }
1670         }
1671
1672         return INVALID_HW_RING_ID;
1673 }
1674
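/*
 * RSS redirection table update. On Thor the table interleaves Rx ring
 * and completion ring IDs (two entries per slot); on other chips it
 * holds ring group IDs, hence the two paths below. A caller-side sketch
 * spreading traffic over two queues (port_id is hypothetical; reta_size
 * must match the device's reta_size from rte_eth_dev_info_get()):
 *
 *	struct rte_eth_rss_reta_entry64 conf[reta_size /
 *					     RTE_RETA_GROUP_SIZE];
 *	memset(conf, 0, sizeof(conf));
 *	for (i = 0; i < reta_size; i++) {
 *		conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % 2;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, conf, reta_size);
 */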
1675 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
1676                             struct rte_eth_rss_reta_entry64 *reta_conf,
1677                             uint16_t reta_size)
1678 {
1679         struct bnxt *bp = eth_dev->data->dev_private;
1680         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1681         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1682         uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
1683         uint16_t idx, sft;
1684         int i, rc;
1685
1686         rc = is_bnxt_in_error(bp);
1687         if (rc)
1688                 return rc;
1689
1690         if (!vnic->rss_table)
1691                 return -EINVAL;
1692
1693         if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
1694                 return -EINVAL;
1695
1696         if (reta_size != tbl_size) {
1697                 PMD_DRV_LOG(ERR, "The configured hash table lookup size "
1698                         "(%d) must equal the size supported by the hardware "
1699                         "(%d)\n", reta_size, tbl_size);
1700                 return -EINVAL;
1701         }
1702
1703         for (i = 0; i < reta_size; i++) {
1704                 struct bnxt_rx_queue *rxq;
1705
1706                 idx = i / RTE_RETA_GROUP_SIZE;
1707                 sft = i % RTE_RETA_GROUP_SIZE;
1708
1709                 if (!(reta_conf[idx].mask & (1ULL << sft)))
1710                         continue;
1711
1712                 rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]);
1713                 if (!rxq) {
1714                         PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n");
1715                         return -EINVAL;
1716                 }
1717
1718                 if (BNXT_CHIP_THOR(bp)) {
1719                         vnic->rss_table[i * 2] =
1720                                 rxq->rx_ring->rx_ring_struct->fw_ring_id;
1721                         vnic->rss_table[i * 2 + 1] =
1722                                 rxq->cp_ring->cp_ring_struct->fw_ring_id;
1723                 } else {
1724                         vnic->rss_table[i] =
1725                             vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
1726                 }
1727         }
1728
1729         return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1731 }
1732
1733 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
1734                               struct rte_eth_rss_reta_entry64 *reta_conf,
1735                               uint16_t reta_size)
1736 {
1737         struct bnxt *bp = eth_dev->data->dev_private;
1738         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1739         uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
1740         uint16_t idx, sft, i;
1741         int rc;
1742
1743         rc = is_bnxt_in_error(bp);
1744         if (rc)
1745                 return rc;
1746
1747         /* Retrieve from the default VNIC */
1748         if (!vnic)
1749                 return -EINVAL;
1750         if (!vnic->rss_table)
1751                 return -EINVAL;
1752
1753         if (reta_size != tbl_size) {
1754                 PMD_DRV_LOG(ERR, "The configured hash table lookup size "
1755                         "(%d) must equal the size supported by the hardware "
1756                         "(%d)\n", reta_size, tbl_size);
1757                 return -EINVAL;
1758         }
1759
1760         for (idx = 0, i = 0; i < reta_size; i++) {
1761                 idx = i / RTE_RETA_GROUP_SIZE;
1762                 sft = i % RTE_RETA_GROUP_SIZE;
1763
1764                 if (reta_conf[idx].mask & (1ULL << sft)) {
1765                         uint16_t qid;
1766
1767                         if (BNXT_CHIP_THOR(bp))
1768                                 qid = bnxt_rss_to_qid(bp,
1769                                                       vnic->rss_table[i * 2]);
1770                         else
1771                                 qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);
1772
1773                         if (qid == INVALID_HW_RING_ID) {
1774                                 PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n");
1775                                 return -EINVAL;
1776                         }
1777                         reta_conf[idx].reta[sft] = qid;
1778                 }
1779         }
1780
1781         return 0;
1782 }
1783
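/*
 * .rss_hash_update ethdev op for the default VNIC. Requests that
 * contradict the mq_mode chosen at dev_configure time are rejected, a
 * new hash key (when supplied) must be exactly HW_HASH_KEY_SIZE bytes,
 * and the result is pushed to firmware via bnxt_hwrm_vnic_rss_cfg().
 */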
1784 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
1785                                    struct rte_eth_rss_conf *rss_conf)
1786 {
1787         struct bnxt *bp = eth_dev->data->dev_private;
1788         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1789         struct bnxt_vnic_info *vnic;
1790         int rc;
1791
1792         rc = is_bnxt_in_error(bp);
1793         if (rc)
1794                 return rc;
1795
1796         /*
1797          * If the RSS enablement requested here conflicts with the RSS
1798          * mode selected at dev_configure time, return -EINVAL.
1799          */
1800         if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
1801                 if (!rss_conf->rss_hf)
1802                         PMD_DRV_LOG(ERR, "Hash type NONE\n");
1803         } else {
1804                 if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
1805                         return -EINVAL;
1806         }
1807
1808         bp->flags |= BNXT_FLAG_UPDATE_HASH;
1809         memcpy(&eth_dev->data->dev_conf.rx_adv_conf.rss_conf,
1810                rss_conf,
1811                sizeof(*rss_conf));
1812
1813         /* Update the default RSS VNIC(s) */
1814         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1815         vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
1816
1817         /*
1818          * If hashkey is not specified, use the previously configured
1819          * hashkey
1820          */
1821         if (!rss_conf->rss_key)
1822                 goto rss_config;
1823
1824         if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) {
1825                 PMD_DRV_LOG(ERR, "Invalid hashkey length, should be %d bytes\n",
1826                             HW_HASH_KEY_SIZE);
1827                 return -EINVAL;
1828         }
1829         memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len);
1830
1831 rss_config:
1832         return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1834 }
1835
1836 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
1837                                      struct rte_eth_rss_conf *rss_conf)
1838 {
1839         struct bnxt *bp = eth_dev->data->dev_private;
1840         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1841         int len, rc;
1842         uint32_t hash_types;
1843
1844         rc = is_bnxt_in_error(bp);
1845         if (rc)
1846                 return rc;
1847
1848         /* RSS configuration is the same for all VNICs */
1849         if (vnic && vnic->rss_hash_key) {
1850                 if (rss_conf->rss_key) {
1851                         len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
1852                               rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
1853                         memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
1854                 }
1855
1856                 hash_types = vnic->hash_type;
1857                 rss_conf->rss_hf = 0;
1858                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
1859                         rss_conf->rss_hf |= ETH_RSS_IPV4;
1860                         hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
1861                 }
1862                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
1863                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1864                         hash_types &=
1865                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
1866                 }
1867                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
1868                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1869                         hash_types &=
1870                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
1871                 }
1872                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
1873                         rss_conf->rss_hf |= ETH_RSS_IPV6;
1874                         hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
1875                 }
1876                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
1877                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1878                         hash_types &=
1879                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
1880                 }
1881                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
1882                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1883                         hash_types &=
1884                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
1885                 }
1886                 if (hash_types) {
1887                         PMD_DRV_LOG(ERR,
1888                                 "Unknown RSS config from firmware (%08x), RSS disabled\n",
1889                                 vnic->hash_type);
1890                         return -ENOTSUP;
1891                 }
1892         } else {
1893                 rss_conf->rss_hf = 0;
1894         }
1895         return 0;
1896 }
1897
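/*
 * Flow-control ops. The get path refreshes the link config from firmware
 * and maps the HWRM pause bits onto RTE_FC_* modes; the set path
 * programs either autonegotiated or forced pause settings and is
 * restricted to single-function PFs (not VFs).
 */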
1898 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
1899                                struct rte_eth_fc_conf *fc_conf)
1900 {
1901         struct bnxt *bp = dev->data->dev_private;
1902         struct rte_eth_link link_info;
1903         int rc;
1904
1905         rc = is_bnxt_in_error(bp);
1906         if (rc)
1907                 return rc;
1908
1909         rc = bnxt_get_hwrm_link_config(bp, &link_info);
1910         if (rc)
1911                 return rc;
1912
1913         memset(fc_conf, 0, sizeof(*fc_conf));
1914         if (bp->link_info->auto_pause)
1915                 fc_conf->autoneg = 1;
1916         switch (bp->link_info->pause) {
1917         case 0:
1918                 fc_conf->mode = RTE_FC_NONE;
1919                 break;
1920         case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
1921                 fc_conf->mode = RTE_FC_TX_PAUSE;
1922                 break;
1923         case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
1924                 fc_conf->mode = RTE_FC_RX_PAUSE;
1925                 break;
1926         case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
1927                         HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
1928                 fc_conf->mode = RTE_FC_FULL;
1929                 break;
1930         }
1931         return 0;
1932 }
1933
1934 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
1935                                struct rte_eth_fc_conf *fc_conf)
1936 {
1937         struct bnxt *bp = dev->data->dev_private;
1938         int rc;
1939
1940         rc = is_bnxt_in_error(bp);
1941         if (rc)
1942                 return rc;
1943
1944         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
1945                 PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
1946                 return -ENOTSUP;
1947         }
1948
1949         switch (fc_conf->mode) {
1950         case RTE_FC_NONE:
1951                 bp->link_info->auto_pause = 0;
1952                 bp->link_info->force_pause = 0;
1953                 break;
1954         case RTE_FC_RX_PAUSE:
1955                 if (fc_conf->autoneg) {
1956                         bp->link_info->auto_pause =
1957                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1958                         bp->link_info->force_pause = 0;
1959                 } else {
1960                         bp->link_info->auto_pause = 0;
1961                         bp->link_info->force_pause =
1962                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1963                 }
1964                 break;
1965         case RTE_FC_TX_PAUSE:
1966                 if (fc_conf->autoneg) {
1967                         bp->link_info->auto_pause =
1968                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
1969                         bp->link_info->force_pause = 0;
1970                 } else {
1971                         bp->link_info->auto_pause = 0;
1972                         bp->link_info->force_pause =
1973                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
1974                 }
1975                 break;
1976         case RTE_FC_FULL:
1977                 if (fc_conf->autoneg) {
1978                         bp->link_info->auto_pause =
1979                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
1980                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1981                         bp->link_info->force_pause = 0;
1982                 } else {
1983                         bp->link_info->auto_pause = 0;
1984                         bp->link_info->force_pause =
1985                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
1986                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1987                 }
1988                 break;
1989         }
1990         return bnxt_set_hwrm_link_config(bp, true);
1991 }
1992
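/*
 * UDP tunnel port handlers. Only one VXLAN and one GENEVE port can be
 * offloaded at a time; re-adding the same port just bumps a reference
 * count. A caller-side sketch (4789 is the IANA VXLAN port; port_id is
 * hypothetical):
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */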
1993 /* Add UDP tunneling port */
1994 static int
1995 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
1996                          struct rte_eth_udp_tunnel *udp_tunnel)
1997 {
1998         struct bnxt *bp = eth_dev->data->dev_private;
1999         uint16_t tunnel_type = 0;
2000         int rc = 0;
2001
2002         rc = is_bnxt_in_error(bp);
2003         if (rc)
2004                 return rc;
2005
2006         switch (udp_tunnel->prot_type) {
2007         case RTE_TUNNEL_TYPE_VXLAN:
2008                 if (bp->vxlan_port_cnt) {
2009                         PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
2010                                 udp_tunnel->udp_port);
2011                         if (bp->vxlan_port != udp_tunnel->udp_port) {
2012                                 PMD_DRV_LOG(ERR, "Only one port allowed\n");
2013                                 return -ENOSPC;
2014                         }
2015                         bp->vxlan_port_cnt++;
2016                         return 0;
2017                 }
2018                 tunnel_type =
2019                         HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
2020                 bp->vxlan_port_cnt++;
2021                 break;
2022         case RTE_TUNNEL_TYPE_GENEVE:
2023                 if (bp->geneve_port_cnt) {
2024                         PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
2025                                 udp_tunnel->udp_port);
2026                         if (bp->geneve_port != udp_tunnel->udp_port) {
2027                                 PMD_DRV_LOG(ERR, "Only one port allowed\n");
2028                                 return -ENOSPC;
2029                         }
2030                         bp->geneve_port_cnt++;
2031                         return 0;
2032                 }
2033                 tunnel_type =
2034                         HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
2035                 bp->geneve_port_cnt++;
2036                 break;
2037         default:
2038                 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
2039                 return -ENOTSUP;
2040         }
2041         rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
2042                                              tunnel_type);
2043         return rc;
2044 }
2045
2046 static int
2047 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
2048                          struct rte_eth_udp_tunnel *udp_tunnel)
2049 {
2050         struct bnxt *bp = eth_dev->data->dev_private;
2051         uint16_t tunnel_type = 0;
2052         uint16_t port = 0;
2053         int rc = 0;
2054
2055         rc = is_bnxt_in_error(bp);
2056         if (rc)
2057                 return rc;
2058
2059         switch (udp_tunnel->prot_type) {
2060         case RTE_TUNNEL_TYPE_VXLAN:
2061                 if (!bp->vxlan_port_cnt) {
2062                         PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
2063                         return -EINVAL;
2064                 }
2065                 if (bp->vxlan_port != udp_tunnel->udp_port) {
2066                         PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
2067                                 udp_tunnel->udp_port, bp->vxlan_port);
2068                         return -EINVAL;
2069                 }
2070                 if (--bp->vxlan_port_cnt)
2071                         return 0;
2072
2073                 tunnel_type =
2074                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
2075                 port = bp->vxlan_fw_dst_port_id;
2076                 break;
2077         case RTE_TUNNEL_TYPE_GENEVE:
2078                 if (!bp->geneve_port_cnt) {
2079                         PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
2080                         return -EINVAL;
2081                 }
2082                 if (bp->geneve_port != udp_tunnel->udp_port) {
2083                         PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
2084                                 udp_tunnel->udp_port, bp->geneve_port);
2085                         return -EINVAL;
2086                 }
2087                 if (--bp->geneve_port_cnt)
2088                         return 0;
2089
2090                 tunnel_type =
2091                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
2092                 port = bp->geneve_fw_dst_port_id;
2093                 break;
2094         default:
2095                 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
2096                 return -ENOTSUP;
2097         }
2098
2099         rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
2100         if (!rc) {
2101                 if (tunnel_type ==
2102                     HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
2103                         bp->vxlan_port = 0;
2104                 if (tunnel_type ==
2105                     HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
2106                         bp->geneve_port = 0;
2107         }
2108         return rc;
2109 }
2110
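/*
 * VLAN filter helpers. Both walk the default VNIC's filter list looking
 * for a matching MAC+VLAN entry (bnxt_vlan_filter_exists) and then clear
 * or program the corresponding L2 filter through HWRM.
 */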
2111 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
2112 {
2113         struct bnxt_filter_info *filter;
2114         struct bnxt_vnic_info *vnic;
2115         int rc = 0;
2116         uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
2117
2118         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2119         filter = STAILQ_FIRST(&vnic->filter);
2120         while (filter) {
2121                 /* Search for this matching MAC+VLAN filter */
2122                 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) {
2123                         /* Delete the filter */
2124                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2125                         if (rc)
2126                                 return rc;
2127                         STAILQ_REMOVE(&vnic->filter, filter,
2128                                       bnxt_filter_info, next);
2129                         bnxt_free_filter(bp, filter);
2130                         PMD_DRV_LOG(INFO,
2131                                     "Deleted vlan filter for %d\n",
2132                                     vlan_id);
2133                         return 0;
2134                 }
2135                 filter = STAILQ_NEXT(filter, next);
2136         }
2137         return -ENOENT;
2138 }
2139
2140 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
2141 {
2142         struct bnxt_filter_info *filter;
2143         struct bnxt_vnic_info *vnic;
2144         int rc = 0;
2145         uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
2146                 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
2147         uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
2148
2149         /* Implementation notes on the use of VNIC in this command:
2150          *
2151          * By default, these filters belong to default vnic for the function.
2152          * Once these filters are set up, only destination VNIC can be modified.
2153          * If the destination VNIC is not specified in this command,
2154          * then the HWRM shall only create an l2 context id.
2155          */
2156
2157         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2158         filter = STAILQ_FIRST(&vnic->filter);
2159         /* Check if the VLAN has already been added */
2160         while (filter) {
2161                 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id))
2162                         return -EEXIST;
2163
2164                 filter = STAILQ_NEXT(filter, next);
2165         }
2166
2167         /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC
2168          * command to create MAC+VLAN filter with the right flags, enables set.
2169          */
2170         filter = bnxt_alloc_filter(bp);
2171         if (!filter) {
2172                 PMD_DRV_LOG(ERR,
2173                             "MAC/VLAN filter alloc failed\n");
2174                 return -ENOMEM;
2175         }
2176         /* MAC + VLAN ID filter */
2177         /* If l2_ivlan == 0 and l2_ivlan_mask != 0, only
2178          * untagged packets are received
2179          *
2180          * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged packets
2181          * and packets tagged with the programmed vlan are received
2182          */
2183         filter->l2_ivlan = vlan_id;
2184         filter->l2_ivlan_mask = 0x0FFF;
2185         filter->enables |= en;
2186         filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
2187
2188         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
2189         if (rc) {
2190                 /* Free the newly allocated filter as we were
2191                  * not able to create the filter in hardware.
2192                  */
2193                 bnxt_free_filter(bp, filter);
2194                 return rc;
2195         }
2196
2197         filter->mac_index = 0;
2198         /* Add this new filter to the list */
2199         if (vlan_id == 0)
2200                 STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
2201         else
2202                 STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
2203
2204         PMD_DRV_LOG(INFO,
2205                     "Added Vlan filter for %d\n", vlan_id);
2206         return rc;
2207 }
2208
2209 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
2210                 uint16_t vlan_id, int on)
2211 {
2212         struct bnxt *bp = eth_dev->data->dev_private;
2213         int rc;
2214
2215         rc = is_bnxt_in_error(bp);
2216         if (rc)
2217                 return rc;
2218
2219         if (!eth_dev->data->dev_started) {
2220                 PMD_DRV_LOG(ERR, "port must be started before setting vlan\n");
2221                 return -EINVAL;
2222         }
2223
2224         /* These operations apply to ALL existing MAC/VLAN filters */
2225         if (on)
2226                 return bnxt_add_vlan_filter(bp, vlan_id);
2227         else
2228                 return bnxt_del_vlan_filter(bp, vlan_id);
2229 }
2230
2231 static int bnxt_del_dflt_mac_filter(struct bnxt *bp,
2232                                     struct bnxt_vnic_info *vnic)
2233 {
2234         struct bnxt_filter_info *filter;
2235         int rc;
2236
2237         filter = STAILQ_FIRST(&vnic->filter);
2238         while (filter) {
2239                 if (filter->mac_index == 0 &&
2240                     !memcmp(filter->l2_addr, bp->mac_addr,
2241                             RTE_ETHER_ADDR_LEN)) {
2242                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2243                         if (!rc) {
2244                                 STAILQ_REMOVE(&vnic->filter, filter,
2245                                               bnxt_filter_info, next);
2246                                 bnxt_free_filter(bp, filter);
2247                         }
2248                         return rc;
2249                 }
2250                 filter = STAILQ_NEXT(filter, next);
2251         }
2252         return 0;
2253 }
2254
2255 static int
2256 bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
2257 {
2258         struct bnxt_vnic_info *vnic;
2259         unsigned int i;
2260         int rc;
2261
2262         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2263         if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
2264                 /* Remove any VLAN filters programmed */
2265                 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
2266                         bnxt_del_vlan_filter(bp, i);
2267
2268                 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0);
2269                 if (rc)
2270                         return rc;
2271         } else {
2272                 /* Default filter will allow packets that match the
2273                  * dest mac. So, it has to be deleted, otherwise, we
2274                  * will end up receiving vlan packets for which the
2275                  * filter is not programmed, when hw-vlan-filter
2276                  * configuration is ON
2277                  */
2278                 bnxt_del_dflt_mac_filter(bp, vnic);
2279                 /* This filter will allow only untagged packets */
2280                 bnxt_add_vlan_filter(bp, 0);
2281         }
2282         PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
2283                     !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
2284
2285         return 0;
2286 }
2287
2288 static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id)
2289 {
2290         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2291         unsigned int i;
2292         int rc;
2293
2294         /* Destroy vnic filters and vnic */
2295         if (bp->eth_dev->data->dev_conf.rxmode.offloads &
2296             DEV_RX_OFFLOAD_VLAN_FILTER) {
2297                 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
2298                         bnxt_del_vlan_filter(bp, i);
2299         }
2300         bnxt_del_dflt_mac_filter(bp, vnic);
2301
2302         rc = bnxt_hwrm_vnic_free(bp, vnic);
2303         if (rc)
2304                 return rc;
2305
2306         rte_free(vnic->fw_grp_ids);
2307         vnic->fw_grp_ids = NULL;
2308
2309         vnic->rx_queue_cnt = 0;
2310
2311         return 0;
2312 }
2313
2314 static int
2315 bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
2316 {
2317         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
2318         int rc;
2319
2320         /* Destroy, recreate and reconfigure the default vnic */
2321         rc = bnxt_free_one_vnic(bp, 0);
2322         if (rc)
2323                 return rc;
2324
2325         /* default vnic 0 */
2326         rc = bnxt_setup_one_vnic(bp, 0);
2327         if (rc)
2328                 return rc;
2329
2330         if (bp->eth_dev->data->dev_conf.rxmode.offloads &
2331             DEV_RX_OFFLOAD_VLAN_FILTER) {
2332                 rc = bnxt_add_vlan_filter(bp, 0);
2333                 if (rc)
2334                         return rc;
2335                 rc = bnxt_restore_vlan_filters(bp);
2336                 if (rc)
2337                         return rc;
2338         } else {
2339                 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0);
2340                 if (rc)
2341                         return rc;
2342         }
2343
2344         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2345         if (rc)
2346                 return rc;
2347
2348         PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
2349                     !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
2350
2351         return rc;
2352 }
2353
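/*
 * .vlan_offload_set ethdev op. A caller-side sketch, assuming the
 * corresponding DEV_RX_OFFLOAD_VLAN_* bits were already selected in
 * dev_conf.rxmode.offloads at configure time (port_id is hypothetical):
 *
 *	rte_eth_dev_set_vlan_offload(port_id,
 *				     ETH_VLAN_STRIP_MASK |
 *				     ETH_VLAN_FILTER_MASK);
 */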
2354 static int
2355 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
2356 {
2357         uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
2358         struct bnxt *bp = dev->data->dev_private;
2359         int rc;
2360
2361         rc = is_bnxt_in_error(bp);
2362         if (rc)
2363                 return rc;
2364
2365         /* Filter settings will get applied when port is started */
2366         if (!dev->data->dev_started)
2367                 return 0;
2368
2369         if (mask & ETH_VLAN_FILTER_MASK) {
2370                 /* Enable or disable VLAN filtering */
2371                 rc = bnxt_config_vlan_hw_filter(bp, rx_offloads);
2372                 if (rc)
2373                         return rc;
2374         }
2375
2376         if (mask & ETH_VLAN_STRIP_MASK) {
2377                 /* Enable or disable VLAN stripping */
2378                 rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads);
2379                 if (rc)
2380                         return rc;
2381         }
2382
2383         if (mask & ETH_VLAN_EXTEND_MASK) {
2384                 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2385                         PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n");
2386                 else
2387                         PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n");
2388         }
2389
2390         return 0;
2391 }
2392
2393 static int
2394 bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
2395                       uint16_t tpid)
2396 {
2397         struct bnxt *bp = dev->data->dev_private;
2398         int qinq = dev->data->dev_conf.rxmode.offloads &
2399                    DEV_RX_OFFLOAD_VLAN_EXTEND;
2400
2401         if (vlan_type != ETH_VLAN_TYPE_INNER &&
2402             vlan_type != ETH_VLAN_TYPE_OUTER) {
2403                 PMD_DRV_LOG(ERR,
2404                             "Unsupported vlan type.\n");
2405                 return -EINVAL;
2406         }
2407         if (!qinq) {
2408                 PMD_DRV_LOG(ERR,
2409                             "QinQ not enabled. It must be on since only "
2410                             "the outer vlan can be accelerated\n");
2411                 return -EINVAL;
2412         }
2413
2414         if (vlan_type == ETH_VLAN_TYPE_OUTER) {
2415                 switch (tpid) {
2416                 case RTE_ETHER_TYPE_QINQ:
2417                         bp->outer_tpid_bd =
2418                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8;
2419                         break;
2420                 case RTE_ETHER_TYPE_VLAN:
2421                         bp->outer_tpid_bd =
2422                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
2423                         break;
2424                 case 0x9100:
2425                         bp->outer_tpid_bd =
2426                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100;
2427                         break;
2428                 case 0x9200:
2429                         bp->outer_tpid_bd =
2430                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200;
2431                         break;
2432                 case 0x9300:
2433                         bp->outer_tpid_bd =
2434                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300;
2435                         break;
2436                 default:
2437                         PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid);
2438                         return -EINVAL;
2439                 }
2440                 bp->outer_tpid_bd |= tpid;
2441                 PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd);
2442         } else if (vlan_type == ETH_VLAN_TYPE_INNER) {
2443                 PMD_DRV_LOG(ERR,
2444                             "Can accelerate only outer vlan in QinQ\n");
2445                 return -EINVAL;
2446         }
2447
2448         return 0;
2449 }
2450
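/*
 * .mac_addr_set ethdev op: replaces the default MAC by deleting the
 * existing default L2 filter and re-adding one for the new address (a
 * VLAN 0 filter instead when VLAN filtering is enabled). Untrusted VFs
 * may not change their MAC address.
 */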
2451 static int
2452 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
2453                              struct rte_ether_addr *addr)
2454 {
2455         struct bnxt *bp = dev->data->dev_private;
2456         /* Default Filter is tied to VNIC 0 */
2457         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
2458         int rc;
2459
2460         rc = is_bnxt_in_error(bp);
2461         if (rc)
2462                 return rc;
2463
2464         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
2465                 return -EPERM;
2466
2467         if (rte_is_zero_ether_addr(addr))
2468                 return -EINVAL;
2469
2470         /* Filter settings will get applied when port is started */
2471         if (!dev->data->dev_started)
2472                 return 0;
2473
2474         /* Check if the requested MAC is already added */
2475         if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0)
2476                 return 0;
2477
2478         /* Destroy filter and re-create it */
2479         bnxt_del_dflt_mac_filter(bp, vnic);
2480
2481         memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN);
2482         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
2483                 /* This filter will allow only untagged packets */
2484                 rc = bnxt_add_vlan_filter(bp, 0);
2485         } else {
2486                 rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0);
2487         }
2488
2489         PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
2490         return rc;
2491 }
2492
2493 static int
2494 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
2495                           struct rte_ether_addr *mc_addr_set,
2496                           uint32_t nb_mc_addr)
2497 {
2498         struct bnxt *bp = eth_dev->data->dev_private;
2499         char *mc_addr_list = (char *)mc_addr_set;
2500         struct bnxt_vnic_info *vnic;
2501         uint32_t off = 0, i = 0;
2502         int rc;
2503
2504         rc = is_bnxt_in_error(bp);
2505         if (rc)
2506                 return rc;
2507
2508         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2509
2510         if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
2511                 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
2512                 goto allmulti;
2513         }
2514
2515         /* TODO Check for Duplicate mcast addresses */
2516         vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
2517         for (i = 0; i < nb_mc_addr; i++) {
2518                 memcpy(vnic->mc_list + off, mc_addr_list + off,
2519                         RTE_ETHER_ADDR_LEN);
2520                 off += RTE_ETHER_ADDR_LEN;
2521         }
2522
2523         vnic->mc_addr_cnt = i;
2524         if (vnic->mc_addr_cnt)
2525                 vnic->flags |= BNXT_VNIC_INFO_MCAST;
2526         else
2527                 vnic->flags &= ~BNXT_VNIC_INFO_MCAST;
2528
2529 allmulti:
2530         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2531 }
2532
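/*
 * .fw_version_get ethdev op. Per the ethdev contract, when the caller's
 * buffer is too small the required length (including the terminating
 * '\0') is returned instead of 0 so the caller can retry with a larger
 * buffer.
 */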
2533 static int
2534 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2535 {
2536         struct bnxt *bp = dev->data->dev_private;
2537         uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
2538         uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
2539         uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
2540         uint8_t fw_rsvd = bp->fw_ver & 0xff;
2541         int ret;
2542
2543         ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d",
2544                         fw_major, fw_minor, fw_updt, fw_rsvd);
2545
2546         ret += 1; /* add the size of '\0' */
2547         if (fw_size < (uint32_t)ret)
2548                 return ret;
2549         else
2550                 return 0;
2551 }
2552
2553 static void
2554 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
2555         struct rte_eth_rxq_info *qinfo)
2556 {
2557         struct bnxt *bp = dev->data->dev_private;
2558         struct bnxt_rx_queue *rxq;
2559
2560         if (is_bnxt_in_error(bp))
2561                 return;
2562
2563         rxq = dev->data->rx_queues[queue_id];
2564
2565         qinfo->mp = rxq->mb_pool;
2566         qinfo->scattered_rx = dev->data->scattered_rx;
2567         qinfo->nb_desc = rxq->nb_rx_desc;
2568
2569         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2570         qinfo->conf.rx_drop_en = 0;
2571         qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
2572 }
2573
2574 static void
2575 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
2576         struct rte_eth_txq_info *qinfo)
2577 {
2578         struct bnxt *bp = dev->data->dev_private;
2579         struct bnxt_tx_queue *txq;
2580
2581         if (is_bnxt_in_error(bp))
2582                 return;
2583
2584         txq = dev->data->tx_queues[queue_id];
2585
2586         qinfo->nb_desc = txq->nb_tx_desc;
2587
2588         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2589         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2590         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2591
2592         qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
2593         qinfo->conf.tx_rs_thresh = 0;
2594         qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
2595 }
2596
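/*
 * .mtu_set ethdev op. The frame size programmed into hardware accounts
 * for the L2 header, CRC, and two VLAN tags. As a worked example, an MTU
 * of 9000 yields 9000 + 14 (RTE_ETHER_HDR_LEN) + 4 (RTE_ETHER_CRC_LEN) +
 * 2 * 4 (VLAN tags) = 9026 bytes, and enables the jumbo-frame offload
 * since the MTU exceeds RTE_ETHER_MTU (1500).
 */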
2597 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
2598 {
2599         struct bnxt *bp = eth_dev->data->dev_private;
2600         uint32_t new_pkt_size;
2601         int rc = 0;
2602         uint32_t i;
2603
2604         rc = is_bnxt_in_error(bp);
2605         if (rc)
2606                 return rc;
2607
2608         /* Exit if receive queues are not configured yet */
2609         if (!eth_dev->data->nb_rx_queues)
2610                 return rc;
2611
2612         new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
2613                        VLAN_TAG_SIZE * BNXT_NUM_VLANS;
2614
2615 #ifdef RTE_ARCH_X86
2616         /*
2617          * If vector-mode tx/rx is active, disallow any MTU change that would
2618          * require scattered receive support.
2619          */
2620         if (eth_dev->data->dev_started &&
2621             (eth_dev->rx_pkt_burst == bnxt_recv_pkts_vec ||
2622              eth_dev->tx_pkt_burst == bnxt_xmit_pkts_vec) &&
2623             (new_pkt_size >
2624              eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2625                 PMD_DRV_LOG(ERR,
2626                             "MTU change would require scattered rx support. ");
2627                 PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n");
2628                 return -EINVAL;
2629         }
2630 #endif
2631
2632         if (new_mtu > RTE_ETHER_MTU) {
2633                 bp->flags |= BNXT_FLAG_JUMBO;
2634                 bp->eth_dev->data->dev_conf.rxmode.offloads |=
2635                         DEV_RX_OFFLOAD_JUMBO_FRAME;
2636         } else {
2637                 bp->eth_dev->data->dev_conf.rxmode.offloads &=
2638                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
2639                 bp->flags &= ~BNXT_FLAG_JUMBO;
2640         }
2641
2642         /* Is there a change in mtu setting? */
2643         if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len == new_pkt_size)
2644                 return rc;
2645
2646         for (i = 0; i < bp->nr_vnics; i++) {
2647                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2648                 uint16_t size = 0;
2649
2650                 vnic->mru = BNXT_VNIC_MRU(new_mtu);
2651                 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
2652                 if (rc)
2653                         break;
2654
2655                 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2656                 size -= RTE_PKTMBUF_HEADROOM;
2657
2658                 if (size < new_mtu) {
2659                         rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
2660                         if (rc)
2661                                 return rc;
2662                 }
2663         }
2664
2665         if (!rc)
2666                 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;
2667
2668         PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu);
2669
2670         return rc;
2671 }
2672
2673 static int
2674 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
2675 {
2676         struct bnxt *bp = dev->data->dev_private;
2677         uint16_t vlan = bp->vlan;
2678         int rc;
2679
2680         rc = is_bnxt_in_error(bp);
2681         if (rc)
2682                 return rc;
2683
2684         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
2685                 PMD_DRV_LOG(ERR,
2686                         "PVID cannot be modified for this function\n");
2687                 return -ENOTSUP;
2688         }
2689         bp->vlan = on ? pvid : 0;
2690
2691         rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
2692         if (rc)
2693                 bp->vlan = vlan;
2694         return rc;
2695 }
2696
2697 static int
2698 bnxt_dev_led_on_op(struct rte_eth_dev *dev)
2699 {
2700         struct bnxt *bp = dev->data->dev_private;
2701         int rc;
2702
2703         rc = is_bnxt_in_error(bp);
2704         if (rc)
2705                 return rc;
2706
2707         return bnxt_hwrm_port_led_cfg(bp, true);
2708 }
2709
2710 static int
2711 bnxt_dev_led_off_op(struct rte_eth_dev *dev)
2712 {
2713         struct bnxt *bp = dev->data->dev_private;
2714         int rc;
2715
2716         rc = is_bnxt_in_error(bp);
2717         if (rc)
2718                 return rc;
2719
2720         return bnxt_hwrm_port_led_cfg(bp, false);
2721 }
2722
2723 static uint32_t
2724 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2725 {
2726         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2727         uint32_t desc = 0, raw_cons = 0, cons;
2728         struct bnxt_cp_ring_info *cpr;
2729         struct bnxt_rx_queue *rxq;
2730         struct rx_pkt_cmpl *rxcmp;
2731         int rc;
2732
2733         rc = is_bnxt_in_error(bp);
2734         if (rc)
2735                 return rc;
2736
2737         rxq = dev->data->rx_queues[rx_queue_id];
2738         cpr = rxq->cp_ring;
2739         raw_cons = cpr->cp_raw_cons;
2740
2741         while (1) {
2742                 cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
2743                 rte_prefetch0(&cpr->cp_desc_ring[cons]);
2744                 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2745
2746                 if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct)) {
2747                         break;
2748                 } else {
2749                         raw_cons++;
2750                         desc++;
2751                 }
2752         }
2753
2754         return desc;
2755 }
2756
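/*
 * Descriptor-status helpers. Completion ring entries carry a valid bit
 * whose expected polarity flips on every ring wrap; whether the queried
 * offset lands before or past the current raw consumer index determines
 * which polarity (cpr->valid or its inverse) marks the entry complete.
 */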
2757 static int
2758 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
2759 {
2760         struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
2761         struct bnxt_rx_ring_info *rxr;
2762         struct bnxt_cp_ring_info *cpr;
2763         struct bnxt_sw_rx_bd *rx_buf;
2764         struct rx_pkt_cmpl *rxcmp;
2765         uint32_t cons, cp_cons;
2766         int rc;
2767
2768         if (!rxq)
2769                 return -EINVAL;
2770
2771         rc = is_bnxt_in_error(rxq->bp);
2772         if (rc)
2773                 return rc;
2774
2775         cpr = rxq->cp_ring;
2776         rxr = rxq->rx_ring;
2777
2778         if (offset >= rxq->nb_rx_desc)
2779                 return -EINVAL;
2780
2781         cons = RING_CMP(cpr->cp_ring_struct, offset);
2782         cp_cons = cpr->cp_raw_cons;
2783         rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2784
2785         if (cons > cp_cons) {
2786                 if (CMPL_VALID(rxcmp, cpr->valid))
2787                         return RTE_ETH_RX_DESC_DONE;
2788         } else {
2789                 if (CMPL_VALID(rxcmp, !cpr->valid))
2790                         return RTE_ETH_RX_DESC_DONE;
2791         }
2792         rx_buf = &rxr->rx_buf_ring[cons];
2793         if (rx_buf->mbuf == NULL)
2794                 return RTE_ETH_RX_DESC_UNAVAIL;
2795
2797         return RTE_ETH_RX_DESC_AVAIL;
2798 }
2799
2800 static int
2801 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
2802 {
2803         struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
2804         struct bnxt_tx_ring_info *txr;
2805         struct bnxt_cp_ring_info *cpr;
2806         struct bnxt_sw_tx_bd *tx_buf;
2807         struct tx_pkt_cmpl *txcmp;
2808         uint32_t cons, cp_cons;
2809         int rc;
2810
2811         if (!txq)
2812                 return -EINVAL;
2813
2814         rc = is_bnxt_in_error(txq->bp);
2815         if (rc)
2816                 return rc;
2817
2818         cpr = txq->cp_ring;
2819         txr = txq->tx_ring;
2820
2821         if (offset >= txq->nb_tx_desc)
2822                 return -EINVAL;
2823
2824         cons = RING_CMP(cpr->cp_ring_struct, offset);
2825         txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2826         cp_cons = cpr->cp_raw_cons;
2827
2828         if (cons > cp_cons) {
2829                 if (CMPL_VALID(txcmp, cpr->valid))
2830                         return RTE_ETH_TX_DESC_UNAVAIL;
2831         } else {
2832                 if (CMPL_VALID(txcmp, !cpr->valid))
2833                         return RTE_ETH_TX_DESC_UNAVAIL;
2834         }
2835         tx_buf = &txr->tx_buf_ring[cons];
2836         if (tx_buf->mbuf == NULL)
2837                 return RTE_ETH_TX_DESC_DONE;
2838
2839         return RTE_ETH_TX_DESC_FULL;
2840 }
2841
2842 static struct bnxt_filter_info *
2843 bnxt_match_and_validate_ether_filter(struct bnxt *bp,
2844                                 struct rte_eth_ethertype_filter *efilter,
2845                                 struct bnxt_vnic_info *vnic0,
2846                                 struct bnxt_vnic_info *vnic,
2847                                 int *ret)
2848 {
2849         struct bnxt_filter_info *mfilter = NULL;
2850         int match = 0;
2851         *ret = 0;
2852
2853         if (efilter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2854                 efilter->ether_type == RTE_ETHER_TYPE_IPV6) {
2855                 PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in"
2856                         " ethertype filter.\n", efilter->ether_type);
2857                 *ret = -EINVAL;
2858                 goto exit;
2859         }
2860         if (efilter->queue >= bp->rx_nr_rings) {
2861                 PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
2862                 *ret = -EINVAL;
2863                 goto exit;
2864         }
2865
2866         vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
2867         vnic = &bp->vnic_info[efilter->queue];
2868         if (vnic == NULL) {
2869                 PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
2870                 *ret = -EINVAL;
2871                 goto exit;
2872         }
2873
2874         if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
2875                 STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
2876                         if ((!memcmp(efilter->mac_addr.addr_bytes,
2877                                      mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
2878                              mfilter->flags ==
2879                              HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
2880                              mfilter->ethertype == efilter->ether_type)) {
2881                                 match = 1;
2882                                 break;
2883                         }
2884                 }
2885         } else {
2886                 STAILQ_FOREACH(mfilter, &vnic->filter, next)
2887                         if ((!memcmp(efilter->mac_addr.addr_bytes,
2888                                      mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
2889                              mfilter->ethertype == efilter->ether_type &&
2890                              mfilter->flags ==
2891                              HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
2892                                 match = 1;
2893                                 break;
2894                         }
2895         }
2896
2897         if (match)
2898                 *ret = -EEXIST;
2899
2900 exit:
2901         return mfilter;
2902 }
2903
2904 static int
2905 bnxt_ethertype_filter(struct rte_eth_dev *dev,
2906                         enum rte_filter_op filter_op,
2907                         void *arg)
2908 {
2909         struct bnxt *bp = dev->data->dev_private;
2910         struct rte_eth_ethertype_filter *efilter =
2911                         (struct rte_eth_ethertype_filter *)arg;
2912         struct bnxt_filter_info *bfilter, *filter1;
2913         struct bnxt_vnic_info *vnic, *vnic0;
2914         int ret;
2915
2916         if (filter_op == RTE_ETH_FILTER_NOP)
2917                 return 0;
2918
2919         if (arg == NULL) {
2920                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.\n",
2921                             filter_op);
2922                 return -EINVAL;
2923         }
2924
2925         vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
2926         vnic = &bp->vnic_info[efilter->queue];
2927
2928         switch (filter_op) {
2929         case RTE_ETH_FILTER_ADD:
2930                 bnxt_match_and_validate_ether_filter(bp, efilter,
2931                                                         vnic0, vnic, &ret);
2932                 if (ret < 0)
2933                         return ret;
2934
2935                 bfilter = bnxt_get_unused_filter(bp);
2936                 if (bfilter == NULL) {
2937                         PMD_DRV_LOG(ERR,
2938                                 "Not enough resources for a new filter.\n");
2939                         return -ENOMEM;
2940                 }
2941                 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
2942                 memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
2943                        RTE_ETHER_ADDR_LEN);
2944                 memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
2945                        RTE_ETHER_ADDR_LEN);
2946                 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
2947                 bfilter->ethertype = efilter->ether_type;
2948                 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2949
2950                 filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0);
2951                 if (filter1 == NULL) {
2952                         ret = -EINVAL;
2953                         goto cleanup;
2954                 }
2955                 bfilter->enables |=
2956                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
2957                 bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
2958
2959                 bfilter->dst_id = vnic->fw_vnic_id;
2960
2961                 if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
2962                         bfilter->flags =
2963                                 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
2964                 }
2965
2966                 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
2967                 if (ret)
2968                         goto cleanup;
2969                 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
2970                 break;
2971         case RTE_ETH_FILTER_DELETE:
2972                 filter1 = bnxt_match_and_validate_ether_filter(bp, efilter,
2973                                                         vnic0, vnic, &ret);
2974                 if (ret == -EEXIST) {
2975                         ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1);
2976
2977                         STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info,
2978                                       next);
2979                         bnxt_free_filter(bp, filter1);
2980                 } else if (ret == 0) {
2981                         PMD_DRV_LOG(ERR, "No matching filter found\n");
2982                 }
2983                 break;
2984         default:
2985                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
2986                 ret = -EINVAL;
2987                 goto error;
2988         }
2989         return ret;
2990 cleanup:
2991         bnxt_free_filter(bp, bfilter);
2992 error:
2993         return ret;
2994 }
2995
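/*
 * Translate an rte_eth_ntuple_filter into a bnxt_filter_info. Only
 * exact-match (all-ones mask) fields are accepted; anything else is
 * rejected with -EINVAL. IPv4 with TCP or UDP is the only supported
 * combination.
 */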
2996 static inline int
2997 parse_ntuple_filter(struct bnxt *bp,
2998                     struct rte_eth_ntuple_filter *nfilter,
2999                     struct bnxt_filter_info *bfilter)
3000 {
3001         uint32_t en = 0;
3002
3003         if (nfilter->queue >= bp->rx_nr_rings) {
3004                 PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue);
3005                 return -EINVAL;
3006         }
3007
3008         switch (nfilter->dst_port_mask) {
3009         case UINT16_MAX:
3010                 bfilter->dst_port_mask = -1;
3011                 bfilter->dst_port = nfilter->dst_port;
3012                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT |
3013                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3014                 break;
3015         default:
3016                 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
3017                 return -EINVAL;
3018         }
3019
3020         bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
3021         en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3022
3023         switch (nfilter->proto_mask) {
3024         case UINT8_MAX:
3025                 if (nfilter->proto == 17) /* IPPROTO_UDP */
3026                         bfilter->ip_protocol = 17;
3027                 else if (nfilter->proto == 6) /* IPPROTO_TCP */
3028                         bfilter->ip_protocol = 6;
3029                 else
3030                         return -EINVAL;
3031                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3032                 break;
3033         default:
3034                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
3035                 return -EINVAL;
3036         }
3037
3038         switch (nfilter->dst_ip_mask) {
3039         case UINT32_MAX:
3040                 bfilter->dst_ipaddr_mask[0] = -1;
3041                 bfilter->dst_ipaddr[0] = nfilter->dst_ip;
3042                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR |
3043                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3044                 break;
3045         default:
3046                 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
3047                 return -EINVAL;
3048         }
3049
3050         switch (nfilter->src_ip_mask) {
3051         case UINT32_MAX:
3052                 bfilter->src_ipaddr_mask[0] = -1;
3053                 bfilter->src_ipaddr[0] = nfilter->src_ip;
3054                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
3055                         NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3056                 break;
3057         default:
3058                 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
3059                 return -EINVAL;
3060         }
3061
3062         switch (nfilter->src_port_mask) {
3063         case UINT16_MAX:
3064                 bfilter->src_port_mask = -1;
3065                 bfilter->src_port = nfilter->src_port;
3066                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
3067                         NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3068                 break;
3069         default:
3070                 PMD_DRV_LOG(ERR, "invalid src_port mask.");
3071                 return -EINVAL;
3072         }
3073
3074         bfilter->enables = en;
3075         return 0;
3076 }
3077
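/*
 * Search every VNIC for an ntuple filter with an identical match key.
 * On a hit the owning VNIC is optionally returned through 'mvnic'.
 */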
static struct bnxt_filter_info *
3079 bnxt_match_ntuple_filter(struct bnxt *bp,
3080                          struct bnxt_filter_info *bfilter,
3081                          struct bnxt_vnic_info **mvnic)
3082 {
3083         struct bnxt_filter_info *mfilter = NULL;
3084         int i;
3085
3086         for (i = bp->nr_vnics - 1; i >= 0; i--) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                STAILQ_FOREACH(mfilter, &vnic->filter, next) {
3089                         if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
3090                             bfilter->src_ipaddr_mask[0] ==
3091                             mfilter->src_ipaddr_mask[0] &&
3092                             bfilter->src_port == mfilter->src_port &&
3093                             bfilter->src_port_mask == mfilter->src_port_mask &&
3094                             bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
3095                             bfilter->dst_ipaddr_mask[0] ==
3096                             mfilter->dst_ipaddr_mask[0] &&
3097                             bfilter->dst_port == mfilter->dst_port &&
3098                             bfilter->dst_port_mask == mfilter->dst_port_mask &&
3099                             bfilter->flags == mfilter->flags &&
3100                             bfilter->enables == mfilter->enables) {
3101                                 if (mvnic)
3102                                         *mvnic = vnic;
3103                                 return mfilter;
3104                         }
3105                 }
3106         }
3107         return NULL;
3108 }
3109
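/*
 * Core add/delete handler for 5-tuple filters. When an ADD matches an
 * existing filter that differs only in destination queue, the existing
 * filter is retargeted to the new queue instead of allocating a
 * duplicate.
 */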
3110 static int
3111 bnxt_cfg_ntuple_filter(struct bnxt *bp,
3112                        struct rte_eth_ntuple_filter *nfilter,
3113                        enum rte_filter_op filter_op)
3114 {
3115         struct bnxt_filter_info *bfilter, *mfilter, *filter1;
3116         struct bnxt_vnic_info *vnic, *vnic0, *mvnic;
3117         int ret;
3118
3119         if (nfilter->flags != RTE_5TUPLE_FLAGS) {
3120                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
3121                 return -EINVAL;
3122         }
3123
3124         if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
3125                 PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n");
3126                 return -EINVAL;
3127         }
3128
3129         bfilter = bnxt_get_unused_filter(bp);
3130         if (bfilter == NULL) {
3131                 PMD_DRV_LOG(ERR,
3132                         "Not enough resources for a new filter.\n");
3133                 return -ENOMEM;
3134         }
3135         ret = parse_ntuple_filter(bp, nfilter, bfilter);
3136         if (ret < 0)
3137                 goto free_filter;
3138
3139         vnic = &bp->vnic_info[nfilter->queue];
3140         vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
3141         filter1 = STAILQ_FIRST(&vnic0->filter);
3142         if (filter1 == NULL) {
3143                 ret = -EINVAL;
3144                 goto free_filter;
3145         }
3146
3147         bfilter->dst_id = vnic->fw_vnic_id;
3148         bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
3149         bfilter->enables |=
3150                 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
3151         bfilter->ethertype = 0x800;
3152         bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3153
3154         mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic);
3155
3156         if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
3157             bfilter->dst_id == mfilter->dst_id) {
3158                 PMD_DRV_LOG(ERR, "filter exists.\n");
3159                 ret = -EEXIST;
3160                 goto free_filter;
3161         } else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
3162                    bfilter->dst_id != mfilter->dst_id) {
3163                 mfilter->dst_id = vnic->fw_vnic_id;
3164                 ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter);
3165                 STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next);
3166                 STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next);
3167                 PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n");
3168                 PMD_DRV_LOG(ERR, " Updated it to the new destination queue\n");
3169                 goto free_filter;
3170         }
3171         if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
3172                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3173                 ret = -ENOENT;
3174                 goto free_filter;
3175         }
3176
3177         if (filter_op == RTE_ETH_FILTER_ADD) {
3178                 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
3179                 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
3180                 if (ret)
3181                         goto free_filter;
3182                 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
3183         } else {
3184                 if (mfilter == NULL) {
3185                         /* This should not happen. But for Coverity! */
3186                         ret = -ENOENT;
3187                         goto free_filter;
3188                 }
3189                 ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);
3190
3191                 STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next);
3192                 bnxt_free_filter(bp, mfilter);
3193                 bnxt_free_filter(bp, bfilter);
3194         }
3195
3196         return 0;
3197 free_filter:
3198         bnxt_free_filter(bp, bfilter);
3199         return ret;
3200 }
3201
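/* filter_ops handler for RTE_ETH_FILTER_NTUPLE; dispatches ADD and
 * DELETE requests to bnxt_cfg_ntuple_filter().
 */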
3202 static int
3203 bnxt_ntuple_filter(struct rte_eth_dev *dev,
3204                         enum rte_filter_op filter_op,
3205                         void *arg)
3206 {
3207         struct bnxt *bp = dev->data->dev_private;
3208         int ret;
3209
3210         if (filter_op == RTE_ETH_FILTER_NOP)
3211                 return 0;
3212
3213         if (arg == NULL) {
3214                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
3215                             filter_op);
3216                 return -EINVAL;
3217         }
3218
3219         switch (filter_op) {
        case RTE_ETH_FILTER_ADD:
        case RTE_ETH_FILTER_DELETE:
                /* FALLTHROUGH */
                ret = bnxt_cfg_ntuple_filter(bp,
                        (struct rte_eth_ntuple_filter *)arg,
                        filter_op);
                break;
3230         default:
3231                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
3232                 ret = -EINVAL;
3233                 break;
3234         }
3235         return ret;
3236 }
3237
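/*
 * Convert a flow director filter into a bnxt_filter_info. An exact
 * match key is built per flow type (IPv4/IPv6 with TCP/UDP/other, L2
 * payload, VXLAN/NVGRE tunnels); a REJECT action maps to the HWRM
 * ntuple DROP flag.
 */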
3238 static int
3239 bnxt_parse_fdir_filter(struct bnxt *bp,
3240                        struct rte_eth_fdir_filter *fdir,
3241                        struct bnxt_filter_info *filter)
3242 {
3243         enum rte_fdir_mode fdir_mode =
3244                 bp->eth_dev->data->dev_conf.fdir_conf.mode;
3245         struct bnxt_vnic_info *vnic0, *vnic;
3246         struct bnxt_filter_info *filter1;
3247         uint32_t en = 0;
3249
3250         if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3251                 return -EINVAL;
3252
3253         filter->l2_ovlan = fdir->input.flow_ext.vlan_tci;
3254         en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
3255
3256         switch (fdir->input.flow_type) {
3257         case RTE_ETH_FLOW_IPV4:
3258         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
3259                 /* FALLTHROUGH */
3260                 filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip;
3261                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3262                 filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip;
3263                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3264                 filter->ip_protocol = fdir->input.flow.ip4_flow.proto;
3265                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3266                 filter->ip_addr_type =
3267                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
3268                 filter->src_ipaddr_mask[0] = 0xffffffff;
3269                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3270                 filter->dst_ipaddr_mask[0] = 0xffffffff;
3271                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3272                 filter->ethertype = 0x800;
3273                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3274                 break;
3275         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
3276                 filter->src_port = fdir->input.flow.tcp4_flow.src_port;
3277                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3278                 filter->dst_port = fdir->input.flow.tcp4_flow.dst_port;
3279                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3280                 filter->dst_port_mask = 0xffff;
3281                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3282                 filter->src_port_mask = 0xffff;
3283                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3284                 filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip;
3285                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3286                 filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip;
3287                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3288                 filter->ip_protocol = 6;
3289                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3290                 filter->ip_addr_type =
3291                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
3292                 filter->src_ipaddr_mask[0] = 0xffffffff;
3293                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3294                 filter->dst_ipaddr_mask[0] = 0xffffffff;
3295                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3296                 filter->ethertype = 0x800;
3297                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3298                 break;
3299         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
3300                 filter->src_port = fdir->input.flow.udp4_flow.src_port;
3301                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3302                 filter->dst_port = fdir->input.flow.udp4_flow.dst_port;
3303                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3304                 filter->dst_port_mask = 0xffff;
3305                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3306                 filter->src_port_mask = 0xffff;
3307                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3308                 filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip;
3309                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3310                 filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip;
3311                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3312                 filter->ip_protocol = 17;
3313                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3314                 filter->ip_addr_type =
3315                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
3316                 filter->src_ipaddr_mask[0] = 0xffffffff;
3317                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3318                 filter->dst_ipaddr_mask[0] = 0xffffffff;
3319                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3320                 filter->ethertype = 0x800;
3321                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3322                 break;
3323         case RTE_ETH_FLOW_IPV6:
3324         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
3325                 /* FALLTHROUGH */
3326                 filter->ip_addr_type =
3327                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
3328                 filter->ip_protocol = fdir->input.flow.ipv6_flow.proto;
3329                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3330                 rte_memcpy(filter->src_ipaddr,
3331                            fdir->input.flow.ipv6_flow.src_ip, 16);
3332                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3333                 rte_memcpy(filter->dst_ipaddr,
3334                            fdir->input.flow.ipv6_flow.dst_ip, 16);
3335                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3336                 memset(filter->dst_ipaddr_mask, 0xff, 16);
3337                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3338                 memset(filter->src_ipaddr_mask, 0xff, 16);
3339                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3340                 filter->ethertype = 0x86dd;
3341                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3342                 break;
3343         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
3344                 filter->src_port = fdir->input.flow.tcp6_flow.src_port;
3345                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3346                 filter->dst_port = fdir->input.flow.tcp6_flow.dst_port;
3347                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3348                 filter->dst_port_mask = 0xffff;
3349                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3350                 filter->src_port_mask = 0xffff;
3351                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3352                 filter->ip_addr_type =
3353                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
3354                 filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto;
3355                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3356                 rte_memcpy(filter->src_ipaddr,
3357                            fdir->input.flow.tcp6_flow.ip.src_ip, 16);
3358                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3359                 rte_memcpy(filter->dst_ipaddr,
3360                            fdir->input.flow.tcp6_flow.ip.dst_ip, 16);
3361                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3362                 memset(filter->dst_ipaddr_mask, 0xff, 16);
3363                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3364                 memset(filter->src_ipaddr_mask, 0xff, 16);
3365                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3366                 filter->ethertype = 0x86dd;
3367                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3368                 break;
3369         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
3370                 filter->src_port = fdir->input.flow.udp6_flow.src_port;
3371                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3372                 filter->dst_port = fdir->input.flow.udp6_flow.dst_port;
3373                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3374                 filter->dst_port_mask = 0xffff;
3375                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3376                 filter->src_port_mask = 0xffff;
3377                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3378                 filter->ip_addr_type =
3379                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
3380                 filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto;
3381                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3382                 rte_memcpy(filter->src_ipaddr,
3383                            fdir->input.flow.udp6_flow.ip.src_ip, 16);
3384                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3385                 rte_memcpy(filter->dst_ipaddr,
3386                            fdir->input.flow.udp6_flow.ip.dst_ip, 16);
3387                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3388                 memset(filter->dst_ipaddr_mask, 0xff, 16);
3389                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3390                 memset(filter->src_ipaddr_mask, 0xff, 16);
3391                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3392                 filter->ethertype = 0x86dd;
3393                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3394                 break;
3395         case RTE_ETH_FLOW_L2_PAYLOAD:
3396                 filter->ethertype = fdir->input.flow.l2_flow.ether_type;
3397                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3398                 break;
3399         case RTE_ETH_FLOW_VXLAN:
3400                 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3401                         return -EINVAL;
3402                 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
3403                 filter->tunnel_type =
3404                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
3405                 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
3406                 break;
3407         case RTE_ETH_FLOW_NVGRE:
3408                 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3409                         return -EINVAL;
3410                 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
3411                 filter->tunnel_type =
3412                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
3413                 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
3414                 break;
3415         case RTE_ETH_FLOW_UNKNOWN:
3416         case RTE_ETH_FLOW_RAW:
3417         case RTE_ETH_FLOW_FRAG_IPV4:
3418         case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
3419         case RTE_ETH_FLOW_FRAG_IPV6:
3420         case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
3421         case RTE_ETH_FLOW_IPV6_EX:
3422         case RTE_ETH_FLOW_IPV6_TCP_EX:
3423         case RTE_ETH_FLOW_IPV6_UDP_EX:
3424         case RTE_ETH_FLOW_GENEVE:
3425                 /* FALLTHROUGH */
3426         default:
3427                 return -EINVAL;
3428         }
3429
        if (fdir->action.rx_queue >= bp->rx_nr_rings) {
                PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue);
                return -EINVAL;
        }

        vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
        vnic = &bp->vnic_info[fdir->action.rx_queue];
3436
3437         if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
3438                 rte_memcpy(filter->dst_macaddr,
3439                         fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6);
                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
3441         }
3442
        if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) {
                filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
                filter1 = STAILQ_FIRST(&vnic0->filter);
        } else {
                filter->dst_id = vnic->fw_vnic_id;
                /* Use the default VNIC's L2 filter unless the request
                 * carries a destination MAC (perfect MAC VLAN mode), in
                 * which case get an L2 filter for it on the target VNIC.
                 */
                if (rte_is_zero_ether_addr(
                                (struct rte_ether_addr *)filter->dst_macaddr))
                        filter1 = STAILQ_FIRST(&vnic0->filter);
                else
                        filter1 = bnxt_get_l2_filter(bp, filter, vnic);
        }
3455
3456         if (filter1 == NULL)
3457                 return -EINVAL;
3458
3459         en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
3460         filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
3461
3462         filter->enables = en;
3463
3464         return 0;
3465 }
3466
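/* Look up a previously programmed flow director filter with an
 * identical match key across all VNICs; also return the owning VNIC
 * through 'mvnic' when requested.
 */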
3467 static struct bnxt_filter_info *
3468 bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf,
3469                 struct bnxt_vnic_info **mvnic)
3470 {
3471         struct bnxt_filter_info *mf = NULL;
3472         int i;
3473
3474         for (i = bp->nr_vnics - 1; i >= 0; i--) {
3475                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3476
3477                 STAILQ_FOREACH(mf, &vnic->filter, next) {
3478                         if (mf->filter_type == nf->filter_type &&
3479                             mf->flags == nf->flags &&
3480                             mf->src_port == nf->src_port &&
3481                             mf->src_port_mask == nf->src_port_mask &&
3482                             mf->dst_port == nf->dst_port &&
3483                             mf->dst_port_mask == nf->dst_port_mask &&
3484                             mf->ip_protocol == nf->ip_protocol &&
3485                             mf->ip_addr_type == nf->ip_addr_type &&
3486                             mf->ethertype == nf->ethertype &&
3487                             mf->vni == nf->vni &&
3488                             mf->tunnel_type == nf->tunnel_type &&
3489                             mf->l2_ovlan == nf->l2_ovlan &&
3490                             mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
3491                             mf->l2_ivlan == nf->l2_ivlan &&
3492                             mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
3493                             !memcmp(mf->l2_addr, nf->l2_addr,
3494                                     RTE_ETHER_ADDR_LEN) &&
3495                             !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
3496                                     RTE_ETHER_ADDR_LEN) &&
3497                             !memcmp(mf->src_macaddr, nf->src_macaddr,
3498                                     RTE_ETHER_ADDR_LEN) &&
3499                             !memcmp(mf->dst_macaddr, nf->dst_macaddr,
3500                                     RTE_ETHER_ADDR_LEN) &&
3501                             !memcmp(mf->src_ipaddr, nf->src_ipaddr,
3502                                     sizeof(nf->src_ipaddr)) &&
3503                             !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
3504                                     sizeof(nf->src_ipaddr_mask)) &&
3505                             !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
3506                                     sizeof(nf->dst_ipaddr)) &&
3507                             !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
3508                                     sizeof(nf->dst_ipaddr_mask))) {
3509                                 if (mvnic)
3510                                         *mvnic = vnic;
3511                                 return mf;
3512                         }
3513                 }
3514         }
3515         return NULL;
3516 }
3517
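/*
 * filter_ops handler for RTE_ETH_FILTER_FDIR. ADD and DELETE parse the
 * request into an ntuple filter and program or clear it via HWRM;
 * FLUSH walks every VNIC and clears all ntuple filters.
 */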
3518 static int
3519 bnxt_fdir_filter(struct rte_eth_dev *dev,
3520                  enum rte_filter_op filter_op,
3521                  void *arg)
3522 {
3523         struct bnxt *bp = dev->data->dev_private;
        struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg;
3525         struct bnxt_filter_info *filter, *match;
3526         struct bnxt_vnic_info *vnic, *mvnic;
3527         int ret = 0, i;
3528
3529         if (filter_op == RTE_ETH_FILTER_NOP)
3530                 return 0;
3531
3532         if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
3533                 return -EINVAL;
3534
3535         switch (filter_op) {
3536         case RTE_ETH_FILTER_ADD:
3537         case RTE_ETH_FILTER_DELETE:
3538                 /* FALLTHROUGH */
3539                 filter = bnxt_get_unused_filter(bp);
3540                 if (filter == NULL) {
3541                         PMD_DRV_LOG(ERR,
3542                                 "Not enough resources for a new flow.\n");
3543                         return -ENOMEM;
3544                 }
3545
3546                 ret = bnxt_parse_fdir_filter(bp, fdir, filter);
3547                 if (ret != 0)
3548                         goto free_filter;
3549                 filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
3550
3551                 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3552                         vnic = &bp->vnic_info[0];
3553                 else
3554                         vnic = &bp->vnic_info[fdir->action.rx_queue];
3555
3556                 match = bnxt_match_fdir(bp, filter, &mvnic);
3557                 if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
3558                         if (match->dst_id == vnic->fw_vnic_id) {
3559                                 PMD_DRV_LOG(ERR, "Flow already exists.\n");
3560                                 ret = -EEXIST;
3561                                 goto free_filter;
3562                         } else {
3563                                 match->dst_id = vnic->fw_vnic_id;
3564                                 ret = bnxt_hwrm_set_ntuple_filter(bp,
3565                                                                   match->dst_id,
3566                                                                   match);
3567                                 STAILQ_REMOVE(&mvnic->filter, match,
3568                                               bnxt_filter_info, next);
3569                                 STAILQ_INSERT_TAIL(&vnic->filter, match, next);
3570                                 PMD_DRV_LOG(ERR,
3571                                         "Filter with matching pattern exist\n");
3572                                 PMD_DRV_LOG(ERR,
3573                                         "Updated it to new destination q\n");
3574                                 goto free_filter;
3575                         }
3576                 }
3577                 if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
3578                         PMD_DRV_LOG(ERR, "Flow does not exist.\n");
3579                         ret = -ENOENT;
3580                         goto free_filter;
3581                 }
3582
3583                 if (filter_op == RTE_ETH_FILTER_ADD) {
3584                         ret = bnxt_hwrm_set_ntuple_filter(bp,
3585                                                           filter->dst_id,
3586                                                           filter);
3587                         if (ret)
3588                                 goto free_filter;
3589                         STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
3590                 } else {
3591                         ret = bnxt_hwrm_clear_ntuple_filter(bp, match);
3592                         STAILQ_REMOVE(&vnic->filter, match,
3593                                       bnxt_filter_info, next);
3594                         bnxt_free_filter(bp, match);
3595                         bnxt_free_filter(bp, filter);
3596                 }
3597                 break;
3598         case RTE_ETH_FILTER_FLUSH:
3599                 for (i = bp->nr_vnics - 1; i >= 0; i--) {
3600                         struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3601
                        /* STAILQ_FOREACH() is not safe when the current
                         * entry is removed; walk the list manually and
                         * save the next pointer before unlinking, then
                         * release each cleared filter.
                         */
                        filter = STAILQ_FIRST(&vnic->filter);
                        while (filter != NULL) {
                                struct bnxt_filter_info *next_flt =
                                        STAILQ_NEXT(filter, next);

                                if (filter->filter_type ==
                                    HWRM_CFA_NTUPLE_FILTER) {
                                        ret = bnxt_hwrm_clear_ntuple_filter(bp,
                                                                        filter);
                                        STAILQ_REMOVE(&vnic->filter, filter,
                                                      bnxt_filter_info, next);
                                        bnxt_free_filter(bp, filter);
                                }
                                filter = next_flt;
                        }
3612                 }
3613                 return ret;
3614         case RTE_ETH_FILTER_UPDATE:
3615         case RTE_ETH_FILTER_STATS:
3616         case RTE_ETH_FILTER_INFO:
3617                 PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op);
3618                 break;
3619         default:
3620                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
3621                 ret = -EINVAL;
3622                 break;
3623         }
3624         return ret;
3625
3626 free_filter:
3627         bnxt_free_filter(bp, filter);
3628         return ret;
3629 }
3630
3631 static int
3632 bnxt_filter_ctrl_op(struct rte_eth_dev *dev,
3633                     enum rte_filter_type filter_type,
3634                     enum rte_filter_op filter_op, void *arg)
3635 {
3636         struct bnxt *bp = dev->data->dev_private;
3637         int ret = 0;
3638
        ret = is_bnxt_in_error(bp);
3640         if (ret)
3641                 return ret;
3642
3643         switch (filter_type) {
3644         case RTE_ETH_FILTER_TUNNEL:
3645                 PMD_DRV_LOG(ERR,
3646                         "filter type: %d: To be implemented\n", filter_type);
3647                 break;
3648         case RTE_ETH_FILTER_FDIR:
3649                 ret = bnxt_fdir_filter(dev, filter_op, arg);
3650                 break;
3651         case RTE_ETH_FILTER_NTUPLE:
3652                 ret = bnxt_ntuple_filter(dev, filter_op, arg);
3653                 break;
3654         case RTE_ETH_FILTER_ETHERTYPE:
3655                 ret = bnxt_ethertype_filter(dev, filter_op, arg);
3656                 break;
3657         case RTE_ETH_FILTER_GENERIC:
3658                 if (filter_op != RTE_ETH_FILTER_GET)
3659                         return -EINVAL;
3660                 if (BNXT_TRUFLOW_EN(bp))
3661                         *(const void **)arg = &bnxt_ulp_rte_flow_ops;
3662                 else
3663                         *(const void **)arg = &bnxt_flow_ops;
3664                 break;
3665         default:
3666                 PMD_DRV_LOG(ERR,
3667                         "Filter type (%d) not supported", filter_type);
3668                 ret = -EINVAL;
3669                 break;
3670         }
3671         return ret;
3672 }
3673
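/* Report the packet types the Rx burst handler can classify, or NULL
 * if no Rx handler is installed.
 */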
3674 static const uint32_t *
3675 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
3676 {
3677         static const uint32_t ptypes[] = {
3678                 RTE_PTYPE_L2_ETHER_VLAN,
3679                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
3680                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
3681                 RTE_PTYPE_L4_ICMP,
3682                 RTE_PTYPE_L4_TCP,
3683                 RTE_PTYPE_L4_UDP,
3684                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
3685                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
3686                 RTE_PTYPE_INNER_L4_ICMP,
3687                 RTE_PTYPE_INNER_L4_TCP,
3688                 RTE_PTYPE_INNER_L4_UDP,
3689                 RTE_PTYPE_UNKNOWN
3690         };
3691
3692         if (!dev->rx_pkt_burst)
3693                 return NULL;
3694
3695         return ptypes;
3696 }
3697
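/*
 * Map a group of GRC registers through one 4KB BAR0 window. All
 * registers must fall within the same 4KB page (-ERANGE otherwise);
 * the page base is then programmed into the requested window.
 */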
3698 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
3699                          int reg_win)
3700 {
3701         uint32_t reg_base = *reg_arr & 0xfffff000;
3702         uint32_t win_off;
3703         int i;
3704
3705         for (i = 0; i < count; i++) {
3706                 if ((reg_arr[i] & 0xfffff000) != reg_base)
3707                         return -ERANGE;
3708         }
3709         win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
3710         rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off);
3711         return 0;
3712 }
3713
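/* Map the PTP Rx/Tx timestamp registers through GRC windows 5 and 6
 * and record the window-relative offsets (0x5000/0x6000 regions).
 */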
3714 static int bnxt_map_ptp_regs(struct bnxt *bp)
3715 {
3716         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3717         uint32_t *reg_arr;
3718         int rc, i;
3719
3720         reg_arr = ptp->rx_regs;
3721         rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5);
3722         if (rc)
3723                 return rc;
3724
3725         reg_arr = ptp->tx_regs;
3726         rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6);
3727         if (rc)
3728                 return rc;
3729
3730         for (i = 0; i < BNXT_PTP_RX_REGS; i++)
3731                 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff);
3732
3733         for (i = 0; i < BNXT_PTP_TX_REGS; i++)
3734                 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff);
3735
3736         return 0;
3737 }
3738
3739 static void bnxt_unmap_ptp_regs(struct bnxt *bp)
3740 {
3741         rte_write32(0, (uint8_t *)bp->bar0 +
3742                          BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16);
3743         rte_write32(0, (uint8_t *)bp->bar0 +
3744                          BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20);
3745 }
3746
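/* Read the free-running 64-bit sync-time counter as two little-endian
 * 32-bit halves.
 */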
3747 static uint64_t bnxt_cc_read(struct bnxt *bp)
3748 {
3749         uint64_t ns;
3750
3751         ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3752                               BNXT_GRCPF_REG_SYNC_TIME));
3753         ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3754                                           BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32;
3755         return ns;
3756 }
3757
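/* Pop the latest Tx PTP timestamp from the FIFO; -EAGAIN if the FIFO
 * is empty.
 */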
3758 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts)
3759 {
3760         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3761         uint32_t fifo;
3762
3763         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3764                                 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
3765         if (fifo & BNXT_PTP_TX_FIFO_EMPTY)
3766                 return -EAGAIN;
3767
3768         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3769                                 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
3770         *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3771                                 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L]));
3772         *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3773                                 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32;
3774
3775         return 0;
3776 }
3777
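/* Fetch a pending Rx PTP timestamp for this port, advancing the FIFO;
 * returns -EAGAIN when nothing is pending and -EBUSY if the FIFO does
 * not drain after the advance.
 */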
3778 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
3779 {
3780         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3781         struct bnxt_pf_info *pf = bp->pf;
3782         uint16_t port_id;
3783         uint32_t fifo;
3784
3785         if (!ptp)
3786                 return -ENODEV;
3787
3788         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3789                                 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
3790         if (!(fifo & BNXT_PTP_RX_FIFO_PENDING))
3791                 return -EAGAIN;
3792
3793         port_id = pf->port_id;
3794         rte_write32(1 << port_id, (uint8_t *)bp->bar0 +
3795                ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]);
3796
3797         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3798                                    ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
3799         if (fifo & BNXT_PTP_RX_FIFO_PENDING) {
3800 /*              bnxt_clr_rx_ts(bp);       TBD  */
3801                 return -EBUSY;
3802         }
3803
3804         *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3805                                 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]));
3806         *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3807                                 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32;
3808
3809         return 0;
3810 }
3811
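/* Set the device time by loading the timecounter with the new
 * nanosecond value.
 */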
3812 static int
3813 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
3814 {
3815         uint64_t ns;
3816         struct bnxt *bp = dev->data->dev_private;
3817         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3818
3819         if (!ptp)
3820                 return 0;
3821
3822         ns = rte_timespec_to_ns(ts);
3823         /* Set the timecounters to a new value. */
3824         ptp->tc.nsec = ns;
3825
3826         return 0;
3827 }
3828
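/* Read the current device time: query firmware on Thor-based chips,
 * otherwise read the free-running counter, then convert cycles to a
 * timespec through the timecounter.
 */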
3829 static int
3830 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
3831 {
3832         struct bnxt *bp = dev->data->dev_private;
3833         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3834         uint64_t ns, systime_cycles = 0;
3835         int rc = 0;
3836
3837         if (!ptp)
3838                 return 0;
3839
3840         if (BNXT_CHIP_THOR(bp))
3841                 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
3842                                              &systime_cycles);
3843         else
3844                 systime_cycles = bnxt_cc_read(bp);
3845
3846         ns = rte_timecounter_update(&ptp->tc, systime_cycles);
3847         *ts = rte_ns_to_timespec(ns);
3848
3849         return rc;
3850 }
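
/*
 * Enable PTP timestamping: turn on Rx/Tx timestamp capture via HWRM,
 * (re)initialize the timecounters, and on non-Thor chips map the PTP
 * registers through GRC windows.
 */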
3851 static int
3852 bnxt_timesync_enable(struct rte_eth_dev *dev)
3853 {
3854         struct bnxt *bp = dev->data->dev_private;
3855         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3856         uint32_t shift = 0;
3857         int rc;
3858
3859         if (!ptp)
3860                 return 0;
3861
3862         ptp->rx_filter = 1;
3863         ptp->tx_tstamp_en = 1;
3864         ptp->rxctl = BNXT_PTP_MSG_EVENTS;
3865
3866         rc = bnxt_hwrm_ptp_cfg(bp);
3867         if (rc)
3868                 return rc;
3869
3870         memset(&ptp->tc, 0, sizeof(struct rte_timecounter));
3871         memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
3872         memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
3873
3874         ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3875         ptp->tc.cc_shift = shift;
3876         ptp->tc.nsec_mask = (1ULL << shift) - 1;
3877
3878         ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3879         ptp->rx_tstamp_tc.cc_shift = shift;
3880         ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
3881
3882         ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3883         ptp->tx_tstamp_tc.cc_shift = shift;
3884         ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
3885
3886         if (!BNXT_CHIP_THOR(bp))
3887                 bnxt_map_ptp_regs(bp);
3888
3889         return 0;
3890 }
3891
3892 static int
3893 bnxt_timesync_disable(struct rte_eth_dev *dev)
3894 {
3895         struct bnxt *bp = dev->data->dev_private;
3896         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3897
3898         if (!ptp)
3899                 return 0;
3900
3901         ptp->rx_filter = 0;
3902         ptp->tx_tstamp_en = 0;
3903         ptp->rxctl = 0;
3904
3905         bnxt_hwrm_ptp_cfg(bp);
3906
3907         if (!BNXT_CHIP_THOR(bp))
3908                 bnxt_unmap_ptp_regs(bp);
3909
3910         return 0;
3911 }
3912
3913 static int
3914 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
3915                                  struct timespec *timestamp,
3916                                  uint32_t flags __rte_unused)
3917 {
3918         struct bnxt *bp = dev->data->dev_private;
3919         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3920         uint64_t rx_tstamp_cycles = 0;
3921         uint64_t ns;
3922
3923         if (!ptp)
3924                 return 0;
3925
3926         if (BNXT_CHIP_THOR(bp))
3927                 rx_tstamp_cycles = ptp->rx_timestamp;
3928         else
3929                 bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
3930
3931         ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles);
3932         *timestamp = rte_ns_to_timespec(ns);
        return 0;
3934 }
3935
3936 static int
3937 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
3938                                  struct timespec *timestamp)
3939 {
3940         struct bnxt *bp = dev->data->dev_private;
3941         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3942         uint64_t tx_tstamp_cycles = 0;
3943         uint64_t ns;
3944         int rc = 0;
3945
3946         if (!ptp)
3947                 return 0;
3948
3949         if (BNXT_CHIP_THOR(bp))
3950                 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX,
3951                                              &tx_tstamp_cycles);
3952         else
3953                 rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles);
3954
3955         ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles);
3956         *timestamp = rte_ns_to_timespec(ns);
3957
3958         return rc;
3959 }
3960
3961 static int
3962 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
3963 {
3964         struct bnxt *bp = dev->data->dev_private;
3965         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3966
3967         if (!ptp)
3968                 return 0;
3969
3970         ptp->tc.nsec += delta;
3971
3972         return 0;
3973 }
3974
3975 static int
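/* The EEPROM length is reported as the NVM directory entry count times
 * the directory entry size.
 */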
3976 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
3977 {
3978         struct bnxt *bp = dev->data->dev_private;
3979         int rc;
3980         uint32_t dir_entries;
3981         uint32_t entry_length;
3982
3983         rc = is_bnxt_in_error(bp);
3984         if (rc)
3985                 return rc;
3986
3987         PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n",
3988                     bp->pdev->addr.domain, bp->pdev->addr.bus,
3989                     bp->pdev->addr.devid, bp->pdev->addr.function);
3990
3991         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3992         if (rc != 0)
3993                 return rc;
3994
3995         return dir_entries * entry_length;
3996 }
3997
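/*
 * Read NVM contents. An offset of zero is a special request for the
 * NVM directory itself; otherwise the top byte of 'offset' selects a
 * 1-based directory index and the low 24 bits give the byte offset
 * within that item.
 */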
3998 static int
3999 bnxt_get_eeprom_op(struct rte_eth_dev *dev,
4000                 struct rte_dev_eeprom_info *in_eeprom)
4001 {
4002         struct bnxt *bp = dev->data->dev_private;
4003         uint32_t index;
4004         uint32_t offset;
4005         int rc;
4006
4007         rc = is_bnxt_in_error(bp);
4008         if (rc)
4009                 return rc;
4010
4011         PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n",
4012                     bp->pdev->addr.domain, bp->pdev->addr.bus,
4013                     bp->pdev->addr.devid, bp->pdev->addr.function,
4014                     in_eeprom->offset, in_eeprom->length);
4015
4016         if (in_eeprom->offset == 0) /* special offset value to get directory */
4017                 return bnxt_get_nvram_directory(bp, in_eeprom->length,
4018                                                 in_eeprom->data);
4019
4020         index = in_eeprom->offset >> 24;
4021         offset = in_eeprom->offset & 0xffffff;
4022
4023         if (index != 0)
4024                 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset,
4025                                            in_eeprom->length, in_eeprom->data);
4026
4027         return 0;
4028 }
4029
4030 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
4031 {
4032         switch (dir_type) {
4033         case BNX_DIR_TYPE_CHIMP_PATCH:
4034         case BNX_DIR_TYPE_BOOTCODE:
4035         case BNX_DIR_TYPE_BOOTCODE_2:
4036         case BNX_DIR_TYPE_APE_FW:
4037         case BNX_DIR_TYPE_APE_PATCH:
4038         case BNX_DIR_TYPE_KONG_FW:
4039         case BNX_DIR_TYPE_KONG_PATCH:
4040         case BNX_DIR_TYPE_BONO_FW:
4041         case BNX_DIR_TYPE_BONO_PATCH:
4042                 /* FALLTHROUGH */
4043                 return true;
4044         }
4045
4046         return false;
4047 }
4048
4049 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
4050 {
4051         switch (dir_type) {
4052         case BNX_DIR_TYPE_AVS:
4053         case BNX_DIR_TYPE_EXP_ROM_MBA:
4054         case BNX_DIR_TYPE_PCIE:
4055         case BNX_DIR_TYPE_TSCF_UCODE:
4056         case BNX_DIR_TYPE_EXT_PHY:
4057         case BNX_DIR_TYPE_CCM:
4058         case BNX_DIR_TYPE_ISCSI_BOOT:
4059         case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
4060         case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
4061                 /* FALLTHROUGH */
4062                 return true;
4063         }
4064
4065         return false;
4066 }
4067
4068 static bool bnxt_dir_type_is_executable(uint16_t dir_type)
4069 {
4070         return bnxt_dir_type_is_ape_bin_format(dir_type) ||
4071                 bnxt_dir_type_is_other_exec_format(dir_type);
4072 }
4073
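/*
 * Write NVM contents. The 'magic' and 'offset' fields are overloaded:
 * magic >> 16 is the directory entry type, with 0xffff selecting
 * directory operations such as erase. For a flash write, the ordinal,
 * extension and attribute words are packed into offset >> 16,
 * magic & 0xffff and offset & 0xffff respectively. Executable firmware
 * images are refused here.
 */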
4074 static int
4075 bnxt_set_eeprom_op(struct rte_eth_dev *dev,
4076                 struct rte_dev_eeprom_info *in_eeprom)
4077 {
4078         struct bnxt *bp = dev->data->dev_private;
4079         uint8_t index, dir_op;
4080         uint16_t type, ext, ordinal, attr;
4081         int rc;
4082
4083         rc = is_bnxt_in_error(bp);
4084         if (rc)
4085                 return rc;
4086
4087         PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n",
4088                     bp->pdev->addr.domain, bp->pdev->addr.bus,
4089                     bp->pdev->addr.devid, bp->pdev->addr.function,
4090                     in_eeprom->offset, in_eeprom->length);
4091
4092         if (!BNXT_PF(bp)) {
4093                 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n");
4094                 return -EINVAL;
4095         }
4096
4097         type = in_eeprom->magic >> 16;
4098
4099         if (type == 0xffff) { /* special value for directory operations */
4100                 index = in_eeprom->magic & 0xff;
4101                 dir_op = in_eeprom->magic >> 8;
4102                 if (index == 0)
4103                         return -EINVAL;
4104                 switch (dir_op) {
4105                 case 0x0e: /* erase */
4106                         if (in_eeprom->offset != ~in_eeprom->magic)
4107                                 return -EINVAL;
4108                         return bnxt_hwrm_erase_nvram_directory(bp, index - 1);
4109                 default:
4110                         return -EINVAL;
4111                 }
4112         }
4113
4114         /* Create or re-write an NVM item: */
        if (bnxt_dir_type_is_executable(type))
4116                 return -EOPNOTSUPP;
4117         ext = in_eeprom->magic & 0xffff;
4118         ordinal = in_eeprom->offset >> 16;
4119         attr = in_eeprom->offset & 0xffff;
4120
4121         return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
4122                                      in_eeprom->data, in_eeprom->length);
4123 }
4124
4125 /*
4126  * Initialization
4127  */
4128
4129 static const struct eth_dev_ops bnxt_dev_ops = {
4130         .dev_infos_get = bnxt_dev_info_get_op,
4131         .dev_close = bnxt_dev_close_op,
4132         .dev_configure = bnxt_dev_configure_op,
4133         .dev_start = bnxt_dev_start_op,
4134         .dev_stop = bnxt_dev_stop_op,
4135         .dev_set_link_up = bnxt_dev_set_link_up_op,
4136         .dev_set_link_down = bnxt_dev_set_link_down_op,
4137         .stats_get = bnxt_stats_get_op,
4138         .stats_reset = bnxt_stats_reset_op,
4139         .rx_queue_setup = bnxt_rx_queue_setup_op,
4140         .rx_queue_release = bnxt_rx_queue_release_op,
4141         .tx_queue_setup = bnxt_tx_queue_setup_op,
4142         .tx_queue_release = bnxt_tx_queue_release_op,
4143         .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
4144         .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
4145         .reta_update = bnxt_reta_update_op,
4146         .reta_query = bnxt_reta_query_op,
4147         .rss_hash_update = bnxt_rss_hash_update_op,
4148         .rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
4149         .link_update = bnxt_link_update_op,
4150         .promiscuous_enable = bnxt_promiscuous_enable_op,
4151         .promiscuous_disable = bnxt_promiscuous_disable_op,
4152         .allmulticast_enable = bnxt_allmulticast_enable_op,
4153         .allmulticast_disable = bnxt_allmulticast_disable_op,
4154         .mac_addr_add = bnxt_mac_addr_add_op,
4155         .mac_addr_remove = bnxt_mac_addr_remove_op,
4156         .flow_ctrl_get = bnxt_flow_ctrl_get_op,
4157         .flow_ctrl_set = bnxt_flow_ctrl_set_op,
4158         .udp_tunnel_port_add  = bnxt_udp_tunnel_port_add_op,
4159         .udp_tunnel_port_del  = bnxt_udp_tunnel_port_del_op,
4160         .vlan_filter_set = bnxt_vlan_filter_set_op,
4161         .vlan_offload_set = bnxt_vlan_offload_set_op,
4162         .vlan_tpid_set = bnxt_vlan_tpid_set_op,
4163         .vlan_pvid_set = bnxt_vlan_pvid_set_op,
4164         .mtu_set = bnxt_mtu_set_op,
4165         .mac_addr_set = bnxt_set_default_mac_addr_op,
4166         .xstats_get = bnxt_dev_xstats_get_op,
4167         .xstats_get_names = bnxt_dev_xstats_get_names_op,
4168         .xstats_reset = bnxt_dev_xstats_reset_op,
4169         .fw_version_get = bnxt_fw_version_get,
4170         .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
4171         .rxq_info_get = bnxt_rxq_info_get_op,
4172         .txq_info_get = bnxt_txq_info_get_op,
4173         .dev_led_on = bnxt_dev_led_on_op,
4174         .dev_led_off = bnxt_dev_led_off_op,
4175         .xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
4176         .xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
4177         .rx_queue_count = bnxt_rx_queue_count_op,
4178         .rx_descriptor_status = bnxt_rx_descriptor_status_op,
4179         .tx_descriptor_status = bnxt_tx_descriptor_status_op,
4180         .rx_queue_start = bnxt_rx_queue_start,
4181         .rx_queue_stop = bnxt_rx_queue_stop,
4182         .tx_queue_start = bnxt_tx_queue_start,
4183         .tx_queue_stop = bnxt_tx_queue_stop,
4184         .filter_ctrl = bnxt_filter_ctrl_op,
4185         .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
4186         .get_eeprom_length    = bnxt_get_eeprom_length_op,
4187         .get_eeprom           = bnxt_get_eeprom_op,
4188         .set_eeprom           = bnxt_set_eeprom_op,
4189         .timesync_enable      = bnxt_timesync_enable,
4190         .timesync_disable     = bnxt_timesync_disable,
4191         .timesync_read_time   = bnxt_timesync_read_time,
4192         .timesync_write_time   = bnxt_timesync_write_time,
4193         .timesync_adjust_time = bnxt_timesync_adjust_time,
4194         .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp,
4195         .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp,
4196 };
4197
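/* Map a reset GRC register through BAR0 window 3 and return the
 * window-relative offset to write through.
 */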
4198 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg)
4199 {
4200         uint32_t offset;
4201
4202         /* Only pre-map the reset GRC registers using window 3 */
4203         rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 +
4204                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8);
4205
4206         offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc);
4207
4208         return offset;
4209 }
4210
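/* Pre-map all GRC-type firmware health status registers through BAR0
 * window 2; they must all live in the same 4KB page.
 */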
4211 int bnxt_map_fw_health_status_regs(struct bnxt *bp)
4212 {
4213         struct bnxt_error_recovery_info *info = bp->recovery_info;
4214         uint32_t reg_base = 0xffffffff;
4215         int i;
4216
4217         /* Only pre-map the monitoring GRC registers using window 2 */
4218         for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) {
4219                 uint32_t reg = info->status_regs[i];
4220
4221                 if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC)
4222                         continue;
4223
4224                 if (reg_base == 0xffffffff)
4225                         reg_base = reg & 0xfffff000;
4226                 if ((reg & 0xfffff000) != reg_base)
4227                         return -ERANGE;
4228
                /* Use mask 0xffc, as the lower two bits indicate the
                 * address space location.
                 */
4232                 info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE +
4233                                                 (reg & 0xffc);
4234         }
4235
4236         if (reg_base == 0xffffffff)
4237                 return 0;
4238
4239         rte_write32(reg_base, (uint8_t *)bp->bar0 +
4240                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
4241
4242         return 0;
4243 }
4244
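/* Execute one step of the firmware-directed reset sequence: write the
 * recovery-provided value to a config-space, GRC or BAR0 register and
 * honor the required post-write delay.
 */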
4245 static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index)
4246 {
4247         struct bnxt_error_recovery_info *info = bp->recovery_info;
4248         uint32_t delay = info->delay_after_reset[index];
4249         uint32_t val = info->reset_reg_val[index];
4250         uint32_t reg = info->reset_reg[index];
4251         uint32_t type, offset;
4252
4253         type = BNXT_FW_STATUS_REG_TYPE(reg);
4254         offset = BNXT_FW_STATUS_REG_OFF(reg);
4255
4256         switch (type) {
4257         case BNXT_FW_STATUS_REG_TYPE_CFG:
4258                 rte_pci_write_config(bp->pdev, &val, sizeof(val), offset);
4259                 break;
4260         case BNXT_FW_STATUS_REG_TYPE_GRC:
4261                 offset = bnxt_map_reset_regs(bp, offset);
4262                 rte_write32(val, (uint8_t *)bp->bar0 + offset);
4263                 break;
4264         case BNXT_FW_STATUS_REG_TYPE_BAR0:
4265                 rte_write32(val, (uint8_t *)bp->bar0 + offset);
4266                 break;
4267         }
        /* Wait for the FW-specified interval for the core reset to complete. */
4269         if (delay)
4270                 rte_delay_ms(delay);
4271 }
4272
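/* Quiesce the port ahead of a FW reset: force the link down, stop the device
 * if it was started, and release driver resources.
 */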
4273 static void bnxt_dev_cleanup(struct bnxt *bp)
4274 {
4275         bnxt_set_hwrm_link_config(bp, false);
4276         bp->link_info->link_up = 0;
4277         if (bp->eth_dev->data->dev_started)
4278                 bnxt_dev_stop_op(bp->eth_dev);
4279
4280         bnxt_uninit_resources(bp, true);
4281 }
4282
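/* Replay the VLAN filters recorded in dev->data->vlan_filter_conf after a FW
 * reset; the bitmap carries one bit per VLAN ID.
 */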
4283 static int bnxt_restore_vlan_filters(struct bnxt *bp)
4284 {
4285         struct rte_eth_dev *dev = bp->eth_dev;
4286         struct rte_vlan_filter_conf *vfc;
4287         int vidx, vbit, rc;
4288         uint16_t vlan_id;
4289
4290         for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) {
4291                 vfc = &dev->data->vlan_filter_conf;
4292                 vidx = vlan_id / 64;
4293                 vbit = vlan_id % 64;
4294
4295                 /* Each bit corresponds to a VLAN id */
4296                 if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) {
4297                         rc = bnxt_add_vlan_filter(bp, vlan_id);
4298                         if (rc)
4299                                 return rc;
4300                 }
4301         }
4302
4303         return 0;
4304 }
4305
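/* Replay the unicast MAC filters (and their pool assignments) saved in
 * dev->data after a FW reset. Untrusted VFs are skipped.
 */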
4306 static int bnxt_restore_mac_filters(struct bnxt *bp)
4307 {
4308         struct rte_eth_dev *dev = bp->eth_dev;
4309         struct rte_eth_dev_info dev_info;
4310         struct rte_ether_addr *addr;
4311         uint64_t pool_mask;
4312         uint32_t pool = 0;
4313         uint16_t i;
4314         int rc;
4315
        if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
4317                 return 0;
4318
4319         rc = bnxt_dev_info_get_op(dev, &dev_info);
4320         if (rc)
4321                 return rc;
4322
4323         /* replay MAC address configuration */
4324         for (i = 1; i < dev_info.max_mac_addrs; i++) {
4325                 addr = &dev->data->mac_addrs[i];
4326
4327                 /* skip zero address */
4328                 if (rte_is_zero_ether_addr(addr))
4329                         continue;
4330
4331                 pool = 0;
4332                 pool_mask = dev->data->mac_pool_sel[i];
4333
4334                 do {
4335                         if (pool_mask & 1ULL) {
4336                                 rc = bnxt_mac_addr_add_op(dev, addr, i, pool);
4337                                 if (rc)
4338                                         return rc;
4339                         }
4340                         pool_mask >>= 1;
4341                         pool++;
4342                 } while (pool_mask);
4343         }
4344
4345         return 0;
4346 }
4347
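/* Restore allmulticast/promiscuous modes and the MAC/VLAN filters after a FW
 * reset, based on the state cached in the rte_eth_dev data.
 */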
4348 static int bnxt_restore_filters(struct bnxt *bp)
4349 {
4350         struct rte_eth_dev *dev = bp->eth_dev;
4351         int ret = 0;
4352
4353         if (dev->data->all_multicast) {
4354                 ret = bnxt_allmulticast_enable_op(dev);
4355                 if (ret)
4356                         return ret;
4357         }
4358         if (dev->data->promiscuous) {
4359                 ret = bnxt_promiscuous_enable_op(dev);
4360                 if (ret)
4361                         return ret;
4362         }
4363
4364         ret = bnxt_restore_mac_filters(bp);
4365         if (ret)
4366                 return ret;
4367
4368         ret = bnxt_restore_vlan_filters(bp);
4369         /* TODO restore other filters as well */
4370         return ret;
4371 }
4372
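/* Alarm callback that completes recovery after a FW reset: poll until the FW
 * answers HWRM_VER_GET (bounded by fw_reset_max_msecs), then re-initialize
 * resources, restart the port, and restore filters.
 */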
4373 static void bnxt_dev_recover(void *arg)
4374 {
4375         struct bnxt *bp = arg;
4376         int timeout = bp->fw_reset_max_msecs;
4377         int rc = 0;
4378
        /* Clear the error flag so that device re-initialization can proceed. */
4380         bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
4381
4382         do {
4383                 rc = bnxt_hwrm_ver_get(bp, SHORT_HWRM_CMD_TIMEOUT);
4384                 if (rc == 0)
4385                         break;
4386                 rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL);
4387                 timeout -= BNXT_FW_READY_WAIT_INTERVAL;
4388         } while (rc && timeout);
4389
4390         if (rc) {
                PMD_DRV_LOG(ERR, "FW is not ready after reset\n");
4392                 goto err;
4393         }
4394
4395         rc = bnxt_init_resources(bp, true);
4396         if (rc) {
4397                 PMD_DRV_LOG(ERR,
4398                             "Failed to initialize resources after reset\n");
4399                 goto err;
4400         }
        /* Clear the reset flag now that the device is initialized. */
4402         bp->flags &= ~BNXT_FLAG_FW_RESET;
4403
4404         rc = bnxt_dev_start_op(bp->eth_dev);
4405         if (rc) {
4406                 PMD_DRV_LOG(ERR, "Failed to start port after reset\n");
4407                 goto err_start;
4408         }
4409
4410         rc = bnxt_restore_filters(bp);
4411         if (rc)
4412                 goto err_start;
4413
4414         PMD_DRV_LOG(INFO, "Recovered from FW reset\n");
4415         return;
4416 err_start:
4417         bnxt_dev_stop_op(bp->eth_dev);
4418 err:
4419         bp->flags |= BNXT_FLAG_FATAL_ERROR;
4420         bnxt_uninit_resources(bp, false);
4421         PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n");
4422 }
4423
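/* Entry point for FW reset handling: quiesce the device, wait for it to shut
 * down, then schedule bnxt_dev_recover() once the FW-advertised minimum
 * reset time has elapsed.
 */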
4424 void bnxt_dev_reset_and_resume(void *arg)
4425 {
4426         struct bnxt *bp = arg;
4427         int rc;
4428
4429         bnxt_dev_cleanup(bp);
4430
4431         bnxt_wait_for_device_shutdown(bp);
4432
4433         rc = rte_eal_alarm_set(US_PER_MS * bp->fw_reset_min_msecs,
4434                                bnxt_dev_recover, (void *)bp);
4435         if (rc)
                PMD_DRV_LOG(ERR, "Error setting recovery alarm\n");
4437 }
4438
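/* Read one FW status register, dispatching on its address space type. GRC
 * registers are read through the window pre-mapped by
 * bnxt_map_fw_health_status_regs().
 */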
4439 uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index)
4440 {
4441         struct bnxt_error_recovery_info *info = bp->recovery_info;
4442         uint32_t reg = info->status_regs[index];
4443         uint32_t type, offset, val = 0;
4444
4445         type = BNXT_FW_STATUS_REG_TYPE(reg);
4446         offset = BNXT_FW_STATUS_REG_OFF(reg);
4447
4448         switch (type) {
4449         case BNXT_FW_STATUS_REG_TYPE_CFG:
4450                 rte_pci_read_config(bp->pdev, &val, sizeof(val), offset);
4451                 break;
4452         case BNXT_FW_STATUS_REG_TYPE_GRC:
4453                 offset = info->mapped_status_regs[index];
4454                 /* FALLTHROUGH */
4455         case BNXT_FW_STATUS_REG_TYPE_BAR0:
4456                 val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
4457                                        offset));
4458                 break;
4459         }
4460
4461         return val;
4462 }
4463
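/* Trigger the FW reset using the method advertised by the FW: either
 * host-driven register writes or an HWRM_FW_RESET request handled by the
 * co-processor.
 */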
4464 static int bnxt_fw_reset_all(struct bnxt *bp)
4465 {
4466         struct bnxt_error_recovery_info *info = bp->recovery_info;
4467         uint32_t i;
4468         int rc = 0;
4469
4470         if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
4471                 /* Reset through master function driver */
4472                 for (i = 0; i < info->reg_array_cnt; i++)
4473                         bnxt_write_fw_reset_reg(bp, i);
4474                 /* Wait for time specified by FW after triggering reset */
4475                 rte_delay_ms(info->master_func_wait_period_after_reset);
4476         } else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) {
4477                 /* Reset with the help of Kong processor */
4478                 rc = bnxt_hwrm_fw_reset(bp);
4479                 if (rc)
4480                         PMD_DRV_LOG(ERR, "Failed to reset FW\n");
4481         }
4482
4483         return rc;
4484 }
4485
4486 static void bnxt_fw_reset_cb(void *arg)
4487 {
4488         struct bnxt *bp = arg;
4489         struct bnxt_error_recovery_info *info = bp->recovery_info;
4490         int rc = 0;
4491
        /* Only the master function can initiate a FW reset. */
4493         if (bnxt_is_master_func(bp) &&
4494             bnxt_is_recovery_enabled(bp)) {
4495                 rc = bnxt_fw_reset_all(bp);
4496                 if (rc) {
4497                         PMD_DRV_LOG(ERR, "Adapter recovery failed\n");
4498                         return;
4499                 }
4500         }
4501
        /* If the recovery method is ERROR_RECOVERY_CO_CPU, the Kong processor
         * will send an EXCEPTION_FATAL_ASYNC event to all functions
         * (including the master function). On receiving this async event,
         * all active drivers should treat it as FW-initiated recovery.
         */
4507         if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
4508                 bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT;
4509                 bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT;
4510
                /* Schedule recovery after the minimum FW ready time. */
4512                 rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
4513                                   (void *)bp);
4514         }
4515 }
4516
/* The driver should poll the FW heartbeat and reset_counter registers at the
 * frequency advertised by the FW in HWRM_ERROR_RECOVERY_QCFG.
 * When the driver detects a heartbeat stop or a change in the reset_counter,
 * it must trigger a reset to recover from the error condition.
 * A "master PF" is the function that has the privilege to initiate the chimp
 * reset. The master PF is elected by the firmware and is notified through an
 * async message.
 */
4525 static void bnxt_check_fw_health(void *arg)
4526 {
4527         struct bnxt *bp = arg;
4528         struct bnxt_error_recovery_info *info = bp->recovery_info;
4529         uint32_t val = 0, wait_msec;
4530
4531         if (!info || !bnxt_is_recovery_enabled(bp) ||
4532             is_bnxt_in_error(bp))
4533                 return;
4534
4535         val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG);
4536         if (val == info->last_heart_beat)
4537                 goto reset;
4538
4539         info->last_heart_beat = val;
4540
4541         val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG);
4542         if (val != info->last_reset_counter)
4543                 goto reset;
4544
4545         info->last_reset_counter = val;
4546
4547         rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq,
4548                           bnxt_check_fw_health, (void *)bp);
4549
4550         return;
4551 reset:
4552         /* Stop DMA to/from device */
4553         bp->flags |= BNXT_FLAG_FATAL_ERROR;
4554         bp->flags |= BNXT_FLAG_FW_RESET;
4555
4556         PMD_DRV_LOG(ERR, "Detected FW dead condition\n");
4557
4558         if (bnxt_is_master_func(bp))
4559                 wait_msec = info->master_func_wait_period;
4560         else
4561                 wait_msec = info->normal_func_wait_period;
4562
4563         rte_eal_alarm_set(US_PER_MS * wait_msec,
4564                           bnxt_fw_reset_cb, (void *)bp);
4565 }
4566
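/* Arm the periodic FW health check alarm (once) at the polling frequency
 * advertised by the FW.
 */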
4567 void bnxt_schedule_fw_health_check(struct bnxt *bp)
4568 {
4569         uint32_t polling_freq;
4570
4571         if (!bnxt_is_recovery_enabled(bp))
4572                 return;
4573
4574         if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED)
4575                 return;
4576
4577         polling_freq = bp->recovery_info->driver_polling_freq;
4578
4579         rte_eal_alarm_set(US_PER_MS * polling_freq,
4580                           bnxt_check_fw_health, (void *)bp);
4581         bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
4582 }
4583
4584 static void bnxt_cancel_fw_health_check(struct bnxt *bp)
4585 {
4586         if (!bnxt_is_recovery_enabled(bp))
4587                 return;
4588
4589         rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp);
4590         bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
4591 }
4592
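/* Return true if the PCI device ID belongs to a virtual function. */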
4593 static bool bnxt_vf_pciid(uint16_t device_id)
4594 {
4595         switch (device_id) {
4596         case BROADCOM_DEV_ID_57304_VF:
4597         case BROADCOM_DEV_ID_57406_VF:
4598         case BROADCOM_DEV_ID_5731X_VF:
4599         case BROADCOM_DEV_ID_5741X_VF:
4600         case BROADCOM_DEV_ID_57414_VF:
4601         case BROADCOM_DEV_ID_STRATUS_NIC_VF1:
4602         case BROADCOM_DEV_ID_STRATUS_NIC_VF2:
4603         case BROADCOM_DEV_ID_58802_VF:
4604         case BROADCOM_DEV_ID_57500_VF1:
4605         case BROADCOM_DEV_ID_57500_VF2:
4606                 /* FALLTHROUGH */
4607                 return true;
4608         default:
4609                 return false;
4610         }
4611 }
4612
4613 static bool bnxt_thor_device(uint16_t device_id)
4614 {
4615         switch (device_id) {
4616         case BROADCOM_DEV_ID_57508:
4617         case BROADCOM_DEV_ID_57504:
4618         case BROADCOM_DEV_ID_57502:
4619         case BROADCOM_DEV_ID_57508_MF1:
4620         case BROADCOM_DEV_ID_57504_MF1:
4621         case BROADCOM_DEV_ID_57502_MF1:
4622         case BROADCOM_DEV_ID_57508_MF2:
4623         case BROADCOM_DEV_ID_57504_MF2:
4624         case BROADCOM_DEV_ID_57502_MF2:
4625         case BROADCOM_DEV_ID_57500_VF1:
4626         case BROADCOM_DEV_ID_57500_VF2:
4627                 /* FALLTHROUGH */
4628                 return true;
4629         default:
4630                 return false;
4631         }
4632 }
4633
4634 bool bnxt_stratus_device(struct bnxt *bp)
4635 {
4636         uint16_t device_id = bp->pdev->id.device_id;
4637
4638         switch (device_id) {
4639         case BROADCOM_DEV_ID_STRATUS_NIC:
4640         case BROADCOM_DEV_ID_STRATUS_NIC_VF1:
4641         case BROADCOM_DEV_ID_STRATUS_NIC_VF2:
4642                 /* FALLTHROUGH */
4643                 return true;
4644         default:
4645                 return false;
4646         }
4647 }
4648
4649 static int bnxt_init_board(struct rte_eth_dev *eth_dev)
4650 {
4651         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
4652         struct bnxt *bp = eth_dev->data->dev_private;
4653
        /* Map the device BARs: BAR0 for registers, BAR2 for doorbells. */
4655         bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
4656         bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
4657         if (!bp->bar0 || !bp->doorbell_base) {
4658                 PMD_DRV_LOG(ERR, "Unable to access Hardware\n");
4659                 return -ENODEV;
4660         }
4661
4662         bp->eth_dev = eth_dev;
4663         bp->pdev = pci_dev;
4664
4665         return 0;
4666 }
4667
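/* Allocate one backing-store memory block for the FW. The memory is carved
 * out of a memzone in BNXT_PAGE_SIZE pages; multi-page blocks also get a
 * page table whose entries carry PTU_PTE validity bits.
 */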
4668 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
4669                                   struct bnxt_ctx_pg_info *ctx_pg,
4670                                   uint32_t mem_size,
4671                                   const char *suffix,
4672                                   uint16_t idx)
4673 {
4674         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
4675         const struct rte_memzone *mz = NULL;
4676         char mz_name[RTE_MEMZONE_NAMESIZE];
4677         rte_iova_t mz_phys_addr;
4678         uint64_t valid_bits = 0;
4679         uint32_t sz;
4680         int i;
4681
4682         if (!mem_size)
4683                 return 0;
4684
4685         rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) /
4686                          BNXT_PAGE_SIZE;
4687         rmem->page_size = BNXT_PAGE_SIZE;
4688         rmem->pg_arr = ctx_pg->ctx_pg_arr;
4689         rmem->dma_arr = ctx_pg->ctx_dma_arr;
4690         rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
4691
4692         valid_bits = PTU_PTE_VALID;
4693
4694         if (rmem->nr_pages > 1) {
4695                 snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4696                          "bnxt_ctx_pg_tbl%s_%x_%d",
4697                          suffix, idx, bp->eth_dev->data->port_id);
4698                 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4699                 mz = rte_memzone_lookup(mz_name);
4700                 if (!mz) {
4701                         mz = rte_memzone_reserve_aligned(mz_name,
4702                                                 rmem->nr_pages * 8,
4703                                                 SOCKET_ID_ANY,
4704                                                 RTE_MEMZONE_2MB |
4705                                                 RTE_MEMZONE_SIZE_HINT_ONLY |
4706                                                 RTE_MEMZONE_IOVA_CONTIG,
4707                                                 BNXT_PAGE_SIZE);
4708                         if (mz == NULL)
4709                                 return -ENOMEM;
4710                 }
4711
4712                 memset(mz->addr, 0, mz->len);
4713                 mz_phys_addr = mz->iova;
4714
4715                 rmem->pg_tbl = mz->addr;
4716                 rmem->pg_tbl_map = mz_phys_addr;
4717                 rmem->pg_tbl_mz = mz;
4718         }
4719
4720         snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d",
4721                  suffix, idx, bp->eth_dev->data->port_id);
4722         mz = rte_memzone_lookup(mz_name);
4723         if (!mz) {
4724                 mz = rte_memzone_reserve_aligned(mz_name,
4725                                                  mem_size,
4726                                                  SOCKET_ID_ANY,
4727                                                  RTE_MEMZONE_1GB |
4728                                                  RTE_MEMZONE_SIZE_HINT_ONLY |
4729                                                  RTE_MEMZONE_IOVA_CONTIG,
4730                                                  BNXT_PAGE_SIZE);
4731                 if (mz == NULL)
4732                         return -ENOMEM;
4733         }
4734
4735         memset(mz->addr, 0, mz->len);
4736         mz_phys_addr = mz->iova;
4737
4738         for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) {
4739                 rmem->pg_arr[i] = ((char *)mz->addr) + sz;
4740                 rmem->dma_arr[i] = mz_phys_addr + sz;
4741
4742                 if (rmem->nr_pages > 1) {
4743                         if (i == rmem->nr_pages - 2 &&
4744                             (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
4745                                 valid_bits |= PTU_PTE_NEXT_TO_LAST;
4746                         else if (i == rmem->nr_pages - 1 &&
4747                                  (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
4748                                 valid_bits |= PTU_PTE_LAST;
4749
4750                         rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] |
4751                                                            valid_bits);
4752                 }
4753         }
4754
4755         rmem->mz = mz;
4756         if (rmem->vmem_size)
4757                 rmem->vmem = (void **)mz->addr;
4758         rmem->dma_arr[0] = mz_phys_addr;
4759         return 0;
4760 }
4761
4762 static void bnxt_free_ctx_mem(struct bnxt *bp)
4763 {
4764         int i;
4765
4766         if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED))
4767                 return;
4768
4769         bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED;
4770         rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz);
4771         rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz);
4772         rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz);
4773         rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz);
4774         rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz);
4775         rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz);
4776         rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz);
4777         rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz);
4778         rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz);
4779         rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz);
4780
4781         for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) {
4782                 if (bp->ctx->tqm_mem[i])
4783                         rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz);
4784         }
4785
4786         rte_free(bp->ctx);
4787         bp->ctx = NULL;
4788 }
4789
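/* Linux-style arithmetic helpers used by the context-memory sizing below. */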
4790 #define bnxt_roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))
4791
4792 #define min_t(type, x, y) ({                    \
4793         type __min1 = (x);                      \
4794         type __min2 = (y);                      \
4795         __min1 < __min2 ? __min1 : __min2; })
4796
4797 #define max_t(type, x, y) ({                    \
4798         type __max1 = (x);                      \
4799         type __max2 = (y);                      \
4800         __max1 > __max2 ? __max1 : __max2; })
4801
4802 #define clamp_t(type, _x, min, max)     min_t(type, max_t(type, _x, min), max)
4803
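/* Query the FW's backing-store requirements and allocate context memory for
 * the QP, SRQ, CQ, VNIC, stats, and TQM rings, then describe the allocation
 * to the FW via HWRM_FUNC_BACKING_STORE_CFG.
 */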
4804 int bnxt_alloc_ctx_mem(struct bnxt *bp)
4805 {
4806         struct bnxt_ctx_pg_info *ctx_pg;
4807         struct bnxt_ctx_mem_info *ctx;
4808         uint32_t mem_size, ena, entries;
4809         uint32_t entries_sp, min;
4810         int i, rc;
4811
4812         rc = bnxt_hwrm_func_backing_store_qcaps(bp);
4813         if (rc) {
4814                 PMD_DRV_LOG(ERR, "Query context mem capability failed\n");
4815                 return rc;
4816         }
4817         ctx = bp->ctx;
4818         if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
4819                 return 0;
4820
4821         ctx_pg = &ctx->qp_mem;
4822         ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
4823         mem_size = ctx->qp_entry_size * ctx_pg->entries;
4824         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0);
4825         if (rc)
4826                 return rc;
4827
4828         ctx_pg = &ctx->srq_mem;
4829         ctx_pg->entries = ctx->srq_max_l2_entries;
4830         mem_size = ctx->srq_entry_size * ctx_pg->entries;
4831         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0);
4832         if (rc)
4833                 return rc;
4834
4835         ctx_pg = &ctx->cq_mem;
4836         ctx_pg->entries = ctx->cq_max_l2_entries;
4837         mem_size = ctx->cq_entry_size * ctx_pg->entries;
4838         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0);
4839         if (rc)
4840                 return rc;
4841
4842         ctx_pg = &ctx->vnic_mem;
4843         ctx_pg->entries = ctx->vnic_max_vnic_entries +
4844                 ctx->vnic_max_ring_table_entries;
4845         mem_size = ctx->vnic_entry_size * ctx_pg->entries;
4846         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0);
4847         if (rc)
4848                 return rc;
4849
4850         ctx_pg = &ctx->stat_mem;
4851         ctx_pg->entries = ctx->stat_max_entries;
4852         mem_size = ctx->stat_entry_size * ctx_pg->entries;
4853         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0);
4854         if (rc)
4855                 return rc;
4856
4857         min = ctx->tqm_min_entries_per_ring;
4858
4859         entries_sp = ctx->qp_max_l2_entries +
4860                      ctx->vnic_max_vnic_entries +
4861                      2 * ctx->qp_min_qp1_entries + min;
4862         entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple);
4863
4864         entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries;
4865         entries = bnxt_roundup(entries, ctx->tqm_entries_multiple);
4866         entries = clamp_t(uint32_t, entries, min,
4867                           ctx->tqm_max_entries_per_ring);
4868         for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
4869                 ctx_pg = ctx->tqm_mem[i];
4870                 ctx_pg->entries = i ? entries : entries_sp;
4871                 mem_size = ctx->tqm_entry_size * ctx_pg->entries;
4872                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i);
4873                 if (rc)
4874                         return rc;
4875                 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
4876         }
4877
4878         ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
4879         rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
4880         if (rc)
4881                 PMD_DRV_LOG(ERR,
4882                             "Failed to configure context mem: rc = %d\n", rc);
4883         else
4884                 ctx->flags |= BNXT_CTX_FLAG_INITED;
4885
4886         return rc;
4887 }
4888
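/* Allocate (or look up) DMA-able memzones for the RX/TX port statistics that
 * the FW DMAs to the host, including the extended statistics blocks when the
 * FW supports them.
 */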
4889 static int bnxt_alloc_stats_mem(struct bnxt *bp)
4890 {
4891         struct rte_pci_device *pci_dev = bp->pdev;
4892         char mz_name[RTE_MEMZONE_NAMESIZE];
4893         const struct rte_memzone *mz = NULL;
4894         uint32_t total_alloc_len;
4895         rte_iova_t mz_phys_addr;
4896
4897         if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2)
4898                 return 0;
4899
4900         snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4901                  "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
4902                  pci_dev->addr.bus, pci_dev->addr.devid,
4903                  pci_dev->addr.function, "rx_port_stats");
4904         mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4905         mz = rte_memzone_lookup(mz_name);
4906         total_alloc_len =
4907                 RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) +
4908                                        sizeof(struct rx_port_stats_ext) + 512);
4909         if (!mz) {
4910                 mz = rte_memzone_reserve(mz_name, total_alloc_len,
4911                                          SOCKET_ID_ANY,
4912                                          RTE_MEMZONE_2MB |
4913                                          RTE_MEMZONE_SIZE_HINT_ONLY |
4914                                          RTE_MEMZONE_IOVA_CONTIG);
4915                 if (mz == NULL)
4916                         return -ENOMEM;
4917         }
4918         memset(mz->addr, 0, mz->len);
4919         mz_phys_addr = mz->iova;
4920
4921         bp->rx_mem_zone = (const void *)mz;
4922         bp->hw_rx_port_stats = mz->addr;
4923         bp->hw_rx_port_stats_map = mz_phys_addr;
4924
4925         snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4926                  "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
4927                  pci_dev->addr.bus, pci_dev->addr.devid,
4928                  pci_dev->addr.function, "tx_port_stats");
4929         mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4930         mz = rte_memzone_lookup(mz_name);
4931         total_alloc_len =
4932                 RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) +
4933                                        sizeof(struct tx_port_stats_ext) + 512);
4934         if (!mz) {
4935                 mz = rte_memzone_reserve(mz_name,
4936                                          total_alloc_len,
4937                                          SOCKET_ID_ANY,
4938                                          RTE_MEMZONE_2MB |
4939                                          RTE_MEMZONE_SIZE_HINT_ONLY |
4940                                          RTE_MEMZONE_IOVA_CONTIG);
4941                 if (mz == NULL)
4942                         return -ENOMEM;
4943         }
4944         memset(mz->addr, 0, mz->len);
4945         mz_phys_addr = mz->iova;
4946
4947         bp->tx_mem_zone = (const void *)mz;
4948         bp->hw_tx_port_stats = mz->addr;
4949         bp->hw_tx_port_stats_map = mz_phys_addr;
4950         bp->flags |= BNXT_FLAG_PORT_STATS;
4951
        /* Expose extended statistics only if the FW supports them. */
4953         if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 ||
4954             bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 ||
4955             !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED))
4956                 return 0;
4957
4958         bp->hw_rx_port_stats_ext = (void *)
4959                 ((uint8_t *)bp->hw_rx_port_stats +
4960                  sizeof(struct rx_port_stats));
4961         bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
4962                 sizeof(struct rx_port_stats);
4963         bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;
4964
4965         if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 ||
4966             bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) {
4967                 bp->hw_tx_port_stats_ext = (void *)
4968                         ((uint8_t *)bp->hw_tx_port_stats +
4969                          sizeof(struct tx_port_stats));
4970                 bp->hw_tx_port_stats_ext_map =
4971                         bp->hw_tx_port_stats_map +
4972                         sizeof(struct tx_port_stats);
4973                 bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
4974         }
4975
4976         return 0;
4977 }
4978
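/* Allocate the MAC address table and establish the default MAC address: use
 * the FW-provided permanent address, or generate a random one for a VF that
 * was not assigned an address by the PF.
 */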
4979 static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
4980 {
4981         struct bnxt *bp = eth_dev->data->dev_private;
4982         int rc = 0;
4983
4984         eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
4985                                                RTE_ETHER_ADDR_LEN *
4986                                                bp->max_l2_ctx,
4987                                                0);
4988         if (eth_dev->data->mac_addrs == NULL) {
4989                 PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
4990                 return -ENOMEM;
4991         }
4992
4993         if (!BNXT_HAS_DFLT_MAC_SET(bp)) {
4994                 if (BNXT_PF(bp))
4995                         return -EINVAL;
4996
4997                 /* Generate a random MAC address, if none was assigned by PF */
4998                 PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
4999                 bnxt_eth_hw_addr_random(bp->mac_addr);
5000                 PMD_DRV_LOG(INFO,
5001                             "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
5002                             bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
5003                             bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);
5004
5005                 rc = bnxt_hwrm_set_mac(bp);
5006                 if (rc)
5007                         return rc;
5008         }
5009
5010         /* Copy the permanent MAC from the FUNC_QCAPS response */
5011         memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
5012
5013         return rc;
5014 }
5015
5016 static int bnxt_restore_dflt_mac(struct bnxt *bp)
5017 {
5018         int rc = 0;
5019
5020         /* MAC is already configured in FW */
5021         if (BNXT_HAS_DFLT_MAC_SET(bp))
5022                 return 0;
5023
5024         /* Restore the old MAC configured */
5025         rc = bnxt_hwrm_set_mac(bp);
5026         if (rc)
5027                 PMD_DRV_LOG(ERR, "Failed to restore MAC address\n");
5028
5029         return rc;
5030 }
5031
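/* Build the bitmap of HWRM commands that the FW should forward from VFs to
 * this PF driver; commands that VF drivers need for cleanup are always
 * allowed through directly.
 */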
5032 static void bnxt_config_vf_req_fwd(struct bnxt *bp)
5033 {
5034         if (!BNXT_PF(bp))
5035                 return;
5036
5037 #define ALLOW_FUNC(x)   \
5038         { \
5039                 uint32_t arg = (x); \
5040                 bp->pf->vf_req_fwd[((arg) >> 5)] &= \
5041                 ~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
5042         }
5043
5044         /* Forward all requests if firmware is new enough */
5045         if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
5046              (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
5047             ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
5048                 memset(bp->pf->vf_req_fwd, 0xff, sizeof(bp->pf->vf_req_fwd));
5049         } else {
5050                 PMD_DRV_LOG(WARNING,
5051                             "Firmware too old for VF mailbox functionality\n");
5052                 memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));
5053         }
5054
5055         /*
5056          * The following are used for driver cleanup. If we disallow these,
5057          * VF drivers can't clean up cleanly.
5058          */
5059         ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
5060         ALLOW_FUNC(HWRM_VNIC_FREE);
5061         ALLOW_FUNC(HWRM_RING_FREE);
5062         ALLOW_FUNC(HWRM_RING_GRP_FREE);
5063         ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
5064         ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
5065         ALLOW_FUNC(HWRM_STAT_CTX_FREE);
5066         ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
5067         ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
5068 }
5069
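/* The bnxt_get_*() helpers below resolve a port ID to the identifiers used
 * by the ULP/TruFlow layer. For representor ports they either return the
 * representor's own identifier or walk up to the parent device, depending on
 * the requested interface type.
 */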
5070 uint16_t
5071 bnxt_get_svif(uint16_t port_id, bool func_svif,
5072               enum bnxt_ulp_intf_type type)
5073 {
5074         struct rte_eth_dev *eth_dev;
5075         struct bnxt *bp;
5076
5077         eth_dev = &rte_eth_devices[port_id];
5078         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
5079                 struct bnxt_vf_representor *vfr = eth_dev->data->dev_private;
5080                 if (!vfr)
5081                         return 0;
5082
5083                 if (type == BNXT_ULP_INTF_TYPE_VF_REP)
5084                         return vfr->svif;
5085
5086                 eth_dev = vfr->parent_dev;
5087         }
5088
5089         bp = eth_dev->data->dev_private;
5090
5091         return func_svif ? bp->func_svif : bp->port_svif;
5092 }
5093
5094 uint16_t
5095 bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type)
5096 {
5097         struct rte_eth_dev *eth_dev;
5098         struct bnxt_vnic_info *vnic;
5099         struct bnxt *bp;
5100
5101         eth_dev = &rte_eth_devices[port];
5102         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
5103                 struct bnxt_vf_representor *vfr = eth_dev->data->dev_private;
5104                 if (!vfr)
5105                         return 0;
5106
5107                 if (type == BNXT_ULP_INTF_TYPE_VF_REP)
5108                         return vfr->dflt_vnic_id;
5109
5110                 eth_dev = vfr->parent_dev;
5111         }
5112
5113         bp = eth_dev->data->dev_private;
5114
5115         vnic = BNXT_GET_DEFAULT_VNIC(bp);
5116
5117         return vnic->fw_vnic_id;
5118 }
5119
5120 uint16_t
5121 bnxt_get_fw_func_id(uint16_t port, enum bnxt_ulp_intf_type type)
5122 {
5123         struct rte_eth_dev *eth_dev;
5124         struct bnxt *bp;
5125
5126         eth_dev = &rte_eth_devices[port];
5127         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
5128                 struct bnxt_vf_representor *vfr = eth_dev->data->dev_private;
5129                 if (!vfr)
5130                         return 0;
5131
5132                 if (type == BNXT_ULP_INTF_TYPE_VF_REP)
5133                         return vfr->fw_fid;
5134
5135                 eth_dev = vfr->parent_dev;
5136         }
5137
5138         bp = eth_dev->data->dev_private;
5139
5140         return bp->fw_fid;
5141 }
5142
5143 enum bnxt_ulp_intf_type
5144 bnxt_get_interface_type(uint16_t port)
5145 {
5146         struct rte_eth_dev *eth_dev;
5147         struct bnxt *bp;
5148
5149         eth_dev = &rte_eth_devices[port];
5150         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev))
5151                 return BNXT_ULP_INTF_TYPE_VF_REP;
5152
5153         bp = eth_dev->data->dev_private;
5154         if (BNXT_PF(bp))
5155                 return BNXT_ULP_INTF_TYPE_PF;
5156         else if (BNXT_VF_IS_TRUSTED(bp))
5157                 return BNXT_ULP_INTF_TYPE_TRUSTED_VF;
5158         else if (BNXT_VF(bp))
5159                 return BNXT_ULP_INTF_TYPE_VF;
5160
5161         return BNXT_ULP_INTF_TYPE_INVALID;
5162 }
5163
5164 uint16_t
5165 bnxt_get_phy_port_id(uint16_t port_id)
5166 {
5167         struct bnxt_vf_representor *vfr;
5168         struct rte_eth_dev *eth_dev;
5169         struct bnxt *bp;
5170
5171         eth_dev = &rte_eth_devices[port_id];
5172         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
5173                 vfr = eth_dev->data->dev_private;
5174                 if (!vfr)
5175                         return 0;
5176
5177                 eth_dev = vfr->parent_dev;
5178         }
5179
5180         bp = eth_dev->data->dev_private;
5181
5182         return BNXT_PF(bp) ? bp->pf->port_id : bp->parent->port_id;
5183 }
5184
5185 uint16_t
5186 bnxt_get_parif(uint16_t port_id, enum bnxt_ulp_intf_type type)
5187 {
5188         struct rte_eth_dev *eth_dev;
5189         struct bnxt *bp;
5190
5191         eth_dev = &rte_eth_devices[port_id];
5192         if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
5193                 struct bnxt_vf_representor *vfr = eth_dev->data->dev_private;
5194                 if (!vfr)
5195                         return 0;
5196
5197                 if (type == BNXT_ULP_INTF_TYPE_VF_REP)
5198                         return vfr->fw_fid - 1;
5199
5200                 eth_dev = vfr->parent_dev;
5201         }
5202
5203         bp = eth_dev->data->dev_private;
5204
5205         return BNXT_PF(bp) ? bp->fw_fid - 1 : bp->parent->fid - 1;
5206 }
5207
5208 uint16_t
5209 bnxt_get_vport(uint16_t port_id)
5210 {
5211         return (1 << bnxt_get_phy_port_id(port_id));
5212 }
5213
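/* Lazily allocate the error-recovery info block when the FW advertises
 * error-recovery support; on allocation failure the capability flag is
 * cleared so that recovery is simply disabled.
 */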
5214 static void bnxt_alloc_error_recovery_info(struct bnxt *bp)
5215 {
5216         struct bnxt_error_recovery_info *info = bp->recovery_info;
5217
5218         if (info) {
5219                 if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))
5220                         memset(info, 0, sizeof(*info));
5221                 return;
5222         }
5223
5224         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5225                 return;
5226
5227         info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
5228                            sizeof(*info), 0);
5229         if (!info)
5230                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
5231
5232         bp->recovery_info = info;
5233 }
5234
5235 static void bnxt_check_fw_status(struct bnxt *bp)
5236 {
5237         uint32_t fw_status;
5238
5239         if (!(bp->recovery_info &&
5240               (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS)))
5241                 return;
5242
5243         fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG);
5244         if (fw_status != BNXT_FW_STATUS_HEALTHY)
5245                 PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n",
5246                             fw_status);
5247 }
5248
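/* Probe for the hcomm status structure through GRC window 2; if its
 * signature is present, record and pre-map the FW health status register it
 * points to.
 */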
5249 static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp)
5250 {
5251         struct bnxt_error_recovery_info *info = bp->recovery_info;
5252         uint32_t status_loc;
5253         uint32_t sig_ver;
5254
5255         rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 +
5256                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
5257         sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
5258                                    BNXT_GRCP_WINDOW_2_BASE +
5259                                    offsetof(struct hcomm_status,
5260                                             sig_ver)));
5261         /* If the signature is absent, then FW does not support this feature */
5262         if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) !=
5263             HCOMM_STATUS_SIGNATURE_VAL)
5264                 return 0;
5265
5266         if (!info) {
5267                 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
5268                                    sizeof(*info), 0);
5269                 if (!info)
5270                         return -ENOMEM;
5271                 bp->recovery_info = info;
5272         } else {
5273                 memset(info, 0, sizeof(*info));
5274         }
5275
5276         status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
5277                                       BNXT_GRCP_WINDOW_2_BASE +
5278                                       offsetof(struct hcomm_status,
5279                                                fw_status_loc)));
5280
5281         /* Only pre-map the FW health status GRC register */
5282         if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC)
5283                 return 0;
5284
5285         info->status_regs[BNXT_FW_STATUS_REG] = status_loc;
5286         info->mapped_status_regs[BNXT_FW_STATUS_REG] =
5287                 BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK);
5288
5289         rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 +
5290                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
5291
5292         bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS;
5293
5294         return 0;
5295 }
5296
5297 static int bnxt_init_fw(struct bnxt *bp)
5298 {
5299         uint16_t mtu;
5300         int rc = 0;
5301
5302         bp->fw_cap = 0;
5303
5304         rc = bnxt_map_hcomm_fw_status_reg(bp);
5305         if (rc)
5306                 return rc;
5307
5308         rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT);
5309         if (rc) {
5310                 bnxt_check_fw_status(bp);
5311                 return rc;
5312         }
5313
5314         rc = bnxt_hwrm_func_reset(bp);
5315         if (rc)
5316                 return -EIO;
5317
5318         rc = bnxt_hwrm_vnic_qcaps(bp);
5319         if (rc)
5320                 return rc;
5321
5322         rc = bnxt_hwrm_queue_qportcfg(bp);
5323         if (rc)
5324                 return rc;
5325
5326         /* Get the MAX capabilities for this function.
5327          * This function also allocates context memory for TQM rings and
5328          * informs the firmware about this allocated backing store memory.
5329          */
5330         rc = bnxt_hwrm_func_qcaps(bp);
5331         if (rc)
5332                 return rc;
5333
5334         rc = bnxt_hwrm_func_qcfg(bp, &mtu);
5335         if (rc)
5336                 return rc;
5337
5338         bnxt_hwrm_port_mac_qcfg(bp);
5339
5340         bnxt_hwrm_parent_pf_qcfg(bp);
5341
5342         bnxt_hwrm_port_phy_qcaps(bp);
5343
5344         rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp);
5345         if (rc)
5346                 return rc;
5347
5348         bnxt_alloc_error_recovery_info(bp);
5349         /* Get the adapter error recovery support info */
5350         rc = bnxt_hwrm_error_recovery_qcfg(bp);
5351         if (rc)
5352                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
5353
5354         bnxt_hwrm_port_led_qcaps(bp);
5355
5356         return 0;
5357 }
5358
5359 static int
5360 bnxt_init_locks(struct bnxt *bp)
5361 {
5362         int err;
5363
5364         err = pthread_mutex_init(&bp->flow_lock, NULL);
5365         if (err) {
5366                 PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n");
5367                 return err;
5368         }
5369
5370         err = pthread_mutex_init(&bp->def_cp_lock, NULL);
5371         if (err)
5372                 PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n");
5373         return err;
5374 }
5375
5376 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
5377 {
5378         int rc = 0;
5379
5380         rc = bnxt_init_fw(bp);
5381         if (rc)
5382                 return rc;
5383
5384         if (!reconfig_dev) {
5385                 rc = bnxt_setup_mac_addr(bp->eth_dev);
5386                 if (rc)
5387                         return rc;
5388         } else {
5389                 rc = bnxt_restore_dflt_mac(bp);
5390                 if (rc)
5391                         return rc;
5392         }
5393
5394         bnxt_config_vf_req_fwd(bp);
5395
5396         rc = bnxt_hwrm_func_driver_register(bp);
5397         if (rc) {
                PMD_DRV_LOG(ERR, "Failed to register driver\n");
5399                 return -EBUSY;
5400         }
5401
5402         if (BNXT_PF(bp)) {
5403                 if (bp->pdev->max_vfs) {
5404                         rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
5405                         if (rc) {
5406                                 PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
5407                                 return rc;
5408                         }
5409                 } else {
5410                         rc = bnxt_hwrm_allocate_pf_only(bp);
5411                         if (rc) {
                                PMD_DRV_LOG(ERR,
                                            "Failed to allocate PF resources\n");
5414                                 return rc;
5415                         }
5416                 }
5417         }
5418
5419         rc = bnxt_alloc_mem(bp, reconfig_dev);
5420         if (rc)
5421                 return rc;
5422
5423         rc = bnxt_setup_int(bp);
5424         if (rc)
5425                 return rc;
5426
5427         rc = bnxt_request_int(bp);
5428         if (rc)
5429                 return rc;
5430
5431         rc = bnxt_init_ctx_mem(bp);
5432         if (rc) {
5433                 PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n");
5434                 return rc;
5435         }
5436
5437         rc = bnxt_init_locks(bp);
5438         if (rc)
5439                 return rc;
5440
5441         return 0;
5442 }
5443
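/* Devarg parsers: each one validates a single "key=value" pair taken from
 * the device string (see the usage examples in bnxt_parse_dev_args() below)
 * and updates the bnxt context accordingly.
 */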
5444 static int
5445 bnxt_parse_devarg_truflow(__rte_unused const char *key,
5446                           const char *value, void *opaque_arg)
5447 {
5448         struct bnxt *bp = opaque_arg;
5449         unsigned long truflow;
5450         char *end = NULL;
5451
5452         if (!value || !opaque_arg) {
5453                 PMD_DRV_LOG(ERR,
5454                             "Invalid parameter passed to truflow devargs.\n");
5455                 return -EINVAL;
5456         }
5457
5458         truflow = strtoul(value, &end, 10);
5459         if (end == NULL || *end != '\0' ||
5460             (truflow == ULONG_MAX && errno == ERANGE)) {
5461                 PMD_DRV_LOG(ERR,
5462                             "Invalid parameter passed to truflow devargs.\n");
5463                 return -EINVAL;
5464         }
5465
5466         if (BNXT_DEVARG_TRUFLOW_INVALID(truflow)) {
5467                 PMD_DRV_LOG(ERR,
5468                             "Invalid value passed to truflow devargs.\n");
5469                 return -EINVAL;
5470         }
5471
        if (truflow)
                bp->flags |= BNXT_FLAG_TRUFLOW_EN;

        if (BNXT_TRUFLOW_EN(bp))
                PMD_DRV_LOG(INFO, "Host-based truflow feature enabled.\n");
5475
5476         return 0;
5477 }
5478
5479 static int
5480 bnxt_parse_devarg_flow_xstat(__rte_unused const char *key,
5481                              const char *value, void *opaque_arg)
5482 {
5483         struct bnxt *bp = opaque_arg;
5484         unsigned long flow_xstat;
5485         char *end = NULL;
5486
5487         if (!value || !opaque_arg) {
5488                 PMD_DRV_LOG(ERR,
5489                             "Invalid parameter passed to flow_xstat devarg.\n");
5490                 return -EINVAL;
5491         }
5492
5493         flow_xstat = strtoul(value, &end, 10);
5494         if (end == NULL || *end != '\0' ||
5495             (flow_xstat == ULONG_MAX && errno == ERANGE)) {
5496                 PMD_DRV_LOG(ERR,
5497                             "Invalid parameter passed to flow_xstat devarg.\n");
5498                 return -EINVAL;
5499         }
5500
5501         if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) {
5502                 PMD_DRV_LOG(ERR,
5503                             "Invalid value passed to flow_xstat devarg.\n");
5504                 return -EINVAL;
5505         }
5506
        if (flow_xstat)
                bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN;

        if (BNXT_FLOW_XSTATS_EN(bp))
                PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n");
5510
5511         return 0;
5512 }
5513
5514 static int
5515 bnxt_parse_devarg_max_num_kflows(__rte_unused const char *key,
5516                                         const char *value, void *opaque_arg)
5517 {
5518         struct bnxt *bp = opaque_arg;
5519         unsigned long max_num_kflows;
5520         char *end = NULL;
5521
5522         if (!value || !opaque_arg) {
5523                 PMD_DRV_LOG(ERR,
5524                         "Invalid parameter passed to max_num_kflows devarg.\n");
5525                 return -EINVAL;
5526         }
5527
5528         max_num_kflows = strtoul(value, &end, 10);
5529         if (end == NULL || *end != '\0' ||
5530                 (max_num_kflows == ULONG_MAX && errno == ERANGE)) {
5531                 PMD_DRV_LOG(ERR,
5532                         "Invalid parameter passed to max_num_kflows devarg.\n");
5533                 return -EINVAL;
5534         }
5535
5536         if (bnxt_devarg_max_num_kflow_invalid(max_num_kflows)) {
5537                 PMD_DRV_LOG(ERR,
5538                         "Invalid value passed to max_num_kflows devarg.\n");
5539                 return -EINVAL;
5540         }
5541
5542         bp->max_num_kflows = max_num_kflows;
5543         if (bp->max_num_kflows)
                PMD_DRV_LOG(INFO, "max_num_kflows set as %luK.\n",
                                max_num_kflows);
5546
5547         return 0;
5548 }
5549
5550 static void
5551 bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs)
5552 {
5553         struct rte_kvargs *kvlist;
5554
5555         if (devargs == NULL)
5556                 return;
5557
5558         kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args);
5559         if (kvlist == NULL)
5560                 return;
5561
        /*
         * Handler for the "truflow" devarg.
         * Invoked, for example, as: "-w 0000:00:0d.0,host-based-truflow=1".
         */
5566         rte_kvargs_process(kvlist, BNXT_DEVARG_TRUFLOW,
5567                            bnxt_parse_devarg_truflow, bp);
5568
        /*
         * Handler for the "flow_xstat" devarg.
         * Invoked, for example, as: "-w 0000:00:0d.0,flow_xstat=1".
         */
5573         rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT,
5574                            bnxt_parse_devarg_flow_xstat, bp);
5575
        /*
         * Handler for the "max_num_kflows" devarg.
         * Invoked, for example, as: "-w 0000:00:0d.0,max_num_kflows=32".
         */
5580         rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS,
5581                            bnxt_parse_devarg_max_num_kflows, bp);
5582
5583         rte_kvargs_free(kvlist);
5584 }
5585
5586 static int bnxt_alloc_switch_domain(struct bnxt *bp)
5587 {
5588         int rc = 0;
5589
5590         if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
5591                 rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id);
5592                 if (rc)
5593                         PMD_DRV_LOG(ERR,
5594                                     "Failed to alloc switch domain: %d\n", rc);
5595                 else
5596                         PMD_DRV_LOG(INFO,
5597                                     "Switch domain allocated %d\n",
5598                                     bp->switch_domain_id);
5599         }
5600
5601         return rc;
5602 }
5603
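/* Main ethdev init routine: install the device ops, detect the chip variant
 * from the PCI ID, parse devargs, and bring up the FW and driver resources.
 * Secondary processes return early once the ops are wired up.
 */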
5604 static int
5605 bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused)
5606 {
5607         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
5608         static int version_printed;
5609         struct bnxt *bp;
5610         int rc;
5611
5612         if (version_printed++ == 0)
5613                 PMD_DRV_LOG(INFO, "%s\n", bnxt_version);
5614
5615         eth_dev->dev_ops = &bnxt_dev_ops;
5616         eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
5617         eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
5618
5619         /*
5620          * For secondary processes, we don't initialise any further
5621          * as primary has already done this work.
5622          */
5623         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
5624                 return 0;
5625
5626         rte_eth_copy_pci_info(eth_dev, pci_dev);
5627
5628         bp = eth_dev->data->dev_private;
5629
        /* Parse device arguments passed when the DPDK application was started. */
5631         bnxt_parse_dev_args(bp, pci_dev->device.devargs);
5632
5633         bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
5634
5635         if (bnxt_vf_pciid(pci_dev->id.device_id))
5636                 bp->flags |= BNXT_FLAG_VF;
5637
5638         if (bnxt_thor_device(pci_dev->id.device_id))
5639                 bp->flags |= BNXT_FLAG_THOR_CHIP;
5640
5641         if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 ||
5642             pci_dev->id.device_id == BROADCOM_DEV_ID_58804 ||
5643             pci_dev->id.device_id == BROADCOM_DEV_ID_58808 ||
5644             pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF)
5645                 bp->flags |= BNXT_FLAG_STINGRAY;
5646
5647         rc = bnxt_init_board(eth_dev);
5648         if (rc) {
5649                 PMD_DRV_LOG(ERR,
5650                             "Failed to initialize board rc: %x\n", rc);
5651                 return rc;
5652         }
5653
5654         rc = bnxt_alloc_pf_info(bp);
5655         if (rc)
5656                 goto error_free;
5657
5658         rc = bnxt_alloc_link_info(bp);
5659         if (rc)
5660                 goto error_free;
5661
5662         rc = bnxt_alloc_parent_info(bp);
5663         if (rc)
5664                 goto error_free;
5665
5666         rc = bnxt_alloc_hwrm_resources(bp);
5667         if (rc) {
5668                 PMD_DRV_LOG(ERR,
5669                             "Failed to allocate hwrm resource rc: %x\n", rc);
5670                 goto error_free;
5671         }
5672         rc = bnxt_alloc_leds_info(bp);
5673         if (rc)
5674                 goto error_free;
5675
5676         rc = bnxt_alloc_cos_queues(bp);
5677         if (rc)
5678                 goto error_free;
5679
5680         rc = bnxt_init_resources(bp, false);
5681         if (rc)
5682                 goto error_free;
5683
5684         rc = bnxt_alloc_stats_mem(bp);
5685         if (rc)
5686                 goto error_free;
5687
5688         bnxt_alloc_switch_domain(bp);
5689
        /* Tell rte_eth_dev_close() that it should also release the private
         * port resources.
         */
5693         eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
5694
        PMD_DRV_LOG(INFO,
                    DRV_MODULE_NAME " found at mem %" PRIX64 ", node addr %p\n",
                    pci_dev->mem_resource[0].phys_addr,
                    pci_dev->mem_resource[0].addr);
5699
5700         return 0;
5701
5702 error_free:
5703         bnxt_dev_uninit(eth_dev);
5704         return rc;
5705 }
5706
5707
5708 static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx)
5709 {
5710         if (!ctx)
5711                 return;
5712
5713         if (ctx->va)
5714                 rte_free(ctx->va);
5715
5716         ctx->va = NULL;
5717         ctx->dma = RTE_BAD_IOVA;
5718         ctx->ctx_id = BNXT_CTX_VAL_INVAL;
5719 }
5720
5721 static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp)
5722 {
5723         bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
5724                                   CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
5725                                   bp->flow_stat->rx_fc_out_tbl.ctx_id,
5726                                   bp->flow_stat->max_fc,
5727                                   false);
5728
5729         bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
5730                                   CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
5731                                   bp->flow_stat->tx_fc_out_tbl.ctx_id,
5732                                   bp->flow_stat->max_fc,
5733                                   false);
5734
5735         if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5736                 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id);
5737         bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5738
5739         if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5740                 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id);
5741         bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5742
5743         if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5744                 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id);
5745         bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5746
5747         if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5748                 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id);
5749         bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5750 }
5751
5752 static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp)
5753 {
5754         bnxt_unregister_fc_ctx_mem(bp);
5755
5756         bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl);
5757         bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl);
5758         bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl);
5759         bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl);
5760 }
5761
5762 static void bnxt_uninit_ctx_mem(struct bnxt *bp)
5763 {
5764         if (BNXT_FLOW_XSTATS_EN(bp))
5765                 bnxt_uninit_fc_ctx_mem(bp);
5766 }
5767
static void
bnxt_free_error_recovery_info(struct bnxt *bp)
{
        rte_free(bp->recovery_info);
        bp->recovery_info = NULL;
        bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
}

static void
bnxt_uninit_locks(struct bnxt *bp)
{
        pthread_mutex_destroy(&bp->flow_lock);
        pthread_mutex_destroy(&bp->def_cp_lock);
        if (bp->rep_info)
                pthread_mutex_destroy(&bp->rep_info->vfr_lock);
}

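/*
 * Undo bnxt_init_resources().  When reconfig_dev is true the port is
 * only being reconfigured, so the HWRM channel and error-recovery
 * info are preserved for the next init pass.
 */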
static int
bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
{
        int rc;

        bnxt_free_int(bp);
        bnxt_free_mem(bp, reconfig_dev);
        bnxt_hwrm_func_buf_unrgtr(bp);
        rc = bnxt_hwrm_func_driver_unregister(bp, 0);
        bp->flags &= ~BNXT_FLAG_REGISTERED;
        bnxt_free_ctx_mem(bp);
        if (!reconfig_dev) {
                bnxt_free_hwrm_resources(bp);
                bnxt_free_error_recovery_info(bp);
        }

        bnxt_uninit_ctx_mem(bp);

        bnxt_uninit_locks(bp);
        bnxt_free_flow_stats_info(bp);
        bnxt_free_rep_info(bp);
        rte_free(bp->ptp_cfg);
        bp->ptp_cfg = NULL;
        return rc;
}

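/*
 * ethdev uninit hook.  Shared resources may only be freed from the
 * primary process; secondaries get -EPERM.  The actual teardown runs
 * through bnxt_dev_close_op() unless the port was never brought up.
 */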
static int
bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
{
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -EPERM;

        PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");

        if (eth_dev->state != RTE_ETH_DEV_UNUSED)
                bnxt_dev_close_op(eth_dev);

        return 0;
}

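/*
 * Destroy every VF representor port before the backing PF/trusted-VF
 * port itself, since the representors hold references into the
 * backing device's state.
 */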
static int bnxt_pci_remove_dev_with_reps(struct rte_eth_dev *eth_dev)
{
        struct bnxt *bp = eth_dev->data->dev_private;
        struct rte_eth_dev *vf_rep_eth_dev;
        int ret = 0, i;

        if (!bp)
                return -EINVAL;

        for (i = 0; i < bp->num_reps; i++) {
                vf_rep_eth_dev = bp->rep_info[i].vfr_eth_dev;
                if (!vf_rep_eth_dev)
                        continue;
                rte_eth_dev_destroy(vf_rep_eth_dev, bnxt_vf_representor_uninit);
        }
        ret = rte_eth_dev_destroy(eth_dev, bnxt_dev_uninit);

        return ret;
}

static void bnxt_free_rep_info(struct bnxt *bp)
{
        rte_free(bp->rep_info);
        bp->rep_info = NULL;
        rte_free(bp->cfa_code_map);
        bp->cfa_code_map = NULL;
}

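/*
 * Lazily allocate the per-VF representor table and the CFA-code to
 * VF-index map used to steer completions to the right representor.
 * Idempotent: returns 0 immediately if the table already exists.
 */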
static int bnxt_init_rep_info(struct bnxt *bp)
{
        int i = 0, rc;

        if (bp->rep_info)
                return 0;

        bp->rep_info = rte_zmalloc("bnxt_rep_info",
                                   sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS,
                                   0);
        if (!bp->rep_info) {
                PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n");
                return -ENOMEM;
        }
        bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map",
                                       sizeof(*bp->cfa_code_map) *
                                       BNXT_MAX_CFA_CODE, 0);
        if (!bp->cfa_code_map) {
                PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n");
                bnxt_free_rep_info(bp);
                return -ENOMEM;
        }

        for (i = 0; i < BNXT_MAX_CFA_CODE; i++)
                bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID;

        rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL);
        if (rc) {
                PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n");
                bnxt_free_rep_info(bp);
                return rc;
        }
        return rc;
}

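/*
 * Create one ethdev per requested VF representor.  Reached from
 * bnxt_pci_probe() when the PCI devargs carry a representor list,
 * e.g. (illustrative PCI address):
 *
 *     0000:06:02.0,representor=[0-3]
 *
 * Each port is named net_<pci_bdf>_representor_<vf_id>.
 */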
static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev,
                               struct rte_eth_devargs eth_da,
                               struct rte_eth_dev *backing_eth_dev)
{
        struct rte_eth_dev *vf_rep_eth_dev;
        char name[RTE_ETH_NAME_MAX_LEN];
        struct bnxt *backing_bp;
        uint16_t num_rep;
        int i, ret = 0;

        num_rep = eth_da.nb_representor_ports;
        if (num_rep > BNXT_MAX_VF_REPS) {
                PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n",
                            num_rep, BNXT_MAX_VF_REPS);
                return -EINVAL;
        }

        if (num_rep > RTE_MAX_ETHPORTS) {
                PMD_DRV_LOG(ERR,
                            "nb_representor_ports = %d > %d MAX ETHPORTS\n",
                            num_rep, RTE_MAX_ETHPORTS);
                return -EINVAL;
        }

        backing_bp = backing_eth_dev->data->dev_private;

        if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) {
                PMD_DRV_LOG(ERR,
                            "Not a PF or trusted VF. No Representor support\n");
                /* Returning an error here would break applications that
                 * do not handle it, so report success instead.
                 */
                return 0;
        }

        if (bnxt_init_rep_info(backing_bp))
                return 0;

        for (i = 0; i < num_rep; i++) {
                struct bnxt_vf_representor representor = {
                        .vf_id = eth_da.representor_ports[i],
                        .switch_domain_id = backing_bp->switch_domain_id,
                        .parent_dev = backing_eth_dev
                };

                if (representor.vf_id >= BNXT_MAX_VF_REPS) {
                        PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n",
                                    representor.vf_id, BNXT_MAX_VF_REPS);
                        continue;
                }

                /* Representor port name: net_<bdf>_representor_<vf_id> */
                snprintf(name, sizeof(name), "net_%s_representor_%d",
                         pci_dev->device.name, eth_da.representor_ports[i]);

                ret = rte_eth_dev_create(&pci_dev->device, name,
                                         sizeof(struct bnxt_vf_representor),
                                         NULL, NULL,
                                         bnxt_vf_representor_init,
                                         &representor);

                if (!ret) {
                        vf_rep_eth_dev = rte_eth_dev_allocated(name);
                        if (!vf_rep_eth_dev) {
                                PMD_DRV_LOG(ERR, "Failed to find the eth_dev"
                                            " for VF-Rep: %s\n", name);
                                bnxt_pci_remove_dev_with_reps(backing_eth_dev);
                                ret = -ENODEV;
                                return ret;
                        }
                        backing_bp->rep_info[representor.vf_id].vfr_eth_dev =
                                vf_rep_eth_dev;
                        backing_bp->num_reps++;
                } else {
                        PMD_DRV_LOG(ERR,
                                    "Failed to create bnxt VF representor %s\n",
                                    name);
                        bnxt_pci_remove_dev_with_reps(backing_eth_dev);
                }
        }

        return ret;
}

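/*
 * PCI probe entry point.  Representor devargs are parsed together
 * with the rest of the devargs, so a single probe (or a re-probe via
 * RTE_PCI_DRV_PROBE_AGAIN) can create both the backing port and its
 * representors.
 */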
static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                          struct rte_pci_device *pci_dev)
{
        struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
        struct rte_eth_dev *backing_eth_dev;
        uint16_t num_rep;
        int ret = 0;

        if (pci_dev->device.devargs) {
                ret = rte_eth_devargs_parse(pci_dev->device.devargs->args,
                                            &eth_da);
                if (ret)
                        return ret;
        }

        num_rep = eth_da.nb_representor_ports;
        PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n", num_rep);

        /* We may get here after the first-level probe has already run
         * as part of application bringup (e.g. OVS-DPDK vswitchd), so
         * first check whether an eth_dev is already allocated for the
         * backing device (PF/trusted VF).
         */
        backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
        if (backing_eth_dev == NULL) {
                ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                                         sizeof(struct bnxt),
                                         eth_dev_pci_specific_init, pci_dev,
                                         bnxt_dev_init, NULL);

                if (ret || !num_rep)
                        return ret;

                backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
        }

        /* Now probe the representor ports */
        ret = bnxt_rep_port_probe(pci_dev, eth_da, backing_eth_dev);

        return ret;
}

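/*
 * PCI remove entry point.  In the primary process representor and
 * backing ports take different uninit callbacks; secondary processes
 * only release their local mapping of the device.
 */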
static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *eth_dev;

        eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
        if (!eth_dev)
                return 0; /* Invoked typically only by OVS-DPDK; by the
                           * time we get here the eth_dev has already
                           * been deleted by rte_eth_dev_close(), so
                           * returning 0 lets the removal complete
                           * cleanly.
                           */

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
                        return rte_eth_dev_destroy(eth_dev,
                                                   bnxt_vf_representor_uninit);
                else
                        return rte_eth_dev_destroy(eth_dev,
                                                   bnxt_dev_uninit);
        } else {
                return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
        }
}

static struct rte_pci_driver bnxt_rte_pmd = {
        .id_table = bnxt_pci_id_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
                        RTE_PCI_DRV_PROBE_AGAIN, /* Needed in case of VF-REPs
                                                  * and OVS-DPDK
                                                  */
        .probe = bnxt_pci_probe,
        .remove = bnxt_pci_remove,
};

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
        if (strcmp(dev->device->driver->name, drv->driver.name))
                return false;

        return true;
}

bool is_bnxt_supported(struct rte_eth_dev *dev)
{
        return is_device_supported(dev, &bnxt_rte_pmd);
}

RTE_LOG_REGISTER(bnxt_logtype_driver, pmd.net.bnxt.driver, NOTICE);
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");