85746963e2085ff286ec378c2888eb868d522628
[dpdk.git] / drivers / net / bnxt / bnxt_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) Broadcom Limited.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Broadcom Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <inttypes.h>
35 #include <stdbool.h>
36
37 #include <rte_dev.h>
38 #include <rte_ethdev.h>
39 #include <rte_ethdev_pci.h>
40 #include <rte_malloc.h>
41 #include <rte_cycles.h>
42
43 #include "bnxt.h"
44 #include "bnxt_cpr.h"
45 #include "bnxt_filter.h"
46 #include "bnxt_hwrm.h"
47 #include "bnxt_irq.h"
48 #include "bnxt_ring.h"
49 #include "bnxt_rxq.h"
50 #include "bnxt_rxr.h"
51 #include "bnxt_stats.h"
52 #include "bnxt_txq.h"
53 #include "bnxt_txr.h"
54 #include "bnxt_vnic.h"
55 #include "hsi_struct_def_dpdk.h"
56
57 #define DRV_MODULE_NAME         "bnxt"
58 static const char bnxt_version[] =
59         "Broadcom Cumulus driver " DRV_MODULE_NAME "\n";
60
61 #define PCI_VENDOR_ID_BROADCOM 0x14E4
62
63 #define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
64 #define BROADCOM_DEV_ID_57414_VF 0x16c1
65 #define BROADCOM_DEV_ID_57301 0x16c8
66 #define BROADCOM_DEV_ID_57302 0x16c9
67 #define BROADCOM_DEV_ID_57304_PF 0x16ca
68 #define BROADCOM_DEV_ID_57304_VF 0x16cb
69 #define BROADCOM_DEV_ID_57417_MF 0x16cc
70 #define BROADCOM_DEV_ID_NS2 0x16cd
71 #define BROADCOM_DEV_ID_57311 0x16ce
72 #define BROADCOM_DEV_ID_57312 0x16cf
73 #define BROADCOM_DEV_ID_57402 0x16d0
74 #define BROADCOM_DEV_ID_57404 0x16d1
75 #define BROADCOM_DEV_ID_57406_PF 0x16d2
76 #define BROADCOM_DEV_ID_57406_VF 0x16d3
77 #define BROADCOM_DEV_ID_57402_MF 0x16d4
78 #define BROADCOM_DEV_ID_57407_RJ45 0x16d5
79 #define BROADCOM_DEV_ID_57412 0x16d6
80 #define BROADCOM_DEV_ID_57414 0x16d7
81 #define BROADCOM_DEV_ID_57416_RJ45 0x16d8
82 #define BROADCOM_DEV_ID_57417_RJ45 0x16d9
83 #define BROADCOM_DEV_ID_5741X_VF 0x16dc
84 #define BROADCOM_DEV_ID_57412_MF 0x16de
85 #define BROADCOM_DEV_ID_57314 0x16df
86 #define BROADCOM_DEV_ID_57317_RJ45 0x16e0
87 #define BROADCOM_DEV_ID_5731X_VF 0x16e1
88 #define BROADCOM_DEV_ID_57417_SFP 0x16e2
89 #define BROADCOM_DEV_ID_57416_SFP 0x16e3
90 #define BROADCOM_DEV_ID_57317_SFP 0x16e4
91 #define BROADCOM_DEV_ID_57404_MF 0x16e7
92 #define BROADCOM_DEV_ID_57406_MF 0x16e8
93 #define BROADCOM_DEV_ID_57407_SFP 0x16e9
94 #define BROADCOM_DEV_ID_57407_MF 0x16ea
95 #define BROADCOM_DEV_ID_57414_MF 0x16ec
96 #define BROADCOM_DEV_ID_57416_MF 0x16ee
97
/*
 * PCI device IDs (Broadcom vendor 0x14e4) claimed by this driver.
 * The table is terminated by a zeroed sentinel entry.
 */
static const struct rte_pci_id bnxt_pci_id_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
	{ .vendor_id = 0, /* sentinel */ },
};
135
136 #define BNXT_ETH_RSS_SUPPORT (  \
137         ETH_RSS_IPV4 |          \
138         ETH_RSS_NONFRAG_IPV4_TCP |      \
139         ETH_RSS_NONFRAG_IPV4_UDP |      \
140         ETH_RSS_IPV6 |          \
141         ETH_RSS_NONFRAG_IPV6_TCP |      \
142         ETH_RSS_NONFRAG_IPV6_UDP)
143
144 /***********************/
145
146 /*
147  * High level utility functions
148  */
149
/*
 * Release all driver-private memory: filters, VNIC attributes and
 * state, statistics, TX/RX rings, and the default completion ring.
 * Inverse of bnxt_alloc_mem(); also used on its error path, so it
 * must tolerate partially-allocated state.
 */
static void bnxt_free_mem(struct bnxt *bp)
{
	bnxt_free_filter_mem(bp);
	bnxt_free_vnic_attributes(bp);
	bnxt_free_vnic_mem(bp);

	bnxt_free_stats(bp);
	bnxt_free_tx_rings(bp);
	bnxt_free_rx_rings(bp);
	bnxt_free_def_cp_ring(bp);
}
161
162 static int bnxt_alloc_mem(struct bnxt *bp)
163 {
164         int rc;
165
166         /* Default completion ring */
167         rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY);
168         if (rc)
169                 goto alloc_mem_err;
170
171         rc = bnxt_alloc_rings(bp, 0, NULL, NULL,
172                               bp->def_cp_ring, "def_cp");
173         if (rc)
174                 goto alloc_mem_err;
175
176         rc = bnxt_alloc_vnic_mem(bp);
177         if (rc)
178                 goto alloc_mem_err;
179
180         rc = bnxt_alloc_vnic_attributes(bp);
181         if (rc)
182                 goto alloc_mem_err;
183
184         rc = bnxt_alloc_filter_mem(bp);
185         if (rc)
186                 goto alloc_mem_err;
187
188         return 0;
189
190 alloc_mem_err:
191         bnxt_free_mem(bp);
192         return rc;
193 }
194
/*
 * Program the chip through HWRM: allocate stat contexts, rings and
 * ring groups, configure multi-queue RX, then bring up every VNIC
 * (alloc, RSS context, config, MAC filters, RSS table), program the
 * L2 RX mask on the default VNIC, and finally query/apply the link
 * configuration.
 *
 * Returns 0 on success; on any failure, logs the HWRM status, frees
 * all HWRM resources and returns the error code.
 */
static int bnxt_init_chip(struct bnxt *bp)
{
	unsigned int i, rss_idx, fw_idx;
	struct rte_eth_link new;
	int rc;

	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_hwrm_rings(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_mq_rx_configure(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc);
		goto err_out;
	}

	/* VNIC configuration */
	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		rc = bnxt_hwrm_vnic_alloc(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD, "HWRM vnic %d alloc failure rc: %x\n",
				i, rc);
			goto err_out;
		}

		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD,
				"HWRM vnic %d ctx alloc failure rc: %x\n",
				i, rc);
			goto err_out;
		}

		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD, "HWRM vnic %d cfg failure rc: %x\n",
				i, rc);
			goto err_out;
		}

		rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD,
				"HWRM vnic %d filter failure rc: %x\n",
				i, rc);
			goto err_out;
		}
		if (vnic->rss_table && vnic->hash_type) {
			/*
			 * Fill the RSS hash & redirection table with
			 * ring group ids for all VNICs
			 */
			for (rss_idx = 0, fw_idx = 0;
			     rss_idx < HW_HASH_INDEX_SIZE;
			     rss_idx++, fw_idx++) {
				/* Wrap back to the first ring group when we
				 * run out of valid firmware group ids.
				 */
				if (vnic->fw_grp_ids[fw_idx] ==
				    INVALID_HW_RING_ID)
					fw_idx = 0;
				vnic->rss_table[rss_idx] =
						vnic->fw_grp_ids[fw_idx];
			}
			rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
			if (rc) {
				RTE_LOG(ERR, PMD,
					"HWRM vnic %d set RSS failure rc: %x\n",
					i, rc);
				goto err_out;
			}
		}
	}
	/* RX mask is applied to the default (first) VNIC only. */
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0]);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"HWRM cfa l2 rx mask failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_get_hwrm_link_config(bp, &new);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM Get link config failure rc: %x\n", rc);
		goto err_out;
	}

	/* Only force the link up if firmware reports it down. */
	if (!bp->link_info.link_up) {
		rc = bnxt_set_hwrm_link_config(bp, true);
		if (rc) {
			RTE_LOG(ERR, PMD,
				"HWRM link config failure rc: %x\n", rc);
			goto err_out;
		}
	}

	return 0;

err_out:
	bnxt_free_all_hwrm_resources(bp);

	return rc;
}
310
/*
 * Tear down the NIC: release all HWRM resources, then drop all
 * software filters and VNICs.  Inverse of bnxt_init_nic().
 * Always returns 0.
 */
static int bnxt_shutdown_nic(struct bnxt *bp)
{
	bnxt_free_all_hwrm_resources(bp);
	bnxt_free_all_filters(bp);
	bnxt_free_all_vnics(bp);
	return 0;
}
318
/*
 * Initialize the NIC: set up software state (ring groups, VNICs,
 * filters) and then program the chip via HWRM.
 *
 * Returns 0 on success or the error code from bnxt_init_chip().
 */
static int bnxt_init_nic(struct bnxt *bp)
{
	/* Software-side bookkeeping first; chip programming depends on it. */
	bnxt_init_ring_grps(bp);
	bnxt_init_vnics(bp);
	bnxt_init_filters(bp);

	return bnxt_init_chip(bp);
}
333
334 /*
335  * Device configuration and status function
336  */
337
/*
 * dev_infos_get callback: report device capabilities and defaults.
 *
 * Fills MAC/VF limits, queue counts, offload capabilities, default
 * RX/TX ring configuration, and a derived VMDq pool/queue split.
 */
static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
				  struct rte_eth_dev_info *dev_info)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint16_t max_vnics, i, j, vpool, vrxq;

	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* MAC Specifics */
	dev_info->max_mac_addrs = MAX_NUM_MAC_ADDR;
	dev_info->max_hash_mac_addrs = 0;

	/* PF/VF specifics */
	if (BNXT_PF(bp))
		dev_info->max_vfs = bp->pdev->max_vfs;
	dev_info->max_rx_queues = bp->max_rx_rings;
	dev_info->max_tx_queues = bp->max_tx_rings;
	dev_info->reta_size = bp->max_rsscos_ctx;
	max_vnics = bp->max_vnics;

	/* Fast path specifics */
	dev_info->min_rx_bufsize = 1;
	/* Max L2 frame: MTU plus Ethernet header, CRC and one VLAN tag. */
	dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
				  + VLAN_TAG_SIZE;
	dev_info->rx_offload_capa = 0;
	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
					DEV_TX_OFFLOAD_TCP_CKSUM |
					DEV_TX_OFFLOAD_UDP_CKSUM |
					DEV_TX_OFFLOAD_TCP_TSO;

	/* *INDENT-OFF* */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = 8,
			.hthresh = 8,
			.wthresh = 0,
		},
		.rx_free_thresh = 32,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = 32,
			.hthresh = 0,
			.wthresh = 0,
		},
		.tx_free_thresh = 32,
		.tx_rs_thresh = 32,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
	};
	/* NOTE(review): mutating eth_dev conf from an info-get callback is
	 * a side effect callers may not expect — confirm it is intended.
	 */
	eth_dev->data->dev_conf.intr_conf.lsc = 1;

	/* *INDENT-ON* */

	/*
	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
	 *       need further investigation.
	 */

	/* VMDq resources: search for the largest pool count (starting at
	 * 64, halved each step) and queue count (starting at 128, halved
	 * each step) that fit within max_vnics and max_rx_queues.
	 */
	vpool = 64; /* ETH_64_POOLS */
	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (dev_info->max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					goto found;
				}
			}
			/* Not enough resources to support VMDq */
			break;
		}
	}
	/* Not enough resources to support VMDq */
	vpool = 0;
	vrxq = 0;
found:
	dev_info->max_vmdq_pools = vpool;
	dev_info->vmdq_queue_num = vrxq;

	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;
}
425
426 /* Configure the device based on the configuration provided */
427 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
428 {
429         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
430
431         bp->rx_queues = (void *)eth_dev->data->rx_queues;
432         bp->tx_queues = (void *)eth_dev->data->tx_queues;
433
434         /* Inherit new configurations */
435         bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
436         bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
437         bp->rx_cp_nr_rings = bp->rx_nr_rings;
438         bp->tx_cp_nr_rings = bp->tx_nr_rings;
439
440         if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
441                 eth_dev->data->mtu =
442                                 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
443                                 ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
444         return 0;
445 }
446
/*
 * Atomically overwrite the device's cached link state with *link.
 *
 * NOTE(review): this treats struct rte_eth_link as a single 64-bit
 * word (pointer casts into rte_atomic64_cmpset) — it assumes the
 * struct is exactly 8 bytes and suitably aligned; verify against the
 * rte_eth_link definition for this DPDK version.
 * NOTE(review): the "expected" value is *dst read non-atomically just
 * before the cmpset, so a concurrent writer can make the cmpset fail;
 * in that case this helper returns 1 and does NOT retry.
 *
 * Returns 0 on success, 1 if the compare-and-set failed.
 */
static inline int
rte_bnxt_atomic_write_link_status(struct rte_eth_dev *eth_dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &eth_dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return 1;

	return 0;
}
460
461 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
462 {
463         struct rte_eth_link *link = &eth_dev->data->dev_link;
464
465         if (link->link_status)
466                 RTE_LOG(INFO, PMD, "Port %d Link Up - speed %u Mbps - %s\n",
467                         (uint8_t)(eth_dev->data->port_id),
468                         (uint32_t)link->link_speed,
469                         (link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
470                         ("full-duplex") : ("half-duplex\n"));
471         else
472                 RTE_LOG(INFO, PMD, "Port %d Link Down\n",
473                         (uint8_t)(eth_dev->data->port_id));
474 }
475
/*
 * Link-state-change interrupt hook: log the new link state.
 * Always returns 0.
 */
static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
{
	bnxt_print_link_info(eth_dev);
	return 0;
}
481
482 static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
483 {
484         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
485         int rc;
486
487         bp->dev_stopped = 0;
488
489         rc = bnxt_init_nic(bp);
490         if (rc)
491                 goto error;
492
493         bnxt_link_update_op(eth_dev, 0);
494         return 0;
495
496 error:
497         bnxt_shutdown_nic(bp);
498         bnxt_free_tx_mbufs(bp);
499         bnxt_free_rx_mbufs(bp);
500         return rc;
501 }
502
503 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
504 {
505         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
506
507         eth_dev->data->dev_link.link_status = 1;
508         bnxt_set_hwrm_link_config(bp, true);
509         return 0;
510 }
511
512 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
513 {
514         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
515
516         eth_dev->data->dev_link.link_status = 0;
517         bnxt_set_hwrm_link_config(bp, false);
518         return 0;
519 }
520
521 /* Unload the driver, release resources */
522 static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
523 {
524         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
525
526         if (bp->eth_dev->data->dev_started) {
527                 /* TBD: STOP HW queues DMA */
528                 eth_dev->data->dev_link.link_status = 0;
529         }
530         bnxt_set_hwrm_link_config(bp, false);
531         bnxt_shutdown_nic(bp);
532         bp->dev_stopped = 1;
533 }
534
535 static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
536 {
537         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
538
539         if (bp->dev_stopped == 0)
540                 bnxt_dev_stop_op(eth_dev);
541
542         bnxt_free_tx_mbufs(bp);
543         bnxt_free_rx_mbufs(bp);
544         bnxt_free_mem(bp);
545         if (eth_dev->data->mac_addrs != NULL) {
546                 rte_free(eth_dev->data->mac_addrs);
547                 eth_dev->data->mac_addrs = NULL;
548         }
549         if (bp->grp_info != NULL) {
550                 rte_free(bp->grp_info);
551                 bp->grp_info = NULL;
552         }
553 }
554
/*
 * mac_addr_remove callback: remove the MAC filter at 'index' from
 * every VNIC in every filter-flow pool the address was added to
 * (per eth_dev's mac_pool_sel bitmap), clear it in hardware, and
 * return the filter object to the free list.
 */
static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
				    uint32_t index)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	int i;

	/*
	 * Loop through all VNICs from the specified filter flow pools to
	 * remove the corresponding MAC addr filter
	 */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		if (!(pool_mask & (1ULL << i)))
			continue;

		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			/* Manual walk with a saved 'next' pointer so the
			 * current filter can be unlinked mid-iteration.
			 */
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);
				if (filter->mac_index == index) {
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
					/* Clear in HW before recycling. */
					bnxt_hwrm_clear_filter(bp, filter);
					filter->mac_index = INVALID_MAC_INDEX;
					memset(&filter->l2_addr, 0,
					       ETHER_ADDR_LEN);
					STAILQ_INSERT_TAIL(
							&bp->free_filter_list,
							filter, next);
				}
				filter = temp_filter;
			}
		}
	}
}
592
/*
 * mac_addr_add callback: attach 'mac_addr' at slot 'index' to the
 * first VNIC of filter-flow pool 'pool' and program it into hardware.
 *
 * Returns 0 on success; -ENOTSUP on a VF (VFs cannot add MACs),
 * -EINVAL if the pool has no VNIC or the index is already in use,
 * -ENODEV if no filter object is available, otherwise the HWRM
 * status from bnxt_hwrm_set_filter().
 *
 * NOTE(review): if bnxt_hwrm_set_filter() fails, the filter stays
 * linked into vnic->filter with the new MAC — consider unlinking it
 * on the error path.
 */
static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
				struct ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
	struct bnxt_filter_info *filter;

	if (BNXT_VF(bp)) {
		RTE_LOG(ERR, PMD, "Cannot add MAC address to a VF interface\n");
		return -ENOTSUP;
	}

	if (!vnic) {
		RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
		return -EINVAL;
	}
	/* Attach requested MAC address to the new l2_filter */
	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->mac_index == index) {
			RTE_LOG(ERR, PMD,
				"MAC addr already existed for pool %d\n", pool);
			return -EINVAL;
		}
	}
	filter = bnxt_alloc_filter(bp);
	if (!filter) {
		RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
		return -ENODEV;
	}
	STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
	filter->mac_index = index;
	memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
	return bnxt_hwrm_set_filter(bp, vnic, filter);
}
628
/*
 * link_update callback: query firmware for the current link state
 * and, if status or speed changed, store it in the device's cached
 * link state and log it.
 *
 * With wait_to_complete set, polls up to BNXT_LINK_WAIT_CNT times at
 * BNXT_LINK_WAIT_INTERVAL ms intervals until the link comes up.
 *
 * NOTE(review): rte_delay_ms() runs before the !wait_to_complete
 * check, so even a non-blocking query pays one interval of delay.
 * NOTE(review): the change detection compares only link_status and
 * link_speed; a duplex-only change is not written back.
 */
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
	int rc = 0;
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_link new;
	unsigned int cnt = BNXT_LINK_WAIT_CNT;

	memset(&new, 0, sizeof(new));
	do {
		/* Retrieve link info from hardware */
		rc = bnxt_get_hwrm_link_config(bp, &new);
		if (rc) {
			/* Report a fallback state when the query fails. */
			new.link_speed = ETH_LINK_SPEED_100M;
			new.link_duplex = ETH_LINK_FULL_DUPLEX;
			RTE_LOG(ERR, PMD,
				"Failed to retrieve link rc = 0x%x!\n", rc);
			goto out;
		}
		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);

		if (!wait_to_complete)
			break;
	} while (!new.link_status && cnt--);

out:
	/* Timed out or success */
	if (new.link_status != eth_dev->data->dev_link.link_status ||
	new.link_speed != eth_dev->data->dev_link.link_speed) {
		rte_bnxt_atomic_write_link_status(eth_dev, &new);
		bnxt_print_link_info(eth_dev);
	}

	return rc;
}
663
664 static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
665 {
666         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
667         struct bnxt_vnic_info *vnic;
668
669         if (bp->vnic_info == NULL)
670                 return;
671
672         vnic = &bp->vnic_info[0];
673
674         vnic->flags |= BNXT_VNIC_INFO_PROMISC;
675         bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
676 }
677
678 static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
679 {
680         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
681         struct bnxt_vnic_info *vnic;
682
683         if (bp->vnic_info == NULL)
684                 return;
685
686         vnic = &bp->vnic_info[0];
687
688         vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
689         bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
690 }
691
692 static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
693 {
694         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
695         struct bnxt_vnic_info *vnic;
696
697         if (bp->vnic_info == NULL)
698                 return;
699
700         vnic = &bp->vnic_info[0];
701
702         vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
703         bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
704 }
705
706 static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
707 {
708         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
709         struct bnxt_vnic_info *vnic;
710
711         if (bp->vnic_info == NULL)
712                 return;
713
714         vnic = &bp->vnic_info[0];
715
716         vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
717         bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
718 }
719
/*
 * reta_update callback: program the RSS redirection table into every
 * VNIC in every filter-flow pool.  Only valid when RSS multi-queue
 * mode is enabled and reta_size matches the hardware's fixed table
 * size.
 *
 * NOTE(review): the memcpy copies reta_size raw BYTES starting at
 * reta_conf — i.e. rte_eth_rss_reta_entry64 structs (mask + 64-bit
 * layout), not reta_size converted 16-bit entries.  This mirrors the
 * "u64 to u16" concern already flagged in bnxt_reta_query_op();
 * verify the intended layout of vnic->rss_table.
 */
static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic;
	int i;

	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
		return -EINVAL;

	if (reta_size != HW_HASH_INDEX_SIZE) {
		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
			"(%d) must equal the size supported by the hardware "
			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
		return -EINVAL;
	}
	/* Update the RSS VNIC(s) */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			memcpy(vnic->rss_table, reta_conf, reta_size);

			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		}
	}
	return 0;
}
748
/*
 * reta_query callback: copy the default VNIC's RSS redirection table
 * back to the caller.  Fails with -EINVAL if the default VNIC or its
 * table is missing, or if reta_size does not match the hardware's
 * fixed table size.
 *
 * NOTE(review): the trailing interrupt/LSC block is a side effect one
 * would not expect in a read-only query op — confirm it belongs here
 * rather than in the start/interrupt-setup path.
 */
static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	struct rte_intr_handle *intr_handle
		= &bp->pdev->intr_handle;

	/* Retrieve from the default VNIC */
	if (!vnic)
		return -EINVAL;
	if (!vnic->rss_table)
		return -EINVAL;

	if (reta_size != HW_HASH_INDEX_SIZE) {
		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
			"(%d) must equal the size supported by the hardware "
			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
		return -EINVAL;
	}
	/* EW - need to revisit here copying from u64 to u16 */
	memcpy(reta_conf, vnic->rss_table, reta_size);

	if (rte_intr_allow_others(intr_handle)) {
		if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
			bnxt_dev_lsc_intr_setup(eth_dev);
	}

	return 0;
}
780
/*
 * rss_hash_update callback: translate the requested rte rss_hf bits
 * into HWRM hash-type flags, optionally install a new hash key, and
 * reprogram RSS on every VNIC in every filter-flow pool.
 *
 * Returns -EINVAL when the request contradicts the RSS mode chosen at
 * dev_configure time (hash requested with RSS off, or empty hash with
 * RSS on); otherwise 0.
 */
static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
				   struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic;
	uint16_t hash_type = 0;
	int i;

	/*
	 * If RSS enablement were different than dev_configure,
	 * then return -EINVAL
	 */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		if (!rss_conf->rss_hf)
			return -EINVAL;
	} else {
		if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
			return -EINVAL;
	}
	/* Map each supported rte RSS offload bit to its HWRM hash type. */
	if (rss_conf->rss_hf & ETH_RSS_IPV4)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_IPV6)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;

	/* Update the RSS VNIC(s) */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			vnic->hash_type = hash_type;

			/*
			 * Use the supplied key if the key length is
			 * acceptable and the rss_key is not NULL
			 */
			if (rss_conf->rss_key &&
			    rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
				memcpy(vnic->rss_hash_key, rss_conf->rss_key,
				       rss_conf->rss_key_len);

			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		}
	}
	return 0;
}
833
834 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
835                                      struct rte_eth_rss_conf *rss_conf)
836 {
837         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
838         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
839         int len;
840         uint32_t hash_types;
841
842         /* RSS configuration is the same for all VNICs */
843         if (vnic && vnic->rss_hash_key) {
844                 if (rss_conf->rss_key) {
845                         len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
846                               rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
847                         memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
848                 }
849
850                 hash_types = vnic->hash_type;
851                 rss_conf->rss_hf = 0;
852                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
853                         rss_conf->rss_hf |= ETH_RSS_IPV4;
854                         hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
855                 }
856                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
857                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
858                         hash_types &=
859                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
860                 }
861                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
862                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
863                         hash_types &=
864                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
865                 }
866                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
867                         rss_conf->rss_hf |= ETH_RSS_IPV6;
868                         hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
869                 }
870                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
871                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
872                         hash_types &=
873                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
874                 }
875                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
876                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
877                         hash_types &=
878                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
879                 }
880                 if (hash_types) {
881                         RTE_LOG(ERR, PMD,
882                                 "Unknwon RSS config from firmware (%08x), RSS disabled",
883                                 vnic->hash_type);
884                         return -ENOTSUP;
885                 }
886         } else {
887                 rss_conf->rss_hf = 0;
888         }
889         return 0;
890 }
891
892 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
893                                struct rte_eth_fc_conf *fc_conf)
894 {
895         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
896         struct rte_eth_link link_info;
897         int rc;
898
899         rc = bnxt_get_hwrm_link_config(bp, &link_info);
900         if (rc)
901                 return rc;
902
903         memset(fc_conf, 0, sizeof(*fc_conf));
904         if (bp->link_info.auto_pause)
905                 fc_conf->autoneg = 1;
906         switch (bp->link_info.pause) {
907         case 0:
908                 fc_conf->mode = RTE_FC_NONE;
909                 break;
910         case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
911                 fc_conf->mode = RTE_FC_TX_PAUSE;
912                 break;
913         case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
914                 fc_conf->mode = RTE_FC_RX_PAUSE;
915                 break;
916         case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
917                         HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
918                 fc_conf->mode = RTE_FC_FULL;
919                 break;
920         }
921         return 0;
922 }
923
924 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
925                                struct rte_eth_fc_conf *fc_conf)
926 {
927         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
928
929         if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
930                 RTE_LOG(ERR, PMD, "Flow Control Settings cannot be modified\n");
931                 return -ENOTSUP;
932         }
933
934         switch (fc_conf->mode) {
935         case RTE_FC_NONE:
936                 bp->link_info.auto_pause = 0;
937                 bp->link_info.force_pause = 0;
938                 break;
939         case RTE_FC_RX_PAUSE:
940                 if (fc_conf->autoneg) {
941                         bp->link_info.auto_pause =
942                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
943                         bp->link_info.force_pause = 0;
944                 } else {
945                         bp->link_info.auto_pause = 0;
946                         bp->link_info.force_pause =
947                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
948                 }
949                 break;
950         case RTE_FC_TX_PAUSE:
951                 if (fc_conf->autoneg) {
952                         bp->link_info.auto_pause =
953                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
954                         bp->link_info.force_pause = 0;
955                 } else {
956                         bp->link_info.auto_pause = 0;
957                         bp->link_info.force_pause =
958                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
959                 }
960                 break;
961         case RTE_FC_FULL:
962                 if (fc_conf->autoneg) {
963                         bp->link_info.auto_pause =
964                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
965                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
966                         bp->link_info.force_pause = 0;
967                 } else {
968                         bp->link_info.auto_pause = 0;
969                         bp->link_info.force_pause =
970                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
971                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
972                 }
973                 break;
974         }
975         return bnxt_set_hwrm_link_config(bp, true);
976 }
977
978 /*
979  * Initialization
980  */
981
/*
 * Ethdev callback table for the bnxt PMD; installed on each port in
 * bnxt_dev_init().  Callbacks not listed here are unsupported.
 */
static const struct eth_dev_ops bnxt_dev_ops = {
	.dev_infos_get = bnxt_dev_info_get_op,
	.dev_close = bnxt_dev_close_op,
	.dev_configure = bnxt_dev_configure_op,
	.dev_start = bnxt_dev_start_op,
	.dev_stop = bnxt_dev_stop_op,
	.dev_set_link_up = bnxt_dev_set_link_up_op,
	.dev_set_link_down = bnxt_dev_set_link_down_op,
	.stats_get = bnxt_stats_get_op,
	.stats_reset = bnxt_stats_reset_op,
	.rx_queue_setup = bnxt_rx_queue_setup_op,
	.rx_queue_release = bnxt_rx_queue_release_op,
	.tx_queue_setup = bnxt_tx_queue_setup_op,
	.tx_queue_release = bnxt_tx_queue_release_op,
	.reta_update = bnxt_reta_update_op,
	.reta_query = bnxt_reta_query_op,
	.rss_hash_update = bnxt_rss_hash_update_op,
	.rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
	.link_update = bnxt_link_update_op,
	.promiscuous_enable = bnxt_promiscuous_enable_op,
	.promiscuous_disable = bnxt_promiscuous_disable_op,
	.allmulticast_enable = bnxt_allmulticast_enable_op,
	.allmulticast_disable = bnxt_allmulticast_disable_op,
	.mac_addr_add = bnxt_mac_addr_add_op,
	.mac_addr_remove = bnxt_mac_addr_remove_op,
	.flow_ctrl_get = bnxt_flow_ctrl_get_op,
	.flow_ctrl_set = bnxt_flow_ctrl_set_op,
};
1010
1011 static bool bnxt_vf_pciid(uint16_t id)
1012 {
1013         if (id == BROADCOM_DEV_ID_57304_VF ||
1014             id == BROADCOM_DEV_ID_57406_VF ||
1015             id == BROADCOM_DEV_ID_5731X_VF ||
1016             id == BROADCOM_DEV_ID_5741X_VF ||
1017             id == BROADCOM_DEV_ID_57414_VF)
1018                 return true;
1019         return false;
1020 }
1021
1022 static int bnxt_init_board(struct rte_eth_dev *eth_dev)
1023 {
1024         struct bnxt *bp = eth_dev->data->dev_private;
1025         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1026         int rc;
1027
1028         /* enable device (incl. PCI PM wakeup), and bus-mastering */
1029         if (!pci_dev->mem_resource[0].addr) {
1030                 RTE_LOG(ERR, PMD,
1031                         "Cannot find PCI device base address, aborting\n");
1032                 rc = -ENODEV;
1033                 goto init_err_disable;
1034         }
1035
1036         bp->eth_dev = eth_dev;
1037         bp->pdev = pci_dev;
1038
1039         bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
1040         if (!bp->bar0) {
1041                 RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
1042                 rc = -ENOMEM;
1043                 goto init_err_release;
1044         }
1045         return 0;
1046
1047 init_err_release:
1048         if (bp->bar0)
1049                 bp->bar0 = NULL;
1050
1051 init_err_disable:
1052
1053         return rc;
1054 }
1055
/* Forward declaration: init error paths unwind through uninit. */
static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);

/*
 * Clear the bit for HWRM command 'x' in the pf.vf_req_fwd bitmap
 * (an array of little-endian 32-bit words, one bit per command ID).
 * NOTE(review): a cleared bit presumably means the VF's command is NOT
 * forwarded to the PF driver, i.e. the firmware handles it directly —
 * consistent with the "driver cleanup" comment at the call sites, but
 * confirm against the HWRM spec.
 */
#define ALLOW_FUNC(x)	\
	{ \
		typeof(x) arg = (x); \
		bp->pf.vf_req_fwd[((arg) >> 5)] &= \
		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
	}
/*
 * Per-port init entry point, called from bnxt_pci_probe() via
 * rte_eth_dev_pci_generic_probe().  Maps the BAR, brings up the HWRM
 * channel, queries firmware capabilities, allocates the MAC and ring
 * group tables, registers the driver with firmware, and sets up
 * interrupts.  Returns 0 on success or a negative value; every error
 * path after board init unwinds through bnxt_dev_uninit().
 */
static int
bnxt_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	static int version_printed;	/* print the version banner once */
	struct bnxt *bp;
	int rc;

	if (version_printed++ == 0)
		RTE_LOG(INFO, PMD, "%s\n", bnxt_version);

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	bp = eth_dev->data->dev_private;
	/* Mark stopped so uninit knows dev_close has nothing to undo yet. */
	bp->dev_stopped = 1;

	if (bnxt_vf_pciid(pci_dev->id.device_id))
		bp->flags |= BNXT_FLAG_VF;

	rc = bnxt_init_board(eth_dev);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Board initialization failed rc: %x\n", rc);
		goto error;
	}
	eth_dev->dev_ops = &bnxt_dev_ops;
	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;

	/* The HWRM command channel must exist before any firmware call. */
	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"hwrm resource allocation failure rc: %x\n", rc);
		goto error_free;
	}
	rc = bnxt_hwrm_ver_get(bp);
	if (rc)
		goto error_free;
	bnxt_hwrm_queue_qportcfg(bp);

	bnxt_hwrm_func_qcfg(bp);

	/* Get the MAX capabilities for this function */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
		goto error_free;
	}
	if (bp->max_tx_rings == 0) {
		RTE_LOG(ERR, PMD, "No TX rings available!\n");
		rc = -EBUSY;
		goto error_free;
	}
	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
					ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		RTE_LOG(ERR, PMD,
			"Failed to alloc %u bytes needed to store MAC addr tbl",
			ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR);
		rc = -ENOMEM;
		goto error_free;
	}
	/* Copy the permanent MAC from the qcap response address now. */
	memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
	bp->grp_info = rte_zmalloc("bnxt_grp_info",
				sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
	if (!bp->grp_info) {
		RTE_LOG(ERR, PMD,
			"Failed to alloc %zu bytes needed to store group info table\n",
			sizeof(*bp->grp_info) * bp->max_ring_grps);
		rc = -ENOMEM;
		goto error_free;
	}

	/* Forward all requests if firmware is new enough */
	/*
	 * NOTE(review): fw_ver appears to be packed one component per
	 * byte, so this accepts [20.6.100, 20.7) or >= 20.8 — confirm
	 * against the version encoding in bnxt_hwrm_ver_get().
	 */
	if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
	    (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
	    ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
		memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
	} else {
		RTE_LOG(WARNING, PMD,
			"Firmware too old for VF mailbox functionality\n");
		memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
	}

	/*
	 * The following are used for driver cleanup.  If we disallow these,
	 * VF drivers can't clean up cleanly.
	 */
	ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
	ALLOW_FUNC(HWRM_VNIC_FREE);
	ALLOW_FUNC(HWRM_RING_FREE);
	ALLOW_FUNC(HWRM_RING_GRP_FREE);
	ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
	ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
	ALLOW_FUNC(HWRM_STAT_CTX_FREE);
	rc = bnxt_hwrm_func_driver_register(bp);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Failed to register driver");
		rc = -EBUSY;
		goto error_free;
	}

	RTE_LOG(INFO, PMD,
		DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
		pci_dev->mem_resource[0].phys_addr,
		pci_dev->mem_resource[0].addr);

	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
		rc = -1;
		goto error_free;
	}

	/* On a PF, provision resources for its VFs (or for itself only). */
	if (BNXT_PF(bp)) {
		//if (bp->pf.active_vfs) {
			// TODO: Deallocate VF resources?
		//}
		if (bp->pdev->max_vfs) {
			rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
			if (rc) {
				RTE_LOG(ERR, PMD, "Failed to allocate VFs\n");
				goto error_free;
			}
		} else {
			rc = bnxt_hwrm_allocate_pf_only(bp);
			if (rc) {
				RTE_LOG(ERR, PMD,
					"Failed to allocate PF resources\n");
				goto error_free;
			}
		}
	}

	rc = bnxt_setup_int(bp);
	if (rc)
		goto error_free;

	rc = bnxt_alloc_mem(bp);
	if (rc)
		goto error_free_int;

	rc = bnxt_request_int(bp);
	if (rc)
		goto error_free_int;

	rc = bnxt_alloc_def_cp_ring(bp);
	if (rc)
		goto error_free_int;

	bnxt_enable_int(bp);

	return 0;

	/* Unwind in reverse order of acquisition (goto-cleanup pattern). */
error_free_int:
	bnxt_disable_int(bp);
	bnxt_free_def_cp_ring(bp);
	bnxt_hwrm_func_buf_unrgtr(bp);
	bnxt_free_int(bp);
	bnxt_free_mem(bp);
error_free:
	bnxt_dev_uninit(eth_dev);
error:
	return rc;
}
1233
1234 static int
1235 bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
1236         struct bnxt *bp = eth_dev->data->dev_private;
1237         int rc;
1238
1239         bnxt_disable_int(bp);
1240         bnxt_free_int(bp);
1241         bnxt_free_mem(bp);
1242         if (eth_dev->data->mac_addrs != NULL) {
1243                 rte_free(eth_dev->data->mac_addrs);
1244                 eth_dev->data->mac_addrs = NULL;
1245         }
1246         if (bp->grp_info != NULL) {
1247                 rte_free(bp->grp_info);
1248                 bp->grp_info = NULL;
1249         }
1250         rc = bnxt_hwrm_func_driver_unregister(bp, 0);
1251         bnxt_free_hwrm_resources(bp);
1252         if (bp->dev_stopped == 0)
1253                 bnxt_dev_close_op(eth_dev);
1254         if (bp->pf.vf_info)
1255                 rte_free(bp->pf.vf_info);
1256         eth_dev->dev_ops = NULL;
1257         eth_dev->rx_pkt_burst = NULL;
1258         eth_dev->tx_pkt_burst = NULL;
1259
1260         return rc;
1261 }
1262
1263 int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg)
1264 {
1265         struct rte_pmd_bnxt_mb_event_param cb_param;
1266
1267         cb_param.retval = RTE_PMD_BNXT_MB_EVENT_PROCEED;
1268         cb_param.vf_id = vf_id;
1269         cb_param.msg = msg;
1270
1271         _rte_eth_dev_callback_process(bp->eth_dev, RTE_ETH_EVENT_VF_MBOX,
1272                         &cb_param);
1273
1274         /* Default to approve */
1275         if (cb_param.retval == RTE_PMD_BNXT_MB_EVENT_PROCEED)
1276                 cb_param.retval = RTE_PMD_BNXT_MB_EVENT_NOOP_ACK;
1277
1278         return cb_param.retval == RTE_PMD_BNXT_MB_EVENT_NOOP_ACK ? true : false;
1279 }
1280
1281 static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1282         struct rte_pci_device *pci_dev)
1283 {
1284         return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt),
1285                 bnxt_dev_init);
1286 }
1287
1288 static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
1289 {
1290         return rte_eth_dev_pci_generic_remove(pci_dev, bnxt_dev_uninit);
1291 }
1292
/* PCI driver descriptor tying the device ID table to probe/remove. */
static struct rte_pci_driver bnxt_rte_pmd = {
	.id_table = bnxt_pci_id_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |	/* BARs must be mapped */
		RTE_PCI_DRV_INTR_LSC,	/* link-state-change interrupt */
	.probe = bnxt_pci_probe,
	.remove = bnxt_pci_remove,
};

/* Register the PMD with EAL, export its PCI table and kmod deps. */
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");