net/bnxt: support async link notification
[dpdk.git] / drivers / net / bnxt / bnxt_hwrm.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) Broadcom Limited.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Broadcom Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <rte_byteorder.h>
35 #include <rte_common.h>
36 #include <rte_cycles.h>
37 #include <rte_malloc.h>
38 #include <rte_memzone.h>
39 #include <rte_version.h>
40
41 #include "bnxt.h"
42 #include "bnxt_cpr.h"
43 #include "bnxt_filter.h"
44 #include "bnxt_hwrm.h"
45 #include "bnxt_rxq.h"
46 #include "bnxt_rxr.h"
47 #include "bnxt_ring.h"
48 #include "bnxt_txq.h"
49 #include "bnxt_txr.h"
50 #include "bnxt_vnic.h"
51 #include "hsi_struct_def_dpdk.h"
52
53 #define HWRM_CMD_TIMEOUT                2000
54
55 /*
56  * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. times out), and a positive non-zero HWRM error code if the
 * command was rejected by the ChiMP firmware.
60  */
61
62 static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
63                                         uint32_t msg_len)
64 {
65         unsigned int i;
66         struct input *req = msg;
67         struct output *resp = bp->hwrm_cmd_resp_addr;
68         uint32_t *data = msg;
69         uint8_t *bar;
70         uint8_t *valid;
71
72         /* Write request msg to hwrm channel */
73         for (i = 0; i < msg_len; i += 4) {
74                 bar = (uint8_t *)bp->bar0 + i;
75                 *(volatile uint32_t *)bar = *data;
76                 data++;
77         }
78
79         /* Zero the rest of the request space */
80         for (; i < bp->max_req_len; i += 4) {
81                 bar = (uint8_t *)bp->bar0 + i;
82                 *(volatile uint32_t *)bar = 0;
83         }
84
85         /* Ring channel doorbell */
86         bar = (uint8_t *)bp->bar0 + 0x100;
87         *(volatile uint32_t *)bar = 1;
88
89         /* Poll for the valid bit */
90         for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
91                 /* Sanity check on the resp->resp_len */
92                 rte_rmb();
93                 if (resp->resp_len && resp->resp_len <=
94                                 bp->max_resp_len) {
95                         /* Last byte of resp contains the valid key */
96                         valid = (uint8_t *)resp + resp->resp_len - 1;
97                         if (*valid == HWRM_RESP_VALID_KEY)
98                                 break;
99                 }
100                 rte_delay_us(600);
101         }
102
103         if (i >= HWRM_CMD_TIMEOUT) {
104                 RTE_LOG(ERR, PMD, "Error sending msg %x\n",
105                         req->req_type);
106                 goto err_ret;
107         }
108         return 0;
109
110 err_ret:
111         return -1;
112 }
113
114 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
115 {
116         int rc;
117
118         rte_spinlock_lock(&bp->hwrm_lock);
119         rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
120         rte_spinlock_unlock(&bp->hwrm_lock);
121         return rc;
122 }
123
/*
 * Initialize the common header of HWRM request "req": clear the shared
 * response buffer, then fill in the request type, completion ring,
 * sequence id, target and the DMA address of the response buffer.
 * Wrapped in do/while(0) so the multi-statement body expands safely
 * inside un-braced if/else.  ("resp" is kept for call-site symmetry.)
 */
#define HWRM_PREP(req, type, cr, resp) \
	do { \
		memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
		req.req_type = rte_cpu_to_le_16(HWRM_##type); \
		req.cmpl_ring = rte_cpu_to_le_16(cr); \
		req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
		req.target_id = rte_cpu_to_le_16(0xffff); \
		req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
	} while (0)

/*
 * Check both the transport result ("rc" from bnxt_hwrm_send_message*)
 * and the HWRM completion status (resp->error_code); on either failure
 * this RETURNS from the calling function with a non-zero code, so it
 * must not be used while holding locks that still need to be released.
 */
#define HWRM_CHECK_RESULT \
	do { \
		if (rc) { \
			RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
				__func__, rc); \
			return rc; \
		} \
		if (resp->error_code) { \
			rc = rte_le_to_cpu_16(resp->error_code); \
			RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
			return rc; \
		} \
	} while (0)
145
/*
 * Clear the RX mask of a VNIC so it stops accepting any traffic class.
 * Returns 0 on success, -1 on transport failure, or the HWRM error code.
 */
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	/* An all-zero mask disables every RX class for this VNIC */
	req.mask = 0;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
162
163 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
164 {
165         int rc = 0;
166         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
167         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
168         uint32_t mask = 0;
169
170         HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
171         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
172
173         /* FIXME add multicast flag, when multicast adding options is supported
174          * by ethtool.
175          */
176         if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
177                 mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
178         if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
179                 mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
180         req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
181                                     mask);
182
183         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
184
185         HWRM_CHECK_RESULT;
186
187         return rc;
188 }
189
190 int bnxt_hwrm_clear_filter(struct bnxt *bp,
191                            struct bnxt_filter_info *filter)
192 {
193         int rc = 0;
194         struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
195         struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
196
197         HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
198
199         req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
200
201         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
202
203         HWRM_CHECK_RESULT;
204
205         filter->fw_l2_filter_id = -1;
206
207         return 0;
208 }
209
/*
 * Allocate an L2 RX filter directing matching traffic to "vnic".
 * Each optional match field (address, address mask, outer VLAN, outer
 * VLAN mask) is copied into the request only when the corresponding
 * HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_* bit is set in
 * filter->enables; DST_ID is always enabled and points at the VNIC.
 * On success the firmware handle is stored in filter->fw_l2_filter_id
 * (needed later by bnxt_hwrm_clear_filter()).
 * Returns 0 on success, -1 on transport failure, or the HWRM error code.
 */
int bnxt_hwrm_set_filter(struct bnxt *bp,
			 struct bnxt_vnic_info *vnic,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

	req.flags = rte_cpu_to_le_32(filter->flags);

	/* Destination VNIC is always part of the filter */
	enables = filter->enables |
	      HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	/* Remember the firmware handle so the filter can be freed later */
	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

	return rc;
}
252
/*
 * Forward an encapsulated HWRM command (e.g. one received from a VF)
 * to the firmware for execution on the originator's behalf.
 * NOTE(review): sizeof(req.encap_request) bytes are copied from
 * fwd_cmd -- callers must supply a buffer at least that large; confirm
 * at the call sites.
 * Returns 0 on success, -1 on transport failure, or the HWRM error code.
 */
int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
{
	int rc;
	struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);

	memcpy(req.encap_request, fwd_cmd,
	       sizeof(req.encap_request));

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
270
271 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
272 {
273         int rc = 0;
274         struct hwrm_func_qcaps_input req = {.req_type = 0 };
275         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
276
277         HWRM_PREP(req, FUNC_QCAPS, -1, resp);
278
279         req.fid = rte_cpu_to_le_16(0xffff);
280
281         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
282
283         HWRM_CHECK_RESULT;
284
285         bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
286         if (BNXT_PF(bp)) {
287                 struct bnxt_pf_info *pf = &bp->pf;
288
289                 pf->fw_fid = rte_le_to_cpu_32(resp->fid);
290                 pf->port_id = resp->port_id;
291                 memcpy(pf->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
292                 pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
293                 pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
294                 pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
295                 pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
296                 pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
297                 pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
298                 pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
299                 pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
300         } else {
301                 struct bnxt_vf_info *vf = &bp->vf;
302
303                 vf->fw_fid = rte_le_to_cpu_32(resp->fid);
304                 memcpy(vf->mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
305                 vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
306                 vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
307                 vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
308                 vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
309                 vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
310                 vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
311         }
312
313         return rc;
314 }
315
/*
 * Reset this function in the firmware (HWRM_FUNC_RESET) with no
 * optional parameters enabled.
 * Returns 0 on success, -1 on transport failure, or the HWRM error code.
 */
int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET, -1, resp);

	/* No optional fields are valid in this request */
	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
332
333 int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
334                                    uint32_t *vf_req_fwd)
335 {
336         int rc;
337         struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
338         struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
339
340         if (bp->flags & BNXT_FLAG_REGISTERED)
341                 return 0;
342
343         HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
344         req.flags = flags;
345         req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
346                         HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD;
347         req.ver_maj = RTE_VER_YEAR;
348         req.ver_min = RTE_VER_MONTH;
349         req.ver_upd = RTE_VER_MINOR;
350
351         memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));
352
353         req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */
354
355         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
356
357         HWRM_CHECK_RESULT;
358
359         bp->flags |= BNXT_FLAG_REGISTERED;
360
361         return rc;
362 }
363
364 int bnxt_hwrm_ver_get(struct bnxt *bp)
365 {
366         int rc = 0;
367         struct hwrm_ver_get_input req = {.req_type = 0 };
368         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
369         uint32_t my_version;
370         uint32_t fw_version;
371         uint16_t max_resp_len;
372         char type[RTE_MEMZONE_NAMESIZE];
373
374         HWRM_PREP(req, VER_GET, -1, resp);
375
376         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
377         req.hwrm_intf_min = HWRM_VERSION_MINOR;
378         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
379
380         /*
381          * Hold the lock since we may be adjusting the response pointers.
382          */
383         rte_spinlock_lock(&bp->hwrm_lock);
384         rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
385
386         HWRM_CHECK_RESULT;
387
388         RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
389                 resp->hwrm_intf_maj, resp->hwrm_intf_min,
390                 resp->hwrm_intf_upd,
391                 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
392         RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
393                 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
394
395         my_version = HWRM_VERSION_MAJOR << 16;
396         my_version |= HWRM_VERSION_MINOR << 8;
397         my_version |= HWRM_VERSION_UPDATE;
398
399         fw_version = resp->hwrm_intf_maj << 16;
400         fw_version |= resp->hwrm_intf_min << 8;
401         fw_version |= resp->hwrm_intf_upd;
402
403         if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
404                 RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
405                 rc = -EINVAL;
406                 goto error;
407         }
408
409         if (my_version != fw_version) {
410                 RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
411                 if (my_version < fw_version) {
412                         RTE_LOG(INFO, PMD,
413                                 "Firmware API version is newer than driver.\n");
414                         RTE_LOG(INFO, PMD,
415                                 "The driver may be missing features.\n");
416                 } else {
417                         RTE_LOG(INFO, PMD,
418                                 "Firmware API version is older than driver.\n");
419                         RTE_LOG(INFO, PMD,
420                                 "Not all driver features may be functional.\n");
421                 }
422         }
423
424         if (bp->max_req_len > resp->max_req_win_len) {
425                 RTE_LOG(ERR, PMD, "Unsupported request length\n");
426                 rc = -EINVAL;
427         }
428         bp->max_req_len = resp->max_req_win_len;
429         max_resp_len = resp->max_resp_len;
430         if (bp->max_resp_len != max_resp_len) {
431                 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
432                         bp->pdev->addr.domain, bp->pdev->addr.bus,
433                         bp->pdev->addr.devid, bp->pdev->addr.function);
434
435                 rte_free(bp->hwrm_cmd_resp_addr);
436
437                 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
438                 if (bp->hwrm_cmd_resp_addr == NULL) {
439                         rc = -ENOMEM;
440                         goto error;
441                 }
442                 bp->hwrm_cmd_resp_dma_addr =
443                         rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
444                 bp->max_resp_len = max_resp_len;
445         }
446
447 error:
448         rte_spinlock_unlock(&bp->hwrm_lock);
449         return rc;
450 }
451
452 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
453 {
454         int rc;
455         struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
456         struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
457
458         if (!(bp->flags & BNXT_FLAG_REGISTERED))
459                 return 0;
460
461         HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
462         req.flags = flags;
463
464         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
465
466         HWRM_CHECK_RESULT;
467
468         bp->flags &= ~BNXT_FLAG_REGISTERED;
469
470         return rc;
471 }
472
/*
 * Configure the PHY (HWRM_PORT_PHY_CFG) from "conf".
 * conf->link_up clear: force the link down and return.
 * conf->link_up set: program either a forced speed (conf->link_speed
 * non-zero) or, when no fixed speed is requested, the autonegotiation
 * parameters (mode, speed mask, speed).  Pause settings pick the AUTO
 * or FORCE enable bit depending on which fields are populated.
 * Returns 0 on success, -1 on transport failure, or the HWRM error code.
 */
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

	if (conf->link_up) {
		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			req.auto_mode |= conf->auto_mode;
			enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
			/* NOTE(review): written without rte_cpu_to_le_16() --
			 * confirm byte order of this field on big-endian hosts.
			 */
			req.auto_link_speed_mask = conf->auto_link_speed_mask;
			enables |=
			   HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
			req.auto_link_speed = bp->link_info.auto_link_speed;
			enables |=
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
		}
		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN);
		RTE_LOG(INFO, PMD, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
522
/*
 * Query the current PHY/link state (HWRM_PORT_PHY_QCFG) and copy the
 * result into "link_info": up/down status, speed, duplex, pause
 * configuration, autoneg settings, supported speeds and PHY version.
 * link_speed is zeroed when the firmware reports no link.
 * Returns 0 on success, -1 on transport failure, or the HWRM error code.
 */
static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	link_info->phy_link_status = resp->link;
	/* Anything other than NO_LINK counts as link up */
	if (link_info->phy_link_status != HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK) {
		link_info->link_up = 1;
		link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	} else {
		link_info->link_up = 0;
		link_info->link_speed = 0;
	}
	link_info->duplex = resp->duplex;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	return rc;
}
559
/*
 * Query the CoS queue configuration (HWRM_QUEUE_QPORTCFG) and cache
 * the id and service profile of each of the 8 queues in bp->cos_queue[].
 * Returns 0 on success, -1 on transport failure, or the HWRM error code.
 */
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

/* Copy queue "x" out of the response via token pasting: the response
 * exposes one queue_id<x>/queue_id<x>_service_profile field pair per
 * queue rather than an array.
 */
#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	return rc;
}
587
/*
 * Allocate a hardware ring (TX, RX or completion) described by "ring"
 * and store the returned firmware ring id in ring->fw_ring_id.
 * map_index associates the ring with a doorbell/group slot; stats_ctx_id
 * is used for TX and RX rings only.
 * Returns 0 on success, -1 for an unknown ring type or transport
 * failure, or the HWRM error code.
 */
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id)
{
	int rc = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = bp->cos_queue[0].id;
		/* FALLTHROUGH: TX shares the RX setup below (no break) */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id =
		    rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
		req.length = rte_cpu_to_le_32(ring->ring_size);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		req.length = rte_cpu_to_le_32(ring->ring_size);
		break;
	default:
		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
			ring_type);
		return -1;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	/* Expanded HWRM_CHECK_RESULT so the log can name the ring type */
	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	return rc;
}
660
/*
 * Free the hardware ring identified by ring->fw_ring_id.
 * ring_type selects which HWRM ring class (TX/RX/completion) is freed;
 * it is also used to pick the error message on failure.
 * Returns 0 on success, -1 on transport failure, or the HWRM error code.
 */
int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE, -1, resp);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	/* Expanded HWRM_CHECK_RESULT so the log can name the ring type */
	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
			RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	return 0;
}
699
/*
 * Allocate a ring group from the completion, RX, aggregation and stats
 * context ids already recorded in bp->grp_info[idx]; the firmware group
 * id is stored back into bp->grp_info[idx].fw_grp_id.
 * Returns 0 on success, -1 on transport failure, or the HWRM error code.
 */
int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
	int rc = 0;
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->grp_info[idx].fw_grp_id =
	    rte_le_to_cpu_16(resp->ring_group_id);

	return rc;
}
722
/*
 * Free the ring group bp->grp_info[idx].fw_grp_id and invalidate the
 * cached id (INVALID_HW_RING_ID) on success.
 * Returns 0 on success, -1 on transport failure, or the HWRM error code.
 */
int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE, -1, resp);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
	return rc;
}
740
741 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
742 {
743         int rc = 0;
744         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
745         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
746
747         HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
748
749         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
750                 return rc;
751
752         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
753         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
754
755         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
756
757         HWRM_CHECK_RESULT;
758
759         return rc;
760 }
761
762 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
763                              struct bnxt_cp_ring_info *cpr, unsigned int idx)
764 {
765         int rc;
766         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
767         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
768
769         HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
770
771         req.update_period_ms = rte_cpu_to_le_32(1000);
772
773         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
774         req.stats_dma_addr =
775             rte_cpu_to_le_64(cpr->hw_stats_map);
776
777         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
778
779         HWRM_CHECK_RESULT;
780
781         cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
782         bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
783
784         return rc;
785 }
786
787 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
788                             struct bnxt_cp_ring_info *cpr, unsigned int idx)
789 {
790         int rc;
791         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
792         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
793
794         HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
795
796         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
797         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
798
799         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
800
801         HWRM_CHECK_RESULT;
802
803         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
804         bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
805
806         return rc;
807 }
808
/*
 * Allocate a VNIC and map the ring groups [start_grp_id, end_grp_id]
 * into vnic->fw_grp_ids[].  The RSS/CoS/LB context is marked
 * unallocated; the firmware VNIC id is stored in vnic->fw_vnic_id.
 * Returns 0 on success, -1 on transport failure, or the HWRM error code.
 */
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0, i, j;
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
		if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
			/* Stop at the first unallocated group; the VNIC is
			 * still created with the groups mapped so far.
			 */
			RTE_LOG(ERR, PMD,
				"Not enough ring groups avail:%x req:%x\n", j,
				(vnic->end_grp_id - vnic->start_grp_id) + 1);
			break;
		}
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
	}

	/* No RSS/CoS/LB context allocated yet */
	vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;

	HWRM_PREP(req, VNIC_ALLOC, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
	return rc;
}
838
839 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
840 {
841         int rc = 0;
842         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
843         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
844
845         HWRM_PREP(req, VNIC_CFG, -1, resp);
846
847         /* Only RSS support for now TBD: COS & LB */
848         req.enables =
849             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
850                              HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
851                              HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
852         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
853         req.dflt_ring_grp =
854                 rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id);
855         req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
856         req.cos_rule = rte_cpu_to_le_16(0xffff);
857         req.lb_rule = rte_cpu_to_le_16(0xffff);
858         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
859                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
860         if (vnic->func_default)
861                 req.flags = 1;
862         if (vnic->vlan_strip)
863                 req.flags |=
864                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
865
866         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
867
868         HWRM_CHECK_RESULT;
869
870         return rc;
871 }
872
/*
 * Allocate an RSS/COS/LB context in FW for this VNIC and cache its id
 * in vnic->fw_rss_cos_lb_ctx.
 */
int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);

	return rc;
}
890
/*
 * Free the VNIC's RSS/COS/LB context in FW and invalidate the cached id.
 */
int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	/* NOTE(review): the alloc path marks a free context with
	 * (uint16_t)HWRM_NA_SIGNATURE, not INVALID_HW_RING_ID —
	 * presumably both are 0xffff, but confirm they are interchangeable.
	 */
	vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;

	return rc;
}
910
/*
 * Free the VNIC in FW.  Safe to call on an unallocated VNIC: returns 0
 * immediately when no FW id is cached.
 */
int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return rc;

	HWRM_PREP(req, VNIC_FREE, -1, resp);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	/* Mark the VNIC as unallocated so a second free is a no-op. */
	vnic->fw_vnic_id = INVALID_HW_RING_ID;
	return rc;
}
931
/*
 * Program the VNIC's RSS configuration: hash type plus the DMA addresses
 * of the indirection table and hash key that the caller has filled in.
 */
int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

	/* FW reads the RSS table and hash key directly via DMA. */
	req.ring_grp_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
955
956 /*
957  * HWRM utility functions
958  */
959
960 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
961 {
962         unsigned int i;
963         int rc = 0;
964
965         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
966                 struct bnxt_tx_queue *txq;
967                 struct bnxt_rx_queue *rxq;
968                 struct bnxt_cp_ring_info *cpr;
969
970                 if (i >= bp->rx_cp_nr_rings) {
971                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
972                         cpr = txq->cp_ring;
973                 } else {
974                         rxq = bp->rx_queues[i];
975                         cpr = rxq->cp_ring;
976                 }
977
978                 rc = bnxt_hwrm_stat_clear(bp, cpr);
979                 if (rc)
980                         return rc;
981         }
982         return 0;
983 }
984
985 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
986 {
987         int rc;
988         unsigned int i;
989         struct bnxt_cp_ring_info *cpr;
990
991         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
992                 unsigned int idx = i + 1;
993
994                 if (i >= bp->rx_cp_nr_rings)
995                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
996                 else
997                         cpr = bp->rx_queues[i]->cp_ring;
998                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
999                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
1000                         if (rc)
1001                                 return rc;
1002                 }
1003         }
1004         return 0;
1005 }
1006
1007 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1008 {
1009         unsigned int i;
1010         int rc = 0;
1011
1012         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1013                 struct bnxt_tx_queue *txq;
1014                 struct bnxt_rx_queue *rxq;
1015                 struct bnxt_cp_ring_info *cpr;
1016                 unsigned int idx = i + 1;
1017
1018                 if (i >= bp->rx_cp_nr_rings) {
1019                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1020                         cpr = txq->cp_ring;
1021                 } else {
1022                         rxq = bp->rx_queues[i];
1023                         cpr = rxq->cp_ring;
1024                 }
1025
1026                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);
1027
1028                 if (rc)
1029                         return rc;
1030         }
1031         return rc;
1032 }
1033
1034 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1035 {
1036         uint16_t i;
1037         uint32_t rc = 0;
1038
1039         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1040                 unsigned int idx = i + 1;
1041
1042                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
1043                         RTE_LOG(ERR, PMD,
1044                                 "Attempt to free invalid ring group %d\n",
1045                                 idx);
1046                         continue;
1047                 }
1048
1049                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1050
1051                 if (rc)
1052                         return rc;
1053         }
1054         return rc;
1055 }
1056
1057 static void bnxt_free_cp_ring(struct bnxt *bp,
1058                               struct bnxt_cp_ring_info *cpr, unsigned int idx)
1059 {
1060         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1061
1062         bnxt_hwrm_ring_free(bp, cp_ring,
1063                         HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
1064         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1065         bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
1066         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1067                         sizeof(*cpr->cp_desc_ring));
1068         cpr->cp_raw_cons = 0;
1069 }
1070
/*
 * Free every Tx, Rx and completion ring currently allocated in FW and
 * reset the matching host-side state.  Each queue's data ring is freed
 * before its completion ring; the default completion ring (group index
 * 0) is freed last.
 */
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		/* Group indices: 0 = default ring, 1..rx_cp_nr_rings = Rx,
		 * Tx rings follow after the Rx range.
		 */
		unsigned int idx = bp->rx_cp_nr_rings + i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			/* Wipe descriptors and SW bookkeeping so the queue
			 * can be re-allocated from a clean state.
			 */
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, idx);
	}

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		unsigned int idx = i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->rx_desc_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_desc_ring));
			memset(rxr->rx_buf_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_buf_ring));
			rxr->rx_prod = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, idx);
	}

	/* Default completion ring */
	{
		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, 0);
	}

	return rc;
}
1134
1135 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1136 {
1137         uint16_t i;
1138         uint32_t rc = 0;
1139
1140         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1141                 unsigned int idx = i + 1;
1142
1143                 if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
1144                     bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
1145                         continue;
1146
1147                 rc = bnxt_hwrm_ring_grp_alloc(bp, idx);
1148
1149                 if (rc)
1150                         return rc;
1151         }
1152         return rc;
1153 }
1154
1155 void bnxt_free_hwrm_resources(struct bnxt *bp)
1156 {
1157         /* Release memzone */
1158         rte_free(bp->hwrm_cmd_resp_addr);
1159         bp->hwrm_cmd_resp_addr = NULL;
1160         bp->hwrm_cmd_resp_dma_addr = 0;
1161 }
1162
1163 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1164 {
1165         struct rte_pci_device *pdev = bp->pdev;
1166         char type[RTE_MEMZONE_NAMESIZE];
1167
1168         sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1169                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1170         bp->max_req_len = HWRM_MAX_REQ_LEN;
1171         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1172         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1173         if (bp->hwrm_cmd_resp_addr == NULL)
1174                 return -ENOMEM;
1175         bp->hwrm_cmd_resp_dma_addr =
1176                 rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
1177         rte_spinlock_init(&bp->hwrm_lock);
1178
1179         return 0;
1180 }
1181
1182 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1183 {
1184         struct bnxt_filter_info *filter;
1185         int rc = 0;
1186
1187         STAILQ_FOREACH(filter, &vnic->filter, next) {
1188                 rc = bnxt_hwrm_clear_filter(bp, filter);
1189                 if (rc)
1190                         break;
1191         }
1192         return rc;
1193 }
1194
1195 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1196 {
1197         struct bnxt_filter_info *filter;
1198         int rc = 0;
1199
1200         STAILQ_FOREACH(filter, &vnic->filter, next) {
1201                 rc = bnxt_hwrm_set_filter(bp, vnic, filter);
1202                 if (rc)
1203                         break;
1204         }
1205         return rc;
1206 }
1207
1208 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1209 {
1210         struct bnxt_vnic_info *vnic;
1211         unsigned int i;
1212
1213         if (bp->vnic_info == NULL)
1214                 return;
1215
1216         vnic = &bp->vnic_info[0];
1217         bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
1218
1219         /* VNIC resources */
1220         for (i = 0; i < bp->nr_vnics; i++) {
1221                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1222
1223                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1224
1225                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1226                 bnxt_hwrm_vnic_free(bp, vnic);
1227         }
1228         /* Ring resources */
1229         bnxt_free_all_hwrm_rings(bp);
1230         bnxt_free_all_hwrm_ring_grps(bp);
1231         bnxt_free_all_hwrm_stat_ctxs(bp);
1232 }
1233
1234 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1235 {
1236         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1237
1238         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1239                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1240
1241         switch (conf_link_speed) {
1242         case ETH_LINK_SPEED_10M_HD:
1243         case ETH_LINK_SPEED_100M_HD:
1244                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1245         }
1246         return hw_link_duplex;
1247 }
1248
/*
 * Translate a DPDK ETH_LINK_SPEED_* configuration value into the HWRM
 * link-speed code used by port_phy_cfg.  Returns 0 for autoneg (the
 * caller then takes the speed-mask path instead); unsupported speeds
 * are logged and also fall back to 0 (auto).
 */
static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
	uint16_t eth_link_speed = 0;

	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
		return ETH_LINK_SPEED_AUTONEG;	/* == 0, i.e. "autoneg" */

	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
	case ETH_LINK_SPEED_100M:
	case ETH_LINK_SPEED_100M_HD:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
		break;
	case ETH_LINK_SPEED_1G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
		break;
	case ETH_LINK_SPEED_2_5G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
		break;
	case ETH_LINK_SPEED_10G:
		/* NOTE(review): 10G/40G/50G use FORCE_LINK_SPEED_*
		 * constants while the other cases use AUTO_LINK_SPEED_* —
		 * presumably the numeric values coincide in the HSI
		 * header, but confirm before normalizing.
		 */
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
		break;
	case ETH_LINK_SPEED_20G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
		break;
	case ETH_LINK_SPEED_25G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
		break;
	case ETH_LINK_SPEED_40G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
		break;
	case ETH_LINK_SPEED_50G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
		break;
	default:
		RTE_LOG(ERR, PMD,
			"Unsupported link speed %d; default to AUTO\n",
			conf_link_speed);
		break;
	}
	return eth_link_speed;
}
1298
1299 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1300                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1301                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1302                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1303
1304 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1305 {
1306         uint32_t one_speed;
1307
1308         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1309                 return 0;
1310
1311         if (link_speed & ETH_LINK_SPEED_FIXED) {
1312                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1313
1314                 if (one_speed & (one_speed - 1)) {
1315                         RTE_LOG(ERR, PMD,
1316                                 "Invalid advertised speeds (%u) for port %u\n",
1317                                 link_speed, port_id);
1318                         return -EINVAL;
1319                 }
1320                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1321                         RTE_LOG(ERR, PMD,
1322                                 "Unsupported advertised speed (%u) for port %u\n",
1323                                 link_speed, port_id);
1324                         return -EINVAL;
1325                 }
1326         } else {
1327                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1328                         RTE_LOG(ERR, PMD,
1329                                 "Unsupported advertised speeds (%u) for port %u\n",
1330                                 link_speed, port_id);
1331                         return -EINVAL;
1332                 }
1333         }
1334         return 0;
1335 }
1336
1337 static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
1338 {
1339         uint16_t ret = 0;
1340
1341         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1342                 link_speed = BNXT_SUPPORTED_SPEEDS;
1343
1344         if (link_speed & ETH_LINK_SPEED_100M)
1345                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1346         if (link_speed & ETH_LINK_SPEED_100M_HD)
1347                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1348         if (link_speed & ETH_LINK_SPEED_1G)
1349                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1350         if (link_speed & ETH_LINK_SPEED_2_5G)
1351                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1352         if (link_speed & ETH_LINK_SPEED_10G)
1353                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1354         if (link_speed & ETH_LINK_SPEED_20G)
1355                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1356         if (link_speed & ETH_LINK_SPEED_25G)
1357                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1358         if (link_speed & ETH_LINK_SPEED_40G)
1359                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1360         if (link_speed & ETH_LINK_SPEED_50G)
1361                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1362         return ret;
1363 }
1364
1365 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1366 {
1367         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1368
1369         switch (hw_link_speed) {
1370         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1371                 eth_link_speed = ETH_SPEED_NUM_100M;
1372                 break;
1373         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1374                 eth_link_speed = ETH_SPEED_NUM_1G;
1375                 break;
1376         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1377                 eth_link_speed = ETH_SPEED_NUM_2_5G;
1378                 break;
1379         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1380                 eth_link_speed = ETH_SPEED_NUM_10G;
1381                 break;
1382         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1383                 eth_link_speed = ETH_SPEED_NUM_20G;
1384                 break;
1385         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1386                 eth_link_speed = ETH_SPEED_NUM_25G;
1387                 break;
1388         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1389                 eth_link_speed = ETH_SPEED_NUM_40G;
1390                 break;
1391         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1392                 eth_link_speed = ETH_SPEED_NUM_50G;
1393                 break;
1394         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1395         default:
1396                 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
1397                         hw_link_speed);
1398                 break;
1399         }
1400         return eth_link_speed;
1401 }
1402
1403 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1404 {
1405         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1406
1407         switch (hw_link_duplex) {
1408         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1409         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1410                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1411                 break;
1412         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1413                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1414                 break;
1415         default:
1416                 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
1417                         hw_link_duplex);
1418                 break;
1419         }
1420         return eth_link_duplex;
1421 }
1422
/*
 * Query the current PHY state from FW and translate it into the
 * rte_eth_link representation for the caller.
 */
int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Get link config failed with rc %d\n", rc);
		goto exit;
	}
	if (link_info->link_up)
		link->link_speed =
			bnxt_parse_hw_link_speed(link_info->link_speed);
	else
		/* NOTE(review): this assigns the ETH_LINK_SPEED_10M config
		 * *flag*, not an ETH_SPEED_NUM_* value like the branch
		 * above — confirm whether ETH_SPEED_NUM_NONE was intended.
		 */
		link->link_speed = ETH_LINK_SPEED_10M;
	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
	link->link_status = link_info->link_up;
	/* auto_mode NONE from FW means the link speed was forced. */
	link->link_autoneg = link_info->auto_mode ==
		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
		ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;
exit:
	return rc;
}
1447
/*
 * Program the PHY according to dev_conf->link_speeds, or force the link
 * down when link_up is false.  No-op for NPAR PFs and VFs, which do not
 * own the PHY.
 */
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
			bp->eth_dev->data->port_id);
	if (rc)
		goto error;

	memset(&link_req, 0, sizeof(link_req));
	link_req.link_up = link_up;
	if (!link_up)
		goto port_phy_cfg;	/* forcing link down needs no speed setup */

	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
	if (speed == 0) {
		/* Speed 0 means autoneg: advertise the configured mask. */
		link_req.phy_flags |=
				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
	} else {
		/* A single fixed speed was requested: force it. */
		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		link_req.link_speed = speed;
		RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	/* Preserve the pause settings learned from the last PHY query. */
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Set link config failed with rc %d\n", rc);
	}

	/* Give the PHY time to settle before callers query link state. */
	rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
error:
	return rc;
}
1497
1498 /* JIRA 22088 */
/*
 * Query this function's configuration from FW.  Caches the VF's VLAN
 * (low 12 bits) and the NPAR port partition type on the bnxt struct.
 */
int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, FUNC_QCFG, -1, resp);
	req.fid = rte_cpu_to_le_16(0xffff);	/* 0xffff = "this function" */

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	if (BNXT_VF(bp)) {
		struct bnxt_vf_info *vf = &bp->vf;

		/* Hard Coded.. 0xfff VLAN ID mask */
		vf->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
	}

	switch (resp->port_partition_type) {
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	default:
		/* Not NPAR, or an unrecognized partition type. */
		bp->port_partition_type = 0;
		break;
	}

	return rc;
}