net/bnxt: refactor for 1.5.1 HWRM API
[dpdk.git] / drivers / net / bnxt / bnxt_hwrm.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) Broadcom Limited.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Broadcom Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <rte_byteorder.h>
35 #include <rte_common.h>
36 #include <rte_cycles.h>
37 #include <rte_malloc.h>
38 #include <rte_memzone.h>
39 #include <rte_version.h>
40
41 #include "bnxt.h"
42 #include "bnxt_cpr.h"
43 #include "bnxt_filter.h"
44 #include "bnxt_hwrm.h"
45 #include "bnxt_rxq.h"
46 #include "bnxt_rxr.h"
47 #include "bnxt_ring.h"
48 #include "bnxt_txq.h"
49 #include "bnxt_txr.h"
50 #include "bnxt_vnic.h"
51 #include "hsi_struct_def_dpdk.h"
52
53 #define HWRM_CMD_TIMEOUT                2000
54
55 /*
56  * HWRM Functions (sent to HWRM)
57  * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
58  * fails (ie: a timeout), and a positive non-zero HWRM error code if the HWRM
59  * command was failed by the ChiMP.
60  */
61
/*
 * Write an HWRM request into the BAR0 communication channel, ring the
 * doorbell, and busy-poll the DMA response buffer until the firmware marks
 * it valid or HWRM_CMD_TIMEOUT polling iterations elapse.
 *
 * Caller must hold bp->hwrm_lock — the channel and the shared response
 * buffer support only one in-flight command (see bnxt_hwrm_send_message()).
 *
 * Returns 0 on success, -1 on timeout.  HWRM-level failures are reported
 * via resp->error_code and checked by the callers (HWRM_CHECK_RESULT).
 */
static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
					uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;

	/* Write request msg to hwrm channel */
	/* NOTE(review): copies 4 bytes per step, so this assumes msg_len is
	 * a multiple of 4 (true for the HWRM request structs) — confirm for
	 * any new caller.
	 */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		*(volatile uint32_t *)bar = *data;
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < bp->max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		*(volatile uint32_t *)bar = 0;
	}

	/* Ring channel doorbell */
	/* NOTE(review): 0x100 is presumably the fixed doorbell offset within
	 * BAR0 — confirm against the HWRM channel specification.
	 */
	bar = (uint8_t *)bp->bar0 + 0x100;
	*(volatile uint32_t *)bar = 1;

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		rte_rmb();	/* re-read the DMA'd response each pass */
		if (resp->resp_len && resp->resp_len <=
				bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		RTE_LOG(ERR, PMD, "Error sending msg %x\n",
			req->req_type);
		goto err_ret;
	}
	return 0;

err_ret:
	return -1;
}
113
114 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
115 {
116         int rc;
117
118         rte_spinlock_lock(&bp->hwrm_lock);
119         rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
120         rte_spinlock_unlock(&bp->hwrm_lock);
121         return rc;
122 }
123
/*
 * Initialize the common HWRM request header in 'req': clear the shared
 * response buffer, then fill in request type, completion ring, sequence
 * id, target id and the response DMA address.  References the local 'bp'.
 * The 'resp' argument is unused but kept so call sites stay unchanged.
 *
 * Wrapped in do { } while (0) so the expansion is a single statement and
 * is safe inside an un-braced if/else (CERT PRE10-C).
 */
#define HWRM_PREP(req, type, cr, resp) \
	do { \
		memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
		req.req_type = rte_cpu_to_le_16(HWRM_##type); \
		req.cmpl_ring = rte_cpu_to_le_16(cr); \
		req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
		req.target_id = rte_cpu_to_le_16(0xffff); \
		req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
	} while (0)
131
/*
 * Check the outcome of an HWRM command.  Expects locals 'rc' (send-level
 * return) and 'resp' (response buffer) to be in scope, and RETURNS from
 * the enclosing function on failure — do not use while holding a lock
 * (see bnxt_hwrm_ver_get() for the open-coded alternative).
 *
 * Wrapped in do { } while (0) so "HWRM_CHECK_RESULT;" is one statement
 * and stays valid inside an un-braced if/else (CERT PRE10-C).
 */
#define HWRM_CHECK_RESULT \
	do { \
		if (rc) { \
			RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
				__func__, rc); \
			return rc; \
		} \
		if (resp->error_code) { \
			rc = rte_le_to_cpu_16(resp->error_code); \
			RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
			return rc; \
		} \
	} while (0)
145
146 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
147 {
148         int rc = 0;
149         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
150         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
151
152         HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
153         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
154         req.mask = 0;
155
156         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
157
158         HWRM_CHECK_RESULT;
159
160         return rc;
161 }
162
163 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
164 {
165         int rc = 0;
166         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
167         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
168         uint32_t mask = 0;
169
170         HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
171         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
172
173         /* FIXME add multicast flag, when multicast adding options is supported
174          * by ethtool.
175          */
176         if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
177                 mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
178         if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
179                 mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
180         req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
181                                     mask);
182
183         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
184
185         HWRM_CHECK_RESULT;
186
187         return rc;
188 }
189
190 int bnxt_hwrm_clear_filter(struct bnxt *bp,
191                            struct bnxt_filter_info *filter)
192 {
193         int rc = 0;
194         struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
195         struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
196
197         HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
198
199         req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
200
201         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
202
203         HWRM_CHECK_RESULT;
204
205         filter->fw_l2_filter_id = -1;
206
207         return 0;
208 }
209
210 int bnxt_hwrm_set_filter(struct bnxt *bp,
211                          struct bnxt_vnic_info *vnic,
212                          struct bnxt_filter_info *filter)
213 {
214         int rc = 0;
215         struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
216         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
217         uint32_t enables = 0;
218
219         HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);
220
221         req.flags = rte_cpu_to_le_32(filter->flags);
222
223         enables = filter->enables |
224               HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
225         req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
226
227         if (enables &
228             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
229                 memcpy(req.l2_addr, filter->l2_addr,
230                        ETHER_ADDR_LEN);
231         if (enables &
232             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
233                 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
234                        ETHER_ADDR_LEN);
235         if (enables &
236             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
237                 req.l2_ovlan = filter->l2_ovlan;
238         if (enables &
239             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
240                 req.l2_ovlan_mask = filter->l2_ovlan_mask;
241
242         req.enables = rte_cpu_to_le_32(enables);
243
244         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
245
246         HWRM_CHECK_RESULT;
247
248         filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
249
250         return rc;
251 }
252
253 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
254 {
255         int rc;
256         struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
257         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
258
259         HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
260
261         memcpy(req.encap_request, fwd_cmd,
262                sizeof(req.encap_request));
263
264         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
265
266         HWRM_CHECK_RESULT;
267
268         return rc;
269 }
270
/*
 * Query this function's capabilities/resource limits from the firmware
 * and cache them in bp->pf (when running as a PF) or bp->vf (as a VF):
 * MAC address, ring/context/VNIC maxima, and for the PF the VF id range.
 */
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QCAPS, -1, resp);

	/* fid 0xffff means "the calling function itself". */
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->fw_fid = rte_le_to_cpu_32(resp->fid);
		pf->port_id = resp->port_id;
		memcpy(pf->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
		pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
		pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
	} else {
		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = rte_le_to_cpu_32(resp->fid);
		/* NOTE(review): PF branch passes resp->mac_address, VF branch
		 * passes &resp->mac_address — equivalent only if mac_address
		 * is an array; confirm in hsi_struct_def_dpdk.h and make the
		 * two branches consistent.
		 */
		memcpy(vf->mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
		vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	}

	return rc;
}
315
316 int bnxt_hwrm_func_reset(struct bnxt *bp)
317 {
318         int rc = 0;
319         struct hwrm_func_reset_input req = {.req_type = 0 };
320         struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
321
322         HWRM_PREP(req, FUNC_RESET, -1, resp);
323
324         req.enables = rte_cpu_to_le_32(0);
325
326         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
327
328         HWRM_CHECK_RESULT;
329
330         return rc;
331 }
332
333 int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
334                                    uint32_t *vf_req_fwd)
335 {
336         int rc;
337         struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
338         struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
339
340         if (bp->flags & BNXT_FLAG_REGISTERED)
341                 return 0;
342
343         HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
344         req.flags = flags;
345         req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER;
346         req.ver_maj = RTE_VER_YEAR;
347         req.ver_min = RTE_VER_MONTH;
348         req.ver_upd = RTE_VER_MINOR;
349
350         memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));
351
352         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
353
354         HWRM_CHECK_RESULT;
355
356         bp->flags |= BNXT_FLAG_REGISTERED;
357
358         return rc;
359 }
360
361 int bnxt_hwrm_ver_get(struct bnxt *bp)
362 {
363         int rc = 0;
364         struct hwrm_ver_get_input req = {.req_type = 0 };
365         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
366         uint32_t my_version;
367         uint32_t fw_version;
368         uint16_t max_resp_len;
369         char type[RTE_MEMZONE_NAMESIZE];
370
371         HWRM_PREP(req, VER_GET, -1, resp);
372
373         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
374         req.hwrm_intf_min = HWRM_VERSION_MINOR;
375         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
376
377         /*
378          * Hold the lock since we may be adjusting the response pointers.
379          */
380         rte_spinlock_lock(&bp->hwrm_lock);
381         rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
382
383         HWRM_CHECK_RESULT;
384
385         RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
386                 resp->hwrm_intf_maj, resp->hwrm_intf_min,
387                 resp->hwrm_intf_upd,
388                 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
389         RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
390                 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
391
392         my_version = HWRM_VERSION_MAJOR << 16;
393         my_version |= HWRM_VERSION_MINOR << 8;
394         my_version |= HWRM_VERSION_UPDATE;
395
396         fw_version = resp->hwrm_intf_maj << 16;
397         fw_version |= resp->hwrm_intf_min << 8;
398         fw_version |= resp->hwrm_intf_upd;
399
400         if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
401                 RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
402                 rc = -EINVAL;
403                 goto error;
404         }
405
406         if (my_version != fw_version) {
407                 RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
408                 if (my_version < fw_version) {
409                         RTE_LOG(INFO, PMD,
410                                 "Firmware API version is newer than driver.\n");
411                         RTE_LOG(INFO, PMD,
412                                 "The driver may be missing features.\n");
413                 } else {
414                         RTE_LOG(INFO, PMD,
415                                 "Firmware API version is older than driver.\n");
416                         RTE_LOG(INFO, PMD,
417                                 "Not all driver features may be functional.\n");
418                 }
419         }
420
421         if (bp->max_req_len > resp->max_req_win_len) {
422                 RTE_LOG(ERR, PMD, "Unsupported request length\n");
423                 rc = -EINVAL;
424         }
425         bp->max_req_len = resp->max_req_win_len;
426         max_resp_len = resp->max_resp_len;
427         if (bp->max_resp_len != max_resp_len) {
428                 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
429                         bp->pdev->addr.domain, bp->pdev->addr.bus,
430                         bp->pdev->addr.devid, bp->pdev->addr.function);
431
432                 rte_free(bp->hwrm_cmd_resp_addr);
433
434                 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
435                 if (bp->hwrm_cmd_resp_addr == NULL) {
436                         rc = -ENOMEM;
437                         goto error;
438                 }
439                 bp->hwrm_cmd_resp_dma_addr =
440                         rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
441                 bp->max_resp_len = max_resp_len;
442         }
443
444 error:
445         rte_spinlock_unlock(&bp->hwrm_lock);
446         return rc;
447 }
448
449 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
450 {
451         int rc;
452         struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
453         struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
454
455         if (!(bp->flags & BNXT_FLAG_REGISTERED))
456                 return 0;
457
458         HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
459         req.flags = flags;
460
461         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
462
463         HWRM_CHECK_RESULT;
464
465         bp->flags &= ~BNXT_FLAG_REGISTERED;
466
467         return rc;
468 }
469
470 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
471 {
472         int rc = 0;
473         struct hwrm_port_phy_cfg_input req = {.req_type = 0};
474         struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
475
476         HWRM_PREP(req, PORT_PHY_CFG, -1, resp);
477
478         req.flags = conf->phy_flags;
479         if (conf->link_up) {
480                 req.force_link_speed = conf->link_speed;
481                 /*
482                  * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
483                  * any auto mode, even "none".
484                  */
485                 if (req.auto_mode == HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE) {
486                         req.flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
487                 } else {
488                         req.auto_mode = conf->auto_mode;
489                         req.enables |=
490                                 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
491                         req.auto_link_speed_mask = conf->auto_link_speed_mask;
492                         req.enables |=
493                            HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
494                         req.auto_link_speed = conf->auto_link_speed;
495                         req.enables |=
496                                 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
497                 }
498                 req.auto_duplex = conf->duplex;
499                 req.enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
500                 req.auto_pause = conf->auto_pause;
501                 /* Set force_pause if there is no auto or if there is a force */
502                 if (req.auto_pause)
503                         req.enables |=
504                                 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
505                 else
506                         req.enables |=
507                                 HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
508                 req.force_pause = conf->force_pause;
509                 if (req.force_pause)
510                         req.enables |=
511                                 HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
512         } else {
513                 req.flags &= ~HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
514                 req.flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN;
515                 req.force_link_speed = 0;
516         }
517
518         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
519
520         HWRM_CHECK_RESULT;
521
522         return rc;
523 }
524
525 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
526                                    struct bnxt_link_info *link_info)
527 {
528         int rc = 0;
529         struct hwrm_port_phy_qcfg_input req = {.req_type = 0};
530         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
531
532         HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);
533
534         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
535
536         HWRM_CHECK_RESULT;
537
538         link_info->phy_link_status = resp->link;
539         if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) {
540                 link_info->link_up = 1;
541                 link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
542         } else {
543                 link_info->link_up = 0;
544                 link_info->link_speed = 0;
545         }
546         link_info->duplex = resp->duplex;
547         link_info->pause = resp->pause;
548         link_info->auto_pause = resp->auto_pause;
549         link_info->force_pause = resp->force_pause;
550         link_info->auto_mode = resp->auto_mode;
551
552         link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
553         link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
554         link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
555         link_info->phy_ver[0] = resp->phy_maj;
556         link_info->phy_ver[1] = resp->phy_min;
557         link_info->phy_ver[2] = resp->phy_bld;
558
559         return rc;
560 }
561
562 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
563 {
564         int rc = 0;
565         struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
566         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
567
568         HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);
569
570         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
571
572         HWRM_CHECK_RESULT;
573
574 #define GET_QUEUE_INFO(x) \
575         bp->cos_queue[x].id = resp->queue_id##x; \
576         bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
577
578         GET_QUEUE_INFO(0);
579         GET_QUEUE_INFO(1);
580         GET_QUEUE_INFO(2);
581         GET_QUEUE_INFO(3);
582         GET_QUEUE_INFO(4);
583         GET_QUEUE_INFO(5);
584         GET_QUEUE_INFO(6);
585         GET_QUEUE_INFO(7);
586
587         return rc;
588 }
589
/*
 * Allocate a TX, RX, or completion ring in the firmware and store the
 * returned ring id in ring->fw_ring_id.
 *
 * @param ring_type     one of HWRM_RING_ALLOC_INPUT_RING_TYPE_{TX,RX,CMPL}
 * @param map_index     logical/doorbell index; also indexes bp->grp_info
 *                      for the completion ring id of TX/RX rings
 * @param stats_ctx_id  stats context to attach (TX/RX rings only)
 *
 * Returns 0 on success, -1 for an invalid ring type or channel failure,
 * or a positive HWRM error code.
 */
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id)
{
	int rc = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = bp->cos_queue[0].id;
		/* FALLTHROUGH: TX rings share all of the RX setup below */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id =
		    rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
		req.length = rte_cpu_to_le_32(ring->ring_size);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		req.length = rte_cpu_to_le_32(ring->ring_size);
		break;
	default:
		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
			ring_type);
		return -1;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	/* Open-coded error check so each ring type logs its own message. */
	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		/* NOTE(review): the cases below use HWRM_RING_FREE_INPUT_*
		 * constants; presumably their values match the ALLOC ring
		 * types — confirm in hsi_struct_def_dpdk.h.
		 */
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	return rc;
}
662
663 int bnxt_hwrm_ring_free(struct bnxt *bp,
664                         struct bnxt_ring *ring, uint32_t ring_type)
665 {
666         int rc;
667         struct hwrm_ring_free_input req = {.req_type = 0 };
668         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
669
670         HWRM_PREP(req, RING_FREE, -1, resp);
671
672         req.ring_type = ring_type;
673         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
674
675         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
676
677         if (rc || resp->error_code) {
678                 if (rc == 0 && resp->error_code)
679                         rc = rte_le_to_cpu_16(resp->error_code);
680
681                 switch (ring_type) {
682                 case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
683                         RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
684                                 rc);
685                         return rc;
686                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
687                         RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
688                                 rc);
689                         return rc;
690                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
691                         RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
692                                 rc);
693                         return rc;
694                 default:
695                         RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
696                         return rc;
697                 }
698         }
699         return 0;
700 }
701
702 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
703 {
704         int rc = 0;
705         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
706         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
707
708         HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);
709
710         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
711         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
712         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
713         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
714
715         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
716
717         HWRM_CHECK_RESULT;
718
719         bp->grp_info[idx].fw_grp_id =
720             rte_le_to_cpu_16(resp->ring_group_id);
721
722         return rc;
723 }
724
725 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
726 {
727         int rc;
728         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
729         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
730
731         HWRM_PREP(req, RING_GRP_FREE, -1, resp);
732
733         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
734
735         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
736
737         HWRM_CHECK_RESULT;
738
739         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
740         return rc;
741 }
742
743 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
744 {
745         int rc = 0;
746         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
747         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
748
749         HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
750
751         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
752                 return rc;
753
754         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
755         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
756
757         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
758
759         HWRM_CHECK_RESULT;
760
761         return rc;
762 }
763
764 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
765                              struct bnxt_cp_ring_info *cpr, unsigned int idx)
766 {
767         int rc;
768         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
769         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
770
771         HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
772
773         req.update_period_ms = rte_cpu_to_le_32(1000);
774
775         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
776         req.stats_dma_addr =
777             rte_cpu_to_le_64(cpr->hw_stats_map);
778
779         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
780
781         HWRM_CHECK_RESULT;
782
783         cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
784         bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
785
786         return rc;
787 }
788
789 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
790                             struct bnxt_cp_ring_info *cpr, unsigned int idx)
791 {
792         int rc;
793         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
794         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
795
796         HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
797
798         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
799         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
800
801         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
802
803         HWRM_CHECK_RESULT;
804
805         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
806         bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
807
808         return rc;
809 }
810
/*
 * Allocate a firmware VNIC and map the ring groups in the range
 * [vnic->start_grp_id, vnic->end_grp_id] into vnic->fw_grp_ids[].
 * On success vnic->fw_vnic_id holds the firmware-assigned VNIC ID.
 */
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0, i, j;
	struct hwrm_vnic_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
		if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
			RTE_LOG(ERR, PMD,
				"Not enough ring groups avail:%x req:%x\n", j,
				(vnic->end_grp_id - vnic->start_grp_id) + 1);
			/*
			 * NOTE(review): this only stops the mapping; the
			 * VNIC alloc below still proceeds with a partial
			 * group list — confirm that is intended.
			 */
			break;
		}
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
	}

	/* Start with no RSS/COS/LB context attached. */
	vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;

	HWRM_PREP(req, VNIC_ALLOC, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
	return rc;
}
840
841 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
842 {
843         int rc = 0;
844         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
845         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
846
847         HWRM_PREP(req, VNIC_CFG, -1, resp);
848
849         /* Only RSS support for now TBD: COS & LB */
850         req.enables =
851             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
852                              HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
853                              HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
854         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
855         req.dflt_ring_grp =
856                 rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id);
857         req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
858         req.cos_rule = rte_cpu_to_le_16(0xffff);
859         req.lb_rule = rte_cpu_to_le_16(0xffff);
860         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
861                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
862         if (vnic->func_default)
863                 req.flags = 1;
864         if (vnic->vlan_strip)
865                 req.flags |=
866                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
867
868         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
869
870         HWRM_CHECK_RESULT;
871
872         return rc;
873 }
874
/*
 * Allocate an RSS/COS/LB context for 'vnic'.  On success the new
 * context ID is stored in vnic->fw_rss_cos_lb_ctx.
 */
int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

	/* No request fields: the firmware simply hands back a context ID. */
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);

	return rc;
}
892
/*
 * Free the RSS/COS/LB context of 'vnic' and invalidate the cached ID.
 * NOTE(review): the invalidation value here is INVALID_HW_RING_ID while
 * the alloc path uses (uint16_t)HWRM_NA_SIGNATURE — presumably the same
 * 16-bit all-ones value; confirm against the definitions.
 */
int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;

	return rc;
}
912
/*
 * Free the firmware VNIC backing 'vnic'.  A no-op (returns 0) if the
 * VNIC was never allocated or has already been freed.
 */
int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	/* Nothing to free — makes the call idempotent. */
	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return rc;

	HWRM_PREP(req, VNIC_FREE, -1, resp);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_vnic_id = INVALID_HW_RING_ID;
	return rc;
}
933
/*
 * Program the RSS configuration of 'vnic': hash type plus the DMA
 * addresses of the indirection table and hash key, bound to the
 * previously allocated RSS context (fw_rss_cos_lb_ctx).
 */
int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

	/* Firmware reads the tables via DMA; pass physical addresses. */
	req.ring_grp_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
957
958 /*
959  * HWRM utility functions
960  */
961
962 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
963 {
964         unsigned int i;
965         int rc = 0;
966
967         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
968                 struct bnxt_tx_queue *txq;
969                 struct bnxt_rx_queue *rxq;
970                 struct bnxt_cp_ring_info *cpr;
971
972                 if (i >= bp->rx_cp_nr_rings) {
973                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
974                         cpr = txq->cp_ring;
975                 } else {
976                         rxq = bp->rx_queues[i];
977                         cpr = rxq->cp_ring;
978                 }
979
980                 rc = bnxt_hwrm_stat_clear(bp, cpr);
981                 if (rc)
982                         return rc;
983         }
984         return 0;
985 }
986
987 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
988 {
989         int rc;
990         unsigned int i;
991         struct bnxt_cp_ring_info *cpr;
992
993         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
994                 unsigned int idx = i + 1;
995
996                 if (i >= bp->rx_cp_nr_rings)
997                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
998                 else
999                         cpr = bp->rx_queues[i]->cp_ring;
1000                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1001                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
1002                         if (rc)
1003                                 return rc;
1004                 }
1005         }
1006         return 0;
1007 }
1008
1009 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1010 {
1011         unsigned int i;
1012         int rc = 0;
1013
1014         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1015                 struct bnxt_tx_queue *txq;
1016                 struct bnxt_rx_queue *rxq;
1017                 struct bnxt_cp_ring_info *cpr;
1018                 unsigned int idx = i + 1;
1019
1020                 if (i >= bp->rx_cp_nr_rings) {
1021                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1022                         cpr = txq->cp_ring;
1023                 } else {
1024                         rxq = bp->rx_queues[i];
1025                         cpr = rxq->cp_ring;
1026                 }
1027
1028                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);
1029
1030                 if (rc)
1031                         return rc;
1032         }
1033         return rc;
1034 }
1035
1036 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1037 {
1038         uint16_t i;
1039         uint32_t rc = 0;
1040
1041         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1042                 unsigned int idx = i + 1;
1043
1044                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
1045                         RTE_LOG(ERR, PMD,
1046                                 "Attempt to free invalid ring group %d\n",
1047                                 idx);
1048                         continue;
1049                 }
1050
1051                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1052
1053                 if (rc)
1054                         return rc;
1055         }
1056         return rc;
1057 }
1058
/*
 * Free a completion ring in firmware and reset its host-side state:
 * the FW ring ID (both in the ring and in grp_info[idx]), the
 * descriptor memory, and the raw consumer index.
 */
static void bnxt_free_cp_ring(struct bnxt *bp,
			      struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

	bnxt_hwrm_ring_free(bp, cp_ring,
			HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
	bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
	/* Zero the descriptors so a later re-allocation starts clean. */
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
			sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;
}
1072
/*
 * Free every TX, RX and completion ring in firmware and reset the
 * associated host-side ring state.  Each queue's data ring is freed
 * before its completion ring; the default completion ring (group
 * index 0) is freed last.
 */
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	/* TX rings: group index is rx_cp_nr_rings + i + 1. */
	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		unsigned int idx = bp->rx_cp_nr_rings + i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			/* Wipe descriptors and buffers; reset indices. */
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, idx);
	}

	/* RX rings: group index is i + 1. */
	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		unsigned int idx = i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->rx_desc_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_desc_ring));
			memset(rxr->rx_buf_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_buf_ring));
			rxr->rx_prod = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, idx);
	}

	/* Default completion ring */
	{
		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, 0);
	}

	return rc;
}
1136
1137 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1138 {
1139         uint16_t i;
1140         uint32_t rc = 0;
1141
1142         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1143                 unsigned int idx = i + 1;
1144
1145                 if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
1146                     bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
1147                         continue;
1148
1149                 rc = bnxt_hwrm_ring_grp_alloc(bp, idx);
1150
1151                 if (rc)
1152                         return rc;
1153         }
1154         return rc;
1155 }
1156
/*
 * Free the HWRM command response buffer allocated by
 * bnxt_alloc_hwrm_resources() (rte_malloc'd, not a memzone) and clear
 * the cached virtual/DMA addresses.
 */
void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	rte_free(bp->hwrm_cmd_resp_addr);
	bp->hwrm_cmd_resp_addr = NULL;
	bp->hwrm_cmd_resp_dma_addr = 0;
}
1164
1165 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1166 {
1167         struct rte_pci_device *pdev = bp->pdev;
1168         char type[RTE_MEMZONE_NAMESIZE];
1169
1170         sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1171                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1172         bp->max_req_len = HWRM_MAX_REQ_LEN;
1173         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1174         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1175         if (bp->hwrm_cmd_resp_addr == NULL)
1176                 return -ENOMEM;
1177         bp->hwrm_cmd_resp_dma_addr =
1178                 rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
1179         rte_spinlock_init(&bp->hwrm_lock);
1180
1181         return 0;
1182 }
1183
1184 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1185 {
1186         struct bnxt_filter_info *filter;
1187         int rc = 0;
1188
1189         STAILQ_FOREACH(filter, &vnic->filter, next) {
1190                 rc = bnxt_hwrm_clear_filter(bp, filter);
1191                 if (rc)
1192                         break;
1193         }
1194         return rc;
1195 }
1196
1197 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1198 {
1199         struct bnxt_filter_info *filter;
1200         int rc = 0;
1201
1202         STAILQ_FOREACH(filter, &vnic->filter, next) {
1203                 rc = bnxt_hwrm_set_filter(bp, vnic, filter);
1204                 if (rc)
1205                         break;
1206         }
1207         return rc;
1208 }
1209
1210 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1211 {
1212         struct bnxt_vnic_info *vnic;
1213         unsigned int i;
1214
1215         if (bp->vnic_info == NULL)
1216                 return;
1217
1218         vnic = &bp->vnic_info[0];
1219         bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
1220
1221         /* VNIC resources */
1222         for (i = 0; i < bp->nr_vnics; i++) {
1223                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1224
1225                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1226
1227                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1228                 bnxt_hwrm_vnic_free(bp, vnic);
1229         }
1230         /* Ring resources */
1231         bnxt_free_all_hwrm_rings(bp);
1232         bnxt_free_all_hwrm_ring_grps(bp);
1233         bnxt_free_all_hwrm_stat_ctxs(bp);
1234 }
1235
1236 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1237 {
1238         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1239
1240         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1241                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1242
1243         switch (conf_link_speed) {
1244         case ETH_LINK_SPEED_10M_HD:
1245         case ETH_LINK_SPEED_100M_HD:
1246                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1247         }
1248         return hw_link_duplex;
1249 }
1250
1251 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1252 {
1253         uint16_t eth_link_speed = 0;
1254
1255         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1256                 return ETH_LINK_SPEED_AUTONEG;
1257
1258         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1259         case ETH_LINK_SPEED_100M:
1260         case ETH_LINK_SPEED_100M_HD:
1261                 eth_link_speed =
1262                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB;
1263                 break;
1264         case ETH_LINK_SPEED_1G:
1265                 eth_link_speed =
1266                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1267                 break;
1268         case ETH_LINK_SPEED_2_5G:
1269                 eth_link_speed =
1270                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1271                 break;
1272         case ETH_LINK_SPEED_10G:
1273                 eth_link_speed =
1274                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1275                 break;
1276         case ETH_LINK_SPEED_20G:
1277                 eth_link_speed =
1278                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1279                 break;
1280         case ETH_LINK_SPEED_25G:
1281                 eth_link_speed =
1282                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1283                 break;
1284         case ETH_LINK_SPEED_40G:
1285                 eth_link_speed =
1286                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1287                 break;
1288         case ETH_LINK_SPEED_50G:
1289                 eth_link_speed =
1290                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1291                 break;
1292         default:
1293                 RTE_LOG(ERR, PMD,
1294                         "Unsupported link speed %d; default to AUTO\n",
1295                         conf_link_speed);
1296                 break;
1297         }
1298         return eth_link_speed;
1299 }
1300
1301 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1302                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1303                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1304                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1305
1306 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1307 {
1308         uint32_t one_speed;
1309
1310         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1311                 return 0;
1312
1313         if (link_speed & ETH_LINK_SPEED_FIXED) {
1314                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1315
1316                 if (one_speed & (one_speed - 1)) {
1317                         RTE_LOG(ERR, PMD,
1318                                 "Invalid advertised speeds (%u) for port %u\n",
1319                                 link_speed, port_id);
1320                         return -EINVAL;
1321                 }
1322                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1323                         RTE_LOG(ERR, PMD,
1324                                 "Unsupported advertised speed (%u) for port %u\n",
1325                                 link_speed, port_id);
1326                         return -EINVAL;
1327                 }
1328         } else {
1329                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1330                         RTE_LOG(ERR, PMD,
1331                                 "Unsupported advertised speeds (%u) for port %u\n",
1332                                 link_speed, port_id);
1333                         return -EINVAL;
1334                 }
1335         }
1336         return 0;
1337 }
1338
1339 static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
1340 {
1341         uint16_t ret = 0;
1342
1343         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1344                 link_speed = BNXT_SUPPORTED_SPEEDS;
1345
1346         if (link_speed & ETH_LINK_SPEED_100M)
1347                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1348         if (link_speed & ETH_LINK_SPEED_100M_HD)
1349                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1350         if (link_speed & ETH_LINK_SPEED_1G)
1351                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1352         if (link_speed & ETH_LINK_SPEED_2_5G)
1353                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1354         if (link_speed & ETH_LINK_SPEED_10G)
1355                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1356         if (link_speed & ETH_LINK_SPEED_20G)
1357                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1358         if (link_speed & ETH_LINK_SPEED_25G)
1359                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1360         if (link_speed & ETH_LINK_SPEED_40G)
1361                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1362         if (link_speed & ETH_LINK_SPEED_50G)
1363                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1364         return ret;
1365 }
1366
/*
 * Translate a HWRM PHY-query link speed into an ETH_SPEED_NUM_* value.
 * Speeds with no DPDK equivalent (e.g. 2GB) fall through to the
 * default, log an error and return ETH_SPEED_NUM_NONE.
 */
static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

	switch (hw_link_speed) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
		eth_link_speed = ETH_SPEED_NUM_100M;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
		eth_link_speed = ETH_SPEED_NUM_1G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
		eth_link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
		eth_link_speed = ETH_SPEED_NUM_10G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
		eth_link_speed = ETH_SPEED_NUM_20G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
		eth_link_speed = ETH_SPEED_NUM_25G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
		eth_link_speed = ETH_SPEED_NUM_40G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
		eth_link_speed = ETH_SPEED_NUM_50G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
	/* fallthrough: 2GB has no ETH_SPEED_NUM_* counterpart */
	default:
		RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
			hw_link_speed);
		break;
	}
	return eth_link_speed;
}
1404
1405 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1406 {
1407         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1408
1409         switch (hw_link_duplex) {
1410         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1411         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1412                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1413                 break;
1414         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1415                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1416                 break;
1417         default:
1418                 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
1419                         hw_link_duplex);
1420                 break;
1421         }
1422         return eth_link_duplex;
1423 }
1424
/*
 * Query the PHY state from firmware and translate it into the DPDK
 * rte_eth_link representation (speed, duplex, status, autoneg).
 * Returns 0 on success or the HWRM query error code.
 */
int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Get link config failed with rc %d\n", rc);
		goto exit;
	}
	if (link_info->link_up)
		link->link_speed =
			bnxt_parse_hw_link_speed(link_info->link_speed);
	else
		/* Link down: report the lowest speed as a placeholder. */
		link->link_speed = ETH_LINK_SPEED_10M;
	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
	link->link_status = link_info->link_up;
	/* auto_mode == NONE means the speed was forced, not negotiated. */
	link->link_autoneg = link_info->auto_mode ==
		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
		ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;
exit:
	return rc;
}
1449
/*
 * Push the configured link settings to firmware.  Validates the
 * requested speeds, then either restarts autoneg with an advertised
 * speed mask (speed == 0) or forces a single speed.  PHY configuration
 * is skipped entirely for VFs and NPAR PFs, which do not own the port.
 * Returns 0 on success or a negative error code.
 */
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
			bp->eth_dev->data->port_id);
	if (rc)
		goto error;

	memset(&link_req, 0, sizeof(link_req));
	/* 0 from the parser means "no fixed speed": use autoneg. */
	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.link_up = link_up;
	if (speed == 0) {
		link_req.phy_flags =
				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
		/* Upper bound for ONE_OR_BELOW mode: the fastest speed. */
		link_req.auto_link_speed =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB;
	} else {
		/* Force the single requested speed; reset PHY to apply. */
		link_req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
		link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE |
			HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
		link_req.link_speed = speed;
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	/* Preserve the currently negotiated pause settings. */
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Set link config failed with rc %d\n", rc);
	}

error:
	return rc;
}
1496
1497 /* JIRA 22088 */
/*
 * Query this function's configuration from firmware (fid 0xffff means
 * "the calling function").  Records the default VLAN for VFs and the
 * NPAR port partition type, if any.
 */
int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, FUNC_QCFG, -1, resp);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	if (BNXT_VF(bp)) {
		struct bnxt_vf_info *vf = &bp->vf;

		/* Hard Coded.. 0xfff VLAN ID mask */
		vf->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
	}

	/* Only recognized NPAR partition types are recorded. */
	switch (resp->port_partition_type) {
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	default:
		bp->port_partition_type = 0;
		break;
	}

	return rc;
}