net/bnxt: support LRO
[dpdk.git] / drivers / net / bnxt / bnxt_hwrm.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define HWRM_CMD_TIMEOUT                2000

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

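/*
 * Worked example (illustrative): page_getenum(3000) returns 12 and
 * page_roundup(3000) returns 4096, i.e. sizes are rounded up to the next
 * page size the hardware supports (4K here).
 */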
static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), or a positive non-zero HWRM error code if the
 * command is rejected by the ChiMP firmware.
 */

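/*
 * Write a fully-formed request into the BAR0 communication window, ring
 * the channel doorbell at BAR0 + 0x100, then poll the DMA response buffer
 * until the firmware sets the valid byte at the end of the response or
 * HWRM_CMD_TIMEOUT polls have elapsed. Callers must hold bp->hwrm_lock.
 */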
static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
                                        uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < bp->max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <=
                                bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
                        req->req_type);
                return -1;
        }
        return 0;
}

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
        int rc;

        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}

#define HWRM_PREP(req, type, cr, resp) \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(cr); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)

#define HWRM_CHECK_RESULT \
        { \
                if (rc) { \
                        RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
                                __func__, rc); \
                        return rc; \
                } \
                if (resp->error_code) { \
                        rc = rte_le_to_cpu_16(resp->error_code); \
                        if (resp->resp_len >= 16) { \
                                struct hwrm_err_output *tmp_hwrm_err_op = \
                                                        (void *)resp; \
                                RTE_LOG(ERR, PMD, \
                                        "%s error %d:%d:%08x:%04x\n", \
                                        __func__, \
                                        rc, tmp_hwrm_err_op->cmd_err, \
                                        rte_le_to_cpu_32(\
                                                tmp_hwrm_err_op->opaque_0), \
                                        rte_le_to_cpu_16(\
                                                tmp_hwrm_err_op->opaque_1)); \
                        } else { \
                                RTE_LOG(ERR, PMD, \
                                        "%s error %d\n", __func__, rc); \
                        } \
                        return rc; \
                } \
        }

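/*
 * Typical command sequence (illustrative sketch only; this mirrors the
 * pattern used by every bnxt_hwrm_*() function below):
 *
 *      struct hwrm_func_reset_input req = {.req_type = 0 };
 *      struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *      HWRM_PREP(req, FUNC_RESET, -1, resp);
 *      rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *      HWRM_CHECK_RESULT;
 */
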
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME: add the multicast flag once adding individual multicast
         * addresses is supported.
         */
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
                                    mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_clear_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = -1;

        return 0;
}

int bnxt_hwrm_set_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        int i;

        HWRM_PREP(req, FUNC_QCAPS, -1, resp);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        if (bp->pf.vf_info == NULL) {
                                RTE_LOG(ERR, PMD,
                                        "Failed to alloc vf info\n");
                                return -ENOMEM;
                        }
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        RTE_LOG(ERR, PMD,
                                        "Failed to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp))
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET, -1, resp);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));
        }

        /*
         * Forward all async events. The memset must come first: doing it
         * after setting individual bits would silently overwrite them.
         */
        memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
        req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: use a macro */

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];

        HWRM_PREP(req, VER_GET, -1, resp);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        /*
         * Hold the lock since we may be adjusting the response pointers.
         */
        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));

        /*
         * Cannot use HWRM_CHECK_RESULT here: its early returns would leave
         * hwrm_lock held. Check explicitly and unlock via the error path.
         */
        if (rc) {
                RTE_LOG(ERR, PMD, "%s failed rc:%d\n", __func__, rc);
                goto error;
        }
        if (resp->error_code) {
                rc = rte_le_to_cpu_16(resp->error_code);
                RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc);
                goto error;
        }

        RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj, resp->hwrm_intf_min,
                resp->hwrm_intf_upd,
                resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
        bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
                        (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
        RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

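        /* Pack versions as maj<<16 | min<<8 | upd, e.g. 1.5.1 -> 0x010501,
         * so that interface versions compare as plain integers.
         */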
        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj << 16;
        fw_version |= resp->hwrm_intf_min << 8;
        fw_version |= resp->hwrm_intf_upd;

        if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
                RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is newer than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "The driver may be missing features.\n");
                } else {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is older than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                RTE_LOG(ERR, PMD, "Unsupported request length\n");
                rc = -EINVAL;
                goto error;
        }
        bp->max_req_len = resp->max_req_win_len;
        max_resp_len = resp->max_resp_len;
        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        RTE_LOG(ERR, PMD,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

error:
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

        if (conf->link_up) {
                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                /*
                 * Note: ChiMP FW 20.2.1 and 20.2.2 return an error when we
                 * set any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        req.auto_mode |= conf->auto_mode;
                        enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                        req.auto_link_speed_mask = conf->auto_link_speed_mask;
                        enables |=
                           HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                        req.auto_link_speed = bp->link_info.auto_link_speed;
                        enables |=
                                HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
                }
                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Enable auto_pause only when it is requested without a
                 * forced setting; otherwise fall back to force_pause.
                 */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                RTE_LOG(INFO, PMD, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        link_info->phy_link_status = resp->link;
        if (link_info->phy_link_status !=
            HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK) {
                link_info->link_up = 1;
                link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        } else {
                link_info->link_up = 0;
                link_info->link_speed = 0;
        }
        link_info->duplex = resp->duplex;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

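/*
 * GET_QUEUE_INFO(x) relies on token pasting: GET_QUEUE_INFO(0) expands to
 *     bp->cos_queue[0].id = resp->queue_id0;
 *     bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */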
#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
        int rc = 0;
        uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC, -1, resp);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);
        req.length = rte_cpu_to_le_32(ring->ring_size);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = bp->cos_queue[0].id;
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
        default:
                RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                return -1;
        }
        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE, -1, resp);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        return 0;
}

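/*
 * A ring group ties together the completion ring (cr), Rx ring (rr),
 * aggregation ring (ar) and statistics context (sc) that feed one Rx
 * queue; the firmware returns a group ID that is later mapped into VNICs.
 */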
int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE, -1, resp);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        /* Check for an unallocated context before HWRM_PREP so an early
         * return does not burn a sequence ID or clear the response buffer.
         */
        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

        /* HWRM_PREP already assigned the sequence ID; do not bump it again. */
        req.update_period_ms = rte_cpu_to_le_32(1000);
        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE, -1, resp);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}


int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
                vnic->start_grp_id, vnic->end_grp_id);
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                ETHER_CRC_LEN + VLAN_TAG_SIZE;
        HWRM_PREP(req, VNIC_ALLOC, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
                                        struct bnxt_vnic_info *vnic,
                                        struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        pmode->flags = rte_le_to_cpu_32(resp->flags);
        /* dflt_vnic bit doesn't exist in the _cfg command */
        pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
        pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
        pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
        pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
                                       struct bnxt_vnic_info *vnic,
                                       struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
        req.flags = rte_cpu_to_le_32(pmode->flags);
        req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
        req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
        req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
        req.enables = rte_cpu_to_le_32(
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
        );

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

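/*
 * bnxt_hwrm_vnic_cfg() queries the VNIC's buffer placement modes before
 * issuing VNIC_CFG and restores them afterwards, so jumbo/HDS thresholds
 * configured earlier survive the reconfiguration.
 */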
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t ctx_enable_flag = 0;
        struct bnxt_plcmodes_cfg pmodes;

        rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
        if (rc)
                return rc;

        HWRM_PREP(req, VNIC_CFG, -1, resp);

        /* Only RSS support for now TBD: COS & LB */
        req.enables =
            rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
                             HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
        /* Accumulate the enables: a plain assignment here would let a
         * later valid rule overwrite an earlier one.
         */
        if (vnic->lb_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
        if (vnic->cos_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
        if (vnic->rss_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
        req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
        req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
        req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
        req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
        req.mru = rte_cpu_to_le_16(vnic->mru);
        if (vnic->func_default)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
        if (vnic->vlan_strip)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
        if (vnic->bd_stall)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
        if (vnic->roce_dual)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
        if (vnic->roce_only)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
        if (vnic->rss_dflt_cr)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

        return rc;
}

int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
                int16_t fw_vf_id)
{
        int rc = 0;
        struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_QCFG, -1, resp);

        req.enables =
                rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.vf_id = rte_cpu_to_le_16(fw_vf_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
        vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
        vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
        vnic->mru = rte_le_to_cpu_16(resp->mru);
        vnic->func_default = rte_le_to_cpu_32(
                        resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
        vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
        vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
        vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
        vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
        vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

        return rc;
}

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);

        return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

        req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->rss_rule = INVALID_HW_RING_ID;

        return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_free_input req = {.req_type = 0 };
        struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, VNIC_FREE, -1, resp);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
                           struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

        req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

        req.ring_grp_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_table_dma_addr);
        req.hash_key_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
        req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

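/*
 * Set the Rx buffer placement threshold for jumbo frames from the mbuf
 * pool geometry. Worked example (assuming the DPDK defaults): a 2176-byte
 * data room minus the 128-byte RTE_PKTMBUF_HEADROOM gives a jumbo_thresh
 * of 2048, so larger packets spill into aggregation buffers.
 */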
int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
                        struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t size;

        HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);

        req.flags = rte_cpu_to_le_32(
                        HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);

        req.enables = rte_cpu_to_le_32(
                HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);

        size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
        size -= RTE_PKTMBUF_HEADROOM;

        req.jumbo_thresh = rte_cpu_to_le_16(size);
        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

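/*
 * Enable or disable TPA (transparent packet aggregation) on a VNIC; this
 * is the firmware-side half of the LRO support added by this commit. The
 * aggregation limits below (max_agg_segs, max_aggs, min_agg_len) bound
 * how aggressively the NIC coalesces received TCP segments.
 */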
int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
                        struct bnxt_vnic_info *vnic, bool enable)
{
        int rc = 0;
        struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_TPA_CFG, -1, resp);

        if (enable) {
                req.enables = rte_cpu_to_le_32(
                                HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
                                HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
                                HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
                req.flags = rte_cpu_to_le_32(
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
                        HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
                req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
                req.max_agg_segs = rte_cpu_to_le_16(5);
                req.max_aggs =
                        rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
                req.min_agg_len = rte_cpu_to_le_32(512);
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
        struct hwrm_func_cfg_input req = {0};
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        HWRM_PREP(req, FUNC_CFG, -1, resp);

        req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
        req.enables = rte_cpu_to_le_32(
                        HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT;

        bp->pf.vf_info[vf].random_mac = false;

        return rc;
}

/*
 * HWRM utility functions
 */

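/*
 * Completion rings are indexed Rx first, then Tx: for an index i in
 * [0, rx_cp_nr_rings + tx_cp_nr_rings), i >= rx_cp_nr_rings selects
 * tx_queues[i - rx_cp_nr_rings], otherwise rx_queues[i].
 */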
int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_clear(bp, cpr);
                if (rc)
                        return rc;
        }
        return 0;
}

int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        int rc;
        unsigned int i;
        struct bnxt_cp_ring_info *cpr;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {

                if (i >= bp->rx_cp_nr_rings)
                        cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
                else
                        cpr = bp->rx_queues[i]->cp_ring;
                if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
                        rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
                        cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
                        /*
                         * TODO. Need a better way to reset grp_info.stats_ctx
                         * for Rx rings only. stats_ctx is not saved for Tx
                         * in grp_info.
                         */
                        bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
                        if (rc)
                                return rc;
                }
        }
        return 0;
}

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);

                if (rc)
                        return rc;
        }
        return rc;
}

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
        uint16_t idx;
        int rc = 0;

        for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {

                if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
                        RTE_LOG(ERR, PMD,
                                "Attempt to free invalid ring group %d\n",
                                idx);
                        continue;
                }

                rc = bnxt_hwrm_ring_grp_free(bp, idx);

                if (rc)
                        return rc;
        }
        return rc;
}
1334
1335 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1336                                 unsigned int idx __rte_unused)
1337 {
1338         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1339
1340         bnxt_hwrm_ring_free(bp, cp_ring,
1341                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1342         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1343         bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
1344         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1345                         sizeof(*cpr->cp_desc_ring));
1346         cpr->cp_raw_cons = 0;
1347 }
1348
1349 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1350 {
1351         unsigned int i;
1352         int rc = 0;
1353
1354         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1355                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1356                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1357                 struct bnxt_ring *ring = txr->tx_ring_struct;
1358                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1359                 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
1360
1361                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1362                         bnxt_hwrm_ring_free(bp, ring,
1363                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1364                         ring->fw_ring_id = INVALID_HW_RING_ID;
1365                         memset(txr->tx_desc_ring, 0,
1366                                         txr->tx_ring_struct->ring_size *
1367                                         sizeof(*txr->tx_desc_ring));
1368                         memset(txr->tx_buf_ring, 0,
1369                                         txr->tx_ring_struct->ring_size *
1370                                         sizeof(*txr->tx_buf_ring));
1371                         txr->tx_prod = 0;
1372                         txr->tx_cons = 0;
1373                 }
1374                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1375                         bnxt_free_cp_ring(bp, cpr, idx);
1376                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1377                 }
1378         }
1379
1380         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1381                 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1382                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1383                 struct bnxt_ring *ring = rxr->rx_ring_struct;
1384                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1385                 unsigned int idx = i + 1;
1386
1387                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1388                         bnxt_hwrm_ring_free(bp, ring,
1389                                         HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1390                         ring->fw_ring_id = INVALID_HW_RING_ID;
1391                         bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1392                         memset(rxr->rx_desc_ring, 0,
1393                                         rxr->rx_ring_struct->ring_size *
1394                                         sizeof(*rxr->rx_desc_ring));
1395                         memset(rxr->rx_buf_ring, 0,
1396                                         rxr->rx_ring_struct->ring_size *
1397                                         sizeof(*rxr->rx_buf_ring));
1398                         rxr->rx_prod = 0;
1399                         memset(rxr->ag_buf_ring, 0,
1400                                         rxr->ag_ring_struct->ring_size *
1401                                         sizeof(*rxr->ag_buf_ring));
1402                         rxr->ag_prod = 0;
1403                 }
1404                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1405                         bnxt_free_cp_ring(bp, cpr, idx);
1406                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1407                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1408                 }
1409         }
1410
1411         /* Default completion ring */
1412         {
1413                 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1414
1415                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1416                         bnxt_free_cp_ring(bp, cpr, 0);
1417                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1418                 }
1419         }
1420
1421         return rc;
1422 }
1423
1424 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1425 {
1426         uint16_t i;
1427         int rc = 0;
1428
1429         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1430                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1431                 if (rc)
1432                         return rc;
1433         }
1434         return rc;
1435 }
1436
1437 void bnxt_free_hwrm_resources(struct bnxt *bp)
1438 {
1439         /* Release memzone */
1440         rte_free(bp->hwrm_cmd_resp_addr);
1441         bp->hwrm_cmd_resp_addr = NULL;
1442         bp->hwrm_cmd_resp_dma_addr = 0;
1443 }
1444
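/*
 * Allocate the buffer that receives HWRM command responses. It must be
 * page-locked and physically resolvable (via rte_mem_virt2phy()) because
 * the firmware DMAs completions into it directly.
 */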
1445 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1446 {
1447         struct rte_pci_device *pdev = bp->pdev;
1448         char type[RTE_MEMZONE_NAMESIZE];
1449
1450         snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
1451                 pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1452         bp->max_req_len = HWRM_MAX_REQ_LEN;
1453         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1454         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1455         if (bp->hwrm_cmd_resp_addr == NULL)
1456                 return -ENOMEM;
1457         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1458         bp->hwrm_cmd_resp_dma_addr =
1459                 rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
1460         if (bp->hwrm_cmd_resp_dma_addr == 0) {
1461                 RTE_LOG(ERR, PMD,
1462                         "unable to map response address to physical memory\n");
1463                 return -ENOMEM;
1464         }
1465         rte_spinlock_init(&bp->hwrm_lock);
1466
1467         return 0;
1468 }
1469
1470 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1471 {
1472         struct bnxt_filter_info *filter;
1473         int rc = 0;
1474
1475         STAILQ_FOREACH(filter, &vnic->filter, next) {
1476                 rc = bnxt_hwrm_clear_filter(bp, filter);
1477                 if (rc)
1478                         break;
1479         }
1480         return rc;
1481 }
1482
1483 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1484 {
1485         struct bnxt_filter_info *filter;
1486         int rc = 0;
1487
1488         STAILQ_FOREACH(filter, &vnic->filter, next) {
1489                 rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
1490                 if (rc)
1491                         break;
1492         }
1493         return rc;
1494 }
1495
1496 void bnxt_free_tunnel_ports(struct bnxt *bp)
1497 {
1498         if (bp->vxlan_port_cnt)
1499                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1500                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1501         bp->vxlan_port = 0;
1502         if (bp->geneve_port_cnt)
1503                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1504                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1505         bp->geneve_port = 0;
1506 }
1507
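/*
 * Tear down all HWRM-managed state, roughly in reverse order of
 * allocation: per-VNIC filters and contexts first (including disabling
 * TPA/LRO), then the VNICs themselves, then rings, ring groups,
 * statistic contexts and finally the tunnel destination ports.
 */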
1508 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1509 {
1510         struct bnxt_vnic_info *vnic;
1511         unsigned int i;
1512
1513         if (bp->vnic_info == NULL)
1514                 return;
1515
1516         vnic = &bp->vnic_info[0];
1517         if (BNXT_PF(bp))
1518                 bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
1519
1520         /* VNIC resources */
1521         for (i = 0; i < bp->nr_vnics; i++) {
1522                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1523
1524                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1525
1526                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1527
1528                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1529
1530                 bnxt_hwrm_vnic_free(bp, vnic);
1531         }
1532         /* Ring resources */
1533         bnxt_free_all_hwrm_rings(bp);
1534         bnxt_free_all_hwrm_ring_grps(bp);
1535         bnxt_free_all_hwrm_stat_ctxs(bp);
1536         bnxt_free_tunnel_ports(bp);
1537 }
1538
1539 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1540 {
1541         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1542
1543         if (!(conf_link_speed & ETH_LINK_SPEED_FIXED))
1544                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1545
1546         switch (conf_link_speed) {
1547         case ETH_LINK_SPEED_10M_HD:
1548         case ETH_LINK_SPEED_100M_HD:
1549                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1550         }
1551         return hw_link_duplex;
1552 }
1553
1554 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1555 {
1556         uint16_t eth_link_speed = 0;
1557
1558         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1559                 return ETH_LINK_SPEED_AUTONEG;
1560
1561         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1562         case ETH_LINK_SPEED_100M:
1563         case ETH_LINK_SPEED_100M_HD:
1564                 eth_link_speed =
1565                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1566                 break;
1567         case ETH_LINK_SPEED_1G:
1568                 eth_link_speed =
1569                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
1570                 break;
1571         case ETH_LINK_SPEED_2_5G:
1572                 eth_link_speed =
1573                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
1574                 break;
1575         case ETH_LINK_SPEED_10G:
1576                 eth_link_speed =
1577                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
1578                 break;
1579         case ETH_LINK_SPEED_20G:
1580                 eth_link_speed =
1581                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
1582                 break;
1583         case ETH_LINK_SPEED_25G:
1584                 eth_link_speed =
1585                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
1586                 break;
1587         case ETH_LINK_SPEED_40G:
1588                 eth_link_speed =
1589                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
1590                 break;
1591         case ETH_LINK_SPEED_50G:
1592                 eth_link_speed =
1593                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
1594                 break;
1595         default:
1596                 RTE_LOG(ERR, PMD,
1597                         "Unsupported link speed %u; default to AUTO\n",
1598                         conf_link_speed);
1599                 break;
1600         }
1601         return eth_link_speed;
1602 }
1603
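/*
 * Validate rte_eth_conf.link_speeds. With ETH_LINK_SPEED_FIXED set,
 * exactly one speed bit may accompany the flag (hence the power-of-two
 * test below); without it, any subset of BNXT_SUPPORTED_SPEEDS is a
 * valid autoneg advertisement. For example (illustrative values only),
 * ETH_LINK_SPEED_FIXED | ETH_LINK_SPEED_25G forces 25G, while
 * ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G advertises both speeds for
 * autonegotiation.
 */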
1604 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1605                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1606                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1607                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1608
1609 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1610 {
1611         uint32_t one_speed;
1612
1613         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1614                 return 0;
1615
1616         if (link_speed & ETH_LINK_SPEED_FIXED) {
1617                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1618
1619                 if (one_speed & (one_speed - 1)) {
1620                         RTE_LOG(ERR, PMD,
1621                                 "Invalid advertised speeds (%u) for port %u\n",
1622                                 link_speed, port_id);
1623                         return -EINVAL;
1624                 }
1625                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1626                         RTE_LOG(ERR, PMD,
1627                                 "Unsupported advertised speed (%u) for port %u\n",
1628                                 link_speed, port_id);
1629                         return -EINVAL;
1630                 }
1631         } else {
1632                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1633                         RTE_LOG(ERR, PMD,
1634                                 "Unsupported advertised speeds (%u) for port %u\n",
1635                                 link_speed, port_id);
1636                         return -EINVAL;
1637                 }
1638         }
1639         return 0;
1640 }
1641
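/*
 * Translate an rte_eth link_speeds bitmap into the HWRM
 * auto_link_speed_mask encoding. As written, ETH_LINK_SPEED_100M and
 * ETH_LINK_SPEED_100M_HD both set the same 100MB mask bit; duplex is
 * requested separately via bnxt_parse_eth_link_duplex().
 */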
1642 static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
1643 {
1644         uint16_t ret = 0;
1645
1646         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1647                 link_speed = BNXT_SUPPORTED_SPEEDS;
1648
1649         if (link_speed & ETH_LINK_SPEED_100M)
1650                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1651         if (link_speed & ETH_LINK_SPEED_100M_HD)
1652                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1653         if (link_speed & ETH_LINK_SPEED_1G)
1654                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1655         if (link_speed & ETH_LINK_SPEED_2_5G)
1656                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1657         if (link_speed & ETH_LINK_SPEED_10G)
1658                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1659         if (link_speed & ETH_LINK_SPEED_20G)
1660                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1661         if (link_speed & ETH_LINK_SPEED_25G)
1662                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1663         if (link_speed & ETH_LINK_SPEED_40G)
1664                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1665         if (link_speed & ETH_LINK_SPEED_50G)
1666                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1667         return ret;
1668 }
1669
1670 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1671 {
1672         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1673
1674         switch (hw_link_speed) {
1675         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1676                 eth_link_speed = ETH_SPEED_NUM_100M;
1677                 break;
1678         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1679                 eth_link_speed = ETH_SPEED_NUM_1G;
1680                 break;
1681         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1682                 eth_link_speed = ETH_SPEED_NUM_2_5G;
1683                 break;
1684         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1685                 eth_link_speed = ETH_SPEED_NUM_10G;
1686                 break;
1687         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1688                 eth_link_speed = ETH_SPEED_NUM_20G;
1689                 break;
1690         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1691                 eth_link_speed = ETH_SPEED_NUM_25G;
1692                 break;
1693         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1694                 eth_link_speed = ETH_SPEED_NUM_40G;
1695                 break;
1696         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1697                 eth_link_speed = ETH_SPEED_NUM_50G;
1698                 break;
1699         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1700         default:
1701                 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
1702                         hw_link_speed);
1703                 break;
1704         }
1705         return eth_link_speed;
1706 }
1707
1708 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1709 {
1710         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1711
1712         switch (hw_link_duplex) {
1713         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1714         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1715                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1716                 break;
1717         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1718                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1719                 break;
1720         default:
1721                 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
1722                         hw_link_duplex);
1723                 break;
1724         }
1725         return eth_link_duplex;
1726 }
1727
1728 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
1729 {
1730         int rc = 0;
1731         struct bnxt_link_info *link_info = &bp->link_info;
1732
1733         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
1734         if (rc) {
1735                 RTE_LOG(ERR, PMD,
1736                         "Get link config failed with rc %d\n", rc);
1737                 goto exit;
1738         }
1739         if (link_info->link_up)
1740                 link->link_speed =
1741                         bnxt_parse_hw_link_speed(link_info->link_speed);
1742         else
1743                 link->link_speed = ETH_SPEED_NUM_NONE;
1744         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
1745         link->link_status = link_info->link_up;
1746         link->link_autoneg = link_info->auto_mode ==
1747                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
1748                 ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;
1749 exit:
1750         return rc;
1751 }
1752
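/*
 * Apply the link configuration from dev_conf to the PHY. A zero result
 * from bnxt_parse_eth_link_speed() selects autoneg (restart autoneg
 * with an advertisement mask); any other value forces that single
 * speed. Illustrative port-start sequence (a sketch, error handling
 * elided):
 *
 *	rc = bnxt_set_hwrm_link_config(bp, true);
 *	if (!rc)
 *		rc = bnxt_get_hwrm_link_config(bp, &eth_dev->data->dev_link);
 */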
1753 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
1754 {
1755         int rc = 0;
1756         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1757         struct bnxt_link_info link_req;
1758         uint16_t speed;
1759
1760         if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
1761                 return 0;
1762
1763         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
1764                         bp->eth_dev->data->port_id);
1765         if (rc)
1766                 goto error;
1767
1768         memset(&link_req, 0, sizeof(link_req));
1769         link_req.link_up = link_up;
1770         if (!link_up)
1771                 goto port_phy_cfg;
1772
1773         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
1774         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
1775         if (speed == 0) {
1776                 link_req.phy_flags |=
1777                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
1778                 link_req.auto_mode =
1779                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
1780                 link_req.auto_link_speed_mask =
1781                         bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
1782         } else {
1783                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
1784                 link_req.link_speed = speed;
1785                 RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
1786         }
1787         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
1788         link_req.auto_pause = bp->link_info.auto_pause;
1789         link_req.force_pause = bp->link_info.force_pause;
1790
1791 port_phy_cfg:
1792         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
1793         if (rc) {
1794                 RTE_LOG(ERR, PMD,
1795                         "Set link config failed with rc %d\n", rc);
1796         }
1797
1798         rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
1799 error:
1800         return rc;
1801 }
1802
1803 /* JIRA 22088 */
1804 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
1805 {
1806         struct hwrm_func_qcfg_input req = {0};
1807         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1808         int rc = 0;
1809
1810         HWRM_PREP(req, FUNC_QCFG, -1, resp);
1811         req.fid = rte_cpu_to_le_16(0xffff);
1812
1813         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1814
1815         HWRM_CHECK_RESULT;
1816
1817         /* Hard-coded 12-bit (0xfff) VLAN ID mask */
1818         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
1819
1820         switch (resp->port_partition_type) {
1821         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
1822         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
1823         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
1824                 bp->port_partition_type = resp->port_partition_type;
1825                 break;
1826         default:
1827                 bp->port_partition_type = 0;
1828                 break;
1829         }
1830
1831         return rc;
1832 }
1833
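/*
 * Fallback used when FUNC_QCAPS fails for a VF: synthesize a qcaps
 * response from the values we just attempted to configure so that the
 * PF resource accounting in reserve_resources_from_vf() still has
 * plausible numbers to subtract.
 */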
1834 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
1835                                    struct hwrm_func_qcaps_output *qcaps)
1836 {
1837         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
1838         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
1839                sizeof(qcaps->mac_address));
1840         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
1841         qcaps->max_rx_rings = fcfg->num_rx_rings;
1842         qcaps->max_tx_rings = fcfg->num_tx_rings;
1843         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
1844         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
1845         qcaps->max_vfs = 0;
1846         qcaps->first_vf_id = 0;
1847         qcaps->max_vnics = fcfg->num_vnics;
1848         qcaps->max_decap_records = 0;
1849         qcaps->max_encap_records = 0;
1850         qcaps->max_tx_wm_flows = 0;
1851         qcaps->max_tx_em_flows = 0;
1852         qcaps->max_rx_wm_flows = 0;
1853         qcaps->max_rx_em_flows = 0;
1854         qcaps->max_flow_id = 0;
1855         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
1856         qcaps->max_sp_tx_rings = 0;
1857         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
1858 }
1859
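/*
 * Configure the PF with tx_rings TX rings and the current maxima for
 * all other resources. The MTU/MRU handed to firmware include the L2
 * overhead (Ethernet header, CRC and one VLAN tag) on top of the
 * stack-visible MTU.
 */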
1860 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
1861 {
1862         struct hwrm_func_cfg_input req = {0};
1863         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1864         int rc;
1865
1866         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
1867                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
1868                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
1869                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
1870                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1871                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
1872                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
1873                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1874                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
1875                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
1876         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
1877         req.mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1878                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
1879         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1880                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
1881         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
1882         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
1883         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
1884         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
1885         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
1886         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
1887         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
1888         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
1889         req.fid = rte_cpu_to_le_16(0xffff);
1890
1891         HWRM_PREP(req, FUNC_CFG, -1, resp);
1892
1893         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1894         HWRM_CHECK_RESULT;
1895
1896         return rc;
1897 }
1898
1899 static void populate_vf_func_cfg_req(struct bnxt *bp,
1900                                      struct hwrm_func_cfg_input *req,
1901                                      int num_vfs)
1902 {
1903         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
1904                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
1905                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
1906                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
1907                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1908                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
1909                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
1910                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1911                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
1912                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
1913
1914         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1915                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
1916         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1917                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
1918         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
1919                                                 (num_vfs + 1));
1920         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
1921         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
1922                                                (num_vfs + 1));
1923         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
1924         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
1925         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
1926         /* TODO: For now, do not support VMDq/RFS on VFs. */
1927         req->num_vnics = rte_cpu_to_le_16(1);
1928         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
1929                                                  (num_vfs + 1));
1930 }
1931
1932 static void add_random_mac_if_needed(struct bnxt *bp,
1933                                      struct hwrm_func_cfg_input *cfg_req,
1934                                      int vf)
1935 {
1936         struct ether_addr mac;
1937
1938         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
1939                 return;
1940
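        /* An all-zero default MAC means firmware has not assigned one. */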
1941         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
1942                 cfg_req->enables |=
1943                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1944                 eth_random_addr(cfg_req->dflt_mac_addr);
1945                 bp->pf.vf_info[vf].random_mac = true;
1946         } else {
1947                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
1948         }
1949 }
1950
1951 static void reserve_resources_from_vf(struct bnxt *bp,
1952                                       struct hwrm_func_cfg_input *cfg_req,
1953                                       int vf)
1954 {
1955         struct hwrm_func_qcaps_input req = {0};
1956         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1957         int rc;
1958
1959         /* Get the actual allocated values now */
1960         HWRM_PREP(req, FUNC_QCAPS, -1, resp);
1961         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1962         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1963
1964         if (rc) {
1965                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
1966                 copy_func_cfg_to_qcaps(cfg_req, resp);
1967         } else if (resp->error_code) {
1968                 rc = rte_le_to_cpu_16(resp->error_code);
1969                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
1970                 copy_func_cfg_to_qcaps(cfg_req, resp);
1971         }
1972
1973         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
1974         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
1975         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
1976         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
1977         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
1978         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
1979         /*
1980          * TODO: While VMDq is not supported on VFs, max_vnics is always
1981          * forced to 1 in this case, so there is nothing to subtract here:
1982          * bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
1983          */
1984         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
1985 }
1986
1987 static int update_pf_resource_max(struct bnxt *bp)
1988 {
1989         struct hwrm_func_qcfg_input req = {0};
1990         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1991         int rc;
1992
1993         /* And copy the allocated numbers into the pf struct */
1994         HWRM_PREP(req, FUNC_QCFG, -1, resp);
1995         req.fid = rte_cpu_to_le_16(0xffff);
1996         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1997         HWRM_CHECK_RESULT;
1998
1999         /* TODO: only the TX ring value appears to reflect the actual allocation */
2000         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2001         bp->pf.evb_mode = resp->evb_mode;
2002
2003         return rc;
2004 }
2005
2006 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2007 {
2008         int rc;
2009
2010         if (!BNXT_PF(bp)) {
2011                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2012                 return -1;
2013         }
2014
2015         rc = bnxt_hwrm_func_qcaps(bp);
2016         if (rc)
2017                 return rc;
2018
2019         bp->pf.func_cfg_flags &=
2020                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2021                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2022         bp->pf.func_cfg_flags |=
2023                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2024         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2025         return rc;
2026 }
2027
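/*
 * VF provisioning sequence, as implemented below:
 *  1. Shrink the PF to a single TX ring so enough rings remain for
 *     the VFs.
 *  2. Allocate and register a page-locked buffer for HWRM requests
 *     forwarded from the VFs.
 *  3. FUNC_CFG each VF with an even split of the PF resources, then
 *     subtract what each VF actually received from the PF totals.
 *  4. Re-configure the PF with the remaining resources.
 */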
2028 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2029 {
2030         struct hwrm_func_cfg_input req = {0};
2031         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2032         int i;
2033         size_t sz;
2034         int rc = 0;
2035         size_t req_buf_sz;
2036
2037         if (!BNXT_PF(bp)) {
2038                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2039                 return -1;
2040         }
2041
2042         rc = bnxt_hwrm_func_qcaps(bp);
2043
2044         if (rc)
2045                 return rc;
2046
2047         bp->pf.active_vfs = num_vfs;
2048
2049         /*
2050          * First, configure the PF to only use one TX ring.  This ensures that
2051          * there are enough rings for all VFs.
2052          *
2053          * If we don't do this, when we call func_alloc() later, we will lock
2054          * extra rings to the PF that won't be available during func_cfg() of
2055          * the VFs.
2056          *
2057          * This has been fixed with firmware versions above 20.6.54
2058          */
2059         bp->pf.func_cfg_flags &=
2060                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2061                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2062         bp->pf.func_cfg_flags |=
2063                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2064         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2065         if (rc)
2066                 return rc;
2067
2068         /*
2069          * Now, create and register a buffer to hold forwarded VF requests
2070          */
2071         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2072         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2073                 page_roundup(req_buf_sz));
2074         if (bp->pf.vf_req_buf == NULL) {
2075                 rc = -ENOMEM;
2076                 goto error_free;
2077         }
2078         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2079                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2080         for (i = 0; i < num_vfs; i++)
2081                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2082                                         (i * HWRM_MAX_REQ_LEN);
2083
2084         rc = bnxt_hwrm_func_buf_rgtr(bp);
2085         if (rc)
2086                 goto error_free;
2087
2088         populate_vf_func_cfg_req(bp, &req, num_vfs);
2089
2090         bp->pf.active_vfs = 0;
2091         for (i = 0; i < num_vfs; i++) {
2092                 add_random_mac_if_needed(bp, &req, i);
2093
2094                 HWRM_PREP(req, FUNC_CFG, -1, resp);
2095                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2096                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2097                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2098
2099                 /* Clear enable flag for next pass */
2100                 req.enables &= ~rte_cpu_to_le_32(
2101                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2102
2103                 if (rc || resp->error_code) {
2104                         RTE_LOG(ERR, PMD,
2105                                 "Failed to initialize VF %d\n", i);
2106                         RTE_LOG(ERR, PMD,
2107                                 "Not all VFs available. (%d, %d)\n",
2108                                 rc, resp->error_code);
2109                         break;
2110                 }
2111
2112                 reserve_resources_from_vf(bp, &req, i);
2113                 bp->pf.active_vfs++;
2114         }
2115
2116         /*
2117          * Now configure the PF to use "the rest" of the resources
2118          * We're using STD_TX_RING_MODE here though which will limit the TX
2119          * rings.  This will allow QoS to function properly.  Not setting this
2120          * will cause PF rings to break bandwidth settings.
2121          */
2122         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2123         if (rc)
2124                 goto error_free;
2125
2126         rc = update_pf_resource_max(bp);
2127         if (rc)
2128                 goto error_free;
2129
2130         return rc;
2131
2132 error_free:
2133         bnxt_hwrm_func_buf_unrgtr(bp);
2134         return rc;
2135 }
2136
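/*
 * Ask firmware to parse the given UDP destination port as VXLAN or
 * Geneve traffic and cache the returned firmware port identifier; that
 * identifier, not the UDP port itself, is what
 * bnxt_hwrm_tunnel_dst_port_free() takes.
 */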
2137 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2138                                 uint8_t tunnel_type)
2139 {
2140         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2141         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2142         int rc = 0;
2143
2144         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp);
2145         req.tunnel_type = tunnel_type;
2146         req.tunnel_dst_port_val = port;
2147         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2148         HWRM_CHECK_RESULT;
2149
2150         switch (tunnel_type) {
2151         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2152                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2153                 bp->vxlan_port = port;
2154                 break;
2155         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2156                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2157                 bp->geneve_port = port;
2158                 break;
2159         default:
2160                 break;
2161         }
2162         return rc;
2163 }
2164
2165 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2166                                 uint8_t tunnel_type)
2167 {
2168         struct hwrm_tunnel_dst_port_free_input req = {0};
2169         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2170         int rc = 0;
2171
2172         HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp);
2173         req.tunnel_type = tunnel_type;
2174         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2175         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2176         HWRM_CHECK_RESULT;
2177
2178         return rc;
2179 }
2180
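/*
 * Register the VF-request forwarding buffer with firmware as one
 * "page" large enough for all active VFs. req_buf_page_size is the
 * log2 bucket returned by page_getenum(); e.g., assuming
 * HWRM_MAX_REQ_LEN is 128 (illustrative arithmetic), 4 VFs need
 * 512 bytes, which page_getenum() maps to 12 (the 4 KiB bucket).
 */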
2181 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2182 {
2183         int rc = 0;
2184         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2185         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2186
2187         HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);
2188
2189         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2190         req.req_buf_page_size = rte_cpu_to_le_16(
2191                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2192         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2193         req.req_buf_page_addr[0] =
2194                 rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
2195         if (req.req_buf_page_addr[0] == 0) {
2196                 RTE_LOG(ERR, PMD,
2197                         "unable to map buffer address to physical memory\n");
2198                 return -ENOMEM;
2199         }
2200
2201         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2202
2203         HWRM_CHECK_RESULT;
2204
2205         return rc;
2206 }
2207
2208 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2209 {
2210         int rc = 0;
2211         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2212         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2213
2214         HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);
2215
2216         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2217
2218         HWRM_CHECK_RESULT;
2219
2220         return rc;
2221 }
2222
2223 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2224 {
2225         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2226         struct hwrm_func_cfg_input req = {0};
2227         int rc;
2228
2229         HWRM_PREP(req, FUNC_CFG, -1, resp);
2230         req.fid = rte_cpu_to_le_16(0xffff);
2231         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2232         req.enables = rte_cpu_to_le_32(
2233                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2234         req.async_event_cr = rte_cpu_to_le_16(
2235                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2236         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2237         HWRM_CHECK_RESULT;
2238
2239         return rc;
2240 }
2241
2242 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2243 {
2244         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2245         struct hwrm_func_vf_cfg_input req = {0};
2246         int rc;
2247
2248         HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
2249         req.enables = rte_cpu_to_le_32(
2250                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2251         req.async_event_cr = rte_cpu_to_le_16(
2252                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2253         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2254         HWRM_CHECK_RESULT;
2255
2256         return rc;
2257 }
2258
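/*
 * Response path for HWRM requests forwarded from a VF:
 * REJECT_FWD_RESP tells firmware to fail the encapsulated request,
 * while EXEC_FWD_RESP (below) tells it to execute the request as
 * received.
 */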
2259 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2260                               void *encaped, size_t ec_size)
2261 {
2262         int rc = 0;
2263         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2264         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2265
2266         if (ec_size > sizeof(req.encap_request))
2267                 return -1;
2268
2269         HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);
2270
2271         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2272         memcpy(req.encap_request, encaped, ec_size);
2273
2274         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2275
2276         HWRM_CHECK_RESULT;
2277
2278         return rc;
2279 }
2280
2281 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2282                                        struct ether_addr *mac)
2283 {
2284         struct hwrm_func_qcfg_input req = {0};
2285         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2286         int rc;
2287
2288         HWRM_PREP(req, FUNC_QCFG, -1, resp);
2289         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2290         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2291
2292         HWRM_CHECK_RESULT;
2293
2294         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2295         return rc;
2296 }
2297
2298 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2299                             void *encaped, size_t ec_size)
2300 {
2301         int rc = 0;
2302         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2303         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2304
2305         if (ec_size > sizeof(req.encap_request))
2306                 return -1;
2307
2308         HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
2309
2310         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2311         memcpy(req.encap_request, encaped, ec_size);
2312
2313         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2314
2315         HWRM_CHECK_RESULT;
2316
2317         return rc;
2318 }
2319
2320 int bnxt_hwrm_port_qstats(struct bnxt *bp)
2321 {
2322         struct hwrm_port_qstats_input req = {0};
2323         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2324         struct bnxt_pf_info *pf = &bp->pf;
2325         int rc;
2326
2327         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2328                 return 0;
2329
2330         HWRM_PREP(req, PORT_QSTATS, -1, resp);
2331         req.port_id = rte_cpu_to_le_16(pf->port_id);
2332         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
2333         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
2334         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2335         HWRM_CHECK_RESULT;
2336         return rc;
2337 }
2338
2339 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
2340 {
2341         struct hwrm_port_clr_stats_input req = {0};
2342         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2343         struct bnxt_pf_info *pf = &bp->pf;
2344         int rc;
2345
2346         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2347                 return 0;
2348
2349         HWRM_PREP(req, PORT_CLR_STATS, -1, resp);
2350         req.port_id = rte_cpu_to_le_16(pf->port_id);
2351         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2352         HWRM_CHECK_RESULT;
2353         return rc;
2354 }