net/bnxt: return standard error codes for HWRM command
drivers/net/bnxt/bnxt_hwrm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT                6000000
#define HWRM_SHORT_CMD_TIMEOUT          50000
#define HWRM_SPEC_CODE_1_8_3            0x10803
#define HWRM_VERSION_1_9_1              0x10901
#define HWRM_VERSION_1_9_2              0x10903

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

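/*
 * Map a size in bytes to the smallest supported page-size exponent that can
 * hold it, e.g. page_getenum(3000) returns 12 (4KB pages).  page_roundup()
 * returns the corresponding rounded-up size in bytes.
 */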
static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}

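/*
 * Fill in the HWRM page attribute and page directory fields for a ring
 * memory area: a ring that spans multiple pages is described indirectly
 * through its page table, a single-page ring points directly at the page.
 */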
static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
                                  uint8_t *pg_attr,
                                  uint64_t *pg_dir)
{
        if (rmem->nr_pages > 1) {
                *pg_attr = 1;
                *pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
        } else {
                *pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
        }
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return 0 on success, a negative errno
 * value (e.g. -ETIMEDOUT) if bnxt_hwrm_send_message() fails, and a negative
 * errno value mapped from the HWRM error code if the command is failed by
 * the ChiMP firmware.
 */

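/*
 * Write a request into the ChiMP or Kong communication channel in BAR0,
 * using the short command format when required, ring the channel doorbell,
 * and poll for the valid bit in the response.  Returns 0 on success and
 * -ETIMEDOUT if no valid response arrives within the timeout.
 */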
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                  uint32_t msg_len, bool use_kong_mb)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };
        uint16_t bar_offset = use_kong_mb ?
                GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
        uint16_t mb_trigger_offset = use_kong_mb ?
                GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
        uint32_t timeout;

        /* Do not send HWRM commands to firmware in error state */
        if (bp->flags & BNXT_FLAG_FATAL_ERROR)
                return 0;

        /* For the VER_GET command, use the shorter 50ms timeout */
        if (rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
                timeout = HWRM_SHORT_CMD_TIMEOUT;
        else
                timeout = HWRM_CMD_TIMEOUT;

        if (bp->flags & BNXT_FLAG_SHORT_CMD ||
            msg_len > bp->max_req_len) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
        rte_write32(1, bar);
        /*
         * Make sure the channel doorbell ring command completes before
         * reading the response to avoid getting stale or invalid
         * responses.
         */
        rte_io_mb();

        /* Poll for the valid bit */
        for (i = 0; i < timeout; i++) {
                /* Sanity check on the resp->resp_len */
                rte_cio_rmb();
                if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(1);
        }

        if (i >= timeout) {
                /* Suppress VER_GET timeout messages during reset recovery */
                if (bp->flags & BNXT_FLAG_FW_RESET &&
                    rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
                        return -ETIMEDOUT;

                PMD_DRV_LOG(ERR, "Error(timeout) sending msg 0x%04x\n",
                            req->req_type);
                return -ETIMEDOUT;
        }
        return 0;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands.  It grabs the
 * spinlock and does initial processing.
 *
 * HWRM_CHECK_RESULT() checks the HWRM status; on failure it releases the
 * spinlock and returns from the enclosing function, so on success the
 * spinlock remains held.  If a function does not use the regular int return
 * codes, HWRM_CHECK_RESULT() should not be used directly; rather, it should
 * be copied and modified to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
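/*
 * A minimal sketch of the canonical call sequence.  The FOO command is
 * hypothetical, used only for illustration (hwrm_foo_input/_output do not
 * exist in hsi_struct_def_dpdk.h); the real commands below all follow this
 * pattern:
 *
 *	int bnxt_hwrm_foo(struct bnxt *bp)
 *	{
 *		int rc = 0;
 *		struct hwrm_foo_input req = {.req_type = 0 };
 *		struct hwrm_foo_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *		HWRM_PREP(req, FOO, BNXT_USE_CHIMP_MB);	 (locks hwrm_lock)
 *		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
 *					    BNXT_USE_CHIMP_MB);
 *		HWRM_CHECK_RESULT();	 (on error: unlocks and returns rc)
 *		... read fields from *resp while the lock is held ...
 *		HWRM_UNLOCK();
 *		return rc;
 *	}
 */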
#define HWRM_PREP(req, type, kong) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(-1); \
        req.seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
                rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT_SILENT() do {\
        if (rc) { \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
                        rc = -ENOSPC; \
                else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
                else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
                        rc = -ENOTSUP; \
                else if (rc > 0) \
                        rc = -EIO; \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        PMD_DRV_LOG(ERR, \
                                "error %d:%d:%08x:%04x\n", \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        PMD_DRV_LOG(ERR, "error %d\n", rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
                        rc = -ENOSPC; \
                else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
                else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
                        rc = -ENOTSUP; \
                else if (rc > 0) \
                        rc = -EIO; \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME: add the multicast flag once multicast adding options are
         * supported by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->flags & BNXT_VNIC_INFO_MCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the
         * set_rx_mask list was used for anti-spoof.  In 1.8.0, the TX path
         * configuration was removed from the set_rx_mask call, and this
         * command was added.
         *
         * This command is also present on 1.7.8.0, and from 1.7.8.11 and
         * higher.
         */
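        /*
         * bp->fw_ver packs the firmware version as
         * (maj << 24) | (min << 16) | (bld << 8) | rsvd, so for example
         * 1.8.0.0 is (1 << 24) | (8 << 16) and 1.7.8.11 is
         * (1 << 24) | (7 << 16) | (8 << 8) | 11.
         */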
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = UINT64_MAX;

        return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        /* TODO: Is there a better way to add VLANs to each VNIC in case
         * of VMDQ?
         */
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                PMD_DRV_LOG(DEBUG,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);

        req.flags = rte_cpu_to_le_32(filter->flags);
        req.flags |=
        rte_cpu_to_le_32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_CFG, BNXT_USE_CHIMP_MB);

        if (ptp->rx_filter)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables = rte_cpu_to_le_32
                (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*      if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
        if (ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);

        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (!BNXT_CHIP_THOR(bp) &&
            !(resp->flags &
              HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) {
                HWRM_UNLOCK();
                return 0;
        }

        if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
                bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }

        if (!BNXT_CHIP_THOR(bp)) {
                ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
                ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
                ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
                ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
                ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
                ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
                ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
                ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
                ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
        }

        /* Done reading from *resp; release the HWRM lock on this path too. */
        HWRM_UNLOCK();

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        return 0;
}

static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN AS table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
        bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        if (!BNXT_CHIP_THOR(bp))
                bp->max_l2_ctx += bp->max_rx_em_flows;
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
                }
        }

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
                bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
                bp->flags |= BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
                PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
        } else {
                bp->flags &= ~BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
        }

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
                bp->flags |= BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;
        else
                bp->flags &= ~BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;

        HWRM_UNLOCK();

        /* Query the PTP configuration only after dropping the HWRM lock:
         * bnxt_hwrm_ptp_qcfg() issues its own HWRM command and takes the
         * lock itself.
         */
        if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
                bnxt_hwrm_ptp_qcfg(bp);

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc;

        rc = __bnxt_hwrm_func_qcaps(bp);
        if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
                rc = bnxt_alloc_ctx_mem(bp);
                if (rc)
                        return rc;

                rc = bnxt_hwrm_func_resc_qcaps(bp);
                if (!rc)
                        bp->flags |= BNXT_FLAG_NEW_RM;
        }

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET, BNXT_USE_CHIMP_MB);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        uint32_t flags = 0;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
        if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;

        /* PFs and trusted VFs should indicate support for the Master
         * capability on non-Stingray platforms.
         */
        if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;

        HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));

                /*
                 * The PF can sniff HWRM API issued by a VF.  This can be set
                 * up by the Linux driver and inherited by the DPDK PF driver.
                 * Clear this HWRM sniffer list in FW because the DPDK PF
                 * driver does not support it.
                 */
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE;
        }

        req.flags = rte_cpu_to_le_32(flags);

        req.async_event_fwd[0] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
        if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
                req.async_event_fwd[0] |=
                        rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        flags = rte_le_to_cpu_32(resp->flags);
        if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
                bp->flags |= BNXT_FLAG_FW_CAP_IF_CHANGE;

        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
{
        if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
                return 0;

        return bnxt_hwrm_func_reserve_vf_resc(bp, true);
}

int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
{
        int rc;
        uint32_t flags = 0;
        uint32_t enables;
        struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_vf_cfg_input req = {0};

        HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);

        enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;

        if (BNXT_HAS_RING_GRPS(bp)) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
                req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
        }

        req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
        req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
                                            AGG_RING_MULTIPLIER);
        req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings +
                                             bp->tx_nr_rings +
                                             BNXT_NUM_ASYNC_CPR(bp));
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
                                              bp->tx_nr_rings +
                                              BNXT_NUM_ASYNC_CPR(bp));
        req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
        if (bp->vf_resv_strategy ==
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
                req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
                req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
                req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
        }

        if (test)
                flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;

        if (test && BNXT_HAS_RING_GRPS(bp))
                flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;

        req.flags = rte_cpu_to_le_32(flags);
        req.enables |= rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (test)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_resource_qcaps_input req = {0};

        HWRM_PREP(req, FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (BNXT_VF(bp)) {
                bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
                bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
                bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
                /* func_resource_qcaps does not return max_rx_em_flows.
                 * So use the value provided by func_qcaps.
                 */
                bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
                if (!BNXT_CHIP_THOR(bp))
                        bp->max_l2_ctx += bp->max_rx_em_flows;
                bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
                bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        }
        bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
        bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
        if (bp->vf_resv_strategy >
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
                bp->vf_resv_strategy =
                HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (bp->flags & BNXT_FLAG_FW_RESET)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
                resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
                resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
        bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
                     (resp->hwrm_fw_min_8b << 16) |
                     (resp->hwrm_fw_bld_8b << 8) |
                     resp->hwrm_fw_rsvd_8b;
        PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        fw_version = resp->hwrm_intf_maj_8b << 16;
        fw_version |= resp->hwrm_intf_min_8b << 8;
        fw_version |= resp->hwrm_intf_upd_8b;
        bp->hwrm_spec_code = fw_version;

        if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                PMD_DRV_LOG(ERR, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
        if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
                bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;

        max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                PMD_DRV_LOG(DEBUG, "Short command supported\n");
                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

        if (((dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
             (dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
            bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
                sprintf(type, "bnxt_hwrm_short_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr =
                                rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
                bp->flags |= BNXT_FLAG_KONG_MB_EN;
                PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
                PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");

error:
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG, BNXT_USE_CHIMP_MB);

        if (conf->link_up) {
                /* Setting a fixed speed, so disable autoneg if it is on */
                if (bp->link_info.auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
                }

                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        /* No speeds specified. Enable AutoNeg - all speeds */
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
                /* AutoNeg - Advertise speeds specified. */
                if (conf->auto_link_speed_mask &&
                    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
                                conf->auto_link_speed_mask;
                        enables |=
                        HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }

                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                PMD_DRV_LOG(INFO, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex_cfg;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
        link_info->phy_type = resp->phy_type;
        link_info->media_type = resp->media_type;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        HWRM_UNLOCK();

        PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
        PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
                    link_info->auto_link_speed_mask);
        PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
                    link_info->force_link_speed);

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int i;

        HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);

        req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
        /* HWRM Version >= 1.9.1 */
        if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
                req.drv_qmap_cap =
                        HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
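/*
 * GET_QUEUE_INFO(0) expands, via token pasting, to:
 *   bp->cos_queue[0].id = resp->queue_id0;
 *   bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */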
1179
1180         GET_QUEUE_INFO(0);
1181         GET_QUEUE_INFO(1);
1182         GET_QUEUE_INFO(2);
1183         GET_QUEUE_INFO(3);
1184         GET_QUEUE_INFO(4);
1185         GET_QUEUE_INFO(5);
1186         GET_QUEUE_INFO(6);
1187         GET_QUEUE_INFO(7);
1188
1189         HWRM_UNLOCK();
1190
1191         if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
1192                 bp->tx_cosq_id = bp->cos_queue[0].id;
1193         } else {
1194                 /* iterate and find the COSq profile to use for Tx */
1195                 for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
1196                         if (bp->cos_queue[i].profile ==
1197                                 HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
1198                                 bp->tx_cosq_id = bp->cos_queue[i].id;
1199                                 break;
1200                         }
1201                 }
1202         }
1203
1204         bp->max_tc = resp->max_configurable_queues;
1205         bp->max_lltc = resp->max_configurable_lossless_queues;
1206         if (bp->max_tc > BNXT_MAX_QUEUE)
1207                 bp->max_tc = BNXT_MAX_QUEUE;
1208         bp->max_q = bp->max_tc;
1209
1210         PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);
1211
1212         return rc;
1213 }
1214
1215 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
1216                          struct bnxt_ring *ring,
1217                          uint32_t ring_type, uint32_t map_index,
1218                          uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
1219 {
1220         int rc = 0;
1221         uint32_t enables = 0;
1222         struct hwrm_ring_alloc_input req = {.req_type = 0 };
1223         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1224         struct rte_mempool *mb_pool;
1225         uint16_t rx_buf_size;
1226
1227         HWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB);
1228
1229         req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
1230         req.fbo = rte_cpu_to_le_32(0);
1231         /* Association of ring index with doorbell index */
1232         req.logical_id = rte_cpu_to_le_16(map_index);
1233         req.length = rte_cpu_to_le_32(ring->ring_size);
1234
1235         switch (ring_type) {
1236         case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1237                 req.ring_type = ring_type;
1238                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1239                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1240                 req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
1241                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1242                         enables |=
1243                         HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1244                 break;
1245         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1246                 req.ring_type = ring_type;
1247                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1248                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1249                 if (BNXT_CHIP_THOR(bp)) {
1250                         mb_pool = bp->rx_queues[0]->mb_pool;
1251                         rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1252                                       RTE_PKTMBUF_HEADROOM;
1253                         rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1254                         req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1255                         enables |=
1256                                 HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
1257                 }
1258                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1259                         enables |=
1260                                 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1261                 break;
1262         case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1263                 req.ring_type = ring_type;
1264                 if (BNXT_HAS_NQ(bp)) {
1265                         /* Association of cp ring with nq */
1266                         req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1267                         enables |=
1268                                 HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
1269                 }
1270                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1271                 break;
1272         case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1273                 req.ring_type = ring_type;
1274                 req.page_size = BNXT_PAGE_SHFT;
1275                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1276                 break;
1277         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1278                 req.ring_type = ring_type;
1279                 req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);
1280
1281                 mb_pool = bp->rx_queues[0]->mb_pool;
1282                 rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1283                               RTE_PKTMBUF_HEADROOM;
1284                 rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1285                 req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1286
1287                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1288                 enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
1289                            HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
1290                            HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1291                 break;
1292         default:
1293                 PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
1294                         ring_type);
1295                 HWRM_UNLOCK();
1296                 return -EINVAL;
1297         }
1298         req.enables = rte_cpu_to_le_32(enables);
1299
1300         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1301
1302         if (rc || resp->error_code) {
1303                 if (rc == 0 && resp->error_code)
1304                         rc = rte_le_to_cpu_16(resp->error_code);
1305                 switch (ring_type) {
1306                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1307                         PMD_DRV_LOG(ERR,
1308                                 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
1309                         HWRM_UNLOCK();
1310                         return rc;
1311                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1312                         PMD_DRV_LOG(ERR,
1313                                     "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1314                         HWRM_UNLOCK();
1315                         return rc;
1316                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1317                         PMD_DRV_LOG(ERR,
1318                                     "hwrm_ring_alloc rx agg failed. rc:%d\n",
1319                                     rc);
1320                         HWRM_UNLOCK();
1321                         return rc;
1322                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1323                         PMD_DRV_LOG(ERR,
1324                                     "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1325                         HWRM_UNLOCK();
1326                         return rc;
1327                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1328                         PMD_DRV_LOG(ERR,
1329                                     "hwrm_ring_alloc nq failed. rc:%d\n", rc);
1330                         HWRM_UNLOCK();
1331                         return rc;
1332                 default:
1333                         PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1334                         HWRM_UNLOCK();
1335                         return rc;
1336                 }
1337         }
1338
1339         ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1340         HWRM_UNLOCK();
1341         return rc;
1342 }
1343
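/*
 * Free a firmware ring of the given type via the RING_FREE command.
 * The ring type only selects the error message on failure; the HWRM
 * error code (or negative send failure) is returned unchanged.
 */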
1344 int bnxt_hwrm_ring_free(struct bnxt *bp,
1345                         struct bnxt_ring *ring, uint32_t ring_type)
1346 {
1347         int rc;
1348         struct hwrm_ring_free_input req = {.req_type = 0 };
1349         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1350
1351         HWRM_PREP(req, RING_FREE, BNXT_USE_CHIMP_MB);
1352
1353         req.ring_type = ring_type;
1354         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1355
1356         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1357
1358         if (rc || resp->error_code) {
1359                 if (rc == 0 && resp->error_code)
1360                         rc = rte_le_to_cpu_16(resp->error_code);
1361                 HWRM_UNLOCK();
1362
1363                 switch (ring_type) {
1364                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1365                         PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1366                                 rc);
1367                         return rc;
1368                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1369                         PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1370                                 rc);
1371                         return rc;
1372                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1373                         PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1374                                 rc);
1375                         return rc;
1376                 case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
1377                         PMD_DRV_LOG(ERR,
1378                                     "hwrm_ring_free nq failed. rc:%d\n", rc);
1379                         return rc;
1380                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
1381                         PMD_DRV_LOG(ERR,
1382                                     "hwrm_ring_free agg failed. rc:%d\n", rc);
1383                         return rc;
1384                 default:
1385                         PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1386                         return rc;
1387                 }
1388         }
1389         HWRM_UNLOCK();
1390         return 0;
1391 }
1392
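/*
 * Allocate a ring group tying together the completion, Rx, aggregation
 * and stats context rings cached in bp->grp_info[idx]. The firmware
 * group ID is stored back into grp_info on success.
 */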
1393 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1394 {
1395         int rc = 0;
1396         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1397         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1398
1399         HWRM_PREP(req, RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
1400
1401         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1402         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1403         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1404         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1405
1406         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1407
1408         HWRM_CHECK_RESULT();
1409
1410         bp->grp_info[idx].fw_grp_id =
1411             rte_le_to_cpu_16(resp->ring_group_id);
1412
1413         HWRM_UNLOCK();
1414
1415         return rc;
1416 }
1417
1418 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1419 {
1420         int rc;
1421         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1422         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1423
1424         HWRM_PREP(req, RING_GRP_FREE, BNXT_USE_CHIMP_MB);
1425
1426         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1427
1428         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1429
1430         HWRM_CHECK_RESULT();
1431         HWRM_UNLOCK();
1432
1433         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1434         return rc;
1435 }
1436
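/*
 * Reset the hardware counters behind a statistics context. A no-op if
 * the context was never allocated (HWRM_NA_SIGNATURE).
 */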
1437 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1438 {
1439         int rc = 0;
1440         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1441         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1442
1443         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1444                 return rc;
1445
1446         HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
1447
1448         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1449
1450         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1451
1452         HWRM_CHECK_RESULT();
1453         HWRM_UNLOCK();
1454
1455         return rc;
1456 }
1457
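/*
 * Allocate a statistics context for a completion ring and hand the
 * firmware the DMA address of the per-ring stats buffer. The update
 * period is left at 0, so no periodic DMA updates are requested.
 */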
1458 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1459                                 unsigned int idx __rte_unused)
1460 {
1461         int rc;
1462         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1463         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1464
1465         HWRM_PREP(req, STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1466
1467         req.update_period_ms = rte_cpu_to_le_32(0);
1468
1469         req.stats_dma_addr =
1470             rte_cpu_to_le_64(cpr->hw_stats_map);
1471
1472         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1473
1474         HWRM_CHECK_RESULT();
1475
1476         cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
1477
1478         HWRM_UNLOCK();
1479
1480         return rc;
1481 }
1482
1483 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1484                                 unsigned int idx __rte_unused)
1485 {
1486         int rc;
1487         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1488         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1489
1490         HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
1491
1492         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1493
1494         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1495
1496         HWRM_CHECK_RESULT();
1497         HWRM_UNLOCK();
1498
1499         return rc;
1500 }
1501
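/*
 * Allocate a VNIC in firmware. On chips with ring groups, the group
 * IDs in [start_grp_id, end_grp_id) are mapped to the VNIC first and
 * the RSS/COS/LB rules are reset to HWRM_NA_SIGNATURE.
 */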
1502 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1503 {
1504         int rc = 0, i, j;
1505         struct hwrm_vnic_alloc_input req = { 0 };
1506         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1507
1508         if (!BNXT_HAS_RING_GRPS(bp))
1509                 goto skip_ring_grps;
1510
1511         /* map ring groups to this vnic */
1512         PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1513                 vnic->start_grp_id, vnic->end_grp_id);
1514         for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
1515                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1516
1517         vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1518         vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1519         vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1520         vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1521
1522 skip_ring_grps:
1523         vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
1524                                 RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
1525         HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);
1526
1527         if (vnic->func_default)
1528                 req.flags =
1529                         rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1530         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1531
1532         HWRM_CHECK_RESULT();
1533
1534         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1535         HWRM_UNLOCK();
1536         PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1537         return rc;
1538 }
1539
1540 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1541                                         struct bnxt_vnic_info *vnic,
1542                                         struct bnxt_plcmodes_cfg *pmode)
1543 {
1544         int rc = 0;
1545         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1546         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1547
1548         HWRM_PREP(req, VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
1549
1550         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1551
1552         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1553
1554         HWRM_CHECK_RESULT();
1555
1556         pmode->flags = rte_le_to_cpu_32(resp->flags);
1557         /* dflt_vnic bit doesn't exist in the _cfg command */
1558         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1559         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1560         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1561         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1562
1563         HWRM_UNLOCK();
1564
1565         return rc;
1566 }
1567
1568 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1569                                        struct bnxt_vnic_info *vnic,
1570                                        struct bnxt_plcmodes_cfg *pmode)
1571 {
1572         int rc = 0;
1573         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1574         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1575
1576         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1577                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1578                 return rc;
1579         }
1580
1581         HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1582
1583         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1584         req.flags = rte_cpu_to_le_32(pmode->flags);
1585         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1586         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1587         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1588         req.enables = rte_cpu_to_le_32(
1589             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1590             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1591             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1592         );
1593
1594         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1595
1596         HWRM_CHECK_RESULT();
1597         HWRM_UNLOCK();
1598
1599         return rc;
1600 }
1601
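/*
 * Configure an allocated VNIC: default ring group, MRU, RSS/COS/LB
 * rules and mode flags such as VLAN stripping. Thor has no ring
 * groups, so the default Rx and completion ring IDs are programmed
 * instead. Placement modes are saved first and restored afterwards.
 */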
1602 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1603 {
1604         int rc = 0;
1605         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1606         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1607         struct bnxt_plcmodes_cfg pmodes = { 0 };
1608         uint32_t ctx_enable_flag = 0;
1609         uint32_t enables = 0;
1610
1611         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1612                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1613                 return rc;
1614         }
1615
1616         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1617         if (rc)
1618                 return rc;
1619
1620         HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);
1621
1622         if (BNXT_CHIP_THOR(bp)) {
1623                 struct bnxt_rx_queue *rxq = bp->eth_dev->data->rx_queues[0];
1624                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1625                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1626
1627                 req.default_rx_ring_id =
1628                         rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
1629                 req.default_cmpl_ring_id =
1630                         rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
1631                 enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
1632                           HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
1633                 goto config_mru;
1634         }
1635
1636         /* Only RSS is supported for now; COS & LB are TBD */
1637         enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
1638         if (vnic->lb_rule != (uint16_t)HWRM_NA_SIGNATURE)
1639                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1640         if (vnic->cos_rule != (uint16_t)HWRM_NA_SIGNATURE)
1641                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1642         if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
1643                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1644                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1645         }
1646         enables |= ctx_enable_flag;
1647         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1648         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1649         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1650         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1651
1652 config_mru:
1653         req.enables = rte_cpu_to_le_32(enables);
1654         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1655         req.mru = rte_cpu_to_le_16(vnic->mru);
1656         /* Configure default VNIC only once. */
1657         if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
1658                 req.flags |=
1659                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1660                 bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
1661         }
1662         if (vnic->vlan_strip)
1663                 req.flags |=
1664                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1665         if (vnic->bd_stall)
1666                 req.flags |=
1667                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1668         if (vnic->roce_dual)
1669                 req.flags |= rte_cpu_to_le_32(
1670                         HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1671         if (vnic->roce_only)
1672                 req.flags |= rte_cpu_to_le_32(
1673                         HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1674         if (vnic->rss_dflt_cr)
1675                 req.flags |= rte_cpu_to_le_32(
1676                         HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE);
1677
1678         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1679
1680         HWRM_CHECK_RESULT();
1681         HWRM_UNLOCK();
1682
1683         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1684
1685         return rc;
1686 }
1687
1688 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1689                 int16_t fw_vf_id)
1690 {
1691         int rc = 0;
1692         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1693         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1694
1695         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1696                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1697                 return rc;
1698         }
1699         HWRM_PREP(req, VNIC_QCFG, BNXT_USE_CHIMP_MB);
1700
1701         req.enables =
1702                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1703         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1704         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1705
1706         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1707
1708         HWRM_CHECK_RESULT();
1709
1710         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1711         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1712         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1713         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1714         vnic->mru = rte_le_to_cpu_16(resp->mru);
1715         vnic->func_default = rte_le_to_cpu_32(
1716                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1717         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1718                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1719         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1720                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1721         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1722                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1723         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1724                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1725         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1726                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1727
1728         HWRM_UNLOCK();
1729
1730         return rc;
1731 }
1732
1733 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
1734                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
1735 {
1736         int rc = 0;
1737         uint16_t ctx_id;
1738         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1739         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1740                                                 bp->hwrm_cmd_resp_addr;
1741
1742         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1743
1744         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1745         HWRM_CHECK_RESULT();
1746
1747         ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1748         if (!BNXT_HAS_RING_GRPS(bp))
1749                 vnic->fw_grp_ids[ctx_idx] = ctx_id;
1750         else if (ctx_idx == 0)
1751                 vnic->rss_rule = ctx_id;
1752
1753         HWRM_UNLOCK();
1754
1755         return rc;
1756 }
1757
1758 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
1759                             struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
1760 {
1761         int rc = 0;
1762         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1763         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1764                                                 bp->hwrm_cmd_resp_addr;
1765
1766         if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
1767                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1768                 return rc;
1769         }
1770         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
1771
1772         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
1773
1774         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1775
1776         HWRM_CHECK_RESULT();
1777         HWRM_UNLOCK();
1778
1779         return rc;
1780 }
1781
1782 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1783 {
1784         int rc = 0;
1785         struct hwrm_vnic_free_input req = {.req_type = 0 };
1786         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1787
1788         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1789                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1790                 return rc;
1791         }
1792
1793         HWRM_PREP(req, VNIC_FREE, BNXT_USE_CHIMP_MB);
1794
1795         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1796
1797         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1798
1799         HWRM_CHECK_RESULT();
1800         HWRM_UNLOCK();
1801
1802         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1803         /* Configure default VNIC again if necessary. */
1804         if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
1805                 bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;
1806
1807         return rc;
1808 }
1809
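/*
 * Thor RSS configuration: issue one VNIC_RSS_CFG command per RSS
 * context, each covering one ring-table pair of the indirection table.
 */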
1810 static int
1811 bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1812 {
1813         int i;
1814         int rc = 0;
1815         int nr_ctxs = vnic->num_lb_ctxts;
1816         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1817         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1818
1819         for (i = 0; i < nr_ctxs; i++) {
1820                 HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
1821
1822                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1823                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1824                 req.hash_mode_flags = vnic->hash_mode;
1825
1826                 req.hash_key_tbl_addr =
1827                         rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1828
1829                 req.ring_grp_tbl_addr =
1830                         rte_cpu_to_le_64(vnic->rss_table_dma_addr +
1831                                          i * HW_HASH_INDEX_SIZE);
1832                 req.ring_table_pair_index = i;
1833                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
1834
1835                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
1836                                             BNXT_USE_CHIMP_MB);
1837
1838                 HWRM_CHECK_RESULT();
1839                 HWRM_UNLOCK();
1840         }
1841
1842         return rc;
1843 }
1844
1845 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1846                            struct bnxt_vnic_info *vnic)
1847 {
1848         int rc = 0;
1849         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1850         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1851
1852         if (!vnic->rss_table)
1853                 return 0;
1854
1855         if (BNXT_CHIP_THOR(bp))
1856                 return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
1857
1858         HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
1859
1860         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1861         req.hash_mode_flags = vnic->hash_mode;
1862
1863         req.ring_grp_tbl_addr =
1864             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1865         req.hash_key_tbl_addr =
1866             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1867         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1868         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1869
1870         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1871
1872         HWRM_CHECK_RESULT();
1873         HWRM_UNLOCK();
1874
1875         return rc;
1876 }
1877
1878 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1879                         struct bnxt_vnic_info *vnic)
1880 {
1881         int rc = 0;
1882         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1883         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1884         uint16_t size;
1885
1886         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1887                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1888                 return rc;
1889         }
1890
1891         HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1892
1893         req.flags = rte_cpu_to_le_32(
1894                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1895
1896         req.enables = rte_cpu_to_le_32(
1897                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1898
1899         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1900         size -= RTE_PKTMBUF_HEADROOM;
1901         size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
1902
1903         req.jumbo_thresh = rte_cpu_to_le_16(size);
1904         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1905
1906         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1907
1908         HWRM_CHECK_RESULT();
1909         HWRM_UNLOCK();
1910
1911         return rc;
1912 }
1913
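/*
 * Enable or disable TPA (receive aggregation) on a VNIC. Disabling
 * simply sends the command with empty flags/enables. Skipped on Thor,
 * which is not handled by this path.
 */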
1914 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1915                         struct bnxt_vnic_info *vnic, bool enable)
1916 {
1917         int rc = 0;
1918         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1919         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1920
1921         if (BNXT_CHIP_THOR(bp))
1922                 return 0;
1923
1924         HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
1925
1926         if (enable) {
1927                 req.enables = rte_cpu_to_le_32(
1928                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1929                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1930                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1931                 req.flags = rte_cpu_to_le_32(
1932                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1933                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1934                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1935                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1936                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1937                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1938                 req.max_agg_segs = rte_cpu_to_le_16(5);
1939                 req.max_aggs =
1940                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1941                 req.min_agg_len = rte_cpu_to_le_32(512);
1942         }
1943         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1944
1945         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1946
1947         HWRM_CHECK_RESULT();
1948         HWRM_UNLOCK();
1949
1950         return rc;
1951 }
1952
1953 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1954 {
1955         struct hwrm_func_cfg_input req = {0};
1956         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1957         int rc;
1958
1959         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1960         req.enables = rte_cpu_to_le_32(
1961                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1962         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1963         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1964
1965         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
1966
1967         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1968         HWRM_CHECK_RESULT();
1969         HWRM_UNLOCK();
1970
1971         bp->pf.vf_info[vf].random_mac = false;
1972
1973         return rc;
1974 }
1975
1976 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1977                                   uint64_t *dropped)
1978 {
1979         int rc = 0;
1980         struct hwrm_func_qstats_input req = {.req_type = 0};
1981         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1982
1983         HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
1984
1985         req.fid = rte_cpu_to_le_16(fid);
1986
1987         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1988
1989         HWRM_CHECK_RESULT();
1990
1991         if (dropped)
1992                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1993
1994         HWRM_UNLOCK();
1995
1996         return rc;
1997 }
1998
1999 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
2000                           struct rte_eth_stats *stats)
2001 {
2002         int rc = 0;
2003         struct hwrm_func_qstats_input req = {.req_type = 0};
2004         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2005
2006         HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2007
2008         req.fid = rte_cpu_to_le_16(fid);
2009
2010         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2011
2012         HWRM_CHECK_RESULT();
2013
2014         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2015         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2016         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2017         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2018         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2019         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2020
2021         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2022         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2023         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2024         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2025         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2026         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2027
2028         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
2029         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
2030         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
2031
2032         HWRM_UNLOCK();
2033
2034         return rc;
2035 }
2036
2037 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
2038 {
2039         int rc = 0;
2040         struct hwrm_func_clr_stats_input req = {.req_type = 0};
2041         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2042
2043         HWRM_PREP(req, FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
2044
2045         req.fid = rte_cpu_to_le_16(fid);
2046
2047         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2048
2049         HWRM_CHECK_RESULT();
2050         HWRM_UNLOCK();
2051
2052         return rc;
2053 }
2054
2055 /*
2056  * HWRM utility functions
2057  */
2058
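/*
 * Walk every completion ring (Rx rings first, then Tx) and clear the
 * counters of its statistics context.
 */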
2059 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
2060 {
2061         unsigned int i;
2062         int rc = 0;
2063
2064         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2065                 struct bnxt_tx_queue *txq;
2066                 struct bnxt_rx_queue *rxq;
2067                 struct bnxt_cp_ring_info *cpr;
2068
2069                 if (i >= bp->rx_cp_nr_rings) {
2070                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2071                         cpr = txq->cp_ring;
2072                 } else {
2073                         rxq = bp->rx_queues[i];
2074                         cpr = rxq->cp_ring;
2075                 }
2076
2077                 rc = bnxt_hwrm_stat_clear(bp, cpr);
2078                 if (rc)
2079                         return rc;
2080         }
2081         return 0;
2082 }
2083
2084 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
2085 {
2086         int rc;
2087         unsigned int i;
2088         struct bnxt_cp_ring_info *cpr;
2089
2090         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2091
2092                 if (i >= bp->rx_cp_nr_rings) {
2093                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
2094                 } else {
2095                         cpr = bp->rx_queues[i]->cp_ring;
2096                         if (BNXT_HAS_RING_GRPS(bp))
2097                                 bp->grp_info[i].fw_stats_ctx = -1;
2098                 }
2099                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
2100                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
2101                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
2102                         if (rc)
2103                                 return rc;
2104                 }
2105         }
2106         return 0;
2107 }
2108
2109 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
2110 {
2111         unsigned int i;
2112         int rc = 0;
2113
2114         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2115                 struct bnxt_tx_queue *txq;
2116                 struct bnxt_rx_queue *rxq;
2117                 struct bnxt_cp_ring_info *cpr;
2118
2119                 if (i >= bp->rx_cp_nr_rings) {
2120                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2121                         cpr = txq->cp_ring;
2122                 } else {
2123                         rxq = bp->rx_queues[i];
2124                         cpr = rxq->cp_ring;
2125                 }
2126
2127                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
2128
2129                 if (rc)
2130                         return rc;
2131         }
2132         return rc;
2133 }
2134
2135 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
2136 {
2137         uint16_t idx;
2138         int rc = 0;
2139
2140         if (!BNXT_HAS_RING_GRPS(bp))
2141                 return 0;
2142
2143         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
2144
2145                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
2146                         continue;
2147
2148                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
2149
2150                 if (rc)
2151                         return rc;
2152         }
2153         return rc;
2154 }
2155
2156 void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2157 {
2158         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2159
2160         bnxt_hwrm_ring_free(bp, cp_ring,
2161                             HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
2162         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2163         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2164                                      sizeof(*cpr->cp_desc_ring));
2165         cpr->cp_raw_cons = 0;
2166         cpr->valid = 0;
2167 }
2168
2169 void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2170 {
2171         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2172
2173         bnxt_hwrm_ring_free(bp, cp_ring,
2174                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
2175         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2176         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2177                         sizeof(*cpr->cp_desc_ring));
2178         cpr->cp_raw_cons = 0;
2179         cpr->valid = 0;
2180 }
2181
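/*
 * Tear down the firmware rings of one Rx queue: the Rx ring, the
 * aggregation ring (typed RX_AGG on Thor, RX otherwise) and finally
 * the completion/NQ rings, invalidating all cached firmware ring IDs.
 */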
2182 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
2183 {
2184         struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
2185         struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
2186         struct bnxt_ring *ring = rxr->rx_ring_struct;
2187         struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
2188
2189         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2190                 bnxt_hwrm_ring_free(bp, ring,
2191                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2192                 ring->fw_ring_id = INVALID_HW_RING_ID;
2193                 if (BNXT_HAS_RING_GRPS(bp))
2194                         bp->grp_info[queue_index].rx_fw_ring_id =
2195                                                         INVALID_HW_RING_ID;
2196                 memset(rxr->rx_desc_ring, 0,
2197                        rxr->rx_ring_struct->ring_size *
2198                        sizeof(*rxr->rx_desc_ring));
2199                 memset(rxr->rx_buf_ring, 0,
2200                        rxr->rx_ring_struct->ring_size *
2201                        sizeof(*rxr->rx_buf_ring));
2202                 rxr->rx_prod = 0;
2203         }
2204         ring = rxr->ag_ring_struct;
2205         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2206                 bnxt_hwrm_ring_free(bp, ring,
2207                                     BNXT_CHIP_THOR(bp) ?
2208                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
2209                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2210                 ring->fw_ring_id = INVALID_HW_RING_ID;
2211                 memset(rxr->ag_buf_ring, 0,
2212                        rxr->ag_ring_struct->ring_size *
2213                        sizeof(*rxr->ag_buf_ring));
2214                 rxr->ag_prod = 0;
2215                 if (BNXT_HAS_RING_GRPS(bp))
2216                         bp->grp_info[queue_index].ag_fw_ring_id =
2217                                                         INVALID_HW_RING_ID;
2218         }
2219         if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
2220                 bnxt_free_cp_ring(bp, cpr);
2221                 if (rxq->nq_ring)
2222                         bnxt_free_nq_ring(bp, rxq->nq_ring);
2223         }
2224
2225         if (BNXT_HAS_RING_GRPS(bp))
2226                 bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
2227 }
2228
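/*
 * Free every Tx ring (zeroing the descriptor and buffer rings and
 * resetting the producer/consumer indices), then every Rx queue.
 */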
2229 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
2230 {
2231         unsigned int i;
2232
2233         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
2234                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
2235                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
2236                 struct bnxt_ring *ring = txr->tx_ring_struct;
2237                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
2238
2239                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2240                         bnxt_hwrm_ring_free(bp, ring,
2241                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
2242                         ring->fw_ring_id = INVALID_HW_RING_ID;
2243                         memset(txr->tx_desc_ring, 0,
2244                                         txr->tx_ring_struct->ring_size *
2245                                         sizeof(*txr->tx_desc_ring));
2246                         memset(txr->tx_buf_ring, 0,
2247                                         txr->tx_ring_struct->ring_size *
2248                                         sizeof(*txr->tx_buf_ring));
2249                         txr->tx_prod = 0;
2250                         txr->tx_cons = 0;
2251                 }
2252                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
2253                         bnxt_free_cp_ring(bp, cpr);
2254                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
2255                         if (txq->nq_ring)
2256                                 bnxt_free_nq_ring(bp, txq->nq_ring);
2257                 }
2258         }
2259
2260         for (i = 0; i < bp->rx_cp_nr_rings; i++)
2261                 bnxt_free_hwrm_rx_ring(bp, i);
2262
2263         return 0;
2264 }
2265
2266 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
2267 {
2268         uint16_t i;
2269         int rc = 0;
2270
2271         if (!BNXT_HAS_RING_GRPS(bp))
2272                 return 0;
2273
2274         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
2275                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
2276                 if (rc)
2277                         return rc;
2278         }
2279         return rc;
2280 }
2281
2282 void bnxt_free_hwrm_resources(struct bnxt *bp)
2283 {
2284         /* Release the HWRM response and short command buffers */
2285         rte_free(bp->hwrm_cmd_resp_addr);
2286         rte_free(bp->hwrm_short_cmd_req_addr);
2287         bp->hwrm_cmd_resp_addr = NULL;
2288         bp->hwrm_short_cmd_req_addr = NULL;
2289         bp->hwrm_cmd_resp_dma_addr = 0;
2290         bp->hwrm_short_cmd_req_dma_addr = 0;
2291 }
2292
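/*
 * Allocate the DMA-able buffer used for HWRM responses, map it to an
 * IOVA for the firmware and initialize the spinlock serializing HWRM
 * access. The short command buffer is allocated elsewhere when short
 * commands are used.
 */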
2293 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2294 {
2295         struct rte_pci_device *pdev = bp->pdev;
2296         char type[RTE_MEMZONE_NAMESIZE];
2297
2298         snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
2299                  pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2300         bp->max_resp_len = HWRM_MAX_RESP_LEN;
2301         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2302         if (bp->hwrm_cmd_resp_addr == NULL)
2303                 return -ENOMEM;
2304         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
2305         bp->hwrm_cmd_resp_dma_addr =
2306                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
2307         if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
2308                 PMD_DRV_LOG(ERR,
2309                         "unable to map response address to physical memory\n");
2310                 return -ENOMEM;
2311         }
2312         rte_spinlock_init(&bp->hwrm_lock);
2313
2314         return 0;
2315 }
2316
2317 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2318 {
2319         struct bnxt_filter_info *filter;
2320         int rc = 0;
2321
2322         STAILQ_FOREACH(filter, &vnic->filter, next) {
2323                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2324                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2325                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2326                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2327                 else
2328                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2329                 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
2330                 /* Keep going on failure so the remaining
2331                  * filters are still cleared. */
2332         }
2333         return rc;
2334 }
2335
2336 static int
2337 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2338 {
2339         struct bnxt_filter_info *filter;
2340         struct rte_flow *flow;
2341         int rc = 0;
2342
2343         /* Head-consuming loop: rte_free() below invalidates flow->next. */
2344         while ((flow = STAILQ_FIRST(&vnic->flow_list)) != NULL) {
2345                 filter = flow->filter;
2346                 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
2347                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2348                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2349                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2350                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2351                 else
2352                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2353
2354                 STAILQ_REMOVE_HEAD(&vnic->flow_list, next);
2355                 rte_free(flow);
2356                 /* Keep going on failure so remaining flows are freed. */
2357         }
2358         return rc;
2359 }
2360
2361 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2362 {
2363         struct bnxt_filter_info *filter;
2364         int rc = 0;
2365
2366         STAILQ_FOREACH(filter, &vnic->filter, next) {
2367                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2368                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2369                                                      filter);
2370                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2371                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2372                                                          filter);
2373                 else
2374                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2375                                                      filter);
2376                 if (rc)
2377                         break;
2378         }
2379         return rc;
2380 }
2381
2382 void bnxt_free_tunnel_ports(struct bnxt *bp)
2383 {
2384         if (bp->vxlan_port_cnt)
2385                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2386                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2387         bp->vxlan_port = 0;
2388         if (bp->geneve_port_cnt)
2389                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2390                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2391         bp->geneve_port = 0;
2392 }
2393
2394 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2395 {
2396         int i, j;
2397
2398         if (bp->vnic_info == NULL)
2399                 return;
2400
2401         /*
2402          * Cleanup VNICs in reverse order, to make sure the L2 filter
2403          * from vnic0 is last to be cleaned up.
2404          */
2405         for (i = bp->nr_vnics - 1; i >= 0; i--) {
2406                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2407
2408                 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2409                         PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
2410                         return;
2411                 }
2412
2413                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2414
2415                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2416
2417                 if (BNXT_CHIP_THOR(bp)) {
2418                         for (j = 0; j < vnic->num_lb_ctxts; j++) {
2419                                 bnxt_hwrm_vnic_ctx_free(bp, vnic,
2420                                                         vnic->fw_grp_ids[j]);
2421                                 vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
2422                         }
2423                         vnic->num_lb_ctxts = 0;
2424                 } else {
2425                         bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
2426                         vnic->rss_rule = INVALID_HW_RING_ID;
2427                 }
2428
2429                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2430
2431                 bnxt_hwrm_vnic_free(bp, vnic);
2432
2433                 rte_free(vnic->fw_grp_ids);
2434         }
2435         /* Ring resources */
2436         bnxt_free_all_hwrm_rings(bp);
2437         bnxt_free_all_hwrm_ring_grps(bp);
2438         bnxt_free_all_hwrm_stat_ctxs(bp);
2439         bnxt_free_tunnel_ports(bp);
2440 }
2441
2442 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2443 {
2444         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2445
2446         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2447                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2448
2449         switch (conf_link_speed) {
2450         case ETH_LINK_SPEED_10M_HD:
2451         case ETH_LINK_SPEED_100M_HD:
2452                 /* FALLTHROUGH */
2453                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2454         }
2455         return hw_link_duplex;
2456 }
2457
2458 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2459 {
2460         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2461 }
2462
2463 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2464 {
2465         uint16_t eth_link_speed = 0;
2466
2467         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2468                 return ETH_LINK_SPEED_AUTONEG;
2469
2470         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2471         case ETH_LINK_SPEED_100M:
2472         case ETH_LINK_SPEED_100M_HD:
2473                 /* FALLTHROUGH */
2474                 eth_link_speed =
2475                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2476                 break;
2477         case ETH_LINK_SPEED_1G:
2478                 eth_link_speed =
2479                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2480                 break;
2481         case ETH_LINK_SPEED_2_5G:
2482                 eth_link_speed =
2483                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2484                 break;
2485         case ETH_LINK_SPEED_10G:
2486                 eth_link_speed =
2487                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2488                 break;
2489         case ETH_LINK_SPEED_20G:
2490                 eth_link_speed =
2491                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2492                 break;
2493         case ETH_LINK_SPEED_25G:
2494                 eth_link_speed =
2495                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2496                 break;
2497         case ETH_LINK_SPEED_40G:
2498                 eth_link_speed =
2499                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2500                 break;
2501         case ETH_LINK_SPEED_50G:
2502                 eth_link_speed =
2503                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2504                 break;
2505         case ETH_LINK_SPEED_100G:
2506                 eth_link_speed =
2507                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2508                 break;
2509         default:
2510                 PMD_DRV_LOG(ERR,
2511                         "Unsupported link speed %u; default to AUTO\n",
2512                         conf_link_speed);
2513                 break;
2514         }
2515         return eth_link_speed;
2516 }
2517
2518 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2519                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2520                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2521                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2522
2523 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2524 {
2525         uint32_t one_speed;
2526
2527         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2528                 return 0;
2529
2530         if (link_speed & ETH_LINK_SPEED_FIXED) {
2531                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2532
2533                 if (one_speed & (one_speed - 1)) {
2534                         PMD_DRV_LOG(ERR,
2535                                 "Invalid advertised speeds (%u) for port %u\n",
2536                                 link_speed, port_id);
2537                         return -EINVAL;
2538                 }
2539                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2540                         PMD_DRV_LOG(ERR,
2541                                 "Unsupported advertised speed (%u) for port %u\n",
2542                                 link_speed, port_id);
2543                         return -EINVAL;
2544                 }
2545         } else {
2546                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2547                         PMD_DRV_LOG(ERR,
2548                                 "Unsupported advertised speeds (%u) for port %u\n",
2549                                 link_speed, port_id);
2550                         return -EINVAL;
2551                 }
2552         }
2553         return 0;
2554 }
2555
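/*
 * Translate an ethdev link-speed bitmap into the HWRM autoneg speed
 * mask. For AUTONEG, advertise the speeds the PHY reports as supported
 * when known, otherwise fall back to all driver-supported speeds.
 */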
2556 static uint16_t
2557 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2558 {
2559         uint16_t ret = 0;
2560
2561         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2562                 if (bp->link_info.support_speeds)
2563                         return bp->link_info.support_speeds;
2564                 link_speed = BNXT_SUPPORTED_SPEEDS;
2565         }
2566
2567         if (link_speed & ETH_LINK_SPEED_100M)
2568                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2569         if (link_speed & ETH_LINK_SPEED_100M_HD)
2570                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2571         if (link_speed & ETH_LINK_SPEED_1G)
2572                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2573         if (link_speed & ETH_LINK_SPEED_2_5G)
2574                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2575         if (link_speed & ETH_LINK_SPEED_10G)
2576                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2577         if (link_speed & ETH_LINK_SPEED_20G)
2578                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2579         if (link_speed & ETH_LINK_SPEED_25G)
2580                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2581         if (link_speed & ETH_LINK_SPEED_40G)
2582                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2583         if (link_speed & ETH_LINK_SPEED_50G)
2584                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2585         if (link_speed & ETH_LINK_SPEED_100G)
2586                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2587         return ret;
2588 }
2589
2590 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2591 {
2592         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2593
2594         switch (hw_link_speed) {
2595         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2596                 eth_link_speed = ETH_SPEED_NUM_100M;
2597                 break;
2598         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2599                 eth_link_speed = ETH_SPEED_NUM_1G;
2600                 break;
2601         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2602                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2603                 break;
2604         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2605                 eth_link_speed = ETH_SPEED_NUM_10G;
2606                 break;
2607         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2608                 eth_link_speed = ETH_SPEED_NUM_20G;
2609                 break;
2610         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2611                 eth_link_speed = ETH_SPEED_NUM_25G;
2612                 break;
2613         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2614                 eth_link_speed = ETH_SPEED_NUM_40G;
2615                 break;
2616         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2617                 eth_link_speed = ETH_SPEED_NUM_50G;
2618                 break;
2619         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2620                 eth_link_speed = ETH_SPEED_NUM_100G;
2621                 break;
2622         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2623         default:
2624                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2625                         hw_link_speed);
2626                 break;
2627         }
2628         return eth_link_speed;
2629 }
2630
2631 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2632 {
2633         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2634
2635         switch (hw_link_duplex) {
2636         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2637         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2638                 /* FALLTHROUGH */
2639                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2640                 break;
2641         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2642                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2643                 break;
2644         default:
2645                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2646                         hw_link_duplex);
2647                 break;
2648         }
2649         return eth_link_duplex;
2650 }
2651
2652 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2653 {
2654         int rc = 0;
2655         struct bnxt_link_info *link_info = &bp->link_info;
2656
2657         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2658         if (rc) {
2659                 PMD_DRV_LOG(ERR,
2660                         "Get link config failed with rc %d\n", rc);
2661                 goto exit;
2662         }
2663         if (link_info->link_speed)
2664                 link->link_speed =
2665                         bnxt_parse_hw_link_speed(link_info->link_speed);
2666         else
2667                 link->link_speed = ETH_SPEED_NUM_NONE;
2668         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2669         link->link_status = link_info->link_up;
2670         link->link_autoneg = link_info->auto_mode ==
2671                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2672                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2673 exit:
2674         return rc;
2675 }
2676
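/*
 * Apply the requested link configuration; only a single-function PF may
 * change PHY settings. Autoneg advertises a speed mask, while the
 * forced path programs a single explicit speed (40G on Thor is always
 * forced because media auto-detect cannot negotiate it).
 */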
2677 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2678 {
2679         int rc = 0;
2680         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2681         struct bnxt_link_info link_req;
2682         uint16_t speed, autoneg;
2683
2684         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2685                 return 0;
2686
2687         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2688                         bp->eth_dev->data->port_id);
2689         if (rc)
2690                 goto error;
2691
2692         memset(&link_req, 0, sizeof(link_req));
2693         link_req.link_up = link_up;
2694         if (!link_up)
2695                 goto port_phy_cfg;
2696
2697         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2698         if (BNXT_CHIP_THOR(bp) &&
2699             dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
2700                 /* 40G is not supported as part of media auto detect.
2701                  * The speed should be forced and autoneg disabled
2702                  * to configure 40G speed.
2703                  */
2704                 PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
2705                 autoneg = 0;
2706         }
2707
2708         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2709         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2710         /* Autoneg can be done only when the FW allows it.
2711          * When the user configures a fixed speed of 40G and later changes
2712          * to any other speed, auto_link_speed/force_link_speed remains set
2713          * to 40G until the link comes up at the new speed.
2714          */
2715         if (autoneg == 1 &&
2716             !(!BNXT_CHIP_THOR(bp) &&
2717               (bp->link_info.auto_link_speed ||
2718                bp->link_info.force_link_speed))) {
2719                 link_req.phy_flags |=
2720                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2721                 link_req.auto_link_speed_mask =
2722                         bnxt_parse_eth_link_speed_mask(bp,
2723                                                        dev_conf->link_speeds);
2724         } else {
2725                 if (bp->link_info.phy_type ==
2726                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2727                     bp->link_info.phy_type ==
2728                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2729                     bp->link_info.media_type ==
2730                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2731                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2732                         return -EINVAL;
2733                 }
2734
2735                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2736                 /* If user wants a particular speed try that first. */
2737                 if (speed)
2738                         link_req.link_speed = speed;
2739                 else if (bp->link_info.force_link_speed)
2740                         link_req.link_speed = bp->link_info.force_link_speed;
2741                 else
2742                         link_req.link_speed = bp->link_info.auto_link_speed;
2743         }
2744         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2745         link_req.auto_pause = bp->link_info.auto_pause;
2746         link_req.force_pause = bp->link_info.force_pause;
2747
2748 port_phy_cfg:
2749         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2750         if (rc) {
2751                 PMD_DRV_LOG(ERR,
2752                         "Set link config failed with rc %d\n", rc);
2753         }
2754
2755 error:
2756         return rc;
2757 }
2758
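/*
 * Read back this function's configuration (FUNC_QCFG): cache the default
 * VLAN and the multi-host/trusted-VF flags, and report the MTU if asked.
 */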
2759 /* JIRA 22088 */
2760 int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
2761 {
2762         struct hwrm_func_qcfg_input req = {0};
2763         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2764         uint16_t flags;
2765         int rc = 0;
2766
2767         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2768         req.fid = rte_cpu_to_le_16(0xffff);
2769
2770         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2771
2772         HWRM_CHECK_RESULT();
2773
2774         /* Hard-coded 0xfff VLAN ID mask */
2775         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2776         flags = rte_le_to_cpu_16(resp->flags);
2777         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2778                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2779
2780         if (BNXT_VF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
2781                 bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
2782                 PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
2783         } else if (BNXT_VF(bp) &&
2784                    !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
2785                 bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
2786                 PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
2787         }
2788
2789         if (mtu)
2790                 *mtu = resp->mtu;
2791
2792         switch (resp->port_partition_type) {
2793         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2794         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2795         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2797                 bp->port_partition_type = resp->port_partition_type;
2798                 break;
2799         default:
2800                 bp->port_partition_type = 0;
2801                 break;
2802         }
2803
2804         HWRM_UNLOCK();
2805
2806         return rc;
2807 }
2808
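/*
 * Fallback used when a VF's FUNC_QCAPS query fails: mirror the values we
 * just programmed through FUNC_CFG so resource accounting stays consistent.
 */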
2809 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2810                                    struct hwrm_func_qcaps_output *qcaps)
2811 {
2812         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2813         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2814                sizeof(qcaps->mac_address));
2815         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2816         qcaps->max_rx_rings = fcfg->num_rx_rings;
2817         qcaps->max_tx_rings = fcfg->num_tx_rings;
2818         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2819         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2820         qcaps->max_vfs = 0;
2821         qcaps->first_vf_id = 0;
2822         qcaps->max_vnics = fcfg->num_vnics;
2823         qcaps->max_decap_records = 0;
2824         qcaps->max_encap_records = 0;
2825         qcaps->max_tx_wm_flows = 0;
2826         qcaps->max_tx_em_flows = 0;
2827         qcaps->max_rx_wm_flows = 0;
2828         qcaps->max_rx_em_flows = 0;
2829         qcaps->max_flow_id = 0;
2830         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2831         qcaps->max_sp_tx_rings = 0;
2832         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2833 }
2834
2835 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2836 {
2837         struct hwrm_func_cfg_input req = {0};
2838         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2839         uint32_t enables;
2840         int rc;
2841
2842         enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2843                   HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2844                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2845                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2846                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2847                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2848                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2849                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2850                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;
2851
2852         if (BNXT_HAS_RING_GRPS(bp)) {
2853                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
2854                 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2855         } else if (BNXT_HAS_NQ(bp)) {
2856                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
2857                 req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
2858         }
2859
2860         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2861         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2862         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
2863                                    RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
2864                                    BNXT_NUM_VLANS);
2865         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2866         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2867         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2868         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2869         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2870         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2871         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2872         req.fid = rte_cpu_to_le_16(0xffff);
2873         req.enables = rte_cpu_to_le_32(enables);
2874
2875         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2876
2877         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2878
2879         HWRM_CHECK_RESULT();
2880         HWRM_UNLOCK();
2881
2882         return rc;
2883 }
2884
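/*
 * Split the PF's resource maxima evenly across the PF and all VFs, i.e.
 * each of the (num_vfs + 1) functions gets max / (num_vfs + 1) of every
 * resource; VNICs stay at 1 per VF while VMDq/RFS is unsupported there.
 */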
2885 static void populate_vf_func_cfg_req(struct bnxt *bp,
2886                                      struct hwrm_func_cfg_input *req,
2887                                      int num_vfs)
2888 {
2889         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2890                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2891                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2892                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2893                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2894                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2895                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2896                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2897                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2898                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2899
2900         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
2901                                     RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
2902                                     BNXT_NUM_VLANS);
2903         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
2904                                     RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
2905                                     BNXT_NUM_VLANS);
2906         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2907                                                 (num_vfs + 1));
2908         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2909         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2910                                                (num_vfs + 1));
2911         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2912         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2913         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2914         /* TODO: For now, do not support VMDq/RFS on VFs. */
2915         req->num_vnics = rte_cpu_to_le_16(1);
2916         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2917                                                  (num_vfs + 1));
2918 }
2919
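/*
 * If the VF has no firmware-assigned MAC address (all zeroes), generate a
 * random one and note that we did; otherwise keep the existing address.
 */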
2920 static void add_random_mac_if_needed(struct bnxt *bp,
2921                                      struct hwrm_func_cfg_input *cfg_req,
2922                                      int vf)
2923 {
2924         struct rte_ether_addr mac;
2925
2926         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2927                 return;
2928
2929         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2930                 cfg_req->enables |=
2931                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2932                 rte_eth_random_addr(cfg_req->dflt_mac_addr);
2933                 bp->pf.vf_info[vf].random_mac = true;
2934         } else {
2935                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
2936                         RTE_ETHER_ADDR_LEN);
2937         }
2938 }
2939
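/*
 * Subtract the resources actually granted to a VF from the PF's pool.  If
 * the FUNC_QCAPS query fails, assume the VF received exactly what cfg_req
 * asked for.
 */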
2940 static void reserve_resources_from_vf(struct bnxt *bp,
2941                                       struct hwrm_func_cfg_input *cfg_req,
2942                                       int vf)
2943 {
2944         struct hwrm_func_qcaps_input req = {0};
2945         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2946         int rc;
2947
2948         /* Get the actual allocated values now */
2949         HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
2950         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2951         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2952
2953         if (rc) {
2954                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2955                 copy_func_cfg_to_qcaps(cfg_req, resp);
2956         } else if (resp->error_code) {
2957                 rc = rte_le_to_cpu_16(resp->error_code);
2958                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2959                 copy_func_cfg_to_qcaps(cfg_req, resp);
2960         }
2961
2962         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2963         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2964         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2965         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2966         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2967         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2968         /*
2969          * TODO: While not supporting VMDq with VFs, max_vnics is always
2970          * forced to 1 in this case.
2971          */
2972         /* bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics); */
2973         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2974
2975         HWRM_UNLOCK();
2976 }
2977
2978 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2979 {
2980         struct hwrm_func_qcfg_input req = {0};
2981         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2982         int rc;
2983
2984         /* Check for zero MAC address */
2985         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2986         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2987         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2988         HWRM_CHECK_RESULT();
2989         rc = rte_le_to_cpu_16(resp->vlan);
2990
2991         HWRM_UNLOCK();
2992
2993         return rc;
2994 }
2995
2996 static int update_pf_resource_max(struct bnxt *bp)
2997 {
2998         struct hwrm_func_qcfg_input req = {0};
2999         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3000         int rc;
3001
3002         /* And copy the allocated numbers into the pf struct */
3003         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
3004         req.fid = rte_cpu_to_le_16(0xffff);
3005         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3006         HWRM_CHECK_RESULT();
3007
3008         /* Only TX ring value reflects actual allocation? TODO */
3009         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
3010         bp->pf.evb_mode = resp->evb_mode;
3011
3012         HWRM_UNLOCK();
3013
3014         return rc;
3015 }
3016
3017 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
3018 {
3019         int rc;
3020
3021         if (!BNXT_PF(bp)) {
3022                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3023                 return -EINVAL;
3024         }
3025
3026         rc = bnxt_hwrm_func_qcaps(bp);
3027         if (rc)
3028                 return rc;
3029
3030         bp->pf.func_cfg_flags &=
3031                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3032                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3033         bp->pf.func_cfg_flags |=
3034                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
3035         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
             if (rc)
                     return rc;
3036         rc = __bnxt_hwrm_func_qcaps(bp);
3037         return rc;
3038 }
3039
3040 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
3041 {
3042         struct hwrm_func_cfg_input req = {0};
3043         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3044         int i;
3045         size_t sz;
3046         int rc = 0;
3047         size_t req_buf_sz;
3048
3049         if (!BNXT_PF(bp)) {
3050                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3051                 return -EINVAL;
3052         }
3053
3054         rc = bnxt_hwrm_func_qcaps(bp);
3055
3056         if (rc)
3057                 return rc;
3058
3059         bp->pf.active_vfs = num_vfs;
3060
3061         /*
3062          * First, configure the PF to only use one TX ring.  This ensures that
3063          * there are enough rings for all VFs.
3064          *
3065          * If we don't do this, when we call func_alloc() later, we will lock
3066          * extra rings to the PF that won't be available during func_cfg() of
3067          * the VFs.
3068          *
3069          * This has been fixed with firmware versions above 20.6.54
3070          */
3071         bp->pf.func_cfg_flags &=
3072                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3073                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3074         bp->pf.func_cfg_flags |=
3075                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
3076         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
3077         if (rc)
3078                 return rc;
3079
3080         /*
3081          * Now, create and register a buffer to hold forwarded VF requests
3082          */
3083         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
3084         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
3085                 page_roundup(req_buf_sz));
3086         if (bp->pf.vf_req_buf == NULL) {
3087                 rc = -ENOMEM;
3088                 goto error_free;
3089         }
3090         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
3091                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
3092         for (i = 0; i < num_vfs; i++)
3093                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
3094                                         (i * HWRM_MAX_REQ_LEN);
3095
3096         rc = bnxt_hwrm_func_buf_rgtr(bp);
3097         if (rc)
3098                 goto error_free;
3099
3100         populate_vf_func_cfg_req(bp, &req, num_vfs);
3101
3102         bp->pf.active_vfs = 0;
3103         for (i = 0; i < num_vfs; i++) {
3104                 add_random_mac_if_needed(bp, &req, i);
3105
3106                 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3107                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
3108                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
3109                 rc = bnxt_hwrm_send_message(bp,
3110                                             &req,
3111                                             sizeof(req),
3112                                             BNXT_USE_CHIMP_MB);
3113
3114                 /* Clear enable flag for next pass */
3115                 req.enables &= ~rte_cpu_to_le_32(
3116                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3117
3118                 if (rc || resp->error_code) {
3119                         PMD_DRV_LOG(ERR,
3120                                 "Failed to initialize VF %d\n", i);
3121                         PMD_DRV_LOG(ERR,
3122                                 "Not all VFs available. (%d, %d)\n",
3123                                 rc, resp->error_code);
3124                         HWRM_UNLOCK();
3125                         break;
3126                 }
3127
3128                 HWRM_UNLOCK();
3129
3130                 reserve_resources_from_vf(bp, &req, i);
3131                 bp->pf.active_vfs++;
3132                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
3133         }
3134
3135         /*
3136          * Now configure the PF to use "the rest" of the resources.
3137          * We're using STD_TX_RING_MODE here, which will limit the number of
3138          * TX rings.  This allows QoS to function properly.  Not setting it
3139          * would cause the PF rings to break the bandwidth settings.
3140          */
3141         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
3142         if (rc)
3143                 goto error_free;
3144
3145         rc = update_pf_resource_max(bp);
3146         if (rc)
3147                 goto error_free;
3148
3149         return rc;
3150
3151 error_free:
3152         bnxt_hwrm_func_buf_unrgtr(bp);
3153         return rc;
3154 }
3155
3156 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
3157 {
3158         struct hwrm_func_cfg_input req = {0};
3159         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3160         int rc;
3161
3162         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3163
3164         req.fid = rte_cpu_to_le_16(0xffff);
3165         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
3166         req.evb_mode = bp->pf.evb_mode;
3167
3168         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3169         HWRM_CHECK_RESULT();
3170         HWRM_UNLOCK();
3171
3172         return rc;
3173 }
3174
3175 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
3176                                 uint8_t tunnel_type)
3177 {
3178         struct hwrm_tunnel_dst_port_alloc_input req = {0};
3179         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3180         int rc = 0;
3181
3182         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
3183         req.tunnel_type = tunnel_type;
3184         req.tunnel_dst_port_val = port;
3185         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3186         HWRM_CHECK_RESULT();
3187
3188         switch (tunnel_type) {
3189         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
3190                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
3191                 bp->vxlan_port = port;
3192                 break;
3193         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
3194                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
3195                 bp->geneve_port = port;
3196                 break;
3197         default:
3198                 break;
3199         }
3200
3201         HWRM_UNLOCK();
3202
3203         return rc;
3204 }
3205
3206 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
3207                                 uint8_t tunnel_type)
3208 {
3209         struct hwrm_tunnel_dst_port_free_input req = {0};
3210         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
3211         int rc = 0;
3212
3213         HWRM_PREP(req, TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
3214
3215         req.tunnel_type = tunnel_type;
3216         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
3217         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3218
3219         HWRM_CHECK_RESULT();
3220         HWRM_UNLOCK();
3221
3222         return rc;
3223 }
3224
3225 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
3226                                         uint32_t flags)
3227 {
3228         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3229         struct hwrm_func_cfg_input req = {0};
3230         int rc;
3231
3232         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3233
3234         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3235         req.flags = rte_cpu_to_le_32(flags);
3236         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3237
3238         HWRM_CHECK_RESULT();
3239         HWRM_UNLOCK();
3240
3241         return rc;
3242 }
3243
3244 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
3245 {
3246         uint32_t *flag = flagp;
3247
3248         vnic->flags = *flag;
3249 }
3250
3251 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3252 {
3253         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
3254 }
3255
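/*
 * Register the VF-request forwarding buffer with the firmware.  Note that
 * the page size is encoded as a log2 value via page_getenum(), so e.g. a
 * 4 KiB buffer is reported as 12.
 */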
3256 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
3257 {
3258         int rc = 0;
3259         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
3260         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
3261
3262         HWRM_PREP(req, FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
3263
3264         req.req_buf_num_pages = rte_cpu_to_le_16(1);
3265         req.req_buf_page_size = rte_cpu_to_le_16(
3266                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
3267         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
3268         req.req_buf_page_addr0 =
3269                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
3270         if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
3271                 PMD_DRV_LOG(ERR,
3272                         "unable to map buffer address to physical memory\n");
3273                 return -ENOMEM;
3274         }
3275
3276         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3277
3278         HWRM_CHECK_RESULT();
3279         HWRM_UNLOCK();
3280
3281         return rc;
3282 }
3283
3284 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
3285 {
3286         int rc = 0;
3287         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
3288         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
3289
3290         if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
3291                 return 0;
3292
3293         HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
3294
3295         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3296
3297         HWRM_CHECK_RESULT();
3298         HWRM_UNLOCK();
3299
3300         return rc;
3301 }
3302
3303 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
3304 {
3305         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3306         struct hwrm_func_cfg_input req = {0};
3307         int rc;
3308
3309         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3310
3311         req.fid = rte_cpu_to_le_16(0xffff);
3312         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
3313         req.enables = rte_cpu_to_le_32(
3314                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3315         req.async_event_cr = rte_cpu_to_le_16(
3316                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3317         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3318
3319         HWRM_CHECK_RESULT();
3320         HWRM_UNLOCK();
3321
3322         return rc;
3323 }
3324
3325 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
3326 {
3327         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3328         struct hwrm_func_vf_cfg_input req = {0};
3329         int rc;
3330
3331         HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
3332
3333         req.enables = rte_cpu_to_le_32(
3334                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3335         req.async_event_cr = rte_cpu_to_le_16(
3336                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3337         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3338
3339         HWRM_CHECK_RESULT();
3340         HWRM_UNLOCK();
3341
3342         return rc;
3343 }
3344
3345 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
3346 {
3347         struct hwrm_func_cfg_input req = {0};
3348         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3349         uint16_t dflt_vlan, fid;
3350         uint32_t func_cfg_flags;
3351         int rc = 0;
3352
3353         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3354
3355         if (is_vf) {
3356                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
3357                 fid = bp->pf.vf_info[vf].fid;
3358                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
3359         } else {
3360                 fid = rte_cpu_to_le_16(0xffff);
3361                 func_cfg_flags = bp->pf.func_cfg_flags;
3362                 dflt_vlan = bp->vlan;
3363         }
3364
3365         req.flags = rte_cpu_to_le_32(func_cfg_flags);
3366         req.fid = rte_cpu_to_le_16(fid);
3367         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3368         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
3369
3370         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3371
3372         HWRM_CHECK_RESULT();
3373         HWRM_UNLOCK();
3374
3375         return rc;
3376 }
3377
3378 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
3379                         uint16_t max_bw, uint16_t enables)
3380 {
3381         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3382         struct hwrm_func_cfg_input req = {0};
3383         int rc;
3384
3385         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3386
3387         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3388         req.enables |= rte_cpu_to_le_32(enables);
3389         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3390         req.max_bw = rte_cpu_to_le_32(max_bw);
3391         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3392
3393         HWRM_CHECK_RESULT();
3394         HWRM_UNLOCK();
3395
3396         return rc;
3397 }
3398
3399 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
3400 {
3401         struct hwrm_func_cfg_input req = {0};
3402         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3403         int rc = 0;
3404
3405         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3406
3407         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3408         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3409         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3410         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
3411
3412         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3413
3414         HWRM_CHECK_RESULT();
3415         HWRM_UNLOCK();
3416
3417         return rc;
3418 }
3419
3420 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3421 {
3422         int rc;
3423
3424         if (BNXT_PF(bp))
3425                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3426         else
3427                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3428
3429         return rc;
3430 }
3431
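/*
 * Ask the firmware to reject a VF command that was forwarded to the PF;
 * the encapsulated request identifies the command and 'target_id' the VF.
 */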
3432 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3433                               void *encaped, size_t ec_size)
3434 {
3435         int rc = 0;
3436         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3437         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3438
3439         if (ec_size > sizeof(req.encap_request))
3440                 return -EINVAL;
3441
3442         HWRM_PREP(req, REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
3443
3444         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3445         memcpy(req.encap_request, encaped, ec_size);
3446
3447         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3448
3449         HWRM_CHECK_RESULT();
3450         HWRM_UNLOCK();
3451
3452         return rc;
3453 }
3454
3455 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3456                                        struct rte_ether_addr *mac)
3457 {
3458         struct hwrm_func_qcfg_input req = {0};
3459         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3460         int rc;
3461
3462         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
3463
3464         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3465         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3466
3467         HWRM_CHECK_RESULT();
3468
3469         memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
3470
3471         HWRM_UNLOCK();
3472
3473         return rc;
3474 }
3475
3476 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3477                             void *encaped, size_t ec_size)
3478 {
3479         int rc = 0;
3480         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3481         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3482
3483         if (ec_size > sizeof(req.encap_request))
3484                 return -EINVAL;
3485
3486         HWRM_PREP(req, EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
3487
3488         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3489         memcpy(req.encap_request, encaped, ec_size);
3490
3491         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3492
3493         HWRM_CHECK_RESULT();
3494         HWRM_UNLOCK();
3495
3496         return rc;
3497 }
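/*
 * Read one statistics context (STAT_CTX_QUERY) and fold it into the
 * per-queue counters of rte_eth_stats; 'rx' selects ingress vs egress.
 */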
3498
3499 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3500                          struct rte_eth_stats *stats, uint8_t rx)
3501 {
3502         int rc = 0;
3503         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3504         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3505
3506         HWRM_PREP(req, STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
3507
3508         req.stat_ctx_id = rte_cpu_to_le_32(cid);
3509
3510         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3511
3512         HWRM_CHECK_RESULT();
3513
3514         if (rx) {
3515                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3516                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3517                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3518                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3519                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3520                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3521                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3522                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3523         } else {
3524                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3525                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3526                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3527                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3528                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3529                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3530         }
3531
3533         HWRM_UNLOCK();
3534
3535         return rc;
3536 }
3537
3538 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3539 {
3540         struct hwrm_port_qstats_input req = {0};
3541         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3542         struct bnxt_pf_info *pf = &bp->pf;
3543         int rc;
3544
3545         HWRM_PREP(req, PORT_QSTATS, BNXT_USE_CHIMP_MB);
3546
3547         req.port_id = rte_cpu_to_le_16(pf->port_id);
3548         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3549         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3550         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3551
3552         HWRM_CHECK_RESULT();
3553         HWRM_UNLOCK();
3554
3555         return rc;
3556 }
3557
3558 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3559 {
3560         struct hwrm_port_clr_stats_input req = {0};
3561         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3562         struct bnxt_pf_info *pf = &bp->pf;
3563         int rc;
3564
3565         /* Not allowed on NS2 device, NPAR, MultiHost, VF */
3566         if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
3567             BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
3568                 return 0;
3569
3570         HWRM_PREP(req, PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
3571
3572         req.port_id = rte_cpu_to_le_16(pf->port_id);
3573         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3574
3575         HWRM_CHECK_RESULT();
3576         HWRM_UNLOCK();
3577
3578         return rc;
3579 }
3580
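/*
 * Discover the port LEDs.  LEDs are only retained when each one has a
 * group ID and supports alternate blinking, which is what the LED blink
 * path in bnxt_hwrm_port_led_cfg() relies on.
 */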
3581 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3582 {
3583         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3584         struct hwrm_port_led_qcaps_input req = {0};
3585         int rc;
3586
3587         if (BNXT_VF(bp))
3588                 return 0;
3589
3590         HWRM_PREP(req, PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
3591         req.port_id = bp->pf.port_id;
3592         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3593
3594         HWRM_CHECK_RESULT();
3595
3596         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3597                 unsigned int i;
3598
3599                 bp->num_leds = resp->num_leds;
3600                 memcpy(bp->leds, &resp->led0_id,
3601                         sizeof(bp->leds[0]) * bp->num_leds);
3602                 for (i = 0; i < bp->num_leds; i++) {
3603                         struct bnxt_led_info *led = &bp->leds[i];
3604
3605                         uint16_t caps = led->led_state_caps;
3606
3607                         if (!led->led_group_id ||
3608                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3609                                 bp->num_leds = 0;
3610                                 break;
3611                         }
3612                 }
3613         }
3614
3615         HWRM_UNLOCK();
3616
3617         return rc;
3618 }
3619
3620 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3621 {
3622         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3623         struct hwrm_port_led_cfg_input req = {0};
3624         struct bnxt_led_cfg *led_cfg;
3625         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3626         uint16_t duration = 0;
3627         int rc, i;
3628
3629         if (!bp->num_leds || BNXT_VF(bp))
3630                 return -EOPNOTSUPP;
3631
3632         HWRM_PREP(req, PORT_LED_CFG, BNXT_USE_CHIMP_MB);
3633
3634         if (led_on) {
3635                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3636                 duration = rte_cpu_to_le_16(500);
3637         }
3638         req.port_id = bp->pf.port_id;
3639         req.num_leds = bp->num_leds;
3640         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3641         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3642                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3643                 led_cfg->led_id = bp->leds[i].led_id;
3644                 led_cfg->led_state = led_state;
3645                 led_cfg->led_blink_on = duration;
3646                 led_cfg->led_blink_off = duration;
3647                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3648         }
3649
3650         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3651
3652         HWRM_CHECK_RESULT();
3653         HWRM_UNLOCK();
3654
3655         return rc;
3656 }
3657
3658 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3659                                uint32_t *length)
3660 {
3661         int rc;
3662         struct hwrm_nvm_get_dir_info_input req = {0};
3663         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3664
3665         HWRM_PREP(req, NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
3666
3667         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3668
3669         HWRM_CHECK_RESULT();
3670
3671         *entries = rte_le_to_cpu_32(resp->entries);
3672         *length = rte_le_to_cpu_32(resp->entry_length);
3673
3674         HWRM_UNLOCK();
3675         return rc;
3676 }
3677
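/*
 * Dump the NVM directory into 'data': the first two bytes carry the entry
 * count and entry length (each truncated to 8 bits), followed by the raw
 * directory entries copied through a DMA-able bounce buffer.
 */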
3678 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3679 {
3680         int rc;
3681         uint32_t dir_entries;
3682         uint32_t entry_length;
3683         uint8_t *buf;
3684         size_t buflen;
3685         rte_iova_t dma_handle;
3686         struct hwrm_nvm_get_dir_entries_input req = {0};
3687         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3688
3689         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3690         if (rc != 0)
3691                 return rc;
3692
3693         *data++ = dir_entries;
3694         *data++ = entry_length;
3695         len -= 2;
3696         memset(data, 0xff, len);
3697
3698         buflen = dir_entries * entry_length;
3699         buf = rte_malloc("nvm_dir", buflen, 0);
3700         if (buf == NULL)
3701                 return -ENOMEM;
3702         rte_mem_lock_page(buf);
3703         dma_handle = rte_mem_virt2iova(buf);
3704         if (dma_handle == RTE_BAD_IOVA) {
3705                 PMD_DRV_LOG(ERR,
3706                         "unable to map response address to physical memory\n");
                     rte_free(buf);
3707                 return -ENOMEM;
3708         }
3709         HWRM_PREP(req, NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
3710         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3711         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3712
3713         if (rc == 0)
3714                 memcpy(data, buf, len > buflen ? buflen : len);
3715
3716         rte_free(buf);
3717         HWRM_CHECK_RESULT();
3718         HWRM_UNLOCK();
3719
3720         return rc;
3721 }
3722
3723 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3724                              uint32_t offset, uint32_t length,
3725                              uint8_t *data)
3726 {
3727         int rc;
3728         uint8_t *buf;
3729         rte_iova_t dma_handle;
3730         struct hwrm_nvm_read_input req = {0};
3731         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3732
3733         buf = rte_malloc("nvm_item", length, 0);
3734         if (!buf)
3735                 return -ENOMEM;
3736         rte_mem_lock_page(buf);
3737
3738         dma_handle = rte_mem_virt2iova(buf);
3739         if (dma_handle == RTE_BAD_IOVA) {
3740                 PMD_DRV_LOG(ERR,
3741                         "unable to map response address to physical memory\n");
                     rte_free(buf);
3742                 return -ENOMEM;
3743         }
3744         HWRM_PREP(req, NVM_READ, BNXT_USE_CHIMP_MB);
3745         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3746         req.dir_idx = rte_cpu_to_le_16(index);
3747         req.offset = rte_cpu_to_le_32(offset);
3748         req.len = rte_cpu_to_le_32(length);
3749         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3750         if (rc == 0)
3751                 memcpy(data, buf, length);
3752
3753         rte_free(buf);
3754         HWRM_CHECK_RESULT();
3755         HWRM_UNLOCK();
3756
3757         return rc;
3758 }
3759
3760 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3761 {
3762         int rc;
3763         struct hwrm_nvm_erase_dir_entry_input req = {0};
3764         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3765
3766         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
3767         req.dir_idx = rte_cpu_to_le_16(index);
3768         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3769         HWRM_CHECK_RESULT();
3770         HWRM_UNLOCK();
3771
3772         return rc;
3773 }
3774
3775
3776 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3777                           uint16_t dir_ordinal, uint16_t dir_ext,
3778                           uint16_t dir_attr, const uint8_t *data,
3779                           size_t data_len)
3780 {
3781         int rc;
3782         struct hwrm_nvm_write_input req = {0};
3783         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3784         rte_iova_t dma_handle;
3785         uint8_t *buf;
3786
3787         buf = rte_malloc("nvm_write", data_len, 0);
3788         if (!buf)
3789                 return -ENOMEM;
3790         rte_mem_lock_page(buf);
3791
3792         dma_handle = rte_mem_virt2iova(buf);
3793         if (dma_handle == RTE_BAD_IOVA) {
3794                 PMD_DRV_LOG(ERR,
3795                         "unable to map response address to physical memory\n");
                     rte_free(buf);
3796                 return -ENOMEM;
3797         }
3798         memcpy(buf, data, data_len);
3799
3800         HWRM_PREP(req, NVM_WRITE, BNXT_USE_CHIMP_MB);
3801
3802         req.dir_type = rte_cpu_to_le_16(dir_type);
3803         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3804         req.dir_ext = rte_cpu_to_le_16(dir_ext);
3805         req.dir_attr = rte_cpu_to_le_16(dir_attr);
3806         req.dir_data_length = rte_cpu_to_le_32(data_len);
3807         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3808
3809         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3810
3811         rte_free(buf);
3812         HWRM_CHECK_RESULT();
3813         HWRM_UNLOCK();
3814
3815         return rc;
3816 }
3817
3818 static void
3819 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3820 {
3821         uint32_t *count = cbdata;
3822
3823         *count = *count + 1;
3824 }
3825
3826 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3827                                      struct bnxt_vnic_info *vnic __rte_unused)
3828 {
3829         return 0;
3830 }
3831
3832 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3833 {
3834         uint32_t count = 0;
3835
3836         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3837             &count, bnxt_vnic_count_hwrm_stub);
3838
3839         return count;
3840 }
3841
3842 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3843                                         uint16_t *vnic_ids)
3844 {
3845         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3846         struct hwrm_func_vf_vnic_ids_query_output *resp =
3847                                                 bp->hwrm_cmd_resp_addr;
3848         int rc;
3849
3850         /* First query all VNIC ids */
3851         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
3852
3853         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3854         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3855         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3856
3857         if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
3858                 HWRM_UNLOCK();
3859                 PMD_DRV_LOG(ERR,
3860                 "unable to map VNIC ID table address to physical memory\n");
3861                 return -ENOMEM;
3862         }
3863         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3864         HWRM_CHECK_RESULT();
3865         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3866
3867         HWRM_UNLOCK();
3868
3869         return rc;
3870 }
3871
3872 /*
3873  * This function queries the VNIC IDs for a specified VF. It then calls
3874  * the vnic_cb to update the necessary field in vnic_info with cbdata.
3875  * Then it calls the hwrm_cb function to program this new vnic configuration.
3876  */
3877 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3878         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3879         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3880 {
3881         struct bnxt_vnic_info vnic;
3882         int rc = 0;
3883         int i, num_vnic_ids;
3884         uint16_t *vnic_ids;
3885         size_t vnic_id_sz;
3886         size_t sz;
3887
3888         /* First query all VNIC ids */
3889         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3890         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3891                         RTE_CACHE_LINE_SIZE);
3892         if (vnic_ids == NULL)
3893                 return -ENOMEM;
3894
3895         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3896                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3897
3898         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3899
3900         if (num_vnic_ids < 0) {
                     rte_free(vnic_ids);
3901                 return num_vnic_ids;
             }
3902
3903         /* Retrieve each VNIC, apply vnic_cb, then reprogram it via hwrm_cb */
3904
3905         for (i = 0; i < num_vnic_ids; i++) {
3906                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3907                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3908                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3909                 if (rc)
3910                         break;
3911                 if (vnic.mru <= 4)      /* Indicates unallocated */
3912                         continue;
3913
3914                 vnic_cb(&vnic, cbdata);
3915
3916                 rc = hwrm_cb(bp, &vnic);
3917                 if (rc)
3918                         break;
3919         }
3920
3921         rte_free(vnic_ids);
3922
3923         return rc;
3924 }
3925
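/*
 * Illustrative sketch only: combining the helper above with the rx-mask
 * callbacks defined earlier in this file to reprogram every VNIC of one
 * VF.  The wrapper name and the BNXT_HWRM_EXAMPLES guard are hypothetical.
 */
#ifdef BNXT_HWRM_EXAMPLES
static int bnxt_example_vf_clear_rx_mask(struct bnxt *bp, uint16_t vf)
{
	uint32_t flags = 0;

	/* vf_vnic_set_rxmask_cb() stores 'flags' into each VNIC, then
	 * bnxt_set_rx_mask_no_vlan() programs the new mask via HWRM.
	 */
	return bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
			vf_vnic_set_rxmask_cb, &flags,
			bnxt_set_rx_mask_no_vlan);
}
#endif /* BNXT_HWRM_EXAMPLES */
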
3926 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3927                                               bool on)
3928 {
3929         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3930         struct hwrm_func_cfg_input req = {0};
3931         int rc;
3932
3933         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3934
3935         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3936         req.enables |= rte_cpu_to_le_32(
3937                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3938         req.vlan_antispoof_mode = on ?
3939                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3940                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3941         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3942
3943         HWRM_CHECK_RESULT();
3944         HWRM_UNLOCK();
3945
3946         return rc;
3947 }
3948
3949 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3950 {
3951         struct bnxt_vnic_info vnic;
3952         uint16_t *vnic_ids;
3953         size_t vnic_id_sz;
3954         int num_vnic_ids, i;
3955         size_t sz;
3956         int rc;
3957
3958         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3959         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3960                         RTE_CACHE_LINE_SIZE);
3961         if (vnic_ids == NULL)
3962                 return -ENOMEM;
3963
3964         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3965                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3966
3967         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3968         if (rc <= 0)
3969                 goto exit;
3970         num_vnic_ids = rc;
3971
3972         /*
3973          * Loop through to find the default VNIC ID.
3974          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3975          * by sending the hwrm_func_qcfg command to the firmware.
3976          */
3977         for (i = 0; i < num_vnic_ids; i++) {
3978                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3979                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3980                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3981                                         bp->pf.first_vf_id + vf);
3982                 if (rc)
3983                         goto exit;
3984                 if (vnic.func_default) {
3985                         rte_free(vnic_ids);
3986                         return vnic.fw_vnic_id;
3987                 }
3988         }
3989         /* Could not find a default VNIC. */
3990         PMD_DRV_LOG(ERR, "No default VNIC\n");
3991 exit:
3992         rte_free(vnic_ids);
3993         return rc;
3994 }
3995
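/*
 * Install an exact-match (EM) flow through the CFA.  Any EM filter already
 * attached to this bnxt_filter_info is cleared first, and only the fields
 * named in 'enables' are programmed into the request.
 */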
3996 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3997                          uint16_t dst_id,
3998                          struct bnxt_filter_info *filter)
3999 {
4000         int rc = 0;
4001         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
4002         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4003         uint32_t enables = 0;
4004
4005         if (filter->fw_em_filter_id != UINT64_MAX)
4006                 bnxt_hwrm_clear_em_filter(bp, filter);
4007
4008         HWRM_PREP(req, CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
4009
4010         req.flags = rte_cpu_to_le_32(filter->flags);
4011
4012         enables = filter->enables |
4013               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
4014         req.dst_id = rte_cpu_to_le_16(dst_id);
4015
4016         if (filter->ip_addr_type) {
4017                 req.ip_addr_type = filter->ip_addr_type;
4018                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4019         }
4020         if (enables &
4021             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4022                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4023         if (enables &
4024             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4025                 memcpy(req.src_macaddr, filter->src_macaddr,
4026                        RTE_ETHER_ADDR_LEN);
4027         if (enables &
4028             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
4029                 memcpy(req.dst_macaddr, filter->dst_macaddr,
4030                        RTE_ETHER_ADDR_LEN);
4031         if (enables &
4032             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
4033                 req.ovlan_vid = filter->l2_ovlan;
4034         if (enables &
4035             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
4036                 req.ivlan_vid = filter->l2_ivlan;
4037         if (enables &
4038             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
4039                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4040         if (enables &
4041             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4042                 req.ip_protocol = filter->ip_protocol;
4043         if (enables &
4044             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4045                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
4046         if (enables &
4047             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
4048                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
4049         if (enables &
4050             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
4051                 req.src_port = rte_cpu_to_be_16(filter->src_port);
4052         if (enables &
4053             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
4054                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
4055         if (enables &
4056             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4057                 req.mirror_vnic_id = filter->mirror_vnic_id;
4058
4059         req.enables = rte_cpu_to_le_32(enables);
4060
4061         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4062
4063         HWRM_CHECK_RESULT();
4064
4065         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
4066         HWRM_UNLOCK();
4067
4068         return rc;
4069 }
4070
4071 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
4072 {
4073         int rc = 0;
4074         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
4075         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
4076
4077         if (filter->fw_em_filter_id == UINT64_MAX)
4078                 return 0;
4079
4080         PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
4081         HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
4082
4083         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
4084
4085         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4086
4087         HWRM_CHECK_RESULT();
4088         HWRM_UNLOCK();
4089
4090         filter->fw_em_filter_id = UINT64_MAX;
4091         filter->fw_l2_filter_id = UINT64_MAX;
4092
4093         return 0;
4094 }
4095
4096 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
4097                          uint16_t dst_id,
4098                          struct bnxt_filter_info *filter)
4099 {
4100         int rc = 0;
4101         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
4102         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
4103                                                 bp->hwrm_cmd_resp_addr;
4104         uint32_t enables = 0;
4105
4106         if (filter->fw_ntuple_filter_id != UINT64_MAX)
4107                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
4108
4109         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
4110
4111         req.flags = rte_cpu_to_le_32(filter->flags);
4112
4113         enables = filter->enables |
4114               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
4115         req.dst_id = rte_cpu_to_le_16(dst_id);
4116
4118         if (filter->ip_addr_type) {
4119                 req.ip_addr_type = filter->ip_addr_type;
4120                 enables |=
4121                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4122         }
4123         if (enables &
4124             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4125                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4126         if (enables &
4127             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4128                 memcpy(req.src_macaddr, filter->src_macaddr,
4129                        RTE_ETHER_ADDR_LEN);
4130         /* Disabled: if (enables &
4131          *     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
4132          *         memcpy(req.dst_macaddr, filter->dst_macaddr,
4133          *                RTE_ETHER_ADDR_LEN); */
4134         if (enables &
4135             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
4136                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4137         if (enables &
4138             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4139                 req.ip_protocol = filter->ip_protocol;
4140         if (enables &
4141             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4142                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
4143         if (enables &
4144             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
4145                 req.src_ipaddr_mask[0] =
4146                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
4147         if (enables &
4148             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
4149                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
4150         if (enables &
4151             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
4152                 req.dst_ipaddr_mask[0] =
4153                         rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
4154         if (enables &
4155             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
4156                 req.src_port = rte_cpu_to_le_16(filter->src_port);
4157         if (enables &
4158             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
4159                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
4160         if (enables &
4161             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
4162                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
4163         if (enables &
4164             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
4165                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
4166         if (enables &
4167             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4168                 req.mirror_vnic_id = filter->mirror_vnic_id;
4169
4170         req.enables = rte_cpu_to_le_32(enables);
4171
4172         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4173
4174         HWRM_CHECK_RESULT();
4175
4176         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
4177         HWRM_UNLOCK();
4178
4179         return rc;
4180 }
4181
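/*
 * Free a previously allocated n-tuple filter via
 * HWRM_CFA_NTUPLE_FILTER_FREE and invalidate the cached filter ID.
 */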
4182 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
4183                                 struct bnxt_filter_info *filter)
4184 {
4185         int rc = 0;
4186         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
4187         struct hwrm_cfa_ntuple_filter_free_output *resp =
4188                                                 bp->hwrm_cmd_resp_addr;
4189
4190         if (filter->fw_ntuple_filter_id == UINT64_MAX)
4191                 return 0;
4192
4193         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
4194
4195         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
4196
4197         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4198
4199         HWRM_CHECK_RESULT();
4200         HWRM_UNLOCK();
4201
4202         filter->fw_ntuple_filter_id = UINT64_MAX;
4203
4204         return 0;
4205 }
4206
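/*
 * Program the RSS ring tables for a VNIC on Thor-based chips. Each RSS
 * context takes BNXT_RSS_ENTRIES_PER_CTX_THOR rx/completion ring ID
 * pairs; stopped rings are skipped so only active queues receive
 * traffic, and the function bails out early if no ring is active.
 */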
4207 static int
4208 bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4209 {
4210         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4211         uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
4212         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
4213         struct bnxt_rx_queue **rxqs = bp->rx_queues;
4214         uint16_t *ring_tbl = vnic->rss_table;
4215         int nr_ctxs = vnic->num_lb_ctxts;
4216         int max_rings = bp->rx_nr_rings;
4217         int i, j, k, cnt;
4218         int rc = 0;
4219
4220         for (i = 0, k = 0; i < nr_ctxs; i++) {
4221                 struct bnxt_rx_ring_info *rxr;
4222                 struct bnxt_cp_ring_info *cpr;
4223
4224                 HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
4225
4226                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
4227                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
4228                 req.hash_mode_flags = vnic->hash_mode;
4229
4230                 req.ring_grp_tbl_addr =
4231                     rte_cpu_to_le_64(vnic->rss_table_dma_addr +
4232                                      i * BNXT_RSS_ENTRIES_PER_CTX_THOR *
4233                                      2 * sizeof(*ring_tbl));
4234                 req.hash_key_tbl_addr =
4235                     rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
4236
4237                 req.ring_table_pair_index = i;
4238                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
4239
4240                 for (j = 0; j < BNXT_RSS_ENTRIES_PER_CTX_THOR; j++) {
4241                         uint16_t ring_id;
4242
4243                         /* Find next active ring. */
4244                         for (cnt = 0; cnt < max_rings; cnt++) {
4245                                 if (rx_queue_state[k] !=
4246                                                 RTE_ETH_QUEUE_STATE_STOPPED)
4247                                         break;
4248                                 if (++k == max_rings)
4249                                         k = 0;
4250                         }
4251
4252                         /* Return if no rings are active. */
4253                         if (cnt == max_rings)
4254                                 return 0;
4255
4256                         /* Add rx/cp ring pair to RSS table. */
4257                         rxr = rxqs[k]->rx_ring;
4258                         cpr = rxqs[k]->cp_ring;
4259
4260                         ring_id = rxr->rx_ring_struct->fw_ring_id;
4261                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4262                         ring_id = cpr->cp_ring_struct->fw_ring_id;
4263                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4264
4265                         if (++k == max_rings)
4266                                 k = 0;
4267                 }
4268                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
4269                                             BNXT_USE_CHIMP_MB);
4270
4271                 HWRM_CHECK_RESULT();
4272                 HWRM_UNLOCK();
4273         }
4274
4275         return rc;
4276 }
4277
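/*
 * Fill the VNIC's RSS redirection table with active ring group IDs and
 * push it to the firmware. Thor-based chips use per-context ring tables
 * and are handled by bnxt_vnic_rss_configure_thor() instead.
 */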
4278 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4279 {
4280         unsigned int rss_idx, fw_idx, i;
4281
4282         if (!(vnic->rss_table && vnic->hash_type))
4283                 return 0;
4284
4285         if (BNXT_CHIP_THOR(bp))
4286                 return bnxt_vnic_rss_configure_thor(bp, vnic);
4287
4288         /*
4289          * Fill the RSS hash & redirection table with
4290          * ring group ids for all VNICs
4291          */
4292         for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
4293                 rss_idx++, fw_idx++) {
4294                 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
4295                         fw_idx %= bp->rx_cp_nr_rings;
4296                         if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
4297                                 break;
4298                         fw_idx++;
4299                 }
4300                 if (i == bp->rx_cp_nr_rings)
4301                         return 0;
4302                 vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
4303         }
4304         return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
4305 }
4306
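/*
 * Translate the driver's interrupt coalescing settings into the fields
 * of a RING_CMPL_RING_CFG_AGGINT_PARAMS request.
 */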
4307 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
4308         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
4309 {
4310         uint16_t flags;
4311
4312         req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
4313
4314         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
4315         req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
4316
4317         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
4318         req->num_cmpl_dma_aggr_during_int =
4319                 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
4320
4321         req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
4322
4323         /* min timer set to 1/2 of interrupt timer */
4324         req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
4325
4326         /* buf timer set to 1/4 of interrupt timer */
4327         req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
4328
4329         req->cmpl_aggr_dma_tmr_during_int =
4330                 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
4331
4332         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4333                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4334         req->flags = rte_cpu_to_le_16(flags);
4335 }
4336
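/*
 * On Thor-based chips, query the supported aggregation limits via
 * HWRM_RING_AGGINT_QCAPS and use them to populate the coalescing
 * request.
 */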
4337 static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp,
4338                 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
4339 {
4340         struct hwrm_ring_aggint_qcaps_input req = {0};
4341         struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4342         uint32_t enables;
4343         uint16_t flags;
4344         int rc;
4345
4346         HWRM_PREP(req, RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
4347         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4348         HWRM_CHECK_RESULT();
4349
4350         agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
4351         agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;
4352
4353         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4354                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4355         agg_req->flags = rte_cpu_to_le_16(flags);
4356         enables =
4357          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
4358          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
4359         agg_req->enables = rte_cpu_to_le_32(enables);
4360
4361         HWRM_UNLOCK();
4362         return rc;
4363 }
4364
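/*
 * Apply interrupt coalescing parameters to the given completion ring.
 * No-op on devices other than Thor-based chips and Stratus NICs.
 */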
4365 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
4366                         struct bnxt_coal *coal, uint16_t ring_id)
4367 {
4368         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
4369         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
4370                                                 bp->hwrm_cmd_resp_addr;
4371         int rc;
4372
4373         /* Set ring coalesce parameters only on Thor-based chips and Stratus 100G NICs */
4374         if (BNXT_CHIP_THOR(bp)) {
4375                 rc = bnxt_hwrm_set_coal_params_thor(bp, &req);
4376                 if (rc)
                        return rc;
4377         } else if (bnxt_stratus_device(bp)) {
4378                 bnxt_hwrm_set_coal_params(coal, &req);
4379         } else {
4380                 return 0;
4381         }
4382
4383         HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS, BNXT_USE_CHIMP_MB);
4384         req.ring_id = rte_cpu_to_le_16(ring_id);
4385         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4386         HWRM_CHECK_RESULT();
4387         HWRM_UNLOCK();
4388         return 0;
4389 }
4390
4391 #define BNXT_RTE_MEMZONE_FLAG  (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
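/*
 * Query the firmware for its context backing store requirements (entry
 * counts and sizes for QP, SRQ, CQ, VNIC, stats and TQM contexts) and
 * cache them in a freshly allocated bp->ctx.
 */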
4392 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
4393 {
4394         struct hwrm_func_backing_store_qcaps_input req = {0};
4395         struct hwrm_func_backing_store_qcaps_output *resp =
4396                 bp->hwrm_cmd_resp_addr;
4397         struct bnxt_ctx_pg_info *ctx_pg;
4398         struct bnxt_ctx_mem_info *ctx;
4399         int total_alloc_len;
4400         int rc, i;
4401
4402         if (!BNXT_CHIP_THOR(bp) ||
4403             bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
4404             BNXT_VF(bp) ||
4405             bp->ctx)
4406                 return 0;
4407
4408         HWRM_PREP(req, FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
4409         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4410         HWRM_CHECK_RESULT_SILENT();
4411
4412         total_alloc_len = sizeof(*ctx);
4413         ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len,
4414                           RTE_CACHE_LINE_SIZE);
4415         if (!ctx) {
4416                 rc = -ENOMEM;
4417                 goto ctx_err;
4418         }
4419
4420         ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
4421                             sizeof(*ctx_pg) * BNXT_MAX_Q,
4422                             RTE_CACHE_LINE_SIZE);
4423         if (!ctx_pg) {
4424                 rc = -ENOMEM;
4425                 goto ctx_err;
4426         }
4427         for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++)
4428                 ctx->tqm_mem[i] = ctx_pg;
4429
4430         bp->ctx = ctx;
4431         ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
4432         ctx->qp_min_qp1_entries =
4433                 rte_le_to_cpu_16(resp->qp_min_qp1_entries);
4434         ctx->qp_max_l2_entries =
4435                 rte_le_to_cpu_16(resp->qp_max_l2_entries);
4436         ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
4437         ctx->srq_max_l2_entries =
4438                 rte_le_to_cpu_16(resp->srq_max_l2_entries);
4439         ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
4440         ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
4441         ctx->cq_max_l2_entries =
4442                 rte_le_to_cpu_16(resp->cq_max_l2_entries);
4443         ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
4444         ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
4445         ctx->vnic_max_vnic_entries =
4446                 rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
4447         ctx->vnic_max_ring_table_entries =
4448                 rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
4449         ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
4450         ctx->stat_max_entries =
4451                 rte_le_to_cpu_32(resp->stat_max_entries);
4452         ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
4453         ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
4454         ctx->tqm_min_entries_per_ring =
4455                 rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
4456         ctx->tqm_max_entries_per_ring =
4457                 rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
4458         ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
4459         if (!ctx->tqm_entries_multiple)
4460                 ctx->tqm_entries_multiple = 1;
4461         ctx->mrav_max_entries =
4462                 rte_le_to_cpu_32(resp->mrav_max_entries);
4463         ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
4464         ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
4465         ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
4466 ctx_err:
4467         HWRM_UNLOCK();
4468         return rc;
4469 }
4470
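/*
 * Hand the host memory backing each context type over to the firmware;
 * 'enables' selects which backing store regions are configured.
 */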
4471 int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
4472 {
4473         struct hwrm_func_backing_store_cfg_input req = {0};
4474         struct hwrm_func_backing_store_cfg_output *resp =
4475                 bp->hwrm_cmd_resp_addr;
4476         struct bnxt_ctx_mem_info *ctx = bp->ctx;
4477         struct bnxt_ctx_pg_info *ctx_pg;
4478         uint32_t *num_entries;
4479         uint64_t *pg_dir;
4480         uint8_t *pg_attr;
4481         uint32_t ena;
4482         int i, rc;
4483
4484         if (!ctx)
4485                 return 0;
4486
4487         HWRM_PREP(req, FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
4488         req.enables = rte_cpu_to_le_32(enables);
4489
4490         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
4491                 ctx_pg = &ctx->qp_mem;
4492                 req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4493                 req.qp_num_qp1_entries =
4494                         rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
4495                 req.qp_num_l2_entries =
4496                         rte_cpu_to_le_16(ctx->qp_max_l2_entries);
4497                 req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
4498                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4499                                       &req.qpc_pg_size_qpc_lvl,
4500                                       &req.qpc_page_dir);
4501         }
4502
4503         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
4504                 ctx_pg = &ctx->srq_mem;
4505                 req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4506                 req.srq_num_l2_entries =
4507                                  rte_cpu_to_le_16(ctx->srq_max_l2_entries);
4508                 req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
4509                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4510                                       &req.srq_pg_size_srq_lvl,
4511                                       &req.srq_page_dir);
4512         }
4513
4514         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
4515                 ctx_pg = &ctx->cq_mem;
4516                 req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4517                 req.cq_num_l2_entries =
4518                                 rte_cpu_to_le_16(ctx->cq_max_l2_entries);
4519                 req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
4520                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4521                                       &req.cq_pg_size_cq_lvl,
4522                                       &req.cq_page_dir);
4523         }
4524
4525         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
4526                 ctx_pg = &ctx->vnic_mem;
4527                 req.vnic_num_vnic_entries =
4528                         rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
4529                 req.vnic_num_ring_table_entries =
4530                         rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
4531                 req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
4532                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4533                                       &req.vnic_pg_size_vnic_lvl,
4534                                       &req.vnic_page_dir);
4535         }
4536
4537         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
4538                 ctx_pg = &ctx->stat_mem;
4539                 req.stat_num_entries = rte_cpu_to_le_32(ctx->stat_max_entries);
4540                 req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
4541                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4542                                       &req.stat_pg_size_stat_lvl,
4543                                       &req.stat_page_dir);
4544         }
4545
4546         req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
4547         num_entries = &req.tqm_sp_num_entries;
4548         pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
4549         pg_dir = &req.tqm_sp_page_dir;
4550         ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
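        /* One enable bit per TQM backing store: the slow-path ring plus rings 0-7. */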
4551         for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
4552                 if (!(enables & ena))
4553                         continue;
4554
4557                 ctx_pg = ctx->tqm_mem[i];
4558                 *num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4559                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
4560         }
4561
4562         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4563         HWRM_CHECK_RESULT();
4564         HWRM_UNLOCK();
4565
4566         return rc;
4567 }
4568
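/*
 * Ask the firmware to DMA the extended RX/TX port statistics into the
 * driver's buffers and record the stat sizes it actually returned.
 */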
4569 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
4570 {
4571         struct hwrm_port_qstats_ext_input req = {0};
4572         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
4573         struct bnxt_pf_info *pf = &bp->pf;
4574         int rc;
4575
4576         if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
4577               bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
4578                 return 0;
4579
4580         HWRM_PREP(req, PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
4581
4582         req.port_id = rte_cpu_to_le_16(pf->port_id);
4583         if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
4584                 req.tx_stat_host_addr =
4585                         rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
4586                 req.tx_stat_size =
4587                         rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
4588         }
4589         if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
4590                 req.rx_stat_host_addr =
4591                         rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
4592                 req.rx_stat_size =
4593                         rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
4594         }
4595         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4596
4597         if (rc) {
4598                 bp->fw_rx_port_stats_ext_size = 0;
4599                 bp->fw_tx_port_stats_ext_size = 0;
4600         } else {
4601                 bp->fw_rx_port_stats_ext_size =
4602                         rte_le_to_cpu_16(resp->rx_stat_size);
4603                 bp->fw_tx_port_stats_ext_size =
4604                         rte_le_to_cpu_16(resp->tx_stat_size);
4605         }
4606
4607         HWRM_CHECK_RESULT();
4608         HWRM_UNLOCK();
4609
4610         return rc;
4611 }
4612
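/* Redirect packets of the given tunnel type to this function (fw_fid). */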
4613 int
4614 bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
4615 {
4616         struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0};
4617         struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp =
4618                 bp->hwrm_cmd_resp_addr;
4619         int rc = 0;
4620
4621         HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
4622         req.tunnel_type = type;
4623         req.dest_fid = bp->fw_fid;
4624         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4625         HWRM_CHECK_RESULT();
4626
4627         HWRM_UNLOCK();
4628
4629         return rc;
4630 }
4631
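/* Remove a tunnel-type redirect previously set up for this function. */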
4632 int
4633 bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
4634 {
4635         struct hwrm_cfa_redirect_tunnel_type_free_input req = {0};
4636         struct hwrm_cfa_redirect_tunnel_type_free_output *resp =
4637                 bp->hwrm_cmd_resp_addr;
4638         int rc = 0;
4639
4640         HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
4641         req.tunnel_type = type;
4642         req.dest_fid = bp->fw_fid;
4643         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4644         HWRM_CHECK_RESULT();
4645
4646         HWRM_UNLOCK();
4647
4648         return rc;
4649 }
4650
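/* Query the mask of tunnel types currently redirected to this function. */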
4651 int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
4652 {
4653         struct hwrm_cfa_redirect_query_tunnel_type_input req = {0};
4654         struct hwrm_cfa_redirect_query_tunnel_type_output *resp =
4655                 bp->hwrm_cmd_resp_addr;
4656         int rc = 0;
4657
4658         HWRM_PREP(req, CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
4659         req.src_fid = bp->fw_fid;
4660         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4661         HWRM_CHECK_RESULT();
4662
4663         if (type)
4664                 *type = rte_le_to_cpu_32(resp->tunnel_mask);
4665
4666         HWRM_UNLOCK();
4667
4668         return rc;
4669 }
4670
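/* Look up the destination function ID of a redirected tunnel type. */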
4671 int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
4672                                    uint16_t *dst_fid)
4673 {
4674         struct hwrm_cfa_redirect_tunnel_type_info_input req = {0};
4675         struct hwrm_cfa_redirect_tunnel_type_info_output *resp =
4676                 bp->hwrm_cmd_resp_addr;
4677         int rc = 0;
4678
4679         HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
4680         req.src_fid = bp->fw_fid;
4681         req.tunnel_type = tun_type;
4682         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4683         HWRM_CHECK_RESULT();
4684
4685         if (dst_fid)
4686                 *dst_fid = rte_le_to_cpu_16(resp->dest_fid);
4687
4688         PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", rte_le_to_cpu_16(resp->dest_fid));
4689
4690         HWRM_UNLOCK();
4691
4692         return rc;
4693 }
4694
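/*
 * Program a VF's default MAC address into the firmware via
 * HWRM_FUNC_VF_CFG. No-op when called on a PF.
 */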
4695 int bnxt_hwrm_set_mac(struct bnxt *bp)
4696 {
4697         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4698         struct hwrm_func_vf_cfg_input req = {0};
4699         int rc = 0;
4700
4701         if (!BNXT_VF(bp))
4702                 return 0;
4703
4704         HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
4705
4706         req.enables =
4707                 rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
4708         memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
4709
4710         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4711
4712         HWRM_CHECK_RESULT();
4713
4714         memcpy(bp->dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
4715         HWRM_UNLOCK();
4716
4717         return rc;
4718 }
4719
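/*
 * Notify the firmware of port up/down transitions (FUNC_DRV_IF_CHANGE).
 * The response flags tell the driver whether the firmware underwent a
 * hot reset while the port was down.
 */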
4720 int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
4721 {
4722         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
4723         struct hwrm_func_drv_if_change_input req = {0};
4724         uint32_t flags;
4725         int rc;
4726
4727         if (!(bp->flags & BNXT_FLAG_FW_CAP_IF_CHANGE))
4728                 return 0;
4729
4730         /* Do not issue FUNC_DRV_IF_CHANGE during reset recovery:
4731          * issuing it with the "down" flag before FUNC_DRV_UNRGTR causes
4732          * the firmware to reset before the unregister is processed.
4733          */
4734         if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
4735                 return 0;
4736
4737         HWRM_PREP(req, FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);
4738
4739         if (up)
4740                 req.flags =
4741                 rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP);
4742
4743         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4744
4745         HWRM_CHECK_RESULT();
4746         flags = rte_le_to_cpu_32(resp->flags);
4747         HWRM_UNLOCK();
4748
4749         if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
4750                 PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
4751                 bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
4752         }
4753
4754         return 0;
4755 }
4756
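/*
 * Fetch the firmware's error recovery parameters (polling intervals,
 * wait periods, health/heartbeat register locations and the reset
 * register sequence), cache them in bp->recovery_info and map the FW
 * status registers.
 */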
4757 int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
4758 {
4759         struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4760         struct bnxt_error_recovery_info *info = bp->recovery_info;
4761         struct hwrm_error_recovery_qcfg_input req = {0};
4762         uint32_t flags = 0;
4763         unsigned int i;
4764         int rc;
4765
4766         /* Older FW does not have error recovery support */
4767         if (!(bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY))
4768                 return 0;
4769
4770         if (!info) {
4771                 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
4772                                    sizeof(*info), 0);
4773                 bp->recovery_info = info;
4774                 if (info == NULL)
4775                         return -ENOMEM;
4776         } else {
4777                 memset(info, 0, sizeof(*info));
4778         }
4779
4780         HWRM_PREP(req, ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
4781
4782         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4783
4784         HWRM_CHECK_RESULT();
4785
4786         flags = rte_le_to_cpu_32(resp->flags);
4787         if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST)
4788                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST;
4789         else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
4790                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU;
4791
4792         if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) &&
4793             !(bp->flags & BNXT_FLAG_KONG_MB_EN)) {
4794                 rc = -EINVAL;
4795                 goto err;
4796         }
4797
4798         /* FW returned values are in units of 100msec */
4799         info->driver_polling_freq =
4800                 rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
4801         info->master_func_wait_period =
4802                 rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
4803         info->normal_func_wait_period =
4804                 rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
4805         info->master_func_wait_period_after_reset =
4806                 rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100;
4807         info->max_bailout_time_after_reset =
4808                 rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
4809         info->status_regs[BNXT_FW_STATUS_REG] =
4810                 rte_le_to_cpu_32(resp->fw_health_status_reg);
4811         info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] =
4812                 rte_le_to_cpu_32(resp->fw_heartbeat_reg);
4813         info->status_regs[BNXT_FW_RECOVERY_CNT_REG] =
4814                 rte_le_to_cpu_32(resp->fw_reset_cnt_reg);
4815         info->status_regs[BNXT_FW_RESET_INPROG_REG] =
4816                 rte_le_to_cpu_32(resp->reset_inprogress_reg);
4817         info->reg_array_cnt =
4818                 rte_le_to_cpu_32(resp->reg_array_cnt);
4819
4820         if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) {
4821                 rc = -EINVAL;
4822                 goto err;
4823         }
4824
4825         for (i = 0; i < info->reg_array_cnt; i++) {
4826                 info->reset_reg[i] =
4827                         rte_le_to_cpu_32(resp->reset_reg[i]);
4828                 info->reset_reg_val[i] =
4829                         rte_le_to_cpu_32(resp->reset_reg_val[i]);
4830                 info->delay_after_reset[i] =
4831                         resp->delay_after_reset[i];
4832         }
4833 err:
4834         HWRM_UNLOCK();
4835
4836         /* Map the FW status registers */
4837         if (!rc)
4838                 rc = bnxt_map_fw_health_status_regs(bp);
4839
4840         if (rc) {
4841                 rte_free(bp->recovery_info);
4842                 bp->recovery_info = NULL;
4843         }
4844         return rc;
4845 }
4846
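/* Request a graceful, self-resetting chip-level firmware reset. PF only. */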
4847 int bnxt_hwrm_fw_reset(struct bnxt *bp)
4848 {
4849         struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr;
4850         struct hwrm_fw_reset_input req = {0};
4851         int rc;
4852
4853         if (!BNXT_PF(bp))
4854                 return -EOPNOTSUPP;
4855
4856         HWRM_PREP(req, FW_RESET, BNXT_USE_KONG(bp));
4857
4858         req.embedded_proc_type =
4859                 HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
4860         req.selfrst_status =
4861                 HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
4862         req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
4863
4864         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
4865                                     BNXT_USE_KONG(bp));
4866
4867         HWRM_CHECK_RESULT();
4868         HWRM_UNLOCK();
4869
4870         return rc;
4871 }
4872
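/*
 * Read a 64-bit PTP timestamp from the port: the last TX or RX event
 * timestamp or the current time, depending on 'path'.
 */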
4873 int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
4874 {
4875         struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
4876         struct hwrm_port_ts_query_input req = {0};
4877         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
4878         uint32_t flags = 0;
4879         int rc;
4880
4881         if (!ptp)
4882                 return 0;
4883
4884         HWRM_PREP(req, PORT_TS_QUERY, BNXT_USE_CHIMP_MB);
4885
4886         switch (path) {
4887         case BNXT_PTP_FLAGS_PATH_TX:
4888                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX;
4889                 break;
4890         case BNXT_PTP_FLAGS_PATH_RX:
4891                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX;
4892                 break;
4893         case BNXT_PTP_FLAGS_CURRENT_TIME:
4894                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME;
4895                 break;
4896         }
4897
4898         req.flags = rte_cpu_to_le_32(flags);
4899         req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
4900
4901         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4902
4903         HWRM_CHECK_RESULT();
4904
4905         if (timestamp) {
4906                 *timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]);
4907                 *timestamp |=
4908                         (uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32;
4909         }
4910         HWRM_UNLOCK();
4911
4912         return rc;
4913 }