net/bnxt: fix default Rx queue for Thor
drivers/net/bnxt/bnxt_hwrm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

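/*
 * HWRM command timeouts, in microseconds: the response valid bit is
 * polled once per microsecond, so these values bound the number of
 * poll iterations in bnxt_hwrm_send_message().
 */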
#define HWRM_CMD_TIMEOUT                6000000
#define HWRM_SHORT_CMD_TIMEOUT          50000
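/*
 * HWRM interface spec versions, encoded as
 * (major << 16) | (minor << 8) | update, matching bp->hwrm_spec_code.
 */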
#define HWRM_SPEC_CODE_1_8_3            0x10803
#define HWRM_VERSION_1_9_1              0x10901
#define HWRM_VERSION_1_9_2              0x10903

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}
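
/*
 * Illustrative note: page_roundup() below returns the smallest supported
 * page size that can hold "size" bytes, e.g. page_roundup(3000) yields
 * 4096 (2^12) and page_roundup(70000) yields 2097152 (2^21).
 */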

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}

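/*
 * Point the firmware at a ring's backing store: a single page is passed
 * directly by its DMA address, while multiple pages are referenced
 * indirectly through a page directory (pg_attr = 1).
 */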
static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
                                  uint8_t *pg_attr,
                                  uint64_t *pg_dir)
{
        if (rmem->nr_pages > 1) {
                *pg_attr = 1;
                *pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
        } else {
                *pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
        }
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -ETIMEDOUT if
 * bnxt_hwrm_send_message() times out, or a negative errno (mapped from
 * the HWRM error code by HWRM_CHECK_RESULT()) if the command was rejected
 * by the firmware (ChiMP).
 */

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                  uint32_t msg_len, bool use_kong_mb)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };
        uint16_t bar_offset = use_kong_mb ?
                GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
        uint16_t mb_trigger_offset = use_kong_mb ?
                GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
        uint32_t timeout;

        /* Do not send HWRM commands to firmware in error state */
        if (bp->flags & BNXT_FLAG_FATAL_ERROR)
                return 0;

        /* For the VER_GET command, use the shorter 50ms timeout */
        if (rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
                timeout = HWRM_SHORT_CMD_TIMEOUT;
        else
                timeout = HWRM_CMD_TIMEOUT;

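        /*
         * Oversized requests (or any request when the firmware mandates
         * short commands) are passed by reference: the full request is
         * copied into a DMA buffer and a small hwrm_short_input descriptor
         * pointing at it is written to the mailbox instead.
         */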
        if (bp->flags & BNXT_FLAG_SHORT_CMD ||
            msg_len > bp->max_req_len) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
        rte_write32(1, bar);
        /*
         * Make sure the channel doorbell write completes before reading
         * the response, to avoid getting stale or invalid responses.
         */
        rte_io_mb();

        /* Poll for the valid bit */
        for (i = 0; i < timeout; i++) {
                /* Sanity check on the resp->resp_len */
                rte_cio_rmb();
                if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(1);
        }

        if (i >= timeout) {
                /* Suppress VER_GET timeout messages during reset recovery */
                if (bp->flags & BNXT_FLAG_FW_RESET &&
                    rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
                        return -ETIMEDOUT;

                PMD_DRV_LOG(ERR, "Error(timeout) sending msg 0x%04x\n",
                            req->req_type);
                return -ETIMEDOUT;
        }
        return 0;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() returns from the enclosing function on failure,
 * releasing the spinlock before it does so; on success it leaves the lock
 * held.  If the enclosing function does not use the regular int return
 * codes, HWRM_CHECK_RESULT() should not be used directly; rather it should
 * be copied and modified to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type, kong) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(-1); \
        req.seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
                rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT_SILENT() do {\
        if (rc) { \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
                        rc = -ENOSPC; \
                else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
                else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
                        rc = -ENOTSUP; \
                else if (rc > 0) \
                        rc = -EIO; \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        PMD_DRV_LOG(ERR, \
                                "error %d:%d:%08x:%04x\n", \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        PMD_DRV_LOG(ERR, "error %d\n", rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
                        rc = -ENOSPC; \
                else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
                else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
                        rc = -ENOTSUP; \
                else if (rc > 0) \
                        rc = -EIO; \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)
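
/*
 * Sketch of the canonical calling pattern described above (illustrative
 * only; "XYZ" is a placeholder, not a real HWRM command):
 *
 *      struct hwrm_xyz_input req = {.req_type = 0 };
 *      struct hwrm_xyz_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *      HWRM_PREP(req, XYZ, BNXT_USE_CHIMP_MB);
 *      ... fill request fields ...
 *      rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 *      HWRM_CHECK_RESULT();    -- on error: releases the lock and returns rc
 *      ... read fields from *resp ...
 *      HWRM_UNLOCK();
 */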

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;

        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;

        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        } else if (vnic->flags & BNXT_VNIC_INFO_MCAST) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;
        /*
         * Older HWRM versions did not support this command, and the
         * set_rx_mask list was used for anti-spoofing instead. In 1.8.0,
         * the TX path configuration was removed from the set_rx_mask call
         * and this command was added.
         *
         * The command is also present in 1.7.8.0 and in 1.7.8.11 and
         * higher.
         */
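        /*
         * bp->fw_ver packs the version as (major << 24) | (minor << 16) |
         * (build << 8) | patch, so 1.8.0 is (1 << 24) | (8 << 16) and
         * 1.7.8.11 is (1 << 24) | (7 << 16) | (8 << 8) | 11.
         */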
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct bnxt_filter_info *l2_filter = filter;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        if (filter->matching_l2_fltr_ptr)
                l2_filter = filter->matching_l2_fltr_ptr;

        PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
                    filter, l2_filter, l2_filter->l2_ref_cnt);

        if (l2_filter->l2_ref_cnt > 0)
                l2_filter->l2_ref_cnt--;

        if (l2_filter->l2_ref_cnt > 0)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = UINT64_MAX;

        return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        /* TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ */
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                PMD_DRV_LOG(DEBUG,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;
        if (filter->pri_hint) {
                req.pri_hint = filter->pri_hint;
                req.l2_filter_id_hint =
                        rte_cpu_to_le_64(filter->l2_filter_id_hint);
        }

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_CFG, BNXT_USE_CHIMP_MB);

        if (ptp->rx_filter)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables = rte_cpu_to_le_32
                (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*      if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
        if (ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);

        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (!BNXT_CHIP_THOR(bp) &&
            !(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
                return 0;

        if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
                bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp)
                return -ENOMEM;

        if (!BNXT_CHIP_THOR(bp)) {
                ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
                ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
                ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
                ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
                ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
                ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
                ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
                ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
                ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
        }

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        return 0;
}

static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Fail to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Alloc VLAN AS table for VF %d fail\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
        bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        if (!BNXT_CHIP_THOR(bp))
                bp->max_l2_ctx += bp->max_rx_em_flows;
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
                        HWRM_UNLOCK();
                        bnxt_hwrm_ptp_qcfg(bp);
                }
        }

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
                bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
                bp->flags |= BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
                PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
        } else {
                bp->flags &= ~BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
        }

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
                bp->flags |= BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;
        else
                bp->flags &= ~BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc;

        rc = __bnxt_hwrm_func_qcaps(bp);
        if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
                rc = bnxt_alloc_ctx_mem(bp);
                if (rc)
                        return rc;

                rc = bnxt_hwrm_func_resc_qcaps(bp);
                if (!rc)
                        bp->flags |= BNXT_FLAG_NEW_RM;
        }

        return rc;
}

/* VNIC qcaps covers the capabilities of all VNICs, so there is no need to pass a vnic_id */
int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_vnic_qcaps_input req = {.req_type = 0 };
        struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_QCAPS, BNXT_USE_CHIMP_MB);

        req.target_id = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (rte_le_to_cpu_32(resp->flags) &
            HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
                bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY;
                PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n");
        }

        bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET, BNXT_USE_CHIMP_MB);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        uint32_t flags = 0;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
        if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;

        /* PFs and trusted VFs should indicate support for the Master
         * capability on non-Stingray platforms.
         */
        if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;

        HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));

                /*
                 * The PF can sniff HWRM commands issued by a VF. This can be
                 * set up by the Linux driver and inherited by the DPDK PF
                 * driver. Clear this HWRM sniffer list in the firmware
                 * because the DPDK PF driver does not support it.
                 */
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE;
        }

        req.flags = rte_cpu_to_le_32(flags);

        req.async_event_fwd[0] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
        if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
                req.async_event_fwd[0] |=
                        rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        flags = rte_le_to_cpu_32(resp->flags);
        if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
                bp->flags |= BNXT_FLAG_FW_CAP_IF_CHANGE;

        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
{
        if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
                return 0;

        return bnxt_hwrm_func_reserve_vf_resc(bp, true);
}

int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
{
        int rc;
        uint32_t flags = 0;
        uint32_t enables;
        struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_vf_cfg_input req = {0};

        HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);

        enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;

        if (BNXT_HAS_RING_GRPS(bp)) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
                req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
        }

        req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
        req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
                                            AGG_RING_MULTIPLIER);
        req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
                                              bp->tx_nr_rings +
                                              BNXT_NUM_ASYNC_CPR(bp));
        req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
        if (bp->vf_resv_strategy ==
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
                req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
                req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
                req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
        }

        if (test)
                flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;

        if (test && BNXT_HAS_RING_GRPS(bp))
                flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;

        req.flags = rte_cpu_to_le_32(flags);
        req.enables |= rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (test)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_resource_qcaps_input req = {0};

        HWRM_PREP(req, FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (BNXT_VF(bp)) {
                bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
                bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
                bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
                /* func_resource_qcaps does not return max_rx_em_flows.
                 * So use the value provided by func_qcaps.
                 */
                bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
                if (!BNXT_CHIP_THOR(bp))
                        bp->max_l2_ctx += bp->max_rx_em_flows;
                bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
                bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        }
        bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
        bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
        if (bp->vf_resv_strategy >
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
                bp->vf_resv_strategy =
                HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (bp->flags & BNXT_FLAG_FW_RESET)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
                resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
                resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
        bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
                     (resp->hwrm_fw_min_8b << 16) |
                     (resp->hwrm_fw_bld_8b << 8) |
                     resp->hwrm_fw_rsvd_8b;
        PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        fw_version = resp->hwrm_intf_maj_8b << 16;
        fw_version |= resp->hwrm_intf_min_8b << 8;
        fw_version |= resp->hwrm_intf_upd_8b;
        bp->hwrm_spec_code = fw_version;

        if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                PMD_DRV_LOG(ERR, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
        if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
                bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;

        max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                PMD_DRV_LOG(DEBUG, "Short command supported\n");
                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

        if (((dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
             (dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
            bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
                sprintf(type, "bnxt_hwrm_short_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr =
                                rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
                bp->flags |= BNXT_FLAG_KONG_MB_EN;
                PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
                PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
                bp->flags |= BNXT_FLAG_ADV_FLOW_MGMT;
                PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
        }

error:
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG, BNXT_USE_CHIMP_MB);

        if (conf->link_up) {
                /* Setting a fixed speed while autoneg is on, so disable autoneg */
                if (bp->link_info.auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
                }

                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        /* No speeds specified. Enable AutoNeg - all speeds */
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
                /* AutoNeg - Advertise speeds specified. */
                if (conf->auto_link_speed_mask &&
                    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
                                conf->auto_link_speed_mask;
                        enables |=
                        HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }

                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                PMD_DRV_LOG(INFO, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex_cfg;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
        link_info->phy_type = resp->phy_type;
        link_info->media_type = resp->media_type;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        HWRM_UNLOCK();

        PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
        PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
                    link_info->auto_link_speed_mask);
        PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
                    link_info->force_link_speed);

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
        int i;

get_rx_info:
        HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);

        req.flags = rte_cpu_to_le_32(dir);
        /* HWRM Version >= 1.9.1 */
        if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
                req.drv_qmap_cap =
                        HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
                GET_TX_QUEUE_INFO(0);
                GET_TX_QUEUE_INFO(1);
                GET_TX_QUEUE_INFO(2);
                GET_TX_QUEUE_INFO(3);
                GET_TX_QUEUE_INFO(4);
                GET_TX_QUEUE_INFO(5);
                GET_TX_QUEUE_INFO(6);
                GET_TX_QUEUE_INFO(7);
        } else  {
                GET_RX_QUEUE_INFO(0);
                GET_RX_QUEUE_INFO(1);
                GET_RX_QUEUE_INFO(2);
                GET_RX_QUEUE_INFO(3);
                GET_RX_QUEUE_INFO(4);
                GET_RX_QUEUE_INFO(5);
                GET_RX_QUEUE_INFO(6);
                GET_RX_QUEUE_INFO(7);
        }

        HWRM_UNLOCK();

        if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX)
                goto done;

        if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
                bp->tx_cosq_id[0] = bp->tx_cos_queue[0].id;
        } else {
                int j;

                /* iterate and find the COSq profile to use for Tx */
                if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
                        for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
                                if (bp->tx_cos_queue[i].id != 0xff)
                                        bp->tx_cosq_id[j++] =
                                                bp->tx_cos_queue[i].id;
                        }
                } else {
                        for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
                                if (bp->tx_cos_queue[i].profile ==
                                        HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
                                        bp->tx_cosq_id[0] =
                                                bp->tx_cos_queue[i].id;
                                        break;
                                }
                        }
                }
        }

        bp->max_tc = resp->max_configurable_queues;
        bp->max_lltc = resp->max_configurable_lossless_queues;
        if (bp->max_tc > BNXT_MAX_QUEUE)
                bp->max_tc = BNXT_MAX_QUEUE;
        bp->max_q = bp->max_tc;

        if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
                dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX;
                goto get_rx_info;
        }

done:
        return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
                         uint16_t tx_cosq_id)
{
        int rc = 0;
        uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_mempool *mb_pool;
        uint16_t rx_buf_size;

        HWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);
        req.length = rte_cpu_to_le_32(ring->ring_size);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
                req.queue_id = rte_cpu_to_le_16(tx_cosq_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
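                /*
                 * On Thor, also report the usable Rx buffer size, derived
                 * from the mbuf pool's data room and capped at
                 * BNXT_MAX_PKT_LEN.
                 */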
                if (BNXT_CHIP_THOR(bp)) {
                        mb_pool = bp->rx_queues[0]->mb_pool;
                        rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
                                      RTE_PKTMBUF_HEADROOM;
                        rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
                        req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
                        enables |=
                                HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
                }
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                                HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                if (BNXT_HAS_NQ(bp)) {
                        /* Association of cp ring with nq */
                        req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                        enables |=
                                HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
                }
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
                req.ring_type = ring_type;
                req.page_size = BNXT_PAGE_SHFT;
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
                req.ring_type = ring_type;
                req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);

                mb_pool = bp->rx_queues[0]->mb_pool;
                rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
                              RTE_PKTMBUF_HEADROOM;
                rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
                req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);

                req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
                enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
                           HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
                           HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        default:
                PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                HWRM_UNLOCK();
                return -EINVAL;
        }
        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                        PMD_DRV_LOG(ERR,
                                    "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
                        PMD_DRV_LOG(ERR,
                                    "hwrm_ring_alloc rx agg failed. rc:%d\n",
                                    rc);
1392                         HWRM_UNLOCK();
1393                         return rc;
1394                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1395                         PMD_DRV_LOG(ERR,
1396                                     "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1397                         HWRM_UNLOCK();
1398                         return rc;
1399                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1400                         PMD_DRV_LOG(ERR,
1401                                     "hwrm_ring_alloc nq failed. rc:%d\n", rc);
1402                         HWRM_UNLOCK();
1403                         return rc;
1404                 default:
1405                         PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1406                         HWRM_UNLOCK();
1407                         return rc;
1408                 }
1409         }
1410
1411         ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1412         HWRM_UNLOCK();
1413         return rc;
1414 }
1415
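/*
 * Free a firmware ring of the given type via HWRM_RING_FREE. Callers are
 * responsible for resetting ring->fw_ring_id afterwards.
 */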
1416 int bnxt_hwrm_ring_free(struct bnxt *bp,
1417                         struct bnxt_ring *ring, uint32_t ring_type)
1418 {
1419         int rc;
1420         struct hwrm_ring_free_input req = {.req_type = 0 };
1421         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1422
1423         HWRM_PREP(req, RING_FREE, BNXT_USE_CHIMP_MB);
1424
1425         req.ring_type = ring_type;
1426         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1427
1428         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1429
1430         if (rc || resp->error_code) {
1431                 if (rc == 0 && resp->error_code)
1432                         rc = rte_le_to_cpu_16(resp->error_code);
1433                 HWRM_UNLOCK();
1434
1435                 switch (ring_type) {
1436                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1437                         PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1438                                 rc);
1439                         return rc;
1440                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1441                         PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1442                                 rc);
1443                         return rc;
1444                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1445                         PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1446                                 rc);
1447                         return rc;
1448                 case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
1449                         PMD_DRV_LOG(ERR,
1450                                     "hwrm_ring_free nq failed. rc:%d\n", rc);
1451                         return rc;
1452                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
1453                         PMD_DRV_LOG(ERR,
1454                                     "hwrm_ring_free agg failed. rc:%d\n", rc);
1455                         return rc;
1456                 default:
1457                         PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1458                         return rc;
1459                 }
1460         }
1461         HWRM_UNLOCK();
1462         return 0;
1463 }
1464
1465 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1466 {
1467         int rc = 0;
1468         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1469         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1470
1471         HWRM_PREP(req, RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
1472
1473         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1474         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1475         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1476         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1477
1478         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1479
1480         HWRM_CHECK_RESULT();
1481
1482         bp->grp_info[idx].fw_grp_id =
1483             rte_le_to_cpu_16(resp->ring_group_id);
1484
1485         HWRM_UNLOCK();
1486
1487         return rc;
1488 }
1489
1490 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1491 {
1492         int rc;
1493         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1494         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1495
1496         HWRM_PREP(req, RING_GRP_FREE, BNXT_USE_CHIMP_MB);
1497
1498         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1499
1500         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1501
1502         HWRM_CHECK_RESULT();
1503         HWRM_UNLOCK();
1504
1505         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1506         return rc;
1507 }
1508
1509 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1510 {
1511         int rc = 0;
1512         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1513         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1514
1515         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1516                 return rc;
1517
1518         HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
1519
1520         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1521
1522         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1523
1524         HWRM_CHECK_RESULT();
1525         HWRM_UNLOCK();
1526
1527         return rc;
1528 }
1529
1530 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1531                                 unsigned int idx __rte_unused)
1532 {
1533         int rc;
1534         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1535         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1536
1537         HWRM_PREP(req, STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1538
1539         req.update_period_ms = rte_cpu_to_le_32(0);
1540
1541         req.stats_dma_addr =
1542             rte_cpu_to_le_64(cpr->hw_stats_map);
1543
1544         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1545
1546         HWRM_CHECK_RESULT();
1547
1548         cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
1549
1550         HWRM_UNLOCK();
1551
1552         return rc;
1553 }
1554
1555 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1556                                 unsigned int idx __rte_unused)
1557 {
1558         int rc;
1559         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1560         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1561
1562         HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
1563
1564         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1565
1566         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1567
1568         HWRM_CHECK_RESULT();
1569         HWRM_UNLOCK();
1570
1571         return rc;
1572 }
1573
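/*
 * Allocate a VNIC in firmware via HWRM_VNIC_ALLOC. On chips with ring
 * groups, the groups in [start_grp_id, end_grp_id) are mapped to the
 * VNIC first. The firmware VNIC id is saved in vnic->fw_vnic_id.
 */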
1574 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1575 {
1576         int rc = 0, i, j;
1577         struct hwrm_vnic_alloc_input req = { 0 };
1578         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1579
1580         if (!BNXT_HAS_RING_GRPS(bp))
1581                 goto skip_ring_grps;
1582
1583         /* map ring groups to this vnic */
1584         PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1585                 vnic->start_grp_id, vnic->end_grp_id);
1586         for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
1587                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1588
1589         vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1590         vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1591         vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1592         vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1593
1594 skip_ring_grps:
1595         vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
1596                                 RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
1597         HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);
1598
1599         if (vnic->func_default)
1600                 req.flags =
1601                         rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1602         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1603
1604         HWRM_CHECK_RESULT();
1605
1606         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1607         HWRM_UNLOCK();
1608         PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1609         return rc;
1610 }
1611
1612 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1613                                         struct bnxt_vnic_info *vnic,
1614                                         struct bnxt_plcmodes_cfg *pmode)
1615 {
1616         int rc = 0;
1617         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1618         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1619
1620         HWRM_PREP(req, VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
1621
1622         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1623
1624         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1625
1626         HWRM_CHECK_RESULT();
1627
1628         pmode->flags = rte_le_to_cpu_32(resp->flags);
1629         /* dflt_vnic bit doesn't exist in the _cfg command */
1630         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1631         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1632         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1633         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1634
1635         HWRM_UNLOCK();
1636
1637         return rc;
1638 }
1639
1640 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1641                                        struct bnxt_vnic_info *vnic,
1642                                        struct bnxt_plcmodes_cfg *pmode)
1643 {
1644         int rc = 0;
1645         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1646         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1647
1648         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                PMD_DRV_LOG(DEBUG, "Invalid VNIC ID %x\n", vnic->fw_vnic_id);
1650                 return rc;
1651         }
1652
1653         HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1654
1655         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1656         req.flags = rte_cpu_to_le_32(pmode->flags);
1657         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1658         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1659         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1660         req.enables = rte_cpu_to_le_32(
1661             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1662             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1663             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1664         );
1665
1666         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1667
1668         HWRM_CHECK_RESULT();
1669         HWRM_UNLOCK();
1670
1671         return rc;
1672 }
1673
1674 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1675 {
1676         int rc = 0;
1677         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1678         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1679         struct bnxt_plcmodes_cfg pmodes = { 0 };
1680         uint32_t ctx_enable_flag = 0;
1681         uint32_t enables = 0;
1682
1683         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                PMD_DRV_LOG(DEBUG, "Invalid VNIC ID %x\n", vnic->fw_vnic_id);
1685                 return rc;
1686         }
1687
1688         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1689         if (rc)
1690                 return rc;
1691
1692         HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);
1693
1694         if (BNXT_CHIP_THOR(bp)) {
1695                 struct bnxt_rx_queue *rxq =
1696                         bp->eth_dev->data->rx_queues[vnic->start_grp_id];
1697                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1698                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1699
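                /* Use the Rx and completion rings of the first queue
                 * assigned to this VNIC (start_grp_id), not queue 0, as
                 * the VNIC's default rings.
                 */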
1700                 req.default_rx_ring_id =
1701                         rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
1702                 req.default_cmpl_ring_id =
1703                         rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
1704                 enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
1705                           HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
1706                 goto config_mru;
1707         }
1708
        /* Only RSS is supported for now; COS and LB are TBD. */
1710         enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
1711         if (vnic->lb_rule != 0xffff)
1712                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1713         if (vnic->cos_rule != 0xffff)
1714                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1715         if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
1716                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1717                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1718         }
1719         if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
1720                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID;
1721                 req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id);
1722         }
1723
1724         enables |= ctx_enable_flag;
1725         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1726         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1727         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1728         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1729
1730 config_mru:
1731         req.enables = rte_cpu_to_le_32(enables);
1732         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1733         req.mru = rte_cpu_to_le_16(vnic->mru);
1734         /* Configure default VNIC only once. */
1735         if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
1736                 req.flags |=
1737                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1738                 bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
1739         }
1740         if (vnic->vlan_strip)
1741                 req.flags |=
1742                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1743         if (vnic->bd_stall)
1744                 req.flags |=
1745                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
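        /* The ROCE and RSS_DFLT_CR flags below use QCFG output constants;
         * their values coincide with the corresponding VNIC_CFG input flags.
         */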
1746         if (vnic->roce_dual)
1747                 req.flags |= rte_cpu_to_le_32(
1748                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1749         if (vnic->roce_only)
1750                 req.flags |= rte_cpu_to_le_32(
1751                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1752         if (vnic->rss_dflt_cr)
1753                 req.flags |= rte_cpu_to_le_32(
1754                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1755
1756         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1757
1758         HWRM_CHECK_RESULT();
1759         HWRM_UNLOCK();
1760
1761         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1762
1763         return rc;
1764 }
1765
1766 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1767                 int16_t fw_vf_id)
1768 {
1769         int rc = 0;
1770         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1771         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1772
1773         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                PMD_DRV_LOG(DEBUG, "VNIC QCFG: invalid VNIC ID %x\n",
                            vnic->fw_vnic_id);
1775                 return rc;
1776         }
1777         HWRM_PREP(req, VNIC_QCFG, BNXT_USE_CHIMP_MB);
1778
1779         req.enables =
1780                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1781         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1782         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1783
1784         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1785
1786         HWRM_CHECK_RESULT();
1787
1788         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1789         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1790         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1791         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1792         vnic->mru = rte_le_to_cpu_16(resp->mru);
1793         vnic->func_default = rte_le_to_cpu_32(
1794                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1795         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1796                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1797         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1798                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1799         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1800                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1801         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1802                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1803         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1804                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1805
1806         HWRM_UNLOCK();
1807
1808         return rc;
1809 }
1810
1811 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
1812                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
1813 {
1814         int rc = 0;
1815         uint16_t ctx_id;
1816         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1817         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1818                                                 bp->hwrm_cmd_resp_addr;
1819
1820         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1821
1822         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1823         HWRM_CHECK_RESULT();
1824
1825         ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1826         if (!BNXT_HAS_RING_GRPS(bp))
1827                 vnic->fw_grp_ids[ctx_idx] = ctx_id;
1828         else if (ctx_idx == 0)
1829                 vnic->rss_rule = ctx_id;
1830
1831         HWRM_UNLOCK();
1832
1833         return rc;
1834 }
1835
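/* Free a single RSS/COS/LB context; a ctx_idx of HWRM_NA_SIGNATURE is a
 * no-op.
 */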
1836 static
1837 int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
1838                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
1839 {
1840         int rc = 0;
1841         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1842         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1843                                                 bp->hwrm_cmd_resp_addr;
1844
1845         if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
1846                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1847                 return rc;
1848         }
1849         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
1850
1851         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
1852
1853         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1854
1855         HWRM_CHECK_RESULT();
1856         HWRM_UNLOCK();
1857
1858         return rc;
1859 }
1860
1861 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1862 {
1863         int rc = 0;
1864
1865         if (BNXT_CHIP_THOR(bp)) {
1866                 int j;
1867
1868                 for (j = 0; j < vnic->num_lb_ctxts; j++) {
1869                         rc = _bnxt_hwrm_vnic_ctx_free(bp,
1870                                                       vnic,
1871                                                       vnic->fw_grp_ids[j]);
1872                         vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
1873                 }
1874                 vnic->num_lb_ctxts = 0;
1875         } else {
1876                 rc = _bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
1877                 vnic->rss_rule = INVALID_HW_RING_ID;
1878         }
1879
1880         return rc;
1881 }
1882
1883 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1884 {
1885         int rc = 0;
1886         struct hwrm_vnic_free_input req = {.req_type = 0 };
1887         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1888
1889         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                PMD_DRV_LOG(DEBUG, "VNIC FREE: invalid VNIC ID %x\n",
                            vnic->fw_vnic_id);
1891                 return rc;
1892         }
1893
1894         HWRM_PREP(req, VNIC_FREE, BNXT_USE_CHIMP_MB);
1895
1896         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1897
1898         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1899
1900         HWRM_CHECK_RESULT();
1901         HWRM_UNLOCK();
1902
1903         vnic->fw_vnic_id = INVALID_HW_RING_ID;
        /* Clear the flag so the default VNIC can be reconfigured later. */
1905         if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
1906                 bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;
1907
1908         return rc;
1909 }
1910
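/*
 * On Thor, RSS is configured with one HWRM_VNIC_RSS_CFG request per
 * ring-table pair, each pointing at its own slice of the RSS table and
 * its own RSS context.
 */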
1911 static int
1912 bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1913 {
1914         int i;
1915         int rc = 0;
1916         int nr_ctxs = vnic->num_lb_ctxts;
1917         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1918         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1919
1920         for (i = 0; i < nr_ctxs; i++) {
1921                 HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
1922
1923                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1924                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1925                 req.hash_mode_flags = vnic->hash_mode;
1926
1927                 req.hash_key_tbl_addr =
1928                         rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1929
1930                 req.ring_grp_tbl_addr =
1931                         rte_cpu_to_le_64(vnic->rss_table_dma_addr +
1932                                          i * HW_HASH_INDEX_SIZE);
1933                 req.ring_table_pair_index = i;
1934                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
1935
1936                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
1937                                             BNXT_USE_CHIMP_MB);
1938
1939                 HWRM_CHECK_RESULT();
1940                 HWRM_UNLOCK();
1941         }
1942
1943         return rc;
1944 }
1945
1946 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1947                            struct bnxt_vnic_info *vnic)
1948 {
1949         int rc = 0;
1950         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1951         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1952
1953         if (!vnic->rss_table)
1954                 return 0;
1955
1956         if (BNXT_CHIP_THOR(bp))
1957                 return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
1958
1959         HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
1960
1961         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1962         req.hash_mode_flags = vnic->hash_mode;
1963
1964         req.ring_grp_tbl_addr =
1965             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1966         req.hash_key_tbl_addr =
1967             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1968         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1969         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1970
1971         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1972
1973         HWRM_CHECK_RESULT();
1974         HWRM_UNLOCK();
1975
1976         return rc;
1977 }
1978
1979 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1980                         struct bnxt_vnic_info *vnic)
1981 {
1982         int rc = 0;
1983         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1984         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1985         uint16_t size;
1986
1987         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                PMD_DRV_LOG(DEBUG, "Invalid VNIC ID %x\n", vnic->fw_vnic_id);
1989                 return rc;
1990         }
1991
1992         HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1993
1994         req.flags = rte_cpu_to_le_32(
1995                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1996
1997         req.enables = rte_cpu_to_le_32(
1998                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1999
2000         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2001         size -= RTE_PKTMBUF_HEADROOM;
2002         size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
2003
2004         req.jumbo_thresh = rte_cpu_to_le_16(size);
2005         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2006
2007         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2008
2009         HWRM_CHECK_RESULT();
2010         HWRM_UNLOCK();
2011
2012         return rc;
2013 }
2014
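/*
 * Enable or disable TPA (hardware LRO) on a VNIC. When enabling, the
 * aggregation limits come from the chip-specific BNXT_TPA_MAX_* macros;
 * when disabling, an otherwise empty HWRM_VNIC_TPA_CFG is sent.
 */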
2015 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
2016                         struct bnxt_vnic_info *vnic, bool enable)
2017 {
2018         int rc = 0;
2019         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
2020         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2021
2022         if (BNXT_CHIP_THOR(bp) && !bp->max_tpa_v2) {
2023                 if (enable)
2024                         PMD_DRV_LOG(ERR, "No HW support for LRO\n");
2025                 return -ENOTSUP;
2026         }
2027
2028         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2029                 PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
2030                 return 0;
2031         }
2032
2033         HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
2034
2035         if (enable) {
2036                 req.enables = rte_cpu_to_le_32(
2037                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
2038                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
2039                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
2040                 req.flags = rte_cpu_to_le_32(
2041                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
2042                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
2043                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
2044                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
2045                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
2046                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
2047                 req.max_agg_segs = rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp));
2048                 req.max_aggs = rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp));
2049                 req.min_agg_len = rte_cpu_to_le_32(512);
2050         }
2051         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2052
2053         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2054
2055         HWRM_CHECK_RESULT();
2056         HWRM_UNLOCK();
2057
2058         return rc;
2059 }
2060
2061 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
2062 {
2063         struct hwrm_func_cfg_input req = {0};
2064         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2065         int rc;
2066
2067         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2068         req.enables = rte_cpu_to_le_32(
2069                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2070         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
2071         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2072
2073         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2074
2075         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2076         HWRM_CHECK_RESULT();
2077         HWRM_UNLOCK();
2078
2079         bp->pf.vf_info[vf].random_mac = false;
2080
2081         return rc;
2082 }
2083
2084 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
2085                                   uint64_t *dropped)
2086 {
2087         int rc = 0;
2088         struct hwrm_func_qstats_input req = {.req_type = 0};
2089         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2090
2091         HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2092
2093         req.fid = rte_cpu_to_le_16(fid);
2094
2095         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2096
2097         HWRM_CHECK_RESULT();
2098
2099         if (dropped)
2100                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
2101
2102         HWRM_UNLOCK();
2103
2104         return rc;
2105 }
2106
2107 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
2108                           struct rte_eth_stats *stats)
2109 {
2110         int rc = 0;
2111         struct hwrm_func_qstats_input req = {.req_type = 0};
2112         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2113
2114         HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2115
2116         req.fid = rte_cpu_to_le_16(fid);
2117
2118         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2119
2120         HWRM_CHECK_RESULT();
2121
2122         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2123         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2124         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2125         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2126         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2127         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2128
2129         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2130         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2131         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2132         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2133         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2134         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2135
2136         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
2137         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
2138         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
2139
2140         HWRM_UNLOCK();
2141
2142         return rc;
2143 }
2144
2145 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
2146 {
2147         int rc = 0;
2148         struct hwrm_func_clr_stats_input req = {.req_type = 0};
2149         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2150
2151         HWRM_PREP(req, FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
2152
2153         req.fid = rte_cpu_to_le_16(fid);
2154
2155         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2156
2157         HWRM_CHECK_RESULT();
2158         HWRM_UNLOCK();
2159
2160         return rc;
2161 }
2162
2163 /*
2164  * HWRM utility functions
2165  */
2166
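/*
 * Completion ring indexes [0, rx_cp_nr_rings) map to Rx queues; the
 * remainder map to Tx queues. The free and alloc helpers below use the
 * same layout.
 */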
2167 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
2168 {
2169         unsigned int i;
2170         int rc = 0;
2171
2172         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2173                 struct bnxt_tx_queue *txq;
2174                 struct bnxt_rx_queue *rxq;
2175                 struct bnxt_cp_ring_info *cpr;
2176
2177                 if (i >= bp->rx_cp_nr_rings) {
2178                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2179                         cpr = txq->cp_ring;
2180                 } else {
2181                         rxq = bp->rx_queues[i];
2182                         cpr = rxq->cp_ring;
2183                 }
2184
2185                 rc = bnxt_hwrm_stat_clear(bp, cpr);
2186                 if (rc)
2187                         return rc;
2188         }
2189         return 0;
2190 }
2191
2192 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
2193 {
2194         int rc;
2195         unsigned int i;
2196         struct bnxt_cp_ring_info *cpr;
2197
2198         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2200                 if (i >= bp->rx_cp_nr_rings) {
2201                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
2202                 } else {
2203                         cpr = bp->rx_queues[i]->cp_ring;
2204                         if (BNXT_HAS_RING_GRPS(bp))
2205                                 bp->grp_info[i].fw_stats_ctx = -1;
2206                 }
2207                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
2208                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
2209                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
2210                         if (rc)
2211                                 return rc;
2212                 }
2213         }
2214         return 0;
2215 }
2216
2217 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
2218 {
2219         unsigned int i;
2220         int rc = 0;
2221
2222         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2223                 struct bnxt_tx_queue *txq;
2224                 struct bnxt_rx_queue *rxq;
2225                 struct bnxt_cp_ring_info *cpr;
2226
2227                 if (i >= bp->rx_cp_nr_rings) {
2228                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2229                         cpr = txq->cp_ring;
2230                 } else {
2231                         rxq = bp->rx_queues[i];
2232                         cpr = rxq->cp_ring;
2233                 }
2234
2235                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
2236
2237                 if (rc)
2238                         return rc;
2239         }
2240         return rc;
2241 }
2242
2243 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
2244 {
2245         uint16_t idx;
2246         uint32_t rc = 0;
2247
2248         if (!BNXT_HAS_RING_GRPS(bp))
2249                 return 0;
2250
2251         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
2253                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
2254                         continue;
2255
2256                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
2257
2258                 if (rc)
2259                         return rc;
2260         }
2261         return rc;
2262 }
2263
2264 void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2265 {
2266         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2267
2268         bnxt_hwrm_ring_free(bp, cp_ring,
2269                             HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
2270         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2271         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2272                                      sizeof(*cpr->cp_desc_ring));
2273         cpr->cp_raw_cons = 0;
2274         cpr->valid = 0;
2275 }
2276
2277 void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2278 {
2279         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2280
2281         bnxt_hwrm_ring_free(bp, cp_ring,
2282                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
2283         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2284         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2285                         sizeof(*cpr->cp_desc_ring));
2286         cpr->cp_raw_cons = 0;
2287         cpr->valid = 0;
2288 }
2289
2290 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
2291 {
2292         struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
2293         struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
2294         struct bnxt_ring *ring = rxr->rx_ring_struct;
2295         struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
2296
2297         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2298                 bnxt_hwrm_ring_free(bp, ring,
2299                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2300                 ring->fw_ring_id = INVALID_HW_RING_ID;
2301                 if (BNXT_HAS_RING_GRPS(bp))
2302                         bp->grp_info[queue_index].rx_fw_ring_id =
2303                                                         INVALID_HW_RING_ID;
2304                 memset(rxr->rx_desc_ring, 0,
2305                        rxr->rx_ring_struct->ring_size *
2306                        sizeof(*rxr->rx_desc_ring));
2307                 memset(rxr->rx_buf_ring, 0,
2308                        rxr->rx_ring_struct->ring_size *
2309                        sizeof(*rxr->rx_buf_ring));
2310                 rxr->rx_prod = 0;
2311         }
2312         ring = rxr->ag_ring_struct;
2313         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2314                 bnxt_hwrm_ring_free(bp, ring,
2315                                     BNXT_CHIP_THOR(bp) ?
2316                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
2317                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2318                 ring->fw_ring_id = INVALID_HW_RING_ID;
2319                 memset(rxr->ag_buf_ring, 0,
2320                        rxr->ag_ring_struct->ring_size *
2321                        sizeof(*rxr->ag_buf_ring));
2322                 rxr->ag_prod = 0;
2323                 if (BNXT_HAS_RING_GRPS(bp))
2324                         bp->grp_info[queue_index].ag_fw_ring_id =
2325                                                         INVALID_HW_RING_ID;
2326         }
2327         if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
2328                 bnxt_free_cp_ring(bp, cpr);
2329
2330         if (BNXT_HAS_RING_GRPS(bp))
2331                 bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
2332 }
2333
2334 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
2335 {
2336         unsigned int i;
2337
2338         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
2339                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
2340                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
2341                 struct bnxt_ring *ring = txr->tx_ring_struct;
2342                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
2343
2344                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2345                         bnxt_hwrm_ring_free(bp, ring,
2346                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
2347                         ring->fw_ring_id = INVALID_HW_RING_ID;
2348                         memset(txr->tx_desc_ring, 0,
2349                                         txr->tx_ring_struct->ring_size *
2350                                         sizeof(*txr->tx_desc_ring));
2351                         memset(txr->tx_buf_ring, 0,
2352                                         txr->tx_ring_struct->ring_size *
2353                                         sizeof(*txr->tx_buf_ring));
2354                         txr->tx_prod = 0;
2355                         txr->tx_cons = 0;
2356                 }
2357                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
2358                         bnxt_free_cp_ring(bp, cpr);
2359                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
2360                 }
2361         }
2362
2363         for (i = 0; i < bp->rx_cp_nr_rings; i++)
2364                 bnxt_free_hwrm_rx_ring(bp, i);
2365
2366         return 0;
2367 }
2368
2369 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
2370 {
2371         uint16_t i;
2372         uint32_t rc = 0;
2373
2374         if (!BNXT_HAS_RING_GRPS(bp))
2375                 return 0;
2376
2377         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
2378                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
2379                 if (rc)
2380                         return rc;
2381         }
2382         return rc;
2383 }
2384
2385 void bnxt_free_hwrm_resources(struct bnxt *bp)
2386 {
        /* Free the HWRM response and short command buffers. */
2388         rte_free(bp->hwrm_cmd_resp_addr);
2389         rte_free(bp->hwrm_short_cmd_req_addr);
2390         bp->hwrm_cmd_resp_addr = NULL;
2391         bp->hwrm_short_cmd_req_addr = NULL;
2392         bp->hwrm_cmd_resp_dma_addr = 0;
2393         bp->hwrm_short_cmd_req_dma_addr = 0;
2394 }
2395
2396 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2397 {
2398         struct rte_pci_device *pdev = bp->pdev;
2399         char type[RTE_MEMZONE_NAMESIZE];
2400
        snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
                 pdev->addr.domain, pdev->addr.bus, pdev->addr.devid,
                 pdev->addr.function);
2403         bp->max_resp_len = HWRM_MAX_RESP_LEN;
        bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
        if (bp->hwrm_cmd_resp_addr == NULL)
                return -ENOMEM;
        rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
2408         bp->hwrm_cmd_resp_dma_addr =
2409                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
2410         if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
2411                 PMD_DRV_LOG(ERR,
2412                         "unable to map response address to physical memory\n");
2413                 return -ENOMEM;
2414         }
2415         rte_spinlock_init(&bp->hwrm_lock);
2416
2417         return 0;
2418 }
2419
2420 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2421 {
2422         struct bnxt_filter_info *filter;
2423         int rc = 0;
2424
2425         STAILQ_FOREACH(filter, &vnic->filter, next) {
2426                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2427                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2428                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2429                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2430                 else
2431                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2432                 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
2433                 bnxt_free_filter(bp, filter);
                /* Continue even if a clear fails so every filter is freed. */
2436         }
2437         return rc;
2438 }
2439
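/*
 * Destroy every rte_flow attached to the VNIC, clearing the backing
 * EM, ntuple, or L2 filter for each before freeing the flow object.
 */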
2440 static int
2441 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2442 {
2443         struct bnxt_filter_info *filter;
2444         struct rte_flow *flow;
2445         int rc = 0;
2446
2447         while (!STAILQ_EMPTY(&vnic->flow_list)) {
2448                 flow = STAILQ_FIRST(&vnic->flow_list);
2449                 filter = flow->filter;
2450                 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
2451                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2452                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2453                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2454                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2455                 else
2456                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2457
2458                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2459                 rte_free(flow);
                /* Continue even if a clear fails so every flow is freed. */
2462         }
2463         return rc;
2464 }
2465
2466 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2467 {
2468         struct bnxt_filter_info *filter;
2469         int rc = 0;
2470
2471         STAILQ_FOREACH(filter, &vnic->filter, next) {
2472                 if (filter->filter_type == HWRM_CFA_EM_FILTER) {
2473                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2474                                                      filter);
2475                 } else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
2476                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2477                                                          filter);
2478                 } else {
2479                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2480                                                      filter);
2481                         if (!rc)
2482                                 filter->dflt = 1;
2483                 }
2484                 if (rc)
2485                         break;
2486         }
2487         return rc;
2488 }
2489
2490 void bnxt_free_tunnel_ports(struct bnxt *bp)
2491 {
2492         if (bp->vxlan_port_cnt)
2493                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2494                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2495         bp->vxlan_port = 0;
2496         if (bp->geneve_port_cnt)
2497                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2498                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2499         bp->geneve_port = 0;
2500 }
2501
2502 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2503 {
2504         int i;
2505
2506         if (bp->vnic_info == NULL)
2507                 return;
2508
2509         /*
2510          * Cleanup VNICs in reverse order, to make sure the L2 filter
2511          * from vnic0 is last to be cleaned up.
2512          */
2513         for (i = bp->max_vnics - 1; i >= 0; i--) {
2514                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2515
                /* An invalid VNIC ID means the VNIC is not in use; skip it. */
2517                 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
2518                         continue;
2519
2520                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2521
2522                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2523
2524                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2525
2526                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2527
2528                 bnxt_hwrm_vnic_free(bp, vnic);
2529
2530                 rte_free(vnic->fw_grp_ids);
2531         }
2532         /* Ring resources */
2533         bnxt_free_all_hwrm_rings(bp);
2534         bnxt_free_all_hwrm_ring_grps(bp);
2535         bnxt_free_all_hwrm_stat_ctxs(bp);
2536         bnxt_free_tunnel_ports(bp);
2537 }
2538
2539 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2540 {
2541         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2542
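        /* ETH_LINK_SPEED_AUTONEG is zero, so this checks that the
         * ETH_LINK_SPEED_FIXED bit is not set.
         */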
2543         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2544                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2545
2546         switch (conf_link_speed) {
2547         case ETH_LINK_SPEED_10M_HD:
2548         case ETH_LINK_SPEED_100M_HD:
2549                 /* FALLTHROUGH */
2550                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2551         }
2552         return hw_link_duplex;
2553 }
2554
2555 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2556 {
2557         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2558 }
2559
2560 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2561 {
2562         uint16_t eth_link_speed = 0;
2563
2564         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2565                 return ETH_LINK_SPEED_AUTONEG;
2566
2567         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2568         case ETH_LINK_SPEED_100M:
2569         case ETH_LINK_SPEED_100M_HD:
2570                 /* FALLTHROUGH */
2571                 eth_link_speed =
2572                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2573                 break;
2574         case ETH_LINK_SPEED_1G:
2575                 eth_link_speed =
2576                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2577                 break;
2578         case ETH_LINK_SPEED_2_5G:
2579                 eth_link_speed =
2580                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2581                 break;
2582         case ETH_LINK_SPEED_10G:
2583                 eth_link_speed =
2584                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2585                 break;
2586         case ETH_LINK_SPEED_20G:
2587                 eth_link_speed =
2588                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2589                 break;
2590         case ETH_LINK_SPEED_25G:
2591                 eth_link_speed =
2592                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2593                 break;
2594         case ETH_LINK_SPEED_40G:
2595                 eth_link_speed =
2596                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2597                 break;
2598         case ETH_LINK_SPEED_50G:
2599                 eth_link_speed =
2600                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2601                 break;
2602         case ETH_LINK_SPEED_100G:
2603                 eth_link_speed =
2604                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2605                 break;
2606         default:
2607                 PMD_DRV_LOG(ERR,
                        "Unsupported link speed %u; default to AUTO\n",
2609                         conf_link_speed);
2610                 break;
2611         }
2612         return eth_link_speed;
2613 }
2614
2615 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2616                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2617                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2618                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2619
2620 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2621 {
2622         uint32_t one_speed;
2623
2624         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2625                 return 0;
2626
2627         if (link_speed & ETH_LINK_SPEED_FIXED) {
2628                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2629
2630                 if (one_speed & (one_speed - 1)) {
2631                         PMD_DRV_LOG(ERR,
2632                                 "Invalid advertised speeds (%u) for port %u\n",
2633                                 link_speed, port_id);
2634                         return -EINVAL;
2635                 }
2636                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2637                         PMD_DRV_LOG(ERR,
2638                                 "Unsupported advertised speed (%u) for port %u\n",
2639                                 link_speed, port_id);
2640                         return -EINVAL;
2641                 }
2642         } else {
2643                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2644                         PMD_DRV_LOG(ERR,
2645                                 "Unsupported advertised speeds (%u) for port %u\n",
2646                                 link_speed, port_id);
2647                         return -EINVAL;
2648                 }
2649         }
2650         return 0;
2651 }
2652
2653 static uint16_t
2654 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2655 {
2656         uint16_t ret = 0;
2657
2658         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2659                 if (bp->link_info.support_speeds)
2660                         return bp->link_info.support_speeds;
2661                 link_speed = BNXT_SUPPORTED_SPEEDS;
2662         }
2663
2664         if (link_speed & ETH_LINK_SPEED_100M)
2665                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2666         if (link_speed & ETH_LINK_SPEED_100M_HD)
2667                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2668         if (link_speed & ETH_LINK_SPEED_1G)
2669                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2670         if (link_speed & ETH_LINK_SPEED_2_5G)
2671                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2672         if (link_speed & ETH_LINK_SPEED_10G)
2673                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2674         if (link_speed & ETH_LINK_SPEED_20G)
2675                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2676         if (link_speed & ETH_LINK_SPEED_25G)
2677                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2678         if (link_speed & ETH_LINK_SPEED_40G)
2679                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2680         if (link_speed & ETH_LINK_SPEED_50G)
2681                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2682         if (link_speed & ETH_LINK_SPEED_100G)
2683                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2684         return ret;
2685 }
2686
2687 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2688 {
2689         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2690
2691         switch (hw_link_speed) {
2692         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2693                 eth_link_speed = ETH_SPEED_NUM_100M;
2694                 break;
2695         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2696                 eth_link_speed = ETH_SPEED_NUM_1G;
2697                 break;
2698         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2699                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2700                 break;
2701         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2702                 eth_link_speed = ETH_SPEED_NUM_10G;
2703                 break;
2704         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2705                 eth_link_speed = ETH_SPEED_NUM_20G;
2706                 break;
2707         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2708                 eth_link_speed = ETH_SPEED_NUM_25G;
2709                 break;
2710         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2711                 eth_link_speed = ETH_SPEED_NUM_40G;
2712                 break;
2713         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2714                 eth_link_speed = ETH_SPEED_NUM_50G;
2715                 break;
2716         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2717                 eth_link_speed = ETH_SPEED_NUM_100G;
2718                 break;
2719         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2720         default:
2721                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2722                         hw_link_speed);
2723                 break;
2724         }
2725         return eth_link_speed;
2726 }
2727
2728 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2729 {
2730         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2731
2732         switch (hw_link_duplex) {
2733         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2734         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2735                 /* FALLTHROUGH */
2736                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2737                 break;
2738         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2739                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2740                 break;
2741         default:
2742                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2743                         hw_link_duplex);
2744                 break;
2745         }
2746         return eth_link_duplex;
2747 }
2748
2749 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2750 {
2751         int rc = 0;
2752         struct bnxt_link_info *link_info = &bp->link_info;
2753
2754         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2755         if (rc) {
2756                 PMD_DRV_LOG(ERR,
2757                         "Get link config failed with rc %d\n", rc);
2758                 goto exit;
2759         }
2760         if (link_info->link_speed)
2761                 link->link_speed =
2762                         bnxt_parse_hw_link_speed(link_info->link_speed);
2763         else
2764                 link->link_speed = ETH_SPEED_NUM_NONE;
2765         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2766         link->link_status = link_info->link_up;
2767         link->link_autoneg = link_info->auto_mode ==
2768                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2769                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2770 exit:
2771         return rc;
2772 }
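
/*
 * Usage sketch (illustrative, not a prescribed call site): an ethdev
 * link_update handler is expected to call this to fill a struct
 * rte_eth_link, e.g.:
 *
 *	struct rte_eth_link link;
 *
 *	memset(&link, 0, sizeof(link));
 *	rc = bnxt_get_hwrm_link_config(bp, &link);
 */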
2773
2774 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2775 {
2776         int rc = 0;
2777         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2778         struct bnxt_link_info link_req;
2779         uint16_t speed, autoneg;
2780
2781         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2782                 return 0;
2783
2784         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2785                         bp->eth_dev->data->port_id);
2786         if (rc)
2787                 goto error;
2788
2789         memset(&link_req, 0, sizeof(link_req));
2790         link_req.link_up = link_up;
2791         if (!link_up)
2792                 goto port_phy_cfg;
2793
2794         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2795         if (BNXT_CHIP_THOR(bp) &&
2796             dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
2797                 /* 40G is not supported as part of media auto detect.
2798                  * The speed should be forced and autoneg disabled
2799                  * to configure 40G speed.
2800                  */
2801                 PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
2802                 autoneg = 0;
2803         }
2804
2805         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2806         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
	/* Autoneg can be done only when the FW allows it.
	 * When the user configures a fixed speed of 40G and later changes
	 * to any other speed, auto_link_speed/force_link_speed remains
	 * set to 40G until the link comes up at the new speed.
	 */
2812         if (autoneg == 1 &&
2813             !(!BNXT_CHIP_THOR(bp) &&
2814               (bp->link_info.auto_link_speed ||
2815                bp->link_info.force_link_speed))) {
2816                 link_req.phy_flags |=
2817                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2818                 link_req.auto_link_speed_mask =
2819                         bnxt_parse_eth_link_speed_mask(bp,
2820                                                        dev_conf->link_speeds);
2821         } else {
2822                 if (bp->link_info.phy_type ==
2823                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2824                     bp->link_info.phy_type ==
2825                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2826                     bp->link_info.media_type ==
2827                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2828                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2829                         return -EINVAL;
2830                 }
2831
2832                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2833                 /* If user wants a particular speed try that first. */
2834                 if (speed)
2835                         link_req.link_speed = speed;
2836                 else if (bp->link_info.force_link_speed)
2837                         link_req.link_speed = bp->link_info.force_link_speed;
2838                 else
2839                         link_req.link_speed = bp->link_info.auto_link_speed;
2840         }
2841         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2842         link_req.auto_pause = bp->link_info.auto_pause;
2843         link_req.force_pause = bp->link_info.force_pause;
2844
2845 port_phy_cfg:
2846         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2847         if (rc) {
2848                 PMD_DRV_LOG(ERR,
2849                         "Set link config failed with rc %d\n", rc);
2850         }
2851
2852 error:
2853         return rc;
2854 }
2855
2856 /* JIRA 22088 */
2857 int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
2858 {
2859         struct hwrm_func_qcfg_input req = {0};
2860         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2861         uint16_t flags;
2862         int rc = 0;
2863
2864         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2865         req.fid = rte_cpu_to_le_16(0xffff);
2866
2867         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2868
2869         HWRM_CHECK_RESULT();
2870
	/* Hard-coded 0xfff VLAN ID mask */
2872         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2873         flags = rte_le_to_cpu_16(resp->flags);
2874         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2875                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2876
2877         if (BNXT_VF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
2878                 bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
2879                 PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
2880         } else if (BNXT_VF(bp) &&
2881                    !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
2882                 bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
2883                 PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
2884         }
2885
2886         if (mtu)
		*mtu = rte_le_to_cpu_16(resp->mtu);
2888
2889         switch (resp->port_partition_type) {
2890         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2891         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2892         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2893                 /* FALLTHROUGH */
2894                 bp->port_partition_type = resp->port_partition_type;
2895                 break;
2896         default:
2897                 bp->port_partition_type = 0;
2898                 break;
2899         }
2900
2901         HWRM_UNLOCK();
2902
2903         return rc;
2904 }
2905
2906 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2907                                    struct hwrm_func_qcaps_output *qcaps)
2908 {
2909         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2910         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2911                sizeof(qcaps->mac_address));
2912         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2913         qcaps->max_rx_rings = fcfg->num_rx_rings;
2914         qcaps->max_tx_rings = fcfg->num_tx_rings;
2915         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2916         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2917         qcaps->max_vfs = 0;
2918         qcaps->first_vf_id = 0;
2919         qcaps->max_vnics = fcfg->num_vnics;
2920         qcaps->max_decap_records = 0;
2921         qcaps->max_encap_records = 0;
2922         qcaps->max_tx_wm_flows = 0;
2923         qcaps->max_tx_em_flows = 0;
2924         qcaps->max_rx_wm_flows = 0;
2925         qcaps->max_rx_em_flows = 0;
2926         qcaps->max_flow_id = 0;
2927         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2928         qcaps->max_sp_tx_rings = 0;
2929         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2930 }
2931
2932 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2933 {
2934         struct hwrm_func_cfg_input req = {0};
2935         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2936         uint32_t enables;
2937         int rc;
2938
2939         enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2940                   HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2941                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2942                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2943                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2944                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2945                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2946                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2947                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;
2948
2949         if (BNXT_HAS_RING_GRPS(bp)) {
2950                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
2951                 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2952         } else if (BNXT_HAS_NQ(bp)) {
2953                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
2954                 req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
2955         }
2956
2957         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2958         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2959         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
2960                                    RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
2961                                    BNXT_NUM_VLANS);
2962         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2963         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2964         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2965         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2966         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2967         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2968         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2969         req.fid = rte_cpu_to_le_16(0xffff);
2970         req.enables = rte_cpu_to_le_32(enables);
2971
2972         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2973
2974         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2975
2976         HWRM_CHECK_RESULT();
2977         HWRM_UNLOCK();
2978
2979         return rc;
2980 }
2981
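/*
 * Fill a FUNC_CFG request that splits the PF's resources evenly across
 * the PF and its VFs: each of the (num_vfs + 1) functions receives an
 * equal share of RSS contexts, rings, L2 contexts and ring groups.
 */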
2982 static void populate_vf_func_cfg_req(struct bnxt *bp,
2983                                      struct hwrm_func_cfg_input *req,
2984                                      int num_vfs)
2985 {
2986         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2987                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2988                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2989                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2990                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2991                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2992                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2993                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2994                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2995                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2996
2997         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
2998                                     RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
2999                                     BNXT_NUM_VLANS);
3000         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
3001                                     RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
3002                                     BNXT_NUM_VLANS);
3003         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3004                                                 (num_vfs + 1));
3005         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3006         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3007                                                (num_vfs + 1));
3008         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3009         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3010         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3011         /* TODO: For now, do not support VMDq/RFS on VFs. */
3012         req->num_vnics = rte_cpu_to_le_16(1);
3013         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
3014                                                  (num_vfs + 1));
3015 }
3016
3017 static void add_random_mac_if_needed(struct bnxt *bp,
3018                                      struct hwrm_func_cfg_input *cfg_req,
3019                                      int vf)
3020 {
3021         struct rte_ether_addr mac;
3022
3023         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
3024                 return;
3025
	if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
3027                 cfg_req->enables |=
3028                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3029                 rte_eth_random_addr(cfg_req->dflt_mac_addr);
3030                 bp->pf.vf_info[vf].random_mac = true;
3031         } else {
3032                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
3033                         RTE_ETHER_ADDR_LEN);
3034         }
3035 }
3036
3037 static void reserve_resources_from_vf(struct bnxt *bp,
3038                                       struct hwrm_func_cfg_input *cfg_req,
3039                                       int vf)
3040 {
3041         struct hwrm_func_qcaps_input req = {0};
3042         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3043         int rc;
3044
3045         /* Get the actual allocated values now */
3046         HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
3047         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3048         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3049
3050         if (rc) {
3051                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
3052                 copy_func_cfg_to_qcaps(cfg_req, resp);
3053         } else if (resp->error_code) {
3054                 rc = rte_le_to_cpu_16(resp->error_code);
3055                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
3056                 copy_func_cfg_to_qcaps(cfg_req, resp);
3057         }
3058
3059         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
3060         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
3061         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
3062         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
3063         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
3064         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
3065         /*
3066          * TODO: While not supporting VMDq with VFs, max_vnics is always
3067          * forced to 1 in this case
3068          */
	//bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
3070         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
3071
3072         HWRM_UNLOCK();
3073 }
3074
3075 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
3076 {
3077         struct hwrm_func_qcfg_input req = {0};
3078         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3079         int rc;
3080
3081         /* Check for zero MAC address */
3082         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
3083         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3084         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3085         HWRM_CHECK_RESULT();
3086         rc = rte_le_to_cpu_16(resp->vlan);
3087
3088         HWRM_UNLOCK();
3089
3090         return rc;
3091 }
3092
3093 static int update_pf_resource_max(struct bnxt *bp)
3094 {
3095         struct hwrm_func_qcfg_input req = {0};
3096         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3097         int rc;
3098
3099         /* And copy the allocated numbers into the pf struct */
3100         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
3101         req.fid = rte_cpu_to_le_16(0xffff);
3102         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3103         HWRM_CHECK_RESULT();
3104
3105         /* Only TX ring value reflects actual allocation? TODO */
3106         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
3107         bp->pf.evb_mode = resp->evb_mode;
3108
3109         HWRM_UNLOCK();
3110
3111         return rc;
3112 }
3113
3114 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
3115 {
3116         int rc;
3117
3118         if (!BNXT_PF(bp)) {
		PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3120                 return -EINVAL;
3121         }
3122
3123         rc = bnxt_hwrm_func_qcaps(bp);
3124         if (rc)
3125                 return rc;
3126
3127         bp->pf.func_cfg_flags &=
3128                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3129                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3130         bp->pf.func_cfg_flags |=
3131                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
3132         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
3133         rc = __bnxt_hwrm_func_qcaps(bp);
3134         return rc;
3135 }
3136
3137 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
3138 {
3139         struct hwrm_func_cfg_input req = {0};
3140         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3141         int i;
3142         size_t sz;
3143         int rc = 0;
3144         size_t req_buf_sz;
3145
3146         if (!BNXT_PF(bp)) {
		PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3148                 return -EINVAL;
3149         }
3150
3151         rc = bnxt_hwrm_func_qcaps(bp);
3152
3153         if (rc)
3154                 return rc;
3155
3156         bp->pf.active_vfs = num_vfs;
3157
	/*
	 * First, configure the PF to use only one TX ring. This ensures
	 * that there are enough rings for all VFs.
	 *
	 * If we don't do this, when we call func_alloc() later, we will
	 * lock extra rings to the PF that won't be available during
	 * func_cfg() of the VFs.
	 *
	 * This has been fixed in firmware versions above 20.6.54.
	 */
3168         bp->pf.func_cfg_flags &=
3169                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3170                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3171         bp->pf.func_cfg_flags |=
3172                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
3173         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
3174         if (rc)
3175                 return rc;
3176
3177         /*
3178          * Now, create and register a buffer to hold forwarded VF requests
3179          */
3180         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
3181         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
3182                 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
3183         if (bp->pf.vf_req_buf == NULL) {
3184                 rc = -ENOMEM;
3185                 goto error_free;
3186         }
3187         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
3188                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
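	/* Carve the registered buffer into one HWRM_MAX_REQ_LEN slice
	 * per VF; slice i receives the requests forwarded from VF i.
	 */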
3189         for (i = 0; i < num_vfs; i++)
3190                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
3191                                         (i * HWRM_MAX_REQ_LEN);
3192
3193         rc = bnxt_hwrm_func_buf_rgtr(bp);
3194         if (rc)
3195                 goto error_free;
3196
3197         populate_vf_func_cfg_req(bp, &req, num_vfs);
3198
3199         bp->pf.active_vfs = 0;
3200         for (i = 0; i < num_vfs; i++) {
3201                 add_random_mac_if_needed(bp, &req, i);
3202
3203                 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3204                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
3205                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
3206                 rc = bnxt_hwrm_send_message(bp,
3207                                             &req,
3208                                             sizeof(req),
3209                                             BNXT_USE_CHIMP_MB);
3210
3211                 /* Clear enable flag for next pass */
3212                 req.enables &= ~rte_cpu_to_le_32(
3213                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3214
3215                 if (rc || resp->error_code) {
3216                         PMD_DRV_LOG(ERR,
				"Failed to initialize VF %d\n", i);
3218                         PMD_DRV_LOG(ERR,
3219                                 "Not all VFs available. (%d, %d)\n",
3220                                 rc, resp->error_code);
3221                         HWRM_UNLOCK();
3222                         break;
3223                 }
3224
3225                 HWRM_UNLOCK();
3226
3227                 reserve_resources_from_vf(bp, &req, i);
3228                 bp->pf.active_vfs++;
3229                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
3230         }
3231
	/*
	 * Now configure the PF to use "the rest" of the resources.
	 * We use STD_TX_RING_MODE here, which limits the number of TX
	 * rings but allows QoS to function properly. Without it, the PF
	 * rings would break bandwidth settings.
	 */
3238         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
3239         if (rc)
3240                 goto error_free;
3241
3242         rc = update_pf_resource_max(bp);
3243         if (rc)
3244                 goto error_free;
3245
3246         return rc;
3247
3248 error_free:
3249         bnxt_hwrm_func_buf_unrgtr(bp);
3250         return rc;
3251 }
3252
3253 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
3254 {
3255         struct hwrm_func_cfg_input req = {0};
3256         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3257         int rc;
3258
3259         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3260
3261         req.fid = rte_cpu_to_le_16(0xffff);
3262         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
3263         req.evb_mode = bp->pf.evb_mode;
3264
3265         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3266         HWRM_CHECK_RESULT();
3267         HWRM_UNLOCK();
3268
3269         return rc;
3270 }
3271
3272 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
3273                                 uint8_t tunnel_type)
3274 {
3275         struct hwrm_tunnel_dst_port_alloc_input req = {0};
3276         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3277         int rc = 0;
3278
3279         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
3280         req.tunnel_type = tunnel_type;
3281         req.tunnel_dst_port_val = port;
3282         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3283         HWRM_CHECK_RESULT();
3284
3285         switch (tunnel_type) {
3286         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
3287                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
3288                 bp->vxlan_port = port;
3289                 break;
3290         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
3291                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
3292                 bp->geneve_port = port;
3293                 break;
3294         default:
3295                 break;
3296         }
3297
3298         HWRM_UNLOCK();
3299
3300         return rc;
3301 }
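
/*
 * Usage sketch (illustrative): the .udp_tunnel_port_add ethdev op would
 * typically invoke this as, e.g.,
 *
 *	rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
 *		HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
 *
 * caching the firmware-assigned tunnel_dst_port_id in bp for the
 * matching bnxt_hwrm_tunnel_dst_port_free() call.
 */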
3302
3303 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
3304                                 uint8_t tunnel_type)
3305 {
3306         struct hwrm_tunnel_dst_port_free_input req = {0};
3307         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
3308         int rc = 0;
3309
3310         HWRM_PREP(req, TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
3311
3312         req.tunnel_type = tunnel_type;
3313         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
3314         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3315
3316         HWRM_CHECK_RESULT();
3317         HWRM_UNLOCK();
3318
3319         return rc;
3320 }
3321
3322 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
3323                                         uint32_t flags)
3324 {
3325         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3326         struct hwrm_func_cfg_input req = {0};
3327         int rc;
3328
3329         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3330
3331         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3332         req.flags = rte_cpu_to_le_32(flags);
3333         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3334
3335         HWRM_CHECK_RESULT();
3336         HWRM_UNLOCK();
3337
3338         return rc;
3339 }
3340
3341 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
3342 {
3343         uint32_t *flag = flagp;
3344
3345         vnic->flags = *flag;
3346 }
3347
3348 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3349 {
3350         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
3351 }
3352
3353 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
3354 {
3355         int rc = 0;
3356         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
3357         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
3358
3359         HWRM_PREP(req, FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
3360
3361         req.req_buf_num_pages = rte_cpu_to_le_16(1);
3362         req.req_buf_page_size = rte_cpu_to_le_16(
3363                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
3364         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
3365         req.req_buf_page_addr0 =
3366                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
3367         if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
3368                 PMD_DRV_LOG(ERR,
3369                         "unable to map buffer address to physical memory\n");
3370                 return -ENOMEM;
3371         }
3372
3373         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3374
3375         HWRM_CHECK_RESULT();
3376         HWRM_UNLOCK();
3377
3378         return rc;
3379 }
3380
3381 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
3382 {
3383         int rc = 0;
3384         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
3385         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
3386
3387         if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
3388                 return 0;
3389
3390         HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
3391
3392         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3393
3394         HWRM_CHECK_RESULT();
3395         HWRM_UNLOCK();
3396
3397         return rc;
3398 }
3399
3400 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
3401 {
3402         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3403         struct hwrm_func_cfg_input req = {0};
3404         int rc;
3405
3406         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3407
3408         req.fid = rte_cpu_to_le_16(0xffff);
3409         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
3410         req.enables = rte_cpu_to_le_32(
3411                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3412         req.async_event_cr = rte_cpu_to_le_16(
3413                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3414         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3415
3416         HWRM_CHECK_RESULT();
3417         HWRM_UNLOCK();
3418
3419         return rc;
3420 }
3421
3422 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
3423 {
3424         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3425         struct hwrm_func_vf_cfg_input req = {0};
3426         int rc;
3427
3428         HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
3429
3430         req.enables = rte_cpu_to_le_32(
3431                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3432         req.async_event_cr = rte_cpu_to_le_16(
3433                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3434         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3435
3436         HWRM_CHECK_RESULT();
3437         HWRM_UNLOCK();
3438
3439         return rc;
3440 }
3441
3442 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
3443 {
3444         struct hwrm_func_cfg_input req = {0};
3445         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3446         uint16_t dflt_vlan, fid;
3447         uint32_t func_cfg_flags;
3448         int rc = 0;
3449
3450         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3451
3452         if (is_vf) {
3453                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
3454                 fid = bp->pf.vf_info[vf].fid;
3455                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
3456         } else {
3457                 fid = rte_cpu_to_le_16(0xffff);
3458                 func_cfg_flags = bp->pf.func_cfg_flags;
3459                 dflt_vlan = bp->vlan;
3460         }
3461
3462         req.flags = rte_cpu_to_le_32(func_cfg_flags);
3463         req.fid = rte_cpu_to_le_16(fid);
3464         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3465         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
3466
3467         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3468
3469         HWRM_CHECK_RESULT();
3470         HWRM_UNLOCK();
3471
3472         return rc;
3473 }
3474
3475 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
3476                         uint16_t max_bw, uint16_t enables)
3477 {
3478         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3479         struct hwrm_func_cfg_input req = {0};
3480         int rc;
3481
3482         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3483
3484         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3485         req.enables |= rte_cpu_to_le_32(enables);
3486         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3487         req.max_bw = rte_cpu_to_le_32(max_bw);
3488         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3489
3490         HWRM_CHECK_RESULT();
3491         HWRM_UNLOCK();
3492
3493         return rc;
3494 }
3495
3496 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
3497 {
3498         struct hwrm_func_cfg_input req = {0};
3499         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3500         int rc = 0;
3501
3502         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3503
3504         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3505         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3506         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3507         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
3508
3509         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3510
3511         HWRM_CHECK_RESULT();
3512         HWRM_UNLOCK();
3513
3514         return rc;
3515 }
3516
3517 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3518 {
3519         int rc;
3520
3521         if (BNXT_PF(bp))
3522                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3523         else
3524                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3525
3526         return rc;
3527 }
3528
3529 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3530                               void *encaped, size_t ec_size)
3531 {
3532         int rc = 0;
3533         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3534         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3535
3536         if (ec_size > sizeof(req.encap_request))
3537                 return -1;
3538
3539         HWRM_PREP(req, REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
3540
3541         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3542         memcpy(req.encap_request, encaped, ec_size);
3543
3544         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3545
3546         HWRM_CHECK_RESULT();
3547         HWRM_UNLOCK();
3548
3549         return rc;
3550 }
3551
3552 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3553                                        struct rte_ether_addr *mac)
3554 {
3555         struct hwrm_func_qcfg_input req = {0};
3556         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3557         int rc;
3558
3559         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
3560
3561         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3562         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3563
3564         HWRM_CHECK_RESULT();
3565
3566         memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
3567
3568         HWRM_UNLOCK();
3569
3570         return rc;
3571 }
3572
3573 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3574                             void *encaped, size_t ec_size)
3575 {
3576         int rc = 0;
3577         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3578         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3579
3580         if (ec_size > sizeof(req.encap_request))
3581                 return -1;
3582
3583         HWRM_PREP(req, EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
3584
3585         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3586         memcpy(req.encap_request, encaped, ec_size);
3587
3588         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3589
3590         HWRM_CHECK_RESULT();
3591         HWRM_UNLOCK();
3592
3593         return rc;
3594 }
3595
3596 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3597                          struct rte_eth_stats *stats, uint8_t rx)
3598 {
3599         int rc = 0;
3600         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3601         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3602
3603         HWRM_PREP(req, STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
3604
3605         req.stat_ctx_id = rte_cpu_to_le_32(cid);
3606
3607         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3608
3609         HWRM_CHECK_RESULT();
3610
3611         if (rx) {
3612                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3613                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3614                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3615                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3616                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3617                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3618                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3619                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3620         } else {
3621                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3622                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3623                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3624                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3625                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3626                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3627         }
3628
3630         HWRM_UNLOCK();
3631
3632         return rc;
3633 }
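
/*
 * Note: q_ipackets/q_opackets above aggregate the unicast, multicast and
 * broadcast counters of a single statistics context. A stats handler
 * might call this once per queue, roughly (illustrative sketch):
 *
 *	rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, stats, 1);
 */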
3634
3635 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3636 {
3637         struct hwrm_port_qstats_input req = {0};
3638         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3639         struct bnxt_pf_info *pf = &bp->pf;
3640         int rc;
3641
3642         HWRM_PREP(req, PORT_QSTATS, BNXT_USE_CHIMP_MB);
3643
3644         req.port_id = rte_cpu_to_le_16(pf->port_id);
3645         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3646         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3647         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3648
3649         HWRM_CHECK_RESULT();
3650         HWRM_UNLOCK();
3651
3652         return rc;
3653 }
3654
3655 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3656 {
3657         struct hwrm_port_clr_stats_input req = {0};
3658         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3659         struct bnxt_pf_info *pf = &bp->pf;
3660         int rc;
3661
3662         /* Not allowed on NS2 device, NPAR, MultiHost, VF */
3663         if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
3664             BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
3665                 return 0;
3666
3667         HWRM_PREP(req, PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
3668
3669         req.port_id = rte_cpu_to_le_16(pf->port_id);
3670         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3671
3672         HWRM_CHECK_RESULT();
3673         HWRM_UNLOCK();
3674
3675         return rc;
3676 }
3677
3678 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3679 {
3680         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3681         struct hwrm_port_led_qcaps_input req = {0};
3682         int rc;
3683
3684         if (BNXT_VF(bp))
3685                 return 0;
3686
3687         HWRM_PREP(req, PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
3688         req.port_id = bp->pf.port_id;
3689         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3690
3691         HWRM_CHECK_RESULT();
3692
3693         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3694                 unsigned int i;
3695
3696                 bp->num_leds = resp->num_leds;
3697                 memcpy(bp->leds, &resp->led0_id,
3698                         sizeof(bp->leds[0]) * bp->num_leds);
3699                 for (i = 0; i < bp->num_leds; i++) {
3700                         struct bnxt_led_info *led = &bp->leds[i];
3701
3702                         uint16_t caps = led->led_state_caps;
3703
3704                         if (!led->led_group_id ||
3705                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3706                                 bp->num_leds = 0;
3707                                 break;
3708                         }
3709                 }
3710         }
3711
3712         HWRM_UNLOCK();
3713
3714         return rc;
3715 }
3716
3717 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3718 {
3719         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3720         struct hwrm_port_led_cfg_input req = {0};
3721         struct bnxt_led_cfg *led_cfg;
3722         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3723         uint16_t duration = 0;
3724         int rc, i;
3725
3726         if (!bp->num_leds || BNXT_VF(bp))
3727                 return -EOPNOTSUPP;
3728
3729         HWRM_PREP(req, PORT_LED_CFG, BNXT_USE_CHIMP_MB);
3730
3731         if (led_on) {
3732                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3733                 duration = rte_cpu_to_le_16(500);
3734         }
3735         req.port_id = bp->pf.port_id;
3736         req.num_leds = bp->num_leds;
3737         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3738         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3739                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3740                 led_cfg->led_id = bp->leds[i].led_id;
3741                 led_cfg->led_state = led_state;
3742                 led_cfg->led_blink_on = duration;
3743                 led_cfg->led_blink_off = duration;
3744                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3745         }
3746
3747         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3748
3749         HWRM_CHECK_RESULT();
3750         HWRM_UNLOCK();
3751
3752         return rc;
3753 }
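
/*
 * Usage sketch (illustrative): the ethdev dev_led_on/dev_led_off ops can
 * map directly onto this helper; bnxt_hwrm_port_led_cfg(bp, true) blinks
 * every LED in the port's group at the 500 ms cadence set above.
 */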
3754
3755 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3756                                uint32_t *length)
3757 {
3758         int rc;
3759         struct hwrm_nvm_get_dir_info_input req = {0};
3760         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3761
3762         HWRM_PREP(req, NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
3763
3764         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3765
3766         HWRM_CHECK_RESULT();
3767
3768         *entries = rte_le_to_cpu_32(resp->entries);
3769         *length = rte_le_to_cpu_32(resp->entry_length);
3770
3771         HWRM_UNLOCK();
3772         return rc;
3773 }
3774
3775 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3776 {
3777         int rc;
3778         uint32_t dir_entries;
3779         uint32_t entry_length;
3780         uint8_t *buf;
3781         size_t buflen;
3782         rte_iova_t dma_handle;
3783         struct hwrm_nvm_get_dir_entries_input req = {0};
3784         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3785
3786         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3787         if (rc != 0)
3788                 return rc;
3789
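	/*
	 * Output layout: byte 0 carries the (truncated) entry count and
	 * byte 1 the (truncated) entry length; the rest of the buffer is
	 * filled with the raw directory entries DMA'd from firmware.
	 */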
3790         *data++ = dir_entries;
3791         *data++ = entry_length;
3792         len -= 2;
3793         memset(data, 0xff, len);
3794
3795         buflen = dir_entries * entry_length;
	buf = rte_malloc("nvm_dir", buflen, 0);
	if (buf == NULL)
		return -ENOMEM;
	rte_mem_lock_page(buf);
	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == RTE_BAD_IOVA) {
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		rte_free(buf);
		return -ENOMEM;
	}
3806         HWRM_PREP(req, NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
3807         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3808         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3809
3810         if (rc == 0)
3811                 memcpy(data, buf, len > buflen ? buflen : len);
3812
3813         rte_free(buf);
3814         HWRM_CHECK_RESULT();
3815         HWRM_UNLOCK();
3816
3817         return rc;
3818 }
3819
3820 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3821                              uint32_t offset, uint32_t length,
3822                              uint8_t *data)
3823 {
3824         int rc;
3825         uint8_t *buf;
3826         rte_iova_t dma_handle;
3827         struct hwrm_nvm_read_input req = {0};
3828         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3829
	buf = rte_malloc("nvm_item", length, 0);
	if (!buf)
		return -ENOMEM;
	rte_mem_lock_page(buf);

	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == RTE_BAD_IOVA) {
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		rte_free(buf);
		return -ENOMEM;
	}
3841         HWRM_PREP(req, NVM_READ, BNXT_USE_CHIMP_MB);
3842         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3843         req.dir_idx = rte_cpu_to_le_16(index);
3844         req.offset = rte_cpu_to_le_32(offset);
3845         req.len = rte_cpu_to_le_32(length);
3846         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3847         if (rc == 0)
3848                 memcpy(data, buf, length);
3849
3850         rte_free(buf);
3851         HWRM_CHECK_RESULT();
3852         HWRM_UNLOCK();
3853
3854         return rc;
3855 }
3856
3857 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3858 {
3859         int rc;
3860         struct hwrm_nvm_erase_dir_entry_input req = {0};
3861         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3862
3863         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
3864         req.dir_idx = rte_cpu_to_le_16(index);
3865         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3866         HWRM_CHECK_RESULT();
3867         HWRM_UNLOCK();
3868
3869         return rc;
3870 }
3871
3873 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3874                           uint16_t dir_ordinal, uint16_t dir_ext,
3875                           uint16_t dir_attr, const uint8_t *data,
3876                           size_t data_len)
3877 {
3878         int rc;
3879         struct hwrm_nvm_write_input req = {0};
3880         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3881         rte_iova_t dma_handle;
3882         uint8_t *buf;
3883
	buf = rte_malloc("nvm_write", data_len, 0);
	if (!buf)
		return -ENOMEM;
	rte_mem_lock_page(buf);

	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == RTE_BAD_IOVA) {
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		rte_free(buf);
		return -ENOMEM;
	}
3895         memcpy(buf, data, data_len);
3896
3897         HWRM_PREP(req, NVM_WRITE, BNXT_USE_CHIMP_MB);
3898
3899         req.dir_type = rte_cpu_to_le_16(dir_type);
3900         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3901         req.dir_ext = rte_cpu_to_le_16(dir_ext);
3902         req.dir_attr = rte_cpu_to_le_16(dir_attr);
3903         req.dir_data_length = rte_cpu_to_le_32(data_len);
3904         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3905
3906         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3907
3908         rte_free(buf);
3909         HWRM_CHECK_RESULT();
3910         HWRM_UNLOCK();
3911
3912         return rc;
3913 }
3914
3915 static void
3916 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3917 {
3918         uint32_t *count = cbdata;
3919
3920         *count = *count + 1;
3921 }
3922
3923 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3924                                      struct bnxt_vnic_info *vnic __rte_unused)
3925 {
3926         return 0;
3927 }
3928
3929 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3930 {
3931         uint32_t count = 0;
3932
3933         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3934             &count, bnxt_vnic_count_hwrm_stub);
3935
3936         return count;
3937 }
3938
3939 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3940                                         uint16_t *vnic_ids)
3941 {
3942         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3943         struct hwrm_func_vf_vnic_ids_query_output *resp =
3944                                                 bp->hwrm_cmd_resp_addr;
3945         int rc;
3946
3947         /* First query all VNIC ids */
3948         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
3949
3950         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3951         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3952         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3953
3954         if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
3955                 HWRM_UNLOCK();
3956                 PMD_DRV_LOG(ERR,
3957                 "unable to map VNIC ID table address to physical memory\n");
3958                 return -ENOMEM;
3959         }
3960         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3961         HWRM_CHECK_RESULT();
3962         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3963
3964         HWRM_UNLOCK();
3965
3966         return rc;
3967 }
3968
/*
 * This function queries the VNIC IDs for a specified VF. It then calls
 * vnic_cb to update the necessary fields in vnic_info with cbdata.
 * Finally it calls hwrm_cb to program the new VNIC configuration.
 */
3974 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3975         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3976         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3977 {
3978         struct bnxt_vnic_info vnic;
3979         int rc = 0;
3980         int i, num_vnic_ids;
3981         uint16_t *vnic_ids;
3982         size_t vnic_id_sz;
3983         size_t sz;
3984
3985         /* First query all VNIC ids */
3986         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3987         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3988                         RTE_CACHE_LINE_SIZE);
3989         if (vnic_ids == NULL)
3990                 return -ENOMEM;
3991
3992         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3993                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3994
3995         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3996
	if (num_vnic_ids < 0) {
		rte_free(vnic_ids);
		return num_vnic_ids;
	}
3999
	/* Retrieve each VNIC, apply vnic_cb with cbdata, then reprogram it via hwrm_cb */
4001
4002         for (i = 0; i < num_vnic_ids; i++) {
4003                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4004                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4005                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
4006                 if (rc)
4007                         break;
4008                 if (vnic.mru <= 4)      /* Indicates unallocated */
4009                         continue;
4010
4011                 vnic_cb(&vnic, cbdata);
4012
4013                 rc = hwrm_cb(bp, &vnic);
4014                 if (rc)
4015                         break;
4016         }
4017
4018         rte_free(vnic_ids);
4019
4020         return rc;
4021 }
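
/*
 * Example (bnxt_vf_vnic_count() above is a real caller): a hypothetical
 * caller updating the rx mask flags of every VNIC owned by VF 0 could
 * combine the helpers in this file as
 *
 *	uint32_t flag = BNXT_VNIC_INFO_PROMISC;
 *
 *	rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, 0,
 *		vf_vnic_set_rxmask_cb, &flag, bnxt_set_rx_mask_no_vlan);
 */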
4022
4023 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
4024                                               bool on)
4025 {
4026         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4027         struct hwrm_func_cfg_input req = {0};
4028         int rc;
4029
4030         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
4031
4032         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
4033         req.enables |= rte_cpu_to_le_32(
4034                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
4035         req.vlan_antispoof_mode = on ?
4036                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
4037                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
4038         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4039
4040         HWRM_CHECK_RESULT();
4041         HWRM_UNLOCK();
4042
4043         return rc;
4044 }
4045
4046 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
4047 {
4048         struct bnxt_vnic_info vnic;
4049         uint16_t *vnic_ids;
4050         size_t vnic_id_sz;
4051         int num_vnic_ids, i;
4052         size_t sz;
4053         int rc;
4054
4055         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
4056         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4057                         RTE_CACHE_LINE_SIZE);
4058         if (vnic_ids == NULL)
4059                 return -ENOMEM;
4060
4061         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4062                 rte_mem_lock_page(((char *)vnic_ids) + sz);
4063
4064         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4065         if (rc <= 0)
4066                 goto exit;
4067         num_vnic_ids = rc;
4068
4069         /*
4070          * Loop through to find the default VNIC ID.
4071          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
4072          * by sending the hwrm_func_qcfg command to the firmware.
4073          */
4074         for (i = 0; i < num_vnic_ids; i++) {
4075                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4076                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4077                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
4078                                         bp->pf.first_vf_id + vf);
4079                 if (rc)
4080                         goto exit;
4081                 if (vnic.func_default) {
4082                         rte_free(vnic_ids);
4083                         return vnic.fw_vnic_id;
4084                 }
4085         }
4086         /* Could not find a default VNIC. */
4087         PMD_DRV_LOG(ERR, "No default VNIC\n");
4088 exit:
4089         rte_free(vnic_ids);
4090         return rc;
4091 }
4092
4093 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
4094                          uint16_t dst_id,
4095                          struct bnxt_filter_info *filter)
4096 {
4097         int rc = 0;
4098         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
4099         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4100         uint32_t enables = 0;
4101
4102         if (filter->fw_em_filter_id != UINT64_MAX)
4103                 bnxt_hwrm_clear_em_filter(bp, filter);
4104
4105         HWRM_PREP(req, CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
4106
4107         req.flags = rte_cpu_to_le_32(filter->flags);
4108
4109         enables = filter->enables |
4110               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
4111         req.dst_id = rte_cpu_to_le_16(dst_id);
4112
4113         if (filter->ip_addr_type) {
4114                 req.ip_addr_type = filter->ip_addr_type;
4115                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4116         }
4117         if (enables &
4118             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4119                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4120         if (enables &
4121             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4122                 memcpy(req.src_macaddr, filter->src_macaddr,
4123                        RTE_ETHER_ADDR_LEN);
4124         if (enables &
4125             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
4126                 memcpy(req.dst_macaddr, filter->dst_macaddr,
4127                        RTE_ETHER_ADDR_LEN);
4128         if (enables &
4129             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
4130                 req.ovlan_vid = filter->l2_ovlan;
4131         if (enables &
4132             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
4133                 req.ivlan_vid = filter->l2_ivlan;
4134         if (enables &
4135             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
4136                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4137         if (enables &
4138             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4139                 req.ip_protocol = filter->ip_protocol;
4140         if (enables &
4141             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4142                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
4143         if (enables &
4144             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
4145                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
4146         if (enables &
4147             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
4148                 req.src_port = rte_cpu_to_be_16(filter->src_port);
4149         if (enables &
4150             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
4151                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
4152         if (enables &
4153             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4154                 req.mirror_vnic_id = filter->mirror_vnic_id;
4155
4156         req.enables = rte_cpu_to_le_32(enables);
4157
4158         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4159
4160         HWRM_CHECK_RESULT();
4161
4162         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
4163         HWRM_UNLOCK();
4164
4165         return rc;
4166 }
4167
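/*
 * Free a previously allocated EM flow.  No-op when no firmware id is
 * recorded; on success the cached EM and L2 filter ids are invalidated.
 */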
4168 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
4169 {
4170         int rc = 0;
4171         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
4172         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
4173
4174         if (filter->fw_em_filter_id == UINT64_MAX)
4175                 return 0;
4176
	PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
4178         HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
4179
4180         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
4181
4182         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4183
4184         HWRM_CHECK_RESULT();
4185         HWRM_UNLOCK();
4186
4187         filter->fw_em_filter_id = UINT64_MAX;
4188         filter->fw_l2_filter_id = UINT64_MAX;
4189
4190         return 0;
4191 }
4192
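/*
 * Allocate an n-tuple flow via CFA_NTUPLE_FILTER_ALLOC.  Mirrors
 * bnxt_hwrm_set_em_filter(), with additional per-field masks for the
 * address and port matches.  On success the firmware id is stored in
 * filter->fw_ntuple_filter_id.
 */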
4193 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
4194                          uint16_t dst_id,
4195                          struct bnxt_filter_info *filter)
4196 {
4197         int rc = 0;
4198         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
4199         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
4200                                                 bp->hwrm_cmd_resp_addr;
4201         uint32_t enables = 0;
4202
4203         if (filter->fw_ntuple_filter_id != UINT64_MAX)
4204                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
4205
4206         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
4207
4208         req.flags = rte_cpu_to_le_32(filter->flags);
4209
4210         enables = filter->enables |
4211               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
4212         req.dst_id = rte_cpu_to_le_16(dst_id);
4215         if (filter->ip_addr_type) {
4216                 req.ip_addr_type = filter->ip_addr_type;
4217                 enables |=
4218                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4219         }
4220         if (enables &
4221             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4222                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4223         if (enables &
4224             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4225                 memcpy(req.src_macaddr, filter->src_macaddr,
4226                        RTE_ETHER_ADDR_LEN);
	/* Note: DST_MACADDR matching is not programmed for ntuple filters. */
4231         if (enables &
4232             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
4233                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4234         if (enables &
4235             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4236                 req.ip_protocol = filter->ip_protocol;
4237         if (enables &
4238             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4239                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
4240         if (enables &
4241             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
4242                 req.src_ipaddr_mask[0] =
4243                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
4244         if (enables &
4245             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
4246                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
4247         if (enables &
4248             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
		req.dst_ipaddr_mask[0] =
			rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
4251         if (enables &
4252             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
4253                 req.src_port = rte_cpu_to_le_16(filter->src_port);
4254         if (enables &
4255             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
4256                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
4257         if (enables &
4258             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
4259                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
4260         if (enables &
4261             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
4262                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
4263         if (enables &
4264             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4265                 req.mirror_vnic_id = filter->mirror_vnic_id;
4266
4267         req.enables = rte_cpu_to_le_32(enables);
4268
4269         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4270
4271         HWRM_CHECK_RESULT();
4272
4273         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
4274         HWRM_UNLOCK();
4275
4276         return rc;
4277 }
4278
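/* Free a previously allocated n-tuple filter; no-op if none is recorded. */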
4279 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
4280                                 struct bnxt_filter_info *filter)
4281 {
4282         int rc = 0;
4283         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
4284         struct hwrm_cfa_ntuple_filter_free_output *resp =
4285                                                 bp->hwrm_cmd_resp_addr;
4286
4287         if (filter->fw_ntuple_filter_id == UINT64_MAX)
4288                 return 0;
4289
4290         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
4291
4292         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
4293
4294         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4295
4296         HWRM_CHECK_RESULT();
4297         HWRM_UNLOCK();
4298
4299         filter->fw_ntuple_filter_id = UINT64_MAX;
4300
4301         return 0;
4302 }
4303
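/*
 * Thor RSS: each RSS context is programmed with pairs of
 * (Rx ring id, completion ring id).  Stopped queues are skipped so RSS
 * only spreads traffic across started rings; with no active ring the
 * context is left unprogrammed.
 */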
4304 static int
4305 bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4306 {
4307         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4308         uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
4309         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
4310         struct bnxt_rx_queue **rxqs = bp->rx_queues;
4311         uint16_t *ring_tbl = vnic->rss_table;
4312         int nr_ctxs = vnic->num_lb_ctxts;
4313         int max_rings = bp->rx_nr_rings;
4314         int i, j, k, cnt;
4315         int rc = 0;
4316
4317         for (i = 0, k = 0; i < nr_ctxs; i++) {
4318                 struct bnxt_rx_ring_info *rxr;
4319                 struct bnxt_cp_ring_info *cpr;
4320
4321                 HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
4322
4323                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
4324                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
4325                 req.hash_mode_flags = vnic->hash_mode;
4326
4327                 req.ring_grp_tbl_addr =
4328                     rte_cpu_to_le_64(vnic->rss_table_dma_addr +
4329                                      i * BNXT_RSS_ENTRIES_PER_CTX_THOR *
4330                                      2 * sizeof(*ring_tbl));
4331                 req.hash_key_tbl_addr =
4332                     rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
4333
4334                 req.ring_table_pair_index = i;
4335                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
4336
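		/* Each RSS context holds 64 (Rx ring, cmpl ring) id pairs. */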
4337                 for (j = 0; j < 64; j++) {
4338                         uint16_t ring_id;
4339
4340                         /* Find next active ring. */
4341                         for (cnt = 0; cnt < max_rings; cnt++) {
4342                                 if (rx_queue_state[k] !=
4343                                                 RTE_ETH_QUEUE_STATE_STOPPED)
4344                                         break;
4345                                 if (++k == max_rings)
4346                                         k = 0;
4347                         }
4348
			/* No rings are active: drop the HWRM lock taken by
			 * HWRM_PREP before bailing out.
			 */
			if (cnt == max_rings) {
				HWRM_UNLOCK();
				return 0;
			}
4352
4353                         /* Add rx/cp ring pair to RSS table. */
4354                         rxr = rxqs[k]->rx_ring;
4355                         cpr = rxqs[k]->cp_ring;
4356
4357                         ring_id = rxr->rx_ring_struct->fw_ring_id;
4358                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4359                         ring_id = cpr->cp_ring_struct->fw_ring_id;
4360                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4361
4362                         if (++k == max_rings)
4363                                 k = 0;
4364                 }
4365                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
4366                                             BNXT_USE_CHIMP_MB);
4367
4368                 HWRM_CHECK_RESULT();
4369                 HWRM_UNLOCK();
4370         }
4371
4372         return rc;
4373 }
4374
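/*
 * Program the RSS redirection table for a VNIC: Thor-based chips use
 * the ring-pair scheme above, older chips a flat table of ring group
 * ids hashed across HW_HASH_INDEX_SIZE entries.
 */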
4375 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4376 {
4377         unsigned int rss_idx, fw_idx, i;
4378
4379         if (!(vnic->rss_table && vnic->hash_type))
4380                 return 0;
4381
4382         if (BNXT_CHIP_THOR(bp))
4383                 return bnxt_vnic_rss_configure_thor(bp, vnic);
4384
4385         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4386                 return 0;
4387
	/*
	 * The early return above guarantees rss_table and hash_type are
	 * set, so fill the RSS hash & redirection table with ring group
	 * ids for all VNICs unconditionally.
	 */
	for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
	     rss_idx++, fw_idx++) {
		for (i = 0; i < bp->rx_cp_nr_rings; i++) {
			fw_idx %= bp->rx_cp_nr_rings;
			if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
				break;
			fw_idx++;
		}
		if (i == bp->rx_cp_nr_rings)
			return 0;
		vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
	}

	return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
4410 }
4411
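/*
 * Translate the driver's software coalescing settings into the body of
 * a RING_CMPL_RING_CFG_AGGINT_PARAMS request.
 */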
4412 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
4413         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
4414 {
4415         uint16_t flags;
4416
4417         req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
4418
	/* This is a 6-bit value and must not be 0, or we will get non-stop IRQs */
4420         req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
4421
	/* This is a 6-bit value and must not be 0, or we will get non-stop IRQs */
4423         req->num_cmpl_dma_aggr_during_int =
4424                 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
4425
4426         req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
4427
4428         /* min timer set to 1/2 of interrupt timer */
4429         req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
4430
4431         /* buf timer set to 1/4 of interrupt timer */
4432         req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
4433
4434         req->cmpl_aggr_dma_tmr_during_int =
4435                 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
4436
4437         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4438                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4439         req->flags = rte_cpu_to_le_16(flags);
4440 }
4441
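/*
 * Thor variant: query the supported aggregation limits with
 * RING_AGGINT_QCAPS and build the coalescing request from them.
 */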
4442 static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp,
4443                 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
4444 {
4445         struct hwrm_ring_aggint_qcaps_input req = {0};
4446         struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4447         uint32_t enables;
4448         uint16_t flags;
4449         int rc;
4450
4451         HWRM_PREP(req, RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
4452         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4453         HWRM_CHECK_RESULT();
4454
4455         agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
4456         agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;
4457
4458         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4459                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4460         agg_req->flags = rte_cpu_to_le_16(flags);
4461         enables =
4462          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
4463          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
4464         agg_req->enables = rte_cpu_to_le_32(enables);
4465
4466         HWRM_UNLOCK();
4467         return rc;
4468 }
4469
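/*
 * Apply coalescing to a single completion ring.  A hypothetical caller
 * (ring id taken from names used elsewhere in this file, values purely
 * illustrative):
 *
 *	struct bnxt_coal coal = { .num_cmpl_aggr_int = 1 };
 *
 *	bnxt_hwrm_set_ring_coal(bp, &coal,
 *				cpr->cp_ring_struct->fw_ring_id);
 */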
4470 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
4471                         struct bnxt_coal *coal, uint16_t ring_id)
4472 {
4473         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
4474         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
4475                                                 bp->hwrm_cmd_resp_addr;
4476         int rc;
4477
	/* Ring coalesce parameters are set only on NICs that support
	 * them: Thor-based chips and Stratus devices; no-op elsewhere.
	 */
	if (BNXT_CHIP_THOR(bp)) {
		rc = bnxt_hwrm_set_coal_params_thor(bp, &req);
		if (rc)
			return rc;
	} else if (bnxt_stratus_device(bp)) {
4483                 bnxt_hwrm_set_coal_params(coal, &req);
4484         } else {
4485                 return 0;
4486         }
4487
4488         HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS, BNXT_USE_CHIMP_MB);
4489         req.ring_id = rte_cpu_to_le_16(ring_id);
4490         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4491         HWRM_CHECK_RESULT();
4492         HWRM_UNLOCK();
4493         return 0;
4494 }
4495
4496 #define BNXT_RTE_MEMZONE_FLAG  (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
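/*
 * Query context-memory (backing store) limits on Thor.  Allocates and
 * fills bp->ctx from FUNC_BACKING_STORE_QCAPS; a no-op on non-Thor
 * chips, VFs, firmware older than HWRM 1.9.2, or when the context is
 * already cached.
 */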
4497 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
4498 {
4499         struct hwrm_func_backing_store_qcaps_input req = {0};
4500         struct hwrm_func_backing_store_qcaps_output *resp =
4501                 bp->hwrm_cmd_resp_addr;
4502         struct bnxt_ctx_pg_info *ctx_pg;
4503         struct bnxt_ctx_mem_info *ctx;
4504         int total_alloc_len;
4505         int rc, i;
4506
4507         if (!BNXT_CHIP_THOR(bp) ||
4508             bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
4509             BNXT_VF(bp) ||
4510             bp->ctx)
4511                 return 0;
4512
4513         HWRM_PREP(req, FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
4514         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4515         HWRM_CHECK_RESULT_SILENT();
4516
4517         total_alloc_len = sizeof(*ctx);
4518         ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len,
4519                           RTE_CACHE_LINE_SIZE);
4520         if (!ctx) {
4521                 rc = -ENOMEM;
4522                 goto ctx_err;
4523         }
4524
4525         ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
4526                             sizeof(*ctx_pg) * BNXT_MAX_Q,
4527                             RTE_CACHE_LINE_SIZE);
	if (!ctx_pg) {
		rte_free(ctx);
		rc = -ENOMEM;
		goto ctx_err;
	}
4532         for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++)
4533                 ctx->tqm_mem[i] = ctx_pg;
4534
4535         bp->ctx = ctx;
4536         ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
4537         ctx->qp_min_qp1_entries =
4538                 rte_le_to_cpu_16(resp->qp_min_qp1_entries);
4539         ctx->qp_max_l2_entries =
4540                 rte_le_to_cpu_16(resp->qp_max_l2_entries);
4541         ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
4542         ctx->srq_max_l2_entries =
4543                 rte_le_to_cpu_16(resp->srq_max_l2_entries);
4544         ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
4545         ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
4546         ctx->cq_max_l2_entries =
4547                 rte_le_to_cpu_16(resp->cq_max_l2_entries);
4548         ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
4549         ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
4550         ctx->vnic_max_vnic_entries =
4551                 rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
4552         ctx->vnic_max_ring_table_entries =
4553                 rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
4554         ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
4555         ctx->stat_max_entries =
4556                 rte_le_to_cpu_32(resp->stat_max_entries);
4557         ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
4558         ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
4559         ctx->tqm_min_entries_per_ring =
4560                 rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
4561         ctx->tqm_max_entries_per_ring =
4562                 rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
4563         ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
4564         if (!ctx->tqm_entries_multiple)
4565                 ctx->tqm_entries_multiple = 1;
4566         ctx->mrav_max_entries =
4567                 rte_le_to_cpu_32(resp->mrav_max_entries);
4568         ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
4569         ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
4570         ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
4571 ctx_err:
4572         HWRM_UNLOCK();
4573         return rc;
4574 }
4575
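/*
 * Hand the host-allocated context memory to the firmware via
 * FUNC_BACKING_STORE_CFG.  Only the regions selected in 'enables'
 * (QP, SRQ, CQ, VNIC, STAT and the TQM rings) are described.
 */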
4576 int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
4577 {
4578         struct hwrm_func_backing_store_cfg_input req = {0};
4579         struct hwrm_func_backing_store_cfg_output *resp =
4580                 bp->hwrm_cmd_resp_addr;
4581         struct bnxt_ctx_mem_info *ctx = bp->ctx;
4582         struct bnxt_ctx_pg_info *ctx_pg;
4583         uint32_t *num_entries;
4584         uint64_t *pg_dir;
4585         uint8_t *pg_attr;
4586         uint32_t ena;
4587         int i, rc;
4588
4589         if (!ctx)
4590                 return 0;
4591
4592         HWRM_PREP(req, FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
4593         req.enables = rte_cpu_to_le_32(enables);
4594
4595         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
4596                 ctx_pg = &ctx->qp_mem;
4597                 req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4598                 req.qp_num_qp1_entries =
4599                         rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
4600                 req.qp_num_l2_entries =
4601                         rte_cpu_to_le_16(ctx->qp_max_l2_entries);
4602                 req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
4603                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4604                                       &req.qpc_pg_size_qpc_lvl,
4605                                       &req.qpc_page_dir);
4606         }
4607
4608         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
4609                 ctx_pg = &ctx->srq_mem;
4610                 req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4611                 req.srq_num_l2_entries =
4612                                  rte_cpu_to_le_16(ctx->srq_max_l2_entries);
4613                 req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
4614                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4615                                       &req.srq_pg_size_srq_lvl,
4616                                       &req.srq_page_dir);
4617         }
4618
4619         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
4620                 ctx_pg = &ctx->cq_mem;
4621                 req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4622                 req.cq_num_l2_entries =
4623                                 rte_cpu_to_le_16(ctx->cq_max_l2_entries);
4624                 req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
4625                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4626                                       &req.cq_pg_size_cq_lvl,
4627                                       &req.cq_page_dir);
4628         }
4629
4630         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
4631                 ctx_pg = &ctx->vnic_mem;
4632                 req.vnic_num_vnic_entries =
4633                         rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
4634                 req.vnic_num_ring_table_entries =
4635                         rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
4636                 req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
4637                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4638                                       &req.vnic_pg_size_vnic_lvl,
4639                                       &req.vnic_page_dir);
4640         }
4641
4642         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
4643                 ctx_pg = &ctx->stat_mem;
		req.stat_num_entries =
			rte_cpu_to_le_32(ctx->stat_max_entries);
4645                 req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
4646                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4647                                       &req.stat_pg_size_stat_lvl,
4648                                       &req.stat_page_dir);
4649         }
4650
4651         req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
4652         num_entries = &req.tqm_sp_num_entries;
4653         pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
4654         pg_dir = &req.tqm_sp_page_dir;
4655         ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
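	/*
	 * Nine TQM regions: the slow-path ring followed by rings 0-7.
	 * The pointer walk relies on the per-region num_entries/pg_attr/
	 * pg_dir request fields being laid out contiguously, in the same
	 * order as the enable bits.
	 */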
4656         for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
4657                 if (!(enables & ena))
4658                         continue;
4662                 ctx_pg = ctx->tqm_mem[i];
		*num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4664                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
4665         }
4666
4667         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4668         HWRM_CHECK_RESULT();
4669         HWRM_UNLOCK();
4670
4671         return rc;
4672 }
4673
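/*
 * Have the firmware DMA extended port statistics into the previously
 * mapped host buffers; cache the sizes it reports so the stats path
 * knows how much of each buffer is valid.
 */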
4674 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
4675 {
4676         struct hwrm_port_qstats_ext_input req = {0};
4677         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
4678         struct bnxt_pf_info *pf = &bp->pf;
4679         int rc;
4680
4681         if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
4682               bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
4683                 return 0;
4684
4685         HWRM_PREP(req, PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
4686
4687         req.port_id = rte_cpu_to_le_16(pf->port_id);
4688         if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
4689                 req.tx_stat_host_addr =
4690                         rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
4691                 req.tx_stat_size =
4692                         rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
4693         }
4694         if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
4695                 req.rx_stat_host_addr =
4696                         rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
4697                 req.rx_stat_size =
4698                         rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
4699         }
4700         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4701
4702         if (rc) {
4703                 bp->fw_rx_port_stats_ext_size = 0;
4704                 bp->fw_tx_port_stats_ext_size = 0;
4705         } else {
4706                 bp->fw_rx_port_stats_ext_size =
4707                         rte_le_to_cpu_16(resp->rx_stat_size);
4708                 bp->fw_tx_port_stats_ext_size =
4709                         rte_le_to_cpu_16(resp->tx_stat_size);
4710         }
4711
4712         HWRM_CHECK_RESULT();
4713         HWRM_UNLOCK();
4714
4715         return rc;
4716 }
4717
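/* Redirect packets of the given tunnel type to this function's fid. */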
4718 int
4719 bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
4720 {
4721         struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0};
4722         struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp =
4723                 bp->hwrm_cmd_resp_addr;
4724         int rc = 0;
4725
4726         HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
4727         req.tunnel_type = type;
4728         req.dest_fid = bp->fw_fid;
4729         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4730         HWRM_CHECK_RESULT();
4731
4732         HWRM_UNLOCK();
4733
4734         return rc;
4735 }
4736
4737 int
4738 bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
4739 {
4740         struct hwrm_cfa_redirect_tunnel_type_free_input req = {0};
4741         struct hwrm_cfa_redirect_tunnel_type_free_output *resp =
4742                 bp->hwrm_cmd_resp_addr;
4743         int rc = 0;
4744
4745         HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
4746         req.tunnel_type = type;
4747         req.dest_fid = bp->fw_fid;
4748         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4749         HWRM_CHECK_RESULT();
4750
4751         HWRM_UNLOCK();
4752
4753         return rc;
4754 }
4755
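/* Query the bitmask of tunnel types currently redirected for this fid. */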
4756 int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
4757 {
4758         struct hwrm_cfa_redirect_query_tunnel_type_input req = {0};
4759         struct hwrm_cfa_redirect_query_tunnel_type_output *resp =
4760                 bp->hwrm_cmd_resp_addr;
4761         int rc = 0;
4762
4763         HWRM_PREP(req, CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
4764         req.src_fid = bp->fw_fid;
4765         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4766         HWRM_CHECK_RESULT();
4767
4768         if (type)
4769                 *type = rte_le_to_cpu_32(resp->tunnel_mask);
4770
4771         HWRM_UNLOCK();
4772
4773         return rc;
4774 }
4775
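/* Look up the destination fid programmed for a redirected tunnel type. */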
4776 int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
4777                                    uint16_t *dst_fid)
4778 {
4779         struct hwrm_cfa_redirect_tunnel_type_info_input req = {0};
4780         struct hwrm_cfa_redirect_tunnel_type_info_output *resp =
4781                 bp->hwrm_cmd_resp_addr;
4782         int rc = 0;
4783
4784         HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
4785         req.src_fid = bp->fw_fid;
4786         req.tunnel_type = tun_type;
4787         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4788         HWRM_CHECK_RESULT();
4789
4790         if (dst_fid)
4791                 *dst_fid = rte_le_to_cpu_16(resp->dest_fid);
4792
	PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", rte_le_to_cpu_16(resp->dest_fid));
4794
4795         HWRM_UNLOCK();
4796
4797         return rc;
4798 }
4799
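/*
 * VF only: ask the firmware to make bp->mac_addr the function's default
 * MAC address; the value is cached in bp->dflt_mac_addr on success.
 */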
4800 int bnxt_hwrm_set_mac(struct bnxt *bp)
4801 {
4802         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4803         struct hwrm_func_vf_cfg_input req = {0};
4804         int rc = 0;
4805
4806         if (!BNXT_VF(bp))
4807                 return 0;
4808
4809         HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
4810
4811         req.enables =
4812                 rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
4813         memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
4814
4815         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4816
4817         HWRM_CHECK_RESULT();
4818
4819         memcpy(bp->dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
4820         HWRM_UNLOCK();
4821
4822         return rc;
4823 }
4824
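/*
 * Tell the firmware the interface is going up or down (only when the
 * FW advertised IF_CHANGE support).  A HOT_FW_RESET_DONE flag in the
 * response means the FW was reset while the port was down.
 */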
4825 int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
4826 {
4827         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
4828         struct hwrm_func_drv_if_change_input req = {0};
4829         uint32_t flags;
4830         int rc;
4831
4832         if (!(bp->flags & BNXT_FLAG_FW_CAP_IF_CHANGE))
4833                 return 0;
4834
	/* Do not issue FUNC_DRV_IF_CHANGE during reset recovery: if it is
	 * sent with the "down" flag before FUNC_DRV_UNRGTR, the firmware
	 * may reset before the unregister is processed.
	 */
4839         if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
4840                 return 0;
4841
4842         HWRM_PREP(req, FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);
4843
4844         if (up)
4845                 req.flags =
4846                 rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP);
4847
4848         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4849
4850         HWRM_CHECK_RESULT();
4851         flags = rte_le_to_cpu_32(resp->flags);
4852         HWRM_UNLOCK();
4853
4854         if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
4855                 PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
4856                 bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
4857         }
4858
4859         return 0;
4860 }
4861
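/*
 * Fetch the firmware's error-recovery parameters: recovery ownership
 * (host vs. co-CPU), polling/wait periods (FW reports units of 100 ms,
 * converted to ms here), health and heartbeat registers, and the reset
 * register/value sequence.  Also maps the FW status registers.
 */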
4862 int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
4863 {
4864         struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4865         struct bnxt_error_recovery_info *info = bp->recovery_info;
4866         struct hwrm_error_recovery_qcfg_input req = {0};
4867         uint32_t flags = 0;
4868         unsigned int i;
4869         int rc;
4870
4871         /* Older FW does not have error recovery support */
4872         if (!(bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY))
4873                 return 0;
4874
	if (!info) {
		info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
				   sizeof(*info), 0);
		if (info == NULL)
			return -ENOMEM;
		bp->recovery_info = info;
	} else {
		memset(info, 0, sizeof(*info));
	}
4884
4885         HWRM_PREP(req, ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
4886
4887         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4888
4889         HWRM_CHECK_RESULT();
4890
4891         flags = rte_le_to_cpu_32(resp->flags);
4892         if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST)
4893                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST;
4894         else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
4895                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU;
4896
4897         if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) &&
4898             !(bp->flags & BNXT_FLAG_KONG_MB_EN)) {
4899                 rc = -EINVAL;
4900                 goto err;
4901         }
4902
4903         /* FW returned values are in units of 100msec */
4904         info->driver_polling_freq =
4905                 rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
4906         info->master_func_wait_period =
4907                 rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
4908         info->normal_func_wait_period =
4909                 rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
4910         info->master_func_wait_period_after_reset =
4911                 rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100;
4912         info->max_bailout_time_after_reset =
4913                 rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
4914         info->status_regs[BNXT_FW_STATUS_REG] =
4915                 rte_le_to_cpu_32(resp->fw_health_status_reg);
4916         info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] =
4917                 rte_le_to_cpu_32(resp->fw_heartbeat_reg);
4918         info->status_regs[BNXT_FW_RECOVERY_CNT_REG] =
4919                 rte_le_to_cpu_32(resp->fw_reset_cnt_reg);
4920         info->status_regs[BNXT_FW_RESET_INPROG_REG] =
4921                 rte_le_to_cpu_32(resp->reset_inprogress_reg);
4922         info->reg_array_cnt =
4923                 rte_le_to_cpu_32(resp->reg_array_cnt);
4924
4925         if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) {
4926                 rc = -EINVAL;
4927                 goto err;
4928         }
4929
4930         for (i = 0; i < info->reg_array_cnt; i++) {
4931                 info->reset_reg[i] =
4932                         rte_le_to_cpu_32(resp->reset_reg[i]);
4933                 info->reset_reg_val[i] =
4934                         rte_le_to_cpu_32(resp->reset_reg_val[i]);
4935                 info->delay_after_reset[i] =
4936                         resp->delay_after_reset[i];
4937         }
4938 err:
4939         HWRM_UNLOCK();
4940
4941         /* Map the FW status registers */
4942         if (!rc)
4943                 rc = bnxt_map_fw_health_status_regs(bp);
4944
4945         if (rc) {
4946                 rte_free(bp->recovery_info);
4947                 bp->recovery_info = NULL;
4948         }
4949         return rc;
4950 }
4951
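/* PF only: request a graceful, ASAP self-reset of the chip firmware. */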
4952 int bnxt_hwrm_fw_reset(struct bnxt *bp)
4953 {
4954         struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr;
4955         struct hwrm_fw_reset_input req = {0};
4956         int rc;
4957
4958         if (!BNXT_PF(bp))
4959                 return -EOPNOTSUPP;
4960
4961         HWRM_PREP(req, FW_RESET, BNXT_USE_KONG(bp));
4962
4963         req.embedded_proc_type =
4964                 HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
4965         req.selfrst_status =
4966                 HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
4967         req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
4968
4969         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
4970                                     BNXT_USE_KONG(bp));
4971
4972         HWRM_CHECK_RESULT();
4973         HWRM_UNLOCK();
4974
4975         return rc;
4976 }
4977
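/*
 * Read a PTP timestamp from the port: the Tx or Rx packet timestamp or
 * the current time, returned by the FW as two 32-bit LE words.
 */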
4978 int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
4979 {
4980         struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
4981         struct hwrm_port_ts_query_input req = {0};
4982         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
4983         uint32_t flags = 0;
4984         int rc;
4985
4986         if (!ptp)
4987                 return 0;
4988
4989         HWRM_PREP(req, PORT_TS_QUERY, BNXT_USE_CHIMP_MB);
4990
4991         switch (path) {
4992         case BNXT_PTP_FLAGS_PATH_TX:
4993                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX;
4994                 break;
4995         case BNXT_PTP_FLAGS_PATH_RX:
4996                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX;
4997                 break;
4998         case BNXT_PTP_FLAGS_CURRENT_TIME:
4999                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME;
5000                 break;
5001         }
5002
5003         req.flags = rte_cpu_to_le_32(flags);
5004         req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
5005
5006         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5007
5008         HWRM_CHECK_RESULT();
5009
5010         if (timestamp) {
5011                 *timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]);
5012                 *timestamp |=
5013                         (uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32;
5014         }
5015         HWRM_UNLOCK();
5016
5017         return rc;
5018 }
5019
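/*
 * Query advanced flow management capabilities (PF or trusted VF only)
 * and record source-L2-header filter support in bp->flow_flags.
 */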
5020 int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp)
5021 {
5022         struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp =
5023                                         bp->hwrm_cmd_resp_addr;
5024         struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
5025         uint32_t flags = 0;
5026         int rc = 0;
5027
5028         if (!(bp->flags & BNXT_FLAG_ADV_FLOW_MGMT))
5029                 return rc;
5030
5031         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5032                 PMD_DRV_LOG(DEBUG,
5033                             "Not a PF or trusted VF. Command not supported\n");
5034                 return 0;
5035         }
5036
5037         HWRM_PREP(req, CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_KONG(bp));
5038         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5039
5040         HWRM_CHECK_RESULT();
5041         flags = rte_le_to_cpu_32(resp->flags);
5042         HWRM_UNLOCK();
5043
5044         if (flags & HWRM_CFA_ADV_FLOW_MGNT_QCAPS_L2_HDR_SRC_FILTER_EN) {
5045                 bp->flow_flags |= BNXT_FLOW_FLAG_L2_HDR_SRC_FILTER_EN;
5046                 PMD_DRV_LOG(INFO, "Source L2 header filtering enabled\n");
5047         }
5048
5049         return rc;
5050 }