dpdk.git: drivers/net/bnxt/bnxt_hwrm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT                10000
#define HWRM_SPEC_CODE_1_8_3            0x10803
#define HWRM_VERSION_1_9_1              0x10901

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}
/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * command was rejected by the ChiMP firmware.
 */

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                  uint32_t msg_len, bool use_kong_mb)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };
        uint16_t bar_offset = use_kong_mb ?
                GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
        uint16_t mb_trigger_offset = use_kong_mb ?
                GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;

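        /*
         * Short command mode: instead of pushing the full request through
         * the BAR window, copy it into a pre-mapped DMA buffer and send
         * only a small hwrm_short_input descriptor (request type,
         * signature, size and DMA address) over the channel.
         */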
        if (bp->flags & BNXT_FLAG_SHORT_CMD) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->max_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                /* Sync memory write before updating doorbell */
                rte_wmb();

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <=
                                bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
                        req->req_type);
                goto err_ret;
        }
        return 0;

err_ret:
        return -1;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands.  It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() returns from the enclosing function on failure and
 * releases the spinlock only on those early-return paths.  If the function
 * does not use the regular int return codes, HWRM_CHECK_RESULT() should not
 * be used directly; copy and modify it to suit the function instead.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type, kong) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(-1); \
        req.seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
                rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT_SILENT() do {\
        if (rc) { \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc > 0) \
                        rc = -EINVAL; \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        PMD_DRV_LOG(ERR, \
                                "error %d:%d:%08x:%04x\n", \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        PMD_DRV_LOG(ERR, "error %d\n", rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc > 0) \
                        rc = -EINVAL; \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)
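
/*
 * Example (sketch, not compiled code): the canonical calling pattern used
 * by the bnxt_hwrm_*() functions below, shown here with FUNC_RESET purely
 * for illustration:
 *
 *      struct hwrm_func_reset_input req = {.req_type = 0 };
 *      struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 *      int rc;
 *
 *      HWRM_PREP(req, FUNC_RESET, BNXT_USE_CHIMP_MB);
 *      rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 *      HWRM_CHECK_RESULT();    <-- unlocks and returns on any failure
 *      ... read any fields from *resp while the lock is still held ...
 *      HWRM_UNLOCK();
 *      return rc;
 */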

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME add multicast flag, when multicast adding options is supported
         * by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->flags & BNXT_VNIC_INFO_MCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the set_rx_mask
         * list was used for anti-spoof. In 1.8.0, the TX path configuration was
         * removed from the set_rx_mask call, and this command was added.
         *
         * This command is also present from 1.7.8.11 and higher,
         * as well as 1.7.8.0
         */
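        /*
         * bp->fw_ver packs the firmware version as
         * (major << 24) | (minor << 16) | (build << 8) | rsvd (see
         * bnxt_hwrm_ver_get()), so ((1 << 24) | (8 << 16)) below means
         * 1.8.0.0, and ((1 << 24) | (7 << 16) | (8 << 8) | 11) is 1.7.8.11.
         */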
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = UINT64_MAX;

        return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        //TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                PMD_DRV_LOG(DEBUG,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_CFG, BNXT_USE_CHIMP_MB);

        if (ptp->rx_filter)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables = rte_cpu_to_le_32
                (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*      if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
        if (ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);

        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (!(resp->flags &
              HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) {
                HWRM_UNLOCK();
                return 0;
        }

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }

        ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
        ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
        ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
        ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
        ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
        ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
        ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
        ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
        ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        /* Release the HWRM lock only after the response has been consumed */
        HWRM_UNLOCK();

        return 0;
}

static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Fail to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Alloc VLAN AS table for VF %d fail\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
                        /* Drop the lock before issuing the nested command */
                        HWRM_UNLOCK();
                        bnxt_hwrm_ptp_qcfg(bp);
                        return rc;
                }
        }

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc;

        rc = __bnxt_hwrm_func_qcaps(bp);
        if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
                rc = bnxt_hwrm_func_resc_qcaps(bp);
                if (!rc)
                        bp->flags |= BNXT_FLAG_NEW_RM;
        }

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET, BNXT_USE_CHIMP_MB);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));

                /*
                 * PF can sniff HWRM API issued by VF. This can be set up by
                 * linux driver and inherited by the DPDK PF driver. Clear
                 * this HWRM sniffer list in FW because DPDK PF driver does
                 * not support this.
                 */
                req.flags =
                rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
        }

        req.async_event_fwd[0] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
{
        if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
                return 0;

        return bnxt_hwrm_func_reserve_vf_resc(bp, true);
}

int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
{
        int rc;
        uint32_t flags = 0;
        uint32_t enables;
        struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_vf_cfg_input req = {0};

        HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);

        req.enables = rte_cpu_to_le_32
                        (HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS);

        req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
        req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
                                            AGG_RING_MULTIPLIER);
        req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
                                              bp->tx_nr_rings);
        req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
        req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
        if (bp->vf_resv_strategy ==
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
                                HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
                                HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
                req.enables |= rte_cpu_to_le_32(enables);
                req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
                req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
                req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
        }

        if (test)
                flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;

        req.flags = rte_cpu_to_le_32(flags);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (test)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_resource_qcaps_input req = {0};

        HWRM_PREP(req, FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (BNXT_VF(bp)) {
                bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
                bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
                bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
                bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
                bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
                bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        }
        bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
        if (bp->vf_resv_strategy >
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
                bp->vf_resv_strategy =
                HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
                resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
                resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
        bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
                     (resp->hwrm_fw_min_8b << 16) |
                     (resp->hwrm_fw_bld_8b << 8) |
                     resp->hwrm_fw_rsvd_8b;
        PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

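        /*
         * Interface versions are packed as
         * (major << 16) | (minor << 8) | update, e.g. 1.9.1 -> 0x10901
         * (cf. HWRM_VERSION_1_9_1 above).
         */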
        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj_8b << 16;
        fw_version |= resp->hwrm_intf_min_8b << 8;
        fw_version |= resp->hwrm_intf_upd_8b;
        bp->hwrm_spec_code = fw_version;

        if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        PMD_DRV_LOG(INFO,
                                "Firmware API version is newer than driver.\n");
                        PMD_DRV_LOG(INFO,
                                "The driver may be missing features.\n");
                } else {
                        PMD_DRV_LOG(INFO,
                                "Firmware API version is older than driver.\n");
                        PMD_DRV_LOG(INFO,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                PMD_DRV_LOG(ERR, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        /* Build the buffer name up front: it is also used for the short
         * command buffer allocated below, even when the response buffer
         * is not reallocated.
         */
        sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                bp->pdev->addr.domain, bp->pdev->addr.bus,
                bp->pdev->addr.devid, bp->pdev->addr.function);

        if (bp->max_resp_len != max_resp_len) {
                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                PMD_DRV_LOG(DEBUG, "Short command supported\n");

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr = rte_malloc(type,
                                                        bp->max_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == 0) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }

                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
                bp->flags |= BNXT_FLAG_KONG_MB_EN;
                PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
        }

error:
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG, BNXT_USE_CHIMP_MB);

        if (conf->link_up) {
                /* Fixed speed requested while autoneg is on: disable it */
                if (bp->link_info.auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
                }

                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        /* No speeds specified. Enable AutoNeg - all speeds */
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
                /* AutoNeg - Advertise speeds specified. */
                if (conf->auto_link_speed_mask &&
                    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
                                conf->auto_link_speed_mask;
                        enables |=
                        HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }

                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                PMD_DRV_LOG(INFO, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex_cfg;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
        link_info->phy_type = resp->phy_type;
        link_info->media_type = resp->media_type;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        HWRM_UNLOCK();

        PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
        PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
                    link_info->auto_link_speed_mask);
        PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
                    link_info->force_link_speed);

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int i;

        HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);

        req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
        /* HWRM Version >= 1.9.1 */
        if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
                req.drv_qmap_cap =
                        HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

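/*
 * Token pasting selects the per-queue response fields by index:
 * GET_QUEUE_INFO(0) reads resp->queue_id0 and
 * resp->queue_id0_service_profile.
 */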
1070 #define GET_QUEUE_INFO(x) \
1071         bp->cos_queue[x].id = resp->queue_id##x; \
1072         bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
1073
1074         GET_QUEUE_INFO(0);
1075         GET_QUEUE_INFO(1);
1076         GET_QUEUE_INFO(2);
1077         GET_QUEUE_INFO(3);
1078         GET_QUEUE_INFO(4);
1079         GET_QUEUE_INFO(5);
1080         GET_QUEUE_INFO(6);
1081         GET_QUEUE_INFO(7);
1082
1083         HWRM_UNLOCK();
1084
1085         if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
1086                 bp->tx_cosq_id = bp->cos_queue[0].id;
1087         } else {
1088                 /* iterate and find the COSq profile to use for Tx */
1089                 for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
1090                         if (bp->cos_queue[i].profile ==
1091                                 HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
1092                                 bp->tx_cosq_id = bp->cos_queue[i].id;
1093                                 break;
1094                         }
1095                 }
1096         }
1097         PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);
1098
1099         return rc;
1100 }
1101
1102 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
1103                          struct bnxt_ring *ring,
1104                          uint32_t ring_type, uint32_t map_index,
1105                          uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
1106 {
1107         int rc = 0;
1108         uint32_t enables = 0;
1109         struct hwrm_ring_alloc_input req = {.req_type = 0 };
1110         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1111
1112         HWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB);
1113
1114         req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
1115         req.fbo = rte_cpu_to_le_32(0);
1116         /* Association of ring index with doorbell index */
1117         req.logical_id = rte_cpu_to_le_16(map_index);
1118         req.length = rte_cpu_to_le_32(ring->ring_size);
1119
1120         switch (ring_type) {
1121         case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1122                 req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
1123                 /* FALLTHROUGH */
1124         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1125                 req.ring_type = ring_type;
1126                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1127                 req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
1128                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1129                         enables |=
1130                         HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1131                 break;
1132         case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1133                 req.ring_type = ring_type;
1134                 /*
1135                  * TODO: Some HWRM versions crash with
1136                  * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
1137                  */
1138                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1139                 break;
1140         default:
1141                 PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
1142                         ring_type);
1143                 HWRM_UNLOCK();
1144                 return -1;
1145         }
1146         req.enables = rte_cpu_to_le_32(enables);
1147
1148         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1149
1150         if (rc || resp->error_code) {
1151                 if (rc == 0 && resp->error_code)
1152                         rc = rte_le_to_cpu_16(resp->error_code);
1153                 switch (ring_type) {
1154                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1155                         PMD_DRV_LOG(ERR,
1156                                 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
1157                         HWRM_UNLOCK();
1158                         return rc;
1159                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1160                         PMD_DRV_LOG(ERR,
1161                                 "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1162                         HWRM_UNLOCK();
1163                         return rc;
1164                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1165                         PMD_DRV_LOG(ERR,
1166                                 "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1167                         HWRM_UNLOCK();
1168                         return rc;
1169                 default:
1170                         PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1171                         HWRM_UNLOCK();
1172                         return rc;
1173                 }
1174         }
1175
1176         ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1177         HWRM_UNLOCK();
1178         return rc;
1179 }
1180
1181 int bnxt_hwrm_ring_free(struct bnxt *bp,
1182                         struct bnxt_ring *ring, uint32_t ring_type)
1183 {
1184         int rc;
1185         struct hwrm_ring_free_input req = {.req_type = 0 };
1186         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1187
1188         HWRM_PREP(req, RING_FREE, BNXT_USE_CHIMP_MB);
1189
1190         req.ring_type = ring_type;
1191         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1192
1193         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1194
1195         if (rc || resp->error_code) {
1196                 if (rc == 0 && resp->error_code)
1197                         rc = rte_le_to_cpu_16(resp->error_code);
1198                 HWRM_UNLOCK();
1199
1200                 switch (ring_type) {
1201                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1202                         PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1203                                 rc);
1204                         return rc;
1205                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1206                         PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1207                                 rc);
1208                         return rc;
1209                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1210                         PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1211                                 rc);
1212                         return rc;
1213                 default:
1214                         PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1215                         return rc;
1216                 }
1217         }
1218         HWRM_UNLOCK();
1219         return 0;
1220 }
1221
1222 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1223 {
1224         int rc = 0;
1225         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1226         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1227
1228         HWRM_PREP(req, RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
1229
1230         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1231         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1232         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1233         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1234
1235         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1236
1237         HWRM_CHECK_RESULT();
1238
1239         bp->grp_info[idx].fw_grp_id =
1240             rte_le_to_cpu_16(resp->ring_group_id);
1241
1242         HWRM_UNLOCK();
1243
1244         return rc;
1245 }
1246
1247 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1248 {
1249         int rc;
1250         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1251         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1252
1253         HWRM_PREP(req, RING_GRP_FREE, BNXT_USE_CHIMP_MB);
1254
1255         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1256
1257         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1258
1259         HWRM_CHECK_RESULT();
1260         HWRM_UNLOCK();
1261
1262         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1263         return rc;
1264 }
1265
1266 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1267 {
1268         int rc = 0;
1269         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1270         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1271
1272         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1273                 return rc;
1274
1275         HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
1276
1277         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1278
1279         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1280
1281         HWRM_CHECK_RESULT();
1282         HWRM_UNLOCK();
1283
1284         return rc;
1285 }
1286
1287 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1288                                 unsigned int idx __rte_unused)
1289 {
1290         int rc;
1291         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1292         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1293
1294         HWRM_PREP(req, STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1295
1296         req.update_period_ms = rte_cpu_to_le_32(0);
1297
1298         req.stats_dma_addr =
1299             rte_cpu_to_le_64(cpr->hw_stats_map);
1300
1301         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1302
1303         HWRM_CHECK_RESULT();
1304
1305         cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
1306
1307         HWRM_UNLOCK();
1308
1309         return rc;
1310 }
1311
1312 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1313                                 unsigned int idx __rte_unused)
1314 {
1315         int rc;
1316         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1317         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1318
1319         HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
1320
1321         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1322
1323         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1324
1325         HWRM_CHECK_RESULT();
1326         HWRM_UNLOCK();
1327
1328         return rc;
1329 }
1330
1331 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1332 {
1333         int rc = 0, i, j;
1334         struct hwrm_vnic_alloc_input req = { 0 };
1335         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1336
1337         /* map ring groups to this vnic */
1338         PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1339                 vnic->start_grp_id, vnic->end_grp_id);
1340         for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
1341                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1342
1343         vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1344         vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1345         vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1346         vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1347         vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1348                                 ETHER_CRC_LEN + VLAN_TAG_SIZE;
1349         HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);
1350
1351         if (vnic->func_default)
1352                 req.flags =
1353                         rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1354         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1355
1356         HWRM_CHECK_RESULT();
1357
1358         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1359         HWRM_UNLOCK();
1360         PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1361         return rc;
1362 }
1363
1364 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1365                                         struct bnxt_vnic_info *vnic,
1366                                         struct bnxt_plcmodes_cfg *pmode)
1367 {
1368         int rc = 0;
1369         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1370         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1371
1372         HWRM_PREP(req, VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
1373
1374         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1375
1376         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1377
1378         HWRM_CHECK_RESULT();
1379
1380         pmode->flags = rte_le_to_cpu_32(resp->flags);
1381         /* dflt_vnic bit doesn't exist in the _cfg command */
1382         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1383         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1384         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1385         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1386
1387         HWRM_UNLOCK();
1388
1389         return rc;
1390 }
1391
1392 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1393                                        struct bnxt_vnic_info *vnic,
1394                                        struct bnxt_plcmodes_cfg *pmode)
1395 {
1396         int rc = 0;
1397         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1398         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1399
1400         HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1401
1402         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1403         req.flags = rte_cpu_to_le_32(pmode->flags);
1404         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1405         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1406         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1407         req.enables = rte_cpu_to_le_32(
1408             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1409             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1410             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1411         );
1412
1413         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1414
1415         HWRM_CHECK_RESULT();
1416         HWRM_UNLOCK();
1417
1418         return rc;
1419 }
1420
1421 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1422 {
1423         int rc = 0;
1424         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1425         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1426         uint32_t ctx_enable_flag = 0;
1427         struct bnxt_plcmodes_cfg pmodes;
1428
1429         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1430                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1431                 return rc;
1432         }
1433
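        /* Save the current buffer placement settings; they are restored
         * via bnxt_hwrm_vnic_plcmodes_cfg() after the VNIC is reconfigured.
         */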
1434         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1435         if (rc)
1436                 return rc;
1437
1438         HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);
1439
1440         /* Only RSS is supported for now; COS and LB are TBD */
1441         req.enables =
1442             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
1443         if (vnic->lb_rule != (uint16_t)HWRM_NA_SIGNATURE)
1444                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1445         if (vnic->cos_rule != (uint16_t)HWRM_NA_SIGNATURE)
1446                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1447         if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
1448                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1449                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1450         }
1451         req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1452         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1453         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1454         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1455         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1456         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1457         req.mru = rte_cpu_to_le_16(vnic->mru);
1458         if (vnic->func_default)
1459                 req.flags |=
1460                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1461         if (vnic->vlan_strip)
1462                 req.flags |=
1463                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1464         if (vnic->bd_stall)
1465                 req.flags |=
1466                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1467         if (vnic->roce_dual)
1468                 req.flags |= rte_cpu_to_le_32(
1469                         HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1470         if (vnic->roce_only)
1471                 req.flags |= rte_cpu_to_le_32(
1472                         HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1473         if (vnic->rss_dflt_cr)
1474                 req.flags |= rte_cpu_to_le_32(
1475                         HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE);
1476
1477         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1478
1479         HWRM_CHECK_RESULT();
1480         HWRM_UNLOCK();
1481
1482         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1483
1484         return rc;
1485 }
1486
1487 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1488                 int16_t fw_vf_id)
1489 {
1490         int rc = 0;
1491         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1492         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1493
1494         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1495                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %x\n", vnic->fw_vnic_id);
1496                 return rc;
1497         }
1498         HWRM_PREP(req, VNIC_QCFG, BNXT_USE_CHIMP_MB);
1499
1500         req.enables =
1501                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1502         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1503         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1504
1505         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1506
1507         HWRM_CHECK_RESULT();
1508
1509         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1510         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1511         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1512         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1513         vnic->mru = rte_le_to_cpu_16(resp->mru);
1514         vnic->func_default = rte_le_to_cpu_32(
1515                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1516         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1517                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1518         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1519                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1520         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1521                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1522         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1523                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1524         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1525                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1526
1527         HWRM_UNLOCK();
1528
1529         return rc;
1530 }
1531
1532 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1533 {
1534         int rc = 0;
1535         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1536         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1537                                                 bp->hwrm_cmd_resp_addr;
1538
1539         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1540
1541         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1542
1543         HWRM_CHECK_RESULT();
1544
1545         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1546         HWRM_UNLOCK();
1547         PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1548
1549         return rc;
1550 }
1551
1552 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1553 {
1554         int rc = 0;
1555         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1556         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1557                                                 bp->hwrm_cmd_resp_addr;
1558
1559         if (vnic->rss_rule == (uint16_t)HWRM_NA_SIGNATURE) {
1560                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1561                 return rc;
1562         }
1563         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
1564
1565         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1566
1567         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1568
1569         HWRM_CHECK_RESULT();
1570         HWRM_UNLOCK();
1571
1572         vnic->rss_rule = INVALID_HW_RING_ID;
1573
1574         return rc;
1575 }
1576
1577 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1578 {
1579         int rc = 0;
1580         struct hwrm_vnic_free_input req = {.req_type = 0 };
1581         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1582
1583         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1584                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1585                 return rc;
1586         }
1587
1588         HWRM_PREP(req, VNIC_FREE, BNXT_USE_CHIMP_MB);
1589
1590         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1591
1592         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1593
1594         HWRM_CHECK_RESULT();
1595         HWRM_UNLOCK();
1596
1597         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1598         return rc;
1599 }
1600
1601 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1602                            struct bnxt_vnic_info *vnic)
1603 {
1604         int rc = 0;
1605         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1606         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1607
1608         HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
1609
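        /* The ring group table is the RSS indirection table; the hash key
         * table holds the RSS hash key.
         */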
1610         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1611         req.hash_mode_flags = vnic->hash_mode;
1612
1613         req.ring_grp_tbl_addr =
1614             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1615         req.hash_key_tbl_addr =
1616             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1617         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1618
1619         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1620
1621         HWRM_CHECK_RESULT();
1622         HWRM_UNLOCK();
1623
1624         return rc;
1625 }
1626
1627 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1628                         struct bnxt_vnic_info *vnic)
1629 {
1630         int rc = 0;
1631         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1632         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1633         uint16_t size;
1634
1635         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1636                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1637                 return rc;
1638         }
1639
1640         HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1641
1642         req.flags = rte_cpu_to_le_32(
1643                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1644
1645         req.enables = rte_cpu_to_le_32(
1646                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1647
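        /* Packets larger than one RX buffer (data room minus headroom)
         * are handled by jumbo placement.
         */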
1648         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1649         size -= RTE_PKTMBUF_HEADROOM;
1650
1651         req.jumbo_thresh = rte_cpu_to_le_16(size);
1652         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1653
1654         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1655
1656         HWRM_CHECK_RESULT();
1657         HWRM_UNLOCK();
1658
1659         return rc;
1660 }
1661
1662 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1663                         struct bnxt_vnic_info *vnic, bool enable)
1664 {
1665         int rc = 0;
1666         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1667         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1668
1669         HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
1670
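        /* When disabling, flags and enables stay zero, which turns TPA
         * off for this VNIC.
         */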
1671         if (enable) {
1672                 req.enables = rte_cpu_to_le_32(
1673                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1674                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1675                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1676                 req.flags = rte_cpu_to_le_32(
1677                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1678                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1679                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1680                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1681                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1682                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1683                 req.max_agg_segs = rte_cpu_to_le_16(5);
1684                 req.max_aggs =
1685                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1686                 req.min_agg_len = rte_cpu_to_le_32(512);
1687         }
1688         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1689
1690         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1691
1692         HWRM_CHECK_RESULT();
1693         HWRM_UNLOCK();
1694
1695         return rc;
1696 }
1697
1698 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1699 {
1700         struct hwrm_func_cfg_input req = {0};
1701         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1702         int rc;
1703
1704         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1705         req.enables = rte_cpu_to_le_32(
1706                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1707         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1708         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1709
1710         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
1711
1712         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1713         HWRM_CHECK_RESULT();
1714         HWRM_UNLOCK();
1715
1716         bp->pf.vf_info[vf].random_mac = false;
1717
1718         return rc;
1719 }
1720
1721 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1722                                   uint64_t *dropped)
1723 {
1724         int rc = 0;
1725         struct hwrm_func_qstats_input req = {.req_type = 0};
1726         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1727
1728         HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
1729
1730         req.fid = rte_cpu_to_le_16(fid);
1731
1732         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1733
1734         HWRM_CHECK_RESULT();
1735
1736         if (dropped)
1737                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1738
1739         HWRM_UNLOCK();
1740
1741         return rc;
1742 }
1743
1744 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1745                           struct rte_eth_stats *stats)
1746 {
1747         int rc = 0;
1748         struct hwrm_func_qstats_input req = {.req_type = 0};
1749         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1750
1751         HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
1752
1753         req.fid = rte_cpu_to_le_16(fid);
1754
1755         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1756
1757         HWRM_CHECK_RESULT();
1758
1759         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1760         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1761         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1762         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1763         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1764         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1765
1766         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1767         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1768         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1769         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1770         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1771         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1772
1773         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
1774         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
1775         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
1776
1777         HWRM_UNLOCK();
1778
1779         return rc;
1780 }
1781
1782 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1783 {
1784         int rc = 0;
1785         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1786         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1787
1788         HWRM_PREP(req, FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
1789
1790         req.fid = rte_cpu_to_le_16(fid);
1791
1792         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1793
1794         HWRM_CHECK_RESULT();
1795         HWRM_UNLOCK();
1796
1797         return rc;
1798 }
1799
1800 /*
1801  * HWRM utility functions
1802  */
1803
1804 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1805 {
1806         unsigned int i;
1807         int rc = 0;
1808
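        /* Completion rings are indexed with RX queues first, followed by
         * TX queues.
         */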
1809         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1810                 struct bnxt_tx_queue *txq;
1811                 struct bnxt_rx_queue *rxq;
1812                 struct bnxt_cp_ring_info *cpr;
1813
1814                 if (i >= bp->rx_cp_nr_rings) {
1815                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1816                         cpr = txq->cp_ring;
1817                 } else {
1818                         rxq = bp->rx_queues[i];
1819                         cpr = rxq->cp_ring;
1820                 }
1821
1822                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1823                 if (rc)
1824                         return rc;
1825         }
1826         return 0;
1827 }
1828
1829 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1830 {
1831         int rc;
1832         unsigned int i;
1833         struct bnxt_cp_ring_info *cpr;
1834
1835         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1836
1837                 if (i >= bp->rx_cp_nr_rings) {
1838                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1839                 } else {
1840                         cpr = bp->rx_queues[i]->cp_ring;
1841                         bp->grp_info[i].fw_stats_ctx = -1;
1842                 }
1843                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1844                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1845                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1846                         if (rc)
1847                                 return rc;
1848                 }
1849         }
1850         return 0;
1851 }
1852
1853 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1854 {
1855         unsigned int i;
1856         int rc = 0;
1857
1858         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1859                 struct bnxt_tx_queue *txq;
1860                 struct bnxt_rx_queue *rxq;
1861                 struct bnxt_cp_ring_info *cpr;
1862
1863                 if (i >= bp->rx_cp_nr_rings) {
1864                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1865                         cpr = txq->cp_ring;
1866                 } else {
1867                         rxq = bp->rx_queues[i];
1868                         cpr = rxq->cp_ring;
1869                 }
1870
1871                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1872
1873                 if (rc)
1874                         return rc;
1875         }
1876         return rc;
1877 }
1878
1879 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1880 {
1881         uint16_t idx;
1882         int rc = 0;
1883
1884         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1885
1886                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1887                         continue;
1888
1889                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1890
1891                 if (rc)
1892                         return rc;
1893         }
1894         return rc;
1895 }
1896
1897 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1898 {
1899         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1900
1901         bnxt_hwrm_ring_free(bp, cp_ring,
1902                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1903         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1904         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1905                         sizeof(*cpr->cp_desc_ring));
1906         cpr->cp_raw_cons = 0;
1907 }
1908
1909 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
1910 {
1911         struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
1912         struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1913         struct bnxt_ring *ring = rxr->rx_ring_struct;
1914         struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1915
1916         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1917                 bnxt_hwrm_ring_free(bp, ring,
1918                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1919                 ring->fw_ring_id = INVALID_HW_RING_ID;
1920                 bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
1921                 memset(rxr->rx_desc_ring, 0,
1922                        rxr->rx_ring_struct->ring_size *
1923                        sizeof(*rxr->rx_desc_ring));
1924                 memset(rxr->rx_buf_ring, 0,
1925                        rxr->rx_ring_struct->ring_size *
1926                        sizeof(*rxr->rx_buf_ring));
1927                 rxr->rx_prod = 0;
1928         }
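        /* Also free the aggregation ring used for jumbo and TPA
         * placement, if it was created.
         */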
1929         ring = rxr->ag_ring_struct;
1930         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1931                 bnxt_hwrm_ring_free(bp, ring,
1932                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1933                 ring->fw_ring_id = INVALID_HW_RING_ID;
1934                 memset(rxr->ag_buf_ring, 0,
1935                        rxr->ag_ring_struct->ring_size *
1936                        sizeof(*rxr->ag_buf_ring));
1937                 rxr->ag_prod = 0;
1938                 bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
1939         }
1940         if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
1941                 bnxt_free_cp_ring(bp, cpr);
1942
1943         bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
1944 }
1945
1946 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1947 {
1948         unsigned int i;
1949
1950         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1951                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1952                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1953                 struct bnxt_ring *ring = txr->tx_ring_struct;
1954                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1955
1956                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1957                         bnxt_hwrm_ring_free(bp, ring,
1958                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1959                         ring->fw_ring_id = INVALID_HW_RING_ID;
1960                         memset(txr->tx_desc_ring, 0,
1961                                         txr->tx_ring_struct->ring_size *
1962                                         sizeof(*txr->tx_desc_ring));
1963                         memset(txr->tx_buf_ring, 0,
1964                                         txr->tx_ring_struct->ring_size *
1965                                         sizeof(*txr->tx_buf_ring));
1966                         txr->tx_prod = 0;
1967                         txr->tx_cons = 0;
1968                 }
1969                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1970                         bnxt_free_cp_ring(bp, cpr);
1971                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1972                 }
1973         }
1974
1975         for (i = 0; i < bp->rx_cp_nr_rings; i++)
1976                 bnxt_free_hwrm_rx_ring(bp, i);
1977
1978         return 0;
1979 }
1980
1981 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1982 {
1983         uint16_t i;
1984         int rc = 0;
1985
1986         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1987                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1988                 if (rc)
1989                         return rc;
1990         }
1991         return rc;
1992 }
1993
1994 void bnxt_free_hwrm_resources(struct bnxt *bp)
1995 {
1996         /* Free the HWRM response and short command buffers */
1997         rte_free(bp->hwrm_cmd_resp_addr);
1998         rte_free(bp->hwrm_short_cmd_req_addr);
1999         bp->hwrm_cmd_resp_addr = NULL;
2000         bp->hwrm_short_cmd_req_addr = NULL;
2001         bp->hwrm_cmd_resp_dma_addr = 0;
2002         bp->hwrm_short_cmd_req_dma_addr = 0;
2003 }
2004
2005 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2006 {
2007         struct rte_pci_device *pdev = bp->pdev;
2008         char type[RTE_MEMZONE_NAMESIZE];
2009
2010         snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
2011                 pdev->addr.domain, pdev->addr.bus, pdev->addr.devid,
2012                 pdev->addr.function);
2012         bp->max_resp_len = HWRM_MAX_RESP_LEN;
2013         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2014         if (bp->hwrm_cmd_resp_addr == NULL)
2015                 return -ENOMEM;
2016         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
2017         bp->hwrm_cmd_resp_dma_addr =
2018                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
2019         if (bp->hwrm_cmd_resp_dma_addr == 0) {
2020                 PMD_DRV_LOG(ERR,
2021                         "unable to map response address to physical memory\n");
2022                 return -ENOMEM;
2023         }
2024         rte_spinlock_init(&bp->hwrm_lock);
2025
2026         return 0;
2027 }
2028
2029 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2030 {
2031         struct bnxt_filter_info *filter;
2032         int rc = 0;
2033
2034         STAILQ_FOREACH(filter, &vnic->filter, next) {
2035                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2036                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2037                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2038                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2039                 else
2040                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2041                 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
2042                 /* Continue on failure so the remaining filters are
2043                  * still unlinked from the VNIC's filter list. */
2044         }
2045         return rc;
2046 }
2047
2048 static int
2049 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2050 {
2051         struct bnxt_filter_info *filter;
2052         struct rte_flow *flow;
2053         int rc = 0;
2054
2055         /* Pop flows from the head so the iterator never touches an
2056          * entry after it has been freed. */
2057         while (!STAILQ_EMPTY(&vnic->flow_list)) {
2058                 flow = STAILQ_FIRST(&vnic->flow_list);
2059                 filter = flow->filter;
2060                 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
2061                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2062                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2063                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2064                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2065                 else
2066                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2067                 STAILQ_REMOVE_HEAD(&vnic->flow_list, next);
2068                 rte_free(flow);
2069         }
2070         return rc;
2071 }
2072
2073 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2074 {
2075         struct bnxt_filter_info *filter;
2076         int rc = 0;
2077
2078         STAILQ_FOREACH(filter, &vnic->filter, next) {
2079                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2080                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2081                                                      filter);
2082                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2083                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2084                                                          filter);
2085                 else
2086                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2087                                                      filter);
2088                 if (rc)
2089                         break;
2090         }
2091         return rc;
2092 }
2093
2094 void bnxt_free_tunnel_ports(struct bnxt *bp)
2095 {
2096         if (bp->vxlan_port_cnt)
2097                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2098                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2099         bp->vxlan_port = 0;
2100         if (bp->geneve_port_cnt)
2101                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2102                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2103         bp->geneve_port = 0;
2104 }
2105
2106 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2107 {
2108         int i;
2109
2110         if (bp->vnic_info == NULL)
2111                 return;
2112
2113         /*
2114          * Cleanup VNICs in reverse order, to make sure the L2 filter
2115          * from vnic0 is last to be cleaned up.
2116          */
2117         for (i = bp->nr_vnics - 1; i >= 0; i--) {
2118                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2119
2120                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2121
2122                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2123
2124                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2125
2126                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2127
2128                 bnxt_hwrm_vnic_free(bp, vnic);
2129
2130                 rte_free(vnic->fw_grp_ids);
2131         }
2132         /* Ring resources */
2133         bnxt_free_all_hwrm_rings(bp);
2134         bnxt_free_all_hwrm_ring_grps(bp);
2135         bnxt_free_all_hwrm_stat_ctxs(bp);
2136         bnxt_free_tunnel_ports(bp);
2137 }
2138
2139 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2140 {
2141         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2142
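        /* ETH_LINK_SPEED_AUTONEG is 0, so this tests that the FIXED bit
         * is clear, i.e. autonegotiation was requested.
         */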
2143         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2144                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2145
2146         switch (conf_link_speed) {
2147         case ETH_LINK_SPEED_10M_HD:
2148         case ETH_LINK_SPEED_100M_HD:
2149                 /* FALLTHROUGH */
2150                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2151         }
2152         return hw_link_duplex;
2153 }
2154
2155 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2156 {
2157         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2158 }
2159
2160 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2161 {
2162         uint16_t eth_link_speed = 0;
2163
2164         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2165                 return ETH_LINK_SPEED_AUTONEG;
2166
2167         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2168         case ETH_LINK_SPEED_100M:
2169         case ETH_LINK_SPEED_100M_HD:
2170                 /* FALLTHROUGH */
2171                 eth_link_speed =
2172                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2173                 break;
2174         case ETH_LINK_SPEED_1G:
2175                 eth_link_speed =
2176                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2177                 break;
2178         case ETH_LINK_SPEED_2_5G:
2179                 eth_link_speed =
2180                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2181                 break;
2182         case ETH_LINK_SPEED_10G:
2183                 eth_link_speed =
2184                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2185                 break;
2186         case ETH_LINK_SPEED_20G:
2187                 eth_link_speed =
2188                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2189                 break;
2190         case ETH_LINK_SPEED_25G:
2191                 eth_link_speed =
2192                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2193                 break;
2194         case ETH_LINK_SPEED_40G:
2195                 eth_link_speed =
2196                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2197                 break;
2198         case ETH_LINK_SPEED_50G:
2199                 eth_link_speed =
2200                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2201                 break;
2202         case ETH_LINK_SPEED_100G:
2203                 eth_link_speed =
2204                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2205                 break;
2206         default:
2207                 PMD_DRV_LOG(ERR,
2208                         "Unsupported link speed %u; default to AUTO\n",
2209                         conf_link_speed);
2210                 break;
2211         }
2212         return eth_link_speed;
2213 }
2214
2215 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2216                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2217                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2218                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2219
2220 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2221 {
2222         uint32_t one_speed;
2223
2224         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2225                 return 0;
2226
2227         if (link_speed & ETH_LINK_SPEED_FIXED) {
2228                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2229
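                /* A fixed link must name exactly one speed, so the mask
                 * must be a power of two.
                 */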
2230                 if (one_speed & (one_speed - 1)) {
2231                         PMD_DRV_LOG(ERR,
2232                                 "Invalid advertised speeds (%u) for port %u\n",
2233                                 link_speed, port_id);
2234                         return -EINVAL;
2235                 }
2236                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2237                         PMD_DRV_LOG(ERR,
2238                                 "Unsupported advertised speed (%u) for port %u\n",
2239                                 link_speed, port_id);
2240                         return -EINVAL;
2241                 }
2242         } else {
2243                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2244                         PMD_DRV_LOG(ERR,
2245                                 "Unsupported advertised speeds (%u) for port %u\n",
2246                                 link_speed, port_id);
2247                         return -EINVAL;
2248                 }
2249         }
2250         return 0;
2251 }
2252
2253 static uint16_t
2254 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2255 {
2256         uint16_t ret = 0;
2257
2258         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2259                 if (bp->link_info.support_speeds)
2260                         return bp->link_info.support_speeds;
2261                 link_speed = BNXT_SUPPORTED_SPEEDS;
2262         }
2263
2264         if (link_speed & ETH_LINK_SPEED_100M)
2265                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2266         if (link_speed & ETH_LINK_SPEED_100M_HD)
2267                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2268         if (link_speed & ETH_LINK_SPEED_1G)
2269                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2270         if (link_speed & ETH_LINK_SPEED_2_5G)
2271                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2272         if (link_speed & ETH_LINK_SPEED_10G)
2273                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2274         if (link_speed & ETH_LINK_SPEED_20G)
2275                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2276         if (link_speed & ETH_LINK_SPEED_25G)
2277                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2278         if (link_speed & ETH_LINK_SPEED_40G)
2279                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2280         if (link_speed & ETH_LINK_SPEED_50G)
2281                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2282         if (link_speed & ETH_LINK_SPEED_100G)
2283                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2284         return ret;
2285 }
2286
2287 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2288 {
2289         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2290
2291         switch (hw_link_speed) {
2292         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2293                 eth_link_speed = ETH_SPEED_NUM_100M;
2294                 break;
2295         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2296                 eth_link_speed = ETH_SPEED_NUM_1G;
2297                 break;
2298         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2299                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2300                 break;
2301         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2302                 eth_link_speed = ETH_SPEED_NUM_10G;
2303                 break;
2304         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2305                 eth_link_speed = ETH_SPEED_NUM_20G;
2306                 break;
2307         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2308                 eth_link_speed = ETH_SPEED_NUM_25G;
2309                 break;
2310         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2311                 eth_link_speed = ETH_SPEED_NUM_40G;
2312                 break;
2313         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2314                 eth_link_speed = ETH_SPEED_NUM_50G;
2315                 break;
2316         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2317                 eth_link_speed = ETH_SPEED_NUM_100G;
2318                 break;
2319         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2320         default:
2321                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2322                         hw_link_speed);
2323                 break;
2324         }
2325         return eth_link_speed;
2326 }
2327
2328 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2329 {
2330         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2331
2332         switch (hw_link_duplex) {
2333         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2334         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2335                 /* FALLTHROUGH */
2336                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2337                 break;
2338         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2339                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2340                 break;
2341         default:
2342                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2343                         hw_link_duplex);
2344                 break;
2345         }
2346         return eth_link_duplex;
2347 }
2348
2349 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2350 {
2351         int rc = 0;
2352         struct bnxt_link_info *link_info = &bp->link_info;
2353
2354         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2355         if (rc) {
2356                 PMD_DRV_LOG(ERR,
2357                         "Get link config failed with rc %d\n", rc);
2358                 goto exit;
2359         }
2360         if (link_info->link_speed)
2361                 link->link_speed =
2362                         bnxt_parse_hw_link_speed(link_info->link_speed);
2363         else
2364                 link->link_speed = ETH_SPEED_NUM_NONE;
2365         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2366         link->link_status = link_info->link_up;
2367         link->link_autoneg = link_info->auto_mode ==
2368                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2369                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2370 exit:
2371         return rc;
2372 }
2373
2374 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2375 {
2376         int rc = 0;
2377         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2378         struct bnxt_link_info link_req;
2379         uint16_t speed, autoneg;
2380
2381         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2382                 return 0;
2383
2384         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2385                         bp->eth_dev->data->port_id);
2386         if (rc)
2387                 goto error;
2388
2389         memset(&link_req, 0, sizeof(link_req));
2390         link_req.link_up = link_up;
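        /* When forcing the link down, only the link_up flag matters;
         * skip the speed and duplex setup below.
         */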
2391         if (!link_up)
2392                 goto port_phy_cfg;
2393
2394         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2395         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2396         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2397         /* Autoneg can be done only when the FW allows */
2398         if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2399                                 bp->link_info.force_link_speed)) {
2400                 link_req.phy_flags |=
2401                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2402                 link_req.auto_link_speed_mask =
2403                         bnxt_parse_eth_link_speed_mask(bp,
2404                                                        dev_conf->link_speeds);
2405         } else {
2406                 if (bp->link_info.phy_type ==
2407                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2408                     bp->link_info.phy_type ==
2409                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2410                     bp->link_info.media_type ==
2411                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2412                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2413                         return -EINVAL;
2414                 }
2415
2416                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2417                 /* If user wants a particular speed try that first. */
2418                 if (speed)
2419                         link_req.link_speed = speed;
2420                 else if (bp->link_info.force_link_speed)
2421                         link_req.link_speed = bp->link_info.force_link_speed;
2422                 else
2423                         link_req.link_speed = bp->link_info.auto_link_speed;
2424         }
2425         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2426         link_req.auto_pause = bp->link_info.auto_pause;
2427         link_req.force_pause = bp->link_info.force_pause;
2428
2429 port_phy_cfg:
2430         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2431         if (rc) {
2432                 PMD_DRV_LOG(ERR,
2433                         "Set link config failed with rc %d\n", rc);
2434         }
2435
2436 error:
2437         return rc;
2438 }
2439
2440 /* JIRA 22088 */
2441 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2442 {
2443         struct hwrm_func_qcfg_input req = {0};
2444         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2445         uint16_t flags;
2446         int rc = 0;
2447
2448         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2449         req.fid = rte_cpu_to_le_16(0xffff);
2450
2451         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2452
2453         HWRM_CHECK_RESULT();
2454
2455         /* Hard-coded 0xfff VLAN ID mask */
2456         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2457         flags = rte_le_to_cpu_16(resp->flags);
2458         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2459                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2460
2461         switch (resp->port_partition_type) {
2462         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2463         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2464         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2465                 /* FALLTHROUGH */
2466                 bp->port_partition_type = resp->port_partition_type;
2467                 break;
2468         default:
2469                 bp->port_partition_type = 0;
2470                 break;
2471         }
2472
2473         HWRM_UNLOCK();
2474
2475         return rc;
2476 }
2477
2478 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2479                                    struct hwrm_func_qcaps_output *qcaps)
2480 {
2481         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2482         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2483                sizeof(qcaps->mac_address));
2484         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2485         qcaps->max_rx_rings = fcfg->num_rx_rings;
2486         qcaps->max_tx_rings = fcfg->num_tx_rings;
2487         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2488         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2489         qcaps->max_vfs = 0;
2490         qcaps->first_vf_id = 0;
2491         qcaps->max_vnics = fcfg->num_vnics;
2492         qcaps->max_decap_records = 0;
2493         qcaps->max_encap_records = 0;
2494         qcaps->max_tx_wm_flows = 0;
2495         qcaps->max_tx_em_flows = 0;
2496         qcaps->max_rx_wm_flows = 0;
2497         qcaps->max_rx_em_flows = 0;
2498         qcaps->max_flow_id = 0;
2499         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2500         qcaps->max_sp_tx_rings = 0;
2501         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2502 }
2503
2504 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2505 {
2506         struct hwrm_func_cfg_input req = {0};
2507         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2508         int rc;
2509
2510         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2511                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2512                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2513                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2514                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2515                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2516                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2517                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2518                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2519                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2520         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2521         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2522         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2523                                    ETHER_CRC_LEN + VLAN_TAG_SIZE *
2524                                    BNXT_NUM_VLANS);
2525         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2526         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2527         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2528         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2529         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2530         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2531         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2532         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2533         req.fid = rte_cpu_to_le_16(0xffff);
2534
2535         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2536
2537         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2538
2539         HWRM_CHECK_RESULT();
2540         HWRM_UNLOCK();
2541
2542         return rc;
2543 }
2544
2545 static void populate_vf_func_cfg_req(struct bnxt *bp,
2546                                      struct hwrm_func_cfg_input *req,
2547                                      int num_vfs)
2548 {
2549         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2550                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2551                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2552                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2553                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2554                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2555                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2556                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2557                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2558                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2559
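        /* Split the PF's resources evenly among the PF and all VFs,
         * i.e. num_vfs + 1 shares.
         */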
2560         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2561                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2562                                     BNXT_NUM_VLANS);
2563         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2564                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2565                                     BNXT_NUM_VLANS);
2566         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2567                                                 (num_vfs + 1));
2568         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2569         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2570                                                (num_vfs + 1));
2571         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2572         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2573         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2574         /* TODO: For now, do not support VMDq/RFS on VFs. */
2575         req->num_vnics = rte_cpu_to_le_16(1);
2576         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2577                                                  (num_vfs + 1));
2578 }
2579
2580 static void add_random_mac_if_needed(struct bnxt *bp,
2581                                      struct hwrm_func_cfg_input *cfg_req,
2582                                      int vf)
2583 {
2584         struct ether_addr mac;
2585
2586         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2587                 return;
2588
2589         if (is_zero_ether_addr(&mac)) {
2590                 cfg_req->enables |=
2591                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2592                 eth_random_addr(cfg_req->dflt_mac_addr);
2593                 bp->pf.vf_info[vf].random_mac = true;
2594         } else {
2595                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2596         }
2597 }
2598
2599 static void reserve_resources_from_vf(struct bnxt *bp,
2600                                       struct hwrm_func_cfg_input *cfg_req,
2601                                       int vf)
2602 {
2603         struct hwrm_func_qcaps_input req = {0};
2604         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2605         int rc;
2606
2607         /* Get the actual allocated values now */
2608         HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
2609         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2610         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2611
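        /* If the query fails, fall back to assuming the VF holds exactly
         * the resources that were requested in cfg_req.
         */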
2612         if (rc) {
2613                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2614                 copy_func_cfg_to_qcaps(cfg_req, resp);
2615         } else if (resp->error_code) {
2616                 rc = rte_le_to_cpu_16(resp->error_code);
2617                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2618                 copy_func_cfg_to_qcaps(cfg_req, resp);
2619         }
2620
2621         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2622         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2623         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2624         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2625         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2626         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2627         /*
2628          * TODO: VMDq is not yet supported on VFs, so max_vnics is always
2629          * forced to 1 and is not reserved from the PF here:
2630          * bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2631          */
2632         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2633
2634         HWRM_UNLOCK();
2635 }
2636
2637 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2638 {
2639         struct hwrm_func_qcfg_input req = {0};
2640         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2641         int rc;
2642
2643         /* Query the current VLAN of this VF's function */
2644         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2645         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2646         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2647         if (rc) {
2648                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
2649                 return -1;
2650         } else if (resp->error_code) {
2651                 rc = rte_le_to_cpu_16(resp->error_code);
2652                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
2653                 return -1;
2654         }
2655         rc = rte_le_to_cpu_16(resp->vlan);
2656
2657         HWRM_UNLOCK();
2658
2659         return rc;
2660 }
2661
2662 static int update_pf_resource_max(struct bnxt *bp)
2663 {
2664         struct hwrm_func_qcfg_input req = {0};
2665         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2666         int rc;
2667
2668         /* And copy the allocated numbers into the pf struct */
2669         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2670         req.fid = rte_cpu_to_le_16(0xffff);
2671         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2672         HWRM_CHECK_RESULT();
2673
2674         /* Only TX ring value reflects actual allocation? TODO */
2675         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2676         bp->pf.evb_mode = resp->evb_mode;
2677
2678         HWRM_UNLOCK();
2679
2680         return rc;
2681 }
2682
2683 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2684 {
2685         int rc;
2686
2687         if (!BNXT_PF(bp)) {
2688                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2689                 return -1;
2690         }
2691
2692         rc = bnxt_hwrm_func_qcaps(bp);
2693         if (rc)
2694                 return rc;
2695
2696         bp->pf.func_cfg_flags &=
2697                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2698                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2699         bp->pf.func_cfg_flags |=
2700                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2701         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2702         return rc;
2703 }
2704
2705 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2706 {
2707         struct hwrm_func_cfg_input req = {0};
2708         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2709         int i;
2710         size_t sz;
2711         int rc = 0;
2712         size_t req_buf_sz;
2713
2714         if (!BNXT_PF(bp)) {
2715                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2716                 return -1;
2717         }
2718
2719         rc = bnxt_hwrm_func_qcaps(bp);
2720
2721         if (rc)
2722                 return rc;
2723
2724         bp->pf.active_vfs = num_vfs;
2725
2726         /*
2727          * First, configure the PF to only use one TX ring.  This ensures that
2728          * there are enough rings for all VFs.
2729          *
2730          * If we don't do this, when we call func_alloc() later, we will lock
2731          * extra rings to the PF that won't be available during func_cfg() of
2732          * the VFs.
2733          *
2734          * This has been fixed with firmware versions above 20.6.54
2735          */
2736         bp->pf.func_cfg_flags &=
2737                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2738                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2739         bp->pf.func_cfg_flags |=
2740                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2741         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2742         if (rc)
2743                 return rc;
2744
2745         /*
2746          * Now, create and register a buffer to hold forwarded VF requests
2747          */
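             /*
              * The forward buffer is one contiguous allocation holding
              * num_vfs slots of HWRM_MAX_REQ_LEN bytes each;
              * vf_info[i].req_buf points at slot i.
              */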
2748         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2749         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2750                 page_roundup(req_buf_sz));
2751         if (bp->pf.vf_req_buf == NULL) {
2752                 rc = -ENOMEM;
2753                 goto error_free;
2754         }
2755         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2756                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2757         for (i = 0; i < num_vfs; i++)
2758                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2759                                         (i * HWRM_MAX_REQ_LEN);
2760
2761         rc = bnxt_hwrm_func_buf_rgtr(bp);
2762         if (rc)
2763                 goto error_free;
2764
2765         populate_vf_func_cfg_req(bp, &req, num_vfs);
2766
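             /*
              * Reset the VF count and re-increment it as each VF is
              * successfully configured below.
              */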
2767         bp->pf.active_vfs = 0;
2768         for (i = 0; i < num_vfs; i++) {
2769                 add_random_mac_if_needed(bp, &req, i);
2770
2771                 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2772                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2773                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2774                 rc = bnxt_hwrm_send_message(bp,
2775                                             &req,
2776                                             sizeof(req),
2777                                             BNXT_USE_CHIMP_MB);
2778
2779                 /* Clear enable flag for next pass */
2780                 req.enables &= ~rte_cpu_to_le_32(
2781                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2782
2783                 if (rc || resp->error_code) {
2784                         PMD_DRV_LOG(ERR,
2785                                 "Failed to initialize VF %d\n", i);
2786                         PMD_DRV_LOG(ERR,
2787                                 "Not all VFs available. (%d, %d)\n",
2788                                 rc, resp->error_code);
2789                         HWRM_UNLOCK();
2790                         break;
2791                 }
2792
2793                 HWRM_UNLOCK();
2794
2795                 reserve_resources_from_vf(bp, &req, i);
2796                 bp->pf.active_vfs++;
2797                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2798         }
2799
2800         /*
2801          * Now configure the PF to use "the rest" of the resources.
2802          * We use STD_TX_RING_MODE here, which limits the number of TX
2803          * rings but allows QoS to function properly.  Without it, the
2804          * PF rings break the bandwidth settings.
2805          */
2806         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2807         if (rc)
2808                 goto error_free;
2809
2810         rc = update_pf_resource_max(bp);
2811         if (rc)
2812                 goto error_free;
2813
2814         return rc;
2815
2816 error_free:
2817         bnxt_hwrm_func_buf_unrgtr(bp);
2818         return rc;
2819 }
2820
2821 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2822 {
2823         struct hwrm_func_cfg_input req = {0};
2824         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2825         int rc;
2826
2827         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2828
2829         req.fid = rte_cpu_to_le_16(0xffff);
2830         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2831         req.evb_mode = bp->pf.evb_mode;
2832
2833         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2834         HWRM_CHECK_RESULT();
2835         HWRM_UNLOCK();
2836
2837         return rc;
2838 }
2839
2840 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2841                                 uint8_t tunnel_type)
2842 {
2843         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2844         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2845         int rc = 0;
2846
2847         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
2848         req.tunnel_type = tunnel_type;
2849         req.tunnel_dst_port_val = port;
2850         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2851         HWRM_CHECK_RESULT();
2852
2853         switch (tunnel_type) {
2854         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2855                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2856                 bp->vxlan_port = port;
2857                 break;
2858         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2859                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2860                 bp->geneve_port = port;
2861                 break;
2862         default:
2863                 break;
2864         }
2865
2866         HWRM_UNLOCK();
2867
2868         return rc;
2869 }
2870
2871 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2872                                 uint8_t tunnel_type)
2873 {
2874         struct hwrm_tunnel_dst_port_free_input req = {0};
2875         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2876         int rc = 0;
2877
2878         HWRM_PREP(req, TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
2879
2880         req.tunnel_type = tunnel_type;
2881         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2882         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2883
2884         HWRM_CHECK_RESULT();
2885         HWRM_UNLOCK();
2886
2887         return rc;
2888 }
2889
2890 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2891                                         uint32_t flags)
2892 {
2893         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2894         struct hwrm_func_cfg_input req = {0};
2895         int rc;
2896
2897         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2898
2899         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2900         req.flags = rte_cpu_to_le_32(flags);
2901         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2902
2903         HWRM_CHECK_RESULT();
2904         HWRM_UNLOCK();
2905
2906         return rc;
2907 }
2908
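     /*
      * Callback for bnxt_hwrm_func_vf_vnic_query_and_config(): overwrite
      * the VNIC flags with the value passed through cbdata.
      */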
2909 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2910 {
2911         uint32_t *flag = flagp;
2912
2913         vnic->flags = *flag;
2914 }
2915
2916 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2917 {
2918         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2919 }
2920
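     /*
      * Register the VF request forwarding buffer with the firmware so
      * that HWRM commands issued by VFs can be handed to the PF driver.
      */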
2921 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2922 {
2923         int rc = 0;
2924         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2925         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2926
2927         HWRM_PREP(req, FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
2928
2929         req.req_buf_num_pages = rte_cpu_to_le_16(1);
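             /*
              * req_buf_page_size is log2-encoded; page_getenum() returns
              * the exponent for a buffer covering all active VFs.
              */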
2930         req.req_buf_page_size = rte_cpu_to_le_16(
2931                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2932         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2933         req.req_buf_page_addr0 =
2934                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2935         if (req.req_buf_page_addr0 == 0) {
2936                 PMD_DRV_LOG(ERR,
2937                         "unable to map buffer address to physical memory\n");
2938                 return -ENOMEM;
2939         }
2940
2941         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2942
2943         HWRM_CHECK_RESULT();
2944         HWRM_UNLOCK();
2945
2946         return rc;
2947 }
2948
2949 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2950 {
2951         int rc = 0;
2952         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2953         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2954
2955         HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
2956
2957         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2958
2959         HWRM_CHECK_RESULT();
2960         HWRM_UNLOCK();
2961
2962         return rc;
2963 }
2964
2965 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2966 {
2967         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2968         struct hwrm_func_cfg_input req = {0};
2969         int rc;
2970
2971         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2972
2973         req.fid = rte_cpu_to_le_16(0xffff);
2974         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2975         req.enables = rte_cpu_to_le_32(
2976                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2977         req.async_event_cr = rte_cpu_to_le_16(
2978                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2979         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2980
2981         HWRM_CHECK_RESULT();
2982         HWRM_UNLOCK();
2983
2984         return rc;
2985 }
2986
2987 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2988 {
2989         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2990         struct hwrm_func_vf_cfg_input req = {0};
2991         int rc;
2992
2993         HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
2994
2995         req.enables = rte_cpu_to_le_32(
2996                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2997         req.async_event_cr = rte_cpu_to_le_16(
2998                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2999         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3000
3001         HWRM_CHECK_RESULT();
3002         HWRM_UNLOCK();
3003
3004         return rc;
3005 }
3006
3007 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
3008 {
3009         struct hwrm_func_cfg_input req = {0};
3010         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3011         uint16_t dflt_vlan, fid;
3012         uint32_t func_cfg_flags;
3013         int rc = 0;
3014
3015         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3016
3017         if (is_vf) {
3018                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
3019                 fid = bp->pf.vf_info[vf].fid;
3020                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
3021         } else {
3022                 fid = rte_cpu_to_le_16(0xffff);
3023                 func_cfg_flags = bp->pf.func_cfg_flags;
3024                 dflt_vlan = bp->vlan;
3025         }
3026
3027         req.flags = rte_cpu_to_le_32(func_cfg_flags);
3028         req.fid = rte_cpu_to_le_16(fid);
3029         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3030         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
3031
3032         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3033
3034         HWRM_CHECK_RESULT();
3035         HWRM_UNLOCK();
3036
3037         return rc;
3038 }
3039
3040 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
3041                         uint16_t max_bw, uint16_t enables)
3042 {
3043         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3044         struct hwrm_func_cfg_input req = {0};
3045         int rc;
3046
3047         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3048
3049         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3050         req.enables |= rte_cpu_to_le_32(enables);
3051         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3052         req.max_bw = rte_cpu_to_le_32(max_bw);
3053         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3054
3055         HWRM_CHECK_RESULT();
3056         HWRM_UNLOCK();
3057
3058         return rc;
3059 }
3060
3061 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
3062 {
3063         struct hwrm_func_cfg_input req = {0};
3064         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3065         int rc = 0;
3066
3067         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3068
3069         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3070         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3071         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3072         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
3073
3074         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3075
3076         HWRM_CHECK_RESULT();
3077         HWRM_UNLOCK();
3078
3079         return rc;
3080 }
3081
3082 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3083 {
3084         int rc;
3085
3086         if (BNXT_PF(bp))
3087                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3088         else
3089                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3090
3091         return rc;
3092 }
3093
3094 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3095                               void *encaped, size_t ec_size)
3096 {
3097         int rc = 0;
3098         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3099         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3100
3101         if (ec_size > sizeof(req.encap_request))
3102                 return -1;
3103
3104         HWRM_PREP(req, REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
3105
3106         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3107         memcpy(req.encap_request, encaped, ec_size);
3108
3109         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3110
3111         HWRM_CHECK_RESULT();
3112         HWRM_UNLOCK();
3113
3114         return rc;
3115 }
3116
3117 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3118                                        struct ether_addr *mac)
3119 {
3120         struct hwrm_func_qcfg_input req = {0};
3121         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3122         int rc;
3123
3124         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
3125
3126         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3127         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3128
3129         HWRM_CHECK_RESULT();
3130
3131         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
3132
3133         HWRM_UNLOCK();
3134
3135         return rc;
3136 }
3137
3138 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3139                             void *encaped, size_t ec_size)
3140 {
3141         int rc = 0;
3142         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3143         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3144
3145         if (ec_size > sizeof(req.encap_request))
3146                 return -1;
3147
3148         HWRM_PREP(req, EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
3149
3150         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3151         memcpy(req.encap_request, encaped, ec_size);
3152
3153         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3154
3155         HWRM_CHECK_RESULT();
3156         HWRM_UNLOCK();
3157
3158         return rc;
3159 }
3160
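     /*
      * Query one statistics context and fold its counters into the
      * per-queue fields of rte_eth_stats; rx selects whether the RX or
      * TX counter set is filled in.
      */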
3161 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3162                          struct rte_eth_stats *stats, uint8_t rx)
3163 {
3164         int rc = 0;
3165         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3166         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3167
3168         HWRM_PREP(req, STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
3169
3170         req.stat_ctx_id = rte_cpu_to_le_32(cid);
3171
3172         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3173
3174         HWRM_CHECK_RESULT();
3175
3176         if (rx) {
3177                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3178                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3179                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3180                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3181                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3182                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3183                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3184                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3185         } else {
3186                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3187                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3188                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3189                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3190                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3191                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3192                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
3193         }
3194
3196         HWRM_UNLOCK();
3197
3198         return rc;
3199 }
3200
3201 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3202 {
3203         struct hwrm_port_qstats_input req = {0};
3204         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3205         struct bnxt_pf_info *pf = &bp->pf;
3206         int rc;
3207
3208         HWRM_PREP(req, PORT_QSTATS, BNXT_USE_CHIMP_MB);
3209
3210         req.port_id = rte_cpu_to_le_16(pf->port_id);
3211         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3212         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3213         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3214
3215         HWRM_CHECK_RESULT();
3216         HWRM_UNLOCK();
3217
3218         return rc;
3219 }
3220
3221 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3222 {
3223         struct hwrm_port_clr_stats_input req = {0};
3224         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3225         struct bnxt_pf_info *pf = &bp->pf;
3226         int rc;
3227
3228         /* Not allowed on NS2 device, NPAR, MultiHost, VF */
3229         if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
3230             BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
3231                 return 0;
3232
3233         HWRM_PREP(req, PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
3234
3235         req.port_id = rte_cpu_to_le_16(pf->port_id);
3236         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3237
3238         HWRM_CHECK_RESULT();
3239         HWRM_UNLOCK();
3240
3241         return rc;
3242 }
3243
3244 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3245 {
3246         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3247         struct hwrm_port_led_qcaps_input req = {0};
3248         int rc;
3249
3250         if (BNXT_VF(bp))
3251                 return 0;
3252
3253         HWRM_PREP(req, PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
3254         req.port_id = bp->pf.port_id;
3255         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3256
3257         HWRM_CHECK_RESULT();
3258
3259         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3260                 unsigned int i;
3261
3262                 bp->num_leds = resp->num_leds;
3263                 memcpy(bp->leds, &resp->led0_id,
3264                         sizeof(bp->leds[0]) * bp->num_leds);
3265                 for (i = 0; i < bp->num_leds; i++) {
3266                         struct bnxt_led_info *led = &bp->leds[i];
3267
3268                         uint16_t caps = led->led_state_caps;
3269
3270                         if (!led->led_group_id ||
3271                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3272                                 bp->num_leds = 0;
3273                                 break;
3274                         }
3275                 }
3276         }
3277
3278         HWRM_UNLOCK();
3279
3280         return rc;
3281 }
3282
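     /*
      * Blink the port LEDs (500 ms on/off) when led_on is set, or restore
      * their default state otherwise.
      */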
3283 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3284 {
3285         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3286         struct hwrm_port_led_cfg_input req = {0};
3287         struct bnxt_led_cfg *led_cfg;
3288         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3289         uint16_t duration = 0;
3290         int rc, i;
3291
3292         if (!bp->num_leds || BNXT_VF(bp))
3293                 return -EOPNOTSUPP;
3294
3295         HWRM_PREP(req, PORT_LED_CFG, BNXT_USE_CHIMP_MB);
3296
3297         if (led_on) {
3298                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3299                 duration = rte_cpu_to_le_16(500);
3300         }
3301         req.port_id = bp->pf.port_id;
3302         req.num_leds = bp->num_leds;
3303         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3304         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3305                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3306                 led_cfg->led_id = bp->leds[i].led_id;
3307                 led_cfg->led_state = led_state;
3308                 led_cfg->led_blink_on = duration;
3309                 led_cfg->led_blink_off = duration;
3310                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3311         }
3312
3313         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3314
3315         HWRM_CHECK_RESULT();
3316         HWRM_UNLOCK();
3317
3318         return rc;
3319 }
3320
3321 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3322                                uint32_t *length)
3323 {
3324         int rc;
3325         struct hwrm_nvm_get_dir_info_input req = {0};
3326         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3327
3328         HWRM_PREP(req, NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
3329
3330         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3331
3332         HWRM_CHECK_RESULT();
3333         HWRM_UNLOCK();
3334
3335         if (!rc) {
3336                 *entries = rte_le_to_cpu_32(resp->entries);
3337                 *length = rte_le_to_cpu_32(resp->entry_length);
3338         }
3339         return rc;
3340 }
3341
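     /*
      * Copy the NVM directory into data: the first two bytes hold the
      * (truncated) entry count and entry length, followed by the packed
      * directory entries read via a DMA bounce buffer.
      */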
3342 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3343 {
3344         int rc;
3345         uint32_t dir_entries;
3346         uint32_t entry_length;
3347         uint8_t *buf;
3348         size_t buflen;
3349         rte_iova_t dma_handle;
3350         struct hwrm_nvm_get_dir_entries_input req = {0};
3351         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3352
3353         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3354         if (rc != 0)
3355                 return rc;
3356
3357         *data++ = dir_entries;
3358         *data++ = entry_length;
3359         len -= 2;
3360         memset(data, 0xff, len);
3361
3362         buflen = dir_entries * entry_length;
3363         buf = rte_malloc("nvm_dir", buflen, 0);
3364         if (buf == NULL)
3365                 return -ENOMEM;
3366         rte_mem_lock_page(buf);
3367         dma_handle = rte_mem_virt2iova(buf);
3368         if (dma_handle == 0) {
3369                 PMD_DRV_LOG(ERR,
3370                         "unable to map response address to physical memory\n");
                     rte_free(buf); /* don't leak the bounce buffer */
3371                 return -ENOMEM;
3372         }
3373         HWRM_PREP(req, NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
3374         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3375         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3376
3377         if (rc == 0)
3378                 memcpy(data, buf, len > buflen ? buflen : len);
3379
3380         rte_free(buf);
3381         HWRM_CHECK_RESULT();
3382         HWRM_UNLOCK();
3383
3384         return rc;
3385 }
3386
3387 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3388                              uint32_t offset, uint32_t length,
3389                              uint8_t *data)
3390 {
3391         int rc;
3392         uint8_t *buf;
3393         rte_iova_t dma_handle;
3394         struct hwrm_nvm_read_input req = {0};
3395         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3396
3397         buf = rte_malloc("nvm_item", length, 0);
3398         if (!buf)
3399                 return -ENOMEM;
3400         rte_mem_lock_page(buf);
3401
3402         dma_handle = rte_mem_virt2iova(buf);
3403         if (dma_handle == 0) {
3404                 PMD_DRV_LOG(ERR,
3405                         "unable to map response address to physical memory\n");
                     rte_free(buf); /* don't leak the bounce buffer */
3406                 return -ENOMEM;
3407         }
3408         HWRM_PREP(req, NVM_READ, BNXT_USE_CHIMP_MB);
3409         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3410         req.dir_idx = rte_cpu_to_le_16(index);
3411         req.offset = rte_cpu_to_le_32(offset);
3412         req.len = rte_cpu_to_le_32(length);
3413         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3414         if (rc == 0)
3415                 memcpy(data, buf, length);
3416
3417         rte_free(buf);
3418         HWRM_CHECK_RESULT();
3419         HWRM_UNLOCK();
3420
3421         return rc;
3422 }
3423
3424 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3425 {
3426         int rc;
3427         struct hwrm_nvm_erase_dir_entry_input req = {0};
3428         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3429
3430         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
3431         req.dir_idx = rte_cpu_to_le_16(index);
3432         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3433         HWRM_CHECK_RESULT();
3434         HWRM_UNLOCK();
3435
3436         return rc;
3437 }
3438
3440 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3441                           uint16_t dir_ordinal, uint16_t dir_ext,
3442                           uint16_t dir_attr, const uint8_t *data,
3443                           size_t data_len)
3444 {
3445         int rc;
3446         struct hwrm_nvm_write_input req = {0};
3447         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3448         rte_iova_t dma_handle;
3449         uint8_t *buf;
3450
3451         buf = rte_malloc("nvm_write", data_len, 0);
3452         if (!buf)
3453                 return -ENOMEM;
3454         rte_mem_lock_page(buf);
3455
3456         dma_handle = rte_mem_virt2iova(buf);
3457         if (dma_handle == 0) {
3458                 PMD_DRV_LOG(ERR,
3459                         "unable to map response address to physical memory\n");
                     rte_free(buf); /* don't leak the bounce buffer */
3460                 return -ENOMEM;
3461         }
3462         memcpy(buf, data, data_len);
3463
3464         HWRM_PREP(req, NVM_WRITE, BNXT_USE_CHIMP_MB);
3465
3466         req.dir_type = rte_cpu_to_le_16(dir_type);
3467         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3468         req.dir_ext = rte_cpu_to_le_16(dir_ext);
3469         req.dir_attr = rte_cpu_to_le_16(dir_attr);
3470         req.dir_data_length = rte_cpu_to_le_32(data_len);
3471         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3472
3473         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3474
3475         rte_free(buf);
3476         HWRM_CHECK_RESULT();
3477         HWRM_UNLOCK();
3478
3479         return rc;
3480 }
3481
3482 static void
3483 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3484 {
3485         uint32_t *count = cbdata;
3486
3487         *count = *count + 1;
3488 }
3489
3490 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3491                                      struct bnxt_vnic_info *vnic __rte_unused)
3492 {
3493         return 0;
3494 }
3495
3496 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3497 {
3498         uint32_t count = 0;
3499
3500         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3501             &count, bnxt_vnic_count_hwrm_stub);
3502
3503         return count;
3504 }
3505
3506 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3507                                         uint16_t *vnic_ids)
3508 {
3509         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3510         struct hwrm_func_vf_vnic_ids_query_output *resp =
3511                                                 bp->hwrm_cmd_resp_addr;
3512         int rc;
3513
3514         /* First query all VNIC ids */
3515         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
3516
3517         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3518         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3519         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3520
3521         if (req.vnic_id_tbl_addr == 0) {
3522                 HWRM_UNLOCK();
3523                 PMD_DRV_LOG(ERR,
3524                 "unable to map VNIC ID table address to physical memory\n");
3525                 return -ENOMEM;
3526         }
3527         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3528         if (rc) {
3529                 HWRM_UNLOCK();
3530                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3531                 return -1;
3532         } else if (resp->error_code) {
3533                 rc = rte_le_to_cpu_16(resp->error_code);
3534                 HWRM_UNLOCK();
3535                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
3536                 return -1;
3537         }
3538         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3539
3540         HWRM_UNLOCK();
3541
3542         return rc;
3543 }
3544
3545 /*
3546  * This function queries the VNIC IDs for the specified VF. It then calls
3547  * vnic_cb to update the necessary vnic_info fields using cbdata, and
3548  * finally calls hwrm_cb to program the new VNIC configuration.
3549  */
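     /*
      * Example usage (a sketch, assuming BNXT_VNIC_INFO_PROMISC is the
      * promiscuous flag from bnxt_vnic.h): force promiscuous mode on
      * every VNIC of VF 0 using the callbacks defined earlier in this
      * file:
      *
      *     uint32_t flags = BNXT_VNIC_INFO_PROMISC;
      *
      *     bnxt_hwrm_func_vf_vnic_query_and_config(bp, 0,
      *             vf_vnic_set_rxmask_cb, &flags,
      *             bnxt_set_rx_mask_no_vlan);
      */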
3550 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3551         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3552         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3553 {
3554         struct bnxt_vnic_info vnic;
3555         int rc = 0;
3556         int i, num_vnic_ids;
3557         uint16_t *vnic_ids;
3558         size_t vnic_id_sz;
3559         size_t sz;
3560
3561         /* First query all VNIC ids */
3562         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3563         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3564                         RTE_CACHE_LINE_SIZE);
3565         if (vnic_ids == NULL) {
3566                 rc = -ENOMEM;
3567                 return rc;
3568         }
3569         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3570                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3571
3572         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3573
3574         if (num_vnic_ids < 0) {
                     rte_free(vnic_ids); /* free the ID table on error */
3575                 return num_vnic_ids;
             }
3576
3577         /* Retrieve each VNIC, apply vnic_cb's update, then reprogram it */
3578
3579         for (i = 0; i < num_vnic_ids; i++) {
3580                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3581                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3582                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3583                 if (rc)
3584                         break;
3585                 if (vnic.mru <= 4)      /* Indicates unallocated */
3586                         continue;
3587
3588                 vnic_cb(&vnic, cbdata);
3589
3590                 rc = hwrm_cb(bp, &vnic);
3591                 if (rc)
3592                         break;
3593         }
3594
3595         rte_free(vnic_ids);
3596
3597         return rc;
3598 }
3599
3600 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3601                                               bool on)
3602 {
3603         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3604         struct hwrm_func_cfg_input req = {0};
3605         int rc;
3606
3607         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3608
3609         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3610         req.enables |= rte_cpu_to_le_32(
3611                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3612         req.vlan_antispoof_mode = on ?
3613                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3614                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3615         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3616
3617         HWRM_CHECK_RESULT();
3618         HWRM_UNLOCK();
3619
3620         return rc;
3621 }
3622
3623 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3624 {
3625         struct bnxt_vnic_info vnic;
3626         uint16_t *vnic_ids;
3627         size_t vnic_id_sz;
3628         int num_vnic_ids, i;
3629         size_t sz;
3630         int rc;
3631
3632         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3633         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3634                         RTE_CACHE_LINE_SIZE);
3635         if (vnic_ids == NULL) {
3636                 rc = -ENOMEM;
3637                 return rc;
3638         }
3639
3640         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3641                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3642
3643         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3644         if (rc <= 0)
3645                 goto exit;
3646         num_vnic_ids = rc;
3647
3648         /*
3649          * Loop through to find the default VNIC ID.
3650          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3651          * by sending the hwrm_func_qcfg command to the firmware.
3652          */
3653         for (i = 0; i < num_vnic_ids; i++) {
3654                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3655                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3656                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3657                                         bp->pf.first_vf_id + vf);
3658                 if (rc)
3659                         goto exit;
3660                 if (vnic.func_default) {
3661                         rte_free(vnic_ids);
3662                         return vnic.fw_vnic_id;
3663                 }
3664         }
3665         /* Could not find a default VNIC. */
3666         PMD_DRV_LOG(ERR, "No default VNIC\n");
3667 exit:
3668         rte_free(vnic_ids);
3669         return -1;
3670 }
3671
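     /*
      * Install an exact-match (EM) flow pointing at dst_id.  Any EM filter
      * already attached to this bnxt_filter_info is freed first; match
      * fields are copied from filter according to its enables bitmap.
      */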
3672 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3673                          uint16_t dst_id,
3674                          struct bnxt_filter_info *filter)
3675 {
3676         int rc = 0;
3677         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3678         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3679         uint32_t enables = 0;
3680
3681         if (filter->fw_em_filter_id != UINT64_MAX)
3682                 bnxt_hwrm_clear_em_filter(bp, filter);
3683
3684         HWRM_PREP(req, CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
3685
3686         req.flags = rte_cpu_to_le_32(filter->flags);
3687
3688         enables = filter->enables |
3689               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3690         req.dst_id = rte_cpu_to_le_16(dst_id);
3691
3692         if (filter->ip_addr_type) {
3693                 req.ip_addr_type = filter->ip_addr_type;
3694                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3695         }
3696         if (enables &
3697             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3698                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3699         if (enables &
3700             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3701                 memcpy(req.src_macaddr, filter->src_macaddr,
3702                        ETHER_ADDR_LEN);
3703         if (enables &
3704             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3705                 memcpy(req.dst_macaddr, filter->dst_macaddr,
3706                        ETHER_ADDR_LEN);
3707         if (enables &
3708             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3709                 req.ovlan_vid = filter->l2_ovlan;
3710         if (enables &
3711             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3712                 req.ivlan_vid = filter->l2_ivlan;
3713         if (enables &
3714             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3715                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3716         if (enables &
3717             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3718                 req.ip_protocol = filter->ip_protocol;
3719         if (enables &
3720             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3721                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3722         if (enables &
3723             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3724                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3725         if (enables &
3726             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3727                 req.src_port = rte_cpu_to_be_16(filter->src_port);
3728         if (enables &
3729             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3730                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3731         if (enables &
3732             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3733                 req.mirror_vnic_id = filter->mirror_vnic_id;
3734
3735         req.enables = rte_cpu_to_le_32(enables);
3736
3737         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
3738
3739         HWRM_CHECK_RESULT();
3740
3741         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3742         HWRM_UNLOCK();
3743
3744         return rc;
3745 }
3746
3747 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3748 {
3749         int rc = 0;
3750         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3751         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3752
3753         if (filter->fw_em_filter_id == UINT64_MAX)
3754                 return 0;
3755
3756         PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
3757         HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
3758
3759         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3760
3761         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
3762
3763         HWRM_CHECK_RESULT();
3764         HWRM_UNLOCK();
3765
3766         filter->fw_em_filter_id = UINT64_MAX;
3767         filter->fw_l2_filter_id = UINT64_MAX;
3768
3769         return 0;
3770 }
3771
3772 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3773                          uint16_t dst_id,
3774                          struct bnxt_filter_info *filter)
3775 {
3776         int rc = 0;
3777         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3778         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3779                                                 bp->hwrm_cmd_resp_addr;
3780         uint32_t enables = 0;
3781
3782         if (filter->fw_ntuple_filter_id != UINT64_MAX)
3783                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3784
3785         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
3786
3787         req.flags = rte_cpu_to_le_32(filter->flags);
3788
3789         enables = filter->enables |
3790               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3791         req.dst_id = rte_cpu_to_le_16(dst_id);
3792
3793
3794         if (filter->ip_addr_type) {
3795                 req.ip_addr_type = filter->ip_addr_type;
3796                 enables |=
3797                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3798         }
3799         if (enables &
3800             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3801                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3802         if (enables &
3803             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3804                 memcpy(req.src_macaddr, filter->src_macaddr,
3805                        ETHER_ADDR_LEN);
3806         /* DST_MACADDR matching is not programmed for ntuple filters:
3807          * if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
3808          *         memcpy(req.dst_macaddr, filter->dst_macaddr, ETHER_ADDR_LEN);
3809          */
3810         if (enables &
3811             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3812                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3813         if (enables &
3814             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3815                 req.ip_protocol = filter->ip_protocol;
3816         if (enables &
3817             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3818                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3819         if (enables &
3820             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3821                 req.src_ipaddr_mask[0] =
3822                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3823         if (enables &
3824             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3825                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3826         if (enables &
3827             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
3828                 req.dst_ipaddr_mask[0] =
3829                         rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]);
3830         if (enables &
3831             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3832                 req.src_port = rte_cpu_to_le_16(filter->src_port);
3833         if (enables &
3834             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3835                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3836         if (enables &
3837             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3838                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3839         if (enables &
3840             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3841                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3842         if (enables &
3843             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3844                 req.mirror_vnic_id = filter->mirror_vnic_id;
3845
3846         req.enables = rte_cpu_to_le_32(enables);
3847
3848         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3849
3850         HWRM_CHECK_RESULT();
3851
3852         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3853         HWRM_UNLOCK();
3854
3855         return rc;
3856 }
3857
3858 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3859                                 struct bnxt_filter_info *filter)
3860 {
3861         int rc = 0;
3862         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3863         struct hwrm_cfa_ntuple_filter_free_output *resp =
3864                                                 bp->hwrm_cmd_resp_addr;
3865
3866         if (filter->fw_ntuple_filter_id == UINT64_MAX)
3867                 return 0;
3868
3869         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
3870
3871         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3872
3873         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3874
3875         HWRM_CHECK_RESULT();
3876         HWRM_UNLOCK();
3877
3878         filter->fw_ntuple_filter_id = UINT64_MAX;
3879
3880         return 0;
3881 }
3882
3883 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3884 {
3885         unsigned int rss_idx, fw_idx, i;
3886
3887         if (vnic->rss_table && vnic->hash_type) {
3888                 /*
3889                  * Fill the RSS hash & redirection table with
3890                  * ring group ids for all VNICs
3891                  */
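                     /*
                      * For each hash table slot, advance fw_idx (wrapping
                      * at rx_cp_nr_rings) to the next ring group with a
                      * valid HW ring ID; if none exists, skip RSS
                      * configuration entirely.
                      */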
3892                 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
3893                         rss_idx++, fw_idx++) {
3894                         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3895                                 fw_idx %= bp->rx_cp_nr_rings;
3896                                 if (vnic->fw_grp_ids[fw_idx] !=
3897                                     INVALID_HW_RING_ID)
3898                                         break;
3899                                 fw_idx++;
3900                         }
3901                         if (i == bp->rx_cp_nr_rings)
3902                                 return 0;
3903                         vnic->rss_table[rss_idx] =
3904                                 vnic->fw_grp_ids[fw_idx];
3905                 }
3906                 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
3907         }
3908         return 0;
3909 }
3910
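     /*
      * Translate the generic bnxt_coal settings into the firmware's
      * completion-ring interrupt aggregation parameters.
      */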
3911 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
3912         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
3913 {
3914         uint16_t flags;
3915
3916         req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
3917
3918         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
3919         req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
3920
3921         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
3922         req->num_cmpl_dma_aggr_during_int =
3923                 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
3924
3925         req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
3926
3927         /* min timer set to 1/2 of interrupt timer */
3928         req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
3929
3930         /* buf timer set to 1/4 of interrupt timer */
3931         req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
3932
3933         req->cmpl_aggr_dma_tmr_during_int =
3934                 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
3935
3936         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
3937                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
3938         req->flags = rte_cpu_to_le_16(flags);
3939 }
3940
3941 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
3942                         struct bnxt_coal *coal, uint16_t ring_id)
3943 {
3944         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
3945         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
3946                                                 bp->hwrm_cmd_resp_addr;
3947         int rc;
3948
3949         /* Set ring coalesce parameters only for Stratus 100G NIC */
3950         if (!bnxt_stratus_device(bp))
3951                 return 0;
3952
3953         HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS, BNXT_USE_CHIMP_MB);
3954         bnxt_hwrm_set_coal_params(coal, &req);
3955         req.ring_id = rte_cpu_to_le_16(ring_id);
3956         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3957         HWRM_CHECK_RESULT();
3958         HWRM_UNLOCK();
3959         return 0;
3960 }
3961
3962 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
3963 {
3964         struct hwrm_port_qstats_ext_input req = {0};
3965         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
3966         struct bnxt_pf_info *pf = &bp->pf;
3967         int rc;
3968
3969         if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
3970               bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
3971                 return 0;
3972
3973         HWRM_PREP(req, PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
3974
3975         req.port_id = rte_cpu_to_le_16(pf->port_id);
3976         if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
3977                 req.tx_stat_host_addr =
3978                         rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3979                 req.tx_stat_size =
3980                         rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
3981         }
3982         if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
3983                 req.rx_stat_host_addr =
3984                         rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3985                 req.rx_stat_size =
3986                         rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
3987         }
3988         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3989
3990         if (rc) {
3991                 bp->fw_rx_port_stats_ext_size = 0;
3992                 bp->fw_tx_port_stats_ext_size = 0;
3993         } else {
3994                 bp->fw_rx_port_stats_ext_size =
3995                         rte_le_to_cpu_16(resp->rx_stat_size);
3996                 bp->fw_tx_port_stats_ext_size =
3997                         rte_le_to_cpu_16(resp->tx_stat_size);
3998         }
3999
4000         HWRM_CHECK_RESULT();
4001         HWRM_UNLOCK();
4002
4003         return rc;
4004 }