net/bnxt: fix registration of VF async event completion ring
dpdk.git: drivers/net/bnxt/bnxt_hwrm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define HWRM_CMD_TIMEOUT		10000
#define HWRM_SPEC_CODE_1_8_3		0x10803
#define HWRM_VERSION_1_9_1		0x10901

struct bnxt_plcmodes_cfg {
	uint32_t	flags;
	uint16_t	jumbo_thresh;
	uint16_t	hds_offset;
	uint16_t	hds_threshold;
};

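/*
 * Return the log2 of the smallest supported hardware page size (16 B
 * through 1 GB) that can hold "size"; page_roundup() below converts
 * that back to a byte count.
 */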
static int page_getenum(size_t size)
{
	if (size <= 1 << 4)
		return 4;
	if (size <= 1 << 12)
		return 12;
	if (size <= 1 << 13)
		return 13;
	if (size <= 1 << 16)
		return 16;
	if (size <= 1 << 21)
		return 21;
	if (size <= 1 << 22)
		return 22;
	if (size <= 1 << 30)
		return 30;
	PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
	return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
	return 1 << page_getenum(size);
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * command is rejected by the ChiMP.
 */

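/*
 * Low-level request/response exchange with the firmware: the request
 * is written a 32-bit word at a time into the selected mailbox window
 * in BAR0, the rest of the window is zeroed, the doorbell is rung, and
 * the response buffer is then polled until its final "valid" byte is
 * set or HWRM_CMD_TIMEOUT iterations elapse.
 */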
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
				  uint32_t msg_len, bool use_kong_mb)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;
	uint16_t max_req_len = bp->max_req_len;
	struct hwrm_short_input short_input = { 0 };
	uint16_t bar_offset = use_kong_mb ?
		GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
	uint16_t mb_trigger_offset = use_kong_mb ?
		GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;

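	/*
	 * Short command mode: the full request is kept in a DMA buffer
	 * and only a small descriptor pointing at it goes through the
	 * mailbox window.
	 */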
	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

		memset(short_cmd_req, 0, bp->max_req_len);
		memcpy(short_cmd_req, req, msg_len);

		short_input.req_type = rte_cpu_to_le_16(req->req_type);
		short_input.signature = rte_cpu_to_le_16(
					HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
		short_input.size = rte_cpu_to_le_16(msg_len);
		short_input.req_addr =
			rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

		data = (uint32_t *)&short_input;
		msg_len = sizeof(short_input);

		/* Sync memory write before updating doorbell */
		rte_wmb();

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + bar_offset + i;
		rte_write32(*data, bar);
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + bar_offset + i;
		rte_write32(0, bar);
	}

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
	rte_write32(1, bar);

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		uint16_t resp_len;

		/* Sanity check on the resp->resp_len */
		rte_rmb();
		resp_len = rte_le_to_cpu_16(resp->resp_len);
		if (resp_len && resp_len <= bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
			    req->req_type);
		return -1;
	}
	return 0;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() checks for errors; on failure it releases the
 * spinlock and returns from the calling function. If a function does not
 * use the regular int return-code convention, HWRM_CHECK_RESULT() should
 * not be used directly; copy and adapt it to suit the function instead.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type, kong) do { \
	rte_spinlock_lock(&bp->hwrm_lock); \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(-1); \
	req.seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) : \
		rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

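/* As HWRM_CHECK_RESULT() below, but without logging the failure. */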
#define HWRM_CHECK_RESULT_SILENT() do {\
	if (rc) { \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
	if (resp->error_code) { \
		rc = rte_le_to_cpu_16(resp->error_code); \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
} while (0)

#define HWRM_CHECK_RESULT() do {\
	if (rc) { \
		PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
			rc = -EACCES; \
		else if (rc > 0) \
			rc = -EINVAL; \
		return rc; \
	} \
	if (resp->error_code) { \
		rc = rte_le_to_cpu_16(resp->error_code); \
		if (resp->resp_len >= 16) { \
			struct hwrm_err_output *tmp_hwrm_err_op = \
						(void *)resp; \
			PMD_DRV_LOG(ERR, \
				"error %d:%d:%08x:%04x\n", \
				rc, tmp_hwrm_err_op->cmd_err, \
				rte_le_to_cpu_32(\
					tmp_hwrm_err_op->opaque_0), \
				rte_le_to_cpu_16(\
					tmp_hwrm_err_op->opaque_1)); \
		} else { \
			PMD_DRV_LOG(ERR, "error %d\n", rc); \
		} \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
			rc = -EACCES; \
		else if (rc > 0) \
			rc = -EINVAL; \
		return rc; \
	} \
} while (0)

#define HWRM_UNLOCK()		rte_spinlock_unlock(&bp->hwrm_lock)

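/*
 * Illustrative sketch (hwrm_foo is not a real command) of the wrapper
 * pattern these macros define; every bnxt_hwrm_*() below follows it:
 *
 *	struct hwrm_foo_input req = {.req_type = 0 };
 *	struct hwrm_foo_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, FOO, BNXT_USE_CHIMP_MB);
 *	... fill request fields ...
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
 *				    BNXT_USE_CHIMP_MB);
 *	HWRM_CHECK_RESULT();	<- on error: unlocks and returns
 *	... read resp fields while the lock is still held ...
 *	HWRM_UNLOCK();
 *	return rc;
 */
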
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mask = 0;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
				 struct bnxt_vnic_info *vnic,
				 uint16_t vlan_count,
				 struct bnxt_vlan_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t mask = 0;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return rc;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/* FIXME: add the multicast flag once multicast address
	 * configuration is supported by ethtool.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
	if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	if (vnic->flags & BNXT_VNIC_INFO_MCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
	if (vnic->mc_addr_cnt) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
		req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
		req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
	}
	if (vlan_table) {
		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
		req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
			 rte_mem_virt2iova(vlan_table));
		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
	}
	req.mask = rte_cpu_to_le_32(mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
			uint16_t vlan_count,
			struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
	struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
						bp->hwrm_cmd_resp_addr;

	/*
	 * Older HWRM versions did not support this command; the
	 * set_rx_mask list was used for anti-spoofing instead. In 1.8.0
	 * the TX-path configuration was removed from the set_rx_mask call
	 * and this command was added.
	 *
	 * The command is also present in 1.7.8.11 and higher, as well as
	 * in 1.7.8.0.
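	 *
	 * bp->fw_ver packs one byte per version component, i.e.
	 * (maj << 24) | (min << 16) | (bld << 8) | rsvd as assembled in
	 * bnxt_hwrm_ver_get(), so the plain integer comparisons below
	 * implement these version gates.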
	 */
	if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
		if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
			if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
					(11)))
				return 0;
		}
	}
	HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(fid);

	req.vlan_tag_mask_tbl_addr =
		rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
			   struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_l2_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_l2_filter_id = UINT64_MAX;

	return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
			 uint16_t dst_id,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		    &dev_conf->rx_adv_conf.vmdq_rx_conf;
	uint32_t enables = 0;
	uint16_t j = dst_id - 1;

	/* TODO: Is there a better way to add VLANs to each VNIC in case
	 * of VMDQ?
	 */
	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
	    conf->pool_map[j].pools & (1UL << j)) {
		PMD_DRV_LOG(DEBUG,
			"Add vlan %u to vmdq pool %u\n",
			conf->pool_map[j].vlan_id, j);

		filter->l2_ivlan = conf->pool_map[j].vlan_id;
		filter->enables |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
	}

	if (filter->fw_l2_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_l2_filter(bp, filter);

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
	      HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
		req.l2_ivlan = filter->l2_ivlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
		req.l2_ivlan_mask = filter->l2_ivlan_mask;
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
		req.src_id = rte_cpu_to_le_32(filter->src_id);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
		req.src_type = filter->src_type;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
	struct hwrm_port_mac_cfg_input req = {.req_type = 0};
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	uint32_t flags = 0;
	int rc;

	if (!ptp)
		return 0;

	HWRM_PREP(req, PORT_MAC_CFG, BNXT_USE_CHIMP_MB);

	if (ptp->rx_filter)
		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
	else
		flags |=
			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
	if (ptp->tx_tstamp_en)
		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
	else
		flags |=
			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
	req.flags = rte_cpu_to_le_32(flags);
	req.enables = rte_cpu_to_le_32
		(HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
	req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_UNLOCK();

	return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
	struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*	if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
	if (ptp)
		return 0;

	HWRM_PREP(req, PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);

	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	if (!(resp->flags &
	      HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) {
		HWRM_UNLOCK();
		return 0;
	}

	ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
	if (!ptp) {
		HWRM_UNLOCK();
		return -ENOMEM;
	}

	ptp->rx_regs[BNXT_PTP_RX_TS_L] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
	ptp->rx_regs[BNXT_PTP_RX_TS_H] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
	ptp->rx_regs[BNXT_PTP_RX_SEQ] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
	ptp->rx_regs[BNXT_PTP_RX_FIFO] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
	ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
	ptp->tx_regs[BNXT_PTP_TX_TS_L] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
	ptp->tx_regs[BNXT_PTP_TX_TS_H] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
	ptp->tx_regs[BNXT_PTP_TX_SEQ] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
	ptp->tx_regs[BNXT_PTP_TX_FIFO] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

	/* Release the HWRM lock once the response has been consumed */
	HWRM_UNLOCK();

	ptp->bp = bp;
	bp->ptp_cfg = ptp;

	return 0;
}

static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t new_max_vfs;
	uint32_t flags;
	int i;

	HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	flags = rte_le_to_cpu_32(resp->flags);
	if (BNXT_PF(bp)) {
		bp->pf.port_id = resp->port_id;
		bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
		new_max_vfs = bp->pdev->max_vfs;
		if (new_max_vfs != bp->pf.max_vfs) {
			if (bp->pf.vf_info)
				rte_free(bp->pf.vf_info);
			bp->pf.vf_info = rte_malloc("bnxt_vf_info",
			    sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
			bp->pf.max_vfs = new_max_vfs;
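			/* Per-VF VLAN and anti-spoof tables are allocated
			 * page-sized and page-aligned, then locked in memory
			 * so firmware can DMA them.
			 */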
			for (i = 0; i < new_max_vfs; i++) {
				bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
				bp->pf.vf_info[i].vlan_table =
					rte_zmalloc("VF VLAN table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_table == NULL)
					PMD_DRV_LOG(ERR,
					"Fail to alloc VLAN table for VF %d\n",
					i);
				else
					rte_mem_lock_page(
						bp->pf.vf_info[i].vlan_table);
				bp->pf.vf_info[i].vlan_as_table =
					rte_zmalloc("VF VLAN AS table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_as_table == NULL)
					PMD_DRV_LOG(ERR,
					"Alloc VLAN AS table for VF %d fail\n",
					i);
				else
					rte_mem_lock_page(
					       bp->pf.vf_info[i].vlan_as_table);
				STAILQ_INIT(&bp->pf.vf_info[i].filter);
			}
		}
	}

	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
	memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	if (BNXT_PF(bp)) {
		if (bp->pf.max_vfs)
			bp->max_vnics = 1;
		else
			bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	} else {
		bp->max_vnics = 1;
	}
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	if (BNXT_PF(bp)) {
		bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
		if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
			bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
			PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
			HWRM_UNLOCK();
			bnxt_hwrm_ptp_qcfg(bp);
			/* Already unlocked; don't fall through and unlock
			 * a lock this function no longer holds.
			 */
			return rc;
		}
	}

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc;

	rc = __bnxt_hwrm_func_qcaps(bp);
	if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
		rc = bnxt_hwrm_func_resc_qcaps(bp);
		if (!rc)
			bp->flags |= BNXT_FLAG_NEW_RM;
	}

	return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET, BNXT_USE_CHIMP_MB);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	if (BNXT_PF(bp)) {
		req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
		memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
		       RTE_MIN(sizeof(req.vf_req_fwd),
			       sizeof(bp->pf.vf_req_fwd)));

		/*
		 * The PF can sniff HWRM API calls issued by the VFs. This
		 * sniffer list can be set up by the Linux driver and then
		 * inherited by the DPDK PF driver. Clear it in FW because
		 * the DPDK PF driver does not support it.
		 */
		req.flags =
		rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
	}

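	/*
	 * async_event_fwd[] is a bitmap of async event IDs (32 events
	 * per 32-bit word) selecting which events the firmware forwards
	 * to this function.
	 */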
	req.async_event_fwd[0] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
				 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
	req.async_event_fwd[1] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
				 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags |= BNXT_FLAG_REGISTERED;

	return rc;
}

int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
{
	if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
		return 0;

	return bnxt_hwrm_func_reserve_vf_resc(bp, true);
}

int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
{
	int rc;
	uint32_t flags = 0;
	uint32_t enables;
	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_vf_cfg_input req = {0};

	HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);

	req.enables = rte_cpu_to_le_32
			(HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS |
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS);

	req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
	req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
					    AGG_RING_MULTIPLIER);
	req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
	req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
					      bp->tx_nr_rings);
	req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
	req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
	if (bp->vf_resv_strategy ==
	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
				HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
				HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
		req.enables |= rte_cpu_to_le_32(enables);
		req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
		req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
		req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
	}

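	/*
	 * With the *_ASSETS_TEST flags set, firmware only checks that
	 * the requested resources could be reserved; it does not commit
	 * the reservation.
	 */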
	if (test)
		flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;

	req.flags = rte_cpu_to_le_32(flags);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (test)
		HWRM_CHECK_RESULT_SILENT();
	else
		HWRM_CHECK_RESULT();

	HWRM_UNLOCK();
	return rc;
}

int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_resource_qcaps_input req = {0};

	HWRM_PREP(req, FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	if (BNXT_VF(bp)) {
		bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
		bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
		bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	}
	bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
	if (bp->vf_resv_strategy >
	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
		bp->vf_resv_strategy =
		HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;

	HWRM_UNLOCK();
	return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t my_version;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];
	uint32_t dev_caps_cfg;

	bp->max_req_len = HWRM_MAX_REQ_LEN;
	HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
		resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
		resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
	bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
		     (resp->hwrm_fw_min_8b << 16) |
		     (resp->hwrm_fw_bld_8b << 8) |
		     resp->hwrm_fw_rsvd_8b;
	PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

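	/*
	 * Pack the driver and firmware interface versions one byte per
	 * component (maj.min.upd) so they can be compared as integers.
	 */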
	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;

	fw_version = resp->hwrm_intf_maj_8b << 16;
	fw_version |= resp->hwrm_intf_min_8b << 8;
	fw_version |= resp->hwrm_intf_upd_8b;
	bp->hwrm_spec_code = fw_version;

	if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
		PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
		rc = -EINVAL;
		goto error;
	}

	if (my_version != fw_version) {
		PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
			PMD_DRV_LOG(INFO,
				"Firmware API version is newer than driver.\n");
			PMD_DRV_LOG(INFO,
				"The driver may be missing features.\n");
		} else {
			PMD_DRV_LOG(INFO,
				"Firmware API version is older than driver.\n");
			PMD_DRV_LOG(INFO,
				"Not all driver features may be functional.\n");
		}
	}

	if (bp->max_req_len > rte_le_to_cpu_16(resp->max_req_win_len)) {
		PMD_DRV_LOG(ERR, "Unsupported request length\n");
		rc = -EINVAL;
	}
	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
	max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
	dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

	if (bp->max_resp_len != max_resp_len) {
		snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
			 bp->pdev->addr.domain, bp->pdev->addr.bus,
			 bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
		bp->hwrm_cmd_resp_dma_addr =
			rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
		if (bp->hwrm_cmd_resp_dma_addr == 0) {
			PMD_DRV_LOG(ERR,
			"Unable to map response buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}
		bp->max_resp_len = max_resp_len;
	}

	if ((dev_caps_cfg &
		HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg &
	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
		PMD_DRV_LOG(DEBUG, "Short command supported\n");

		rte_free(bp->hwrm_short_cmd_req_addr);

		bp->hwrm_short_cmd_req_addr = rte_malloc(type,
							bp->max_req_len, 0);
		if (bp->hwrm_short_cmd_req_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
		bp->hwrm_short_cmd_req_dma_addr =
			rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
		if (bp->hwrm_short_cmd_req_dma_addr == 0) {
			rte_free(bp->hwrm_short_cmd_req_addr);
			PMD_DRV_LOG(ERR,
				"Unable to map buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}

		bp->flags |= BNXT_FLAG_SHORT_CMD;
	}
	if (dev_caps_cfg &
	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
		bp->flags |= BNXT_FLAG_KONG_MB_EN;
		PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
	}
	if (dev_caps_cfg &
	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
		PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");

error:
	HWRM_UNLOCK();
	return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
	req.flags = flags;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags &= ~BNXT_FLAG_REGISTERED;

	return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG, BNXT_USE_CHIMP_MB);

	if (conf->link_up) {
		/* Setting a fixed speed while AutoNeg is on, so disable it */
		if (bp->link_info.auto_mode && conf->link_speed) {
			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
			PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
		}

		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			/* No speeds specified. Enable AutoNeg - all speeds */
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		}
		/* AutoNeg - advertise the specified speeds. */
		if (conf->auto_link_speed_mask &&
		    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
			req.auto_link_speed_mask =
				conf->auto_link_speed_mask;
			enables |=
			HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
		}

		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
		PMD_DRV_LOG(INFO, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	link_info->phy_link_status = resp->link;
	link_info->link_up =
		(link_info->phy_link_status ==
		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	link_info->duplex = resp->duplex_cfg;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->phy_type = resp->phy_type;
	link_info->media_type = resp->media_type;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	HWRM_UNLOCK();

	PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
	PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
	PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
	PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
	PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
		    link_info->auto_link_speed_mask);
	PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
		    link_info->force_link_speed);

	return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int i;

	HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);

	req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
	/* HWRM Version >= 1.9.1 */
	if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
		req.drv_qmap_cap =
			HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

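/*
 * Token-pasting helper: copies resp->queue_id<x> and
 * resp->queue_id<x>_service_profile into bp->cos_queue[x].
 */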
#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	HWRM_UNLOCK();

	if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
		bp->tx_cosq_id = bp->cos_queue[0].id;
	} else {
		/* iterate and find the COSq profile to use for Tx */
		for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
			if (bp->cos_queue[i].profile ==
				HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
				bp->tx_cosq_id = bp->cos_queue[i].id;
				break;
			}
		}
	}
	PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);

	return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
	int rc = 0;
	uint32_t enables = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);
	req.length = rte_cpu_to_le_32(ring->ring_size);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
			enables |=
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		break;
	default:
		PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
			ring_type);
		HWRM_UNLOCK();
		return -1;
	}
	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			PMD_DRV_LOG(ERR,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			PMD_DRV_LOG(ERR,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			PMD_DRV_LOG(ERR,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		default:
			PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	HWRM_UNLOCK();
	return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE, BNXT_USE_CHIMP_MB);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		HWRM_UNLOCK();

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			return rc;
		default:
			PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	HWRM_UNLOCK();
	return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
	int rc = 0;
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	bp->grp_info[idx].fw_grp_id =
	    rte_le_to_cpu_16(resp->ring_group_id);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE, BNXT_USE_CHIMP_MB);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
	return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	int rc = 0;
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return rc;

	HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
				unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);

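	/* An update period of 0 requests no periodic stats DMA from
	 * firmware; the PMD reads the stats context on demand.
	 */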
	req.update_period_ms = rte_cpu_to_le_32(0);

	req.stats_dma_addr =
	    rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
				unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0, i, j;
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
		vnic->start_grp_id, vnic->end_grp_id);
	for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;

	vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
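	/* MRU = MTU plus Ethernet header, CRC, and one VLAN tag */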
1350         vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1351                                 ETHER_CRC_LEN + VLAN_TAG_SIZE;
1352         HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);
1353
1354         if (vnic->func_default)
1355                 req.flags =
1356                         rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1357         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1358
1359         HWRM_CHECK_RESULT();
1360
1361         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1362         HWRM_UNLOCK();
1363         PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1364         return rc;
1365 }
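/*
 * Editor's note: the MRU computed above is the largest L2 frame the
 * VNIC will accept -- the MTU plus Ethernet overhead.  With DPDK's
 * usual constants (ETHER_HDR_LEN = 14, ETHER_CRC_LEN = 4,
 * VLAN_TAG_SIZE = 4), a standard 1500-byte MTU gives:
 *
 *     mru = 1500 + 14 + 4 + 4 = 1522 bytes
 */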
1366
1367 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1368                                         struct bnxt_vnic_info *vnic,
1369                                         struct bnxt_plcmodes_cfg *pmode)
1370 {
1371         int rc = 0;
1372         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1373         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1374
1375         HWRM_PREP(req, VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
1376
1377         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1378
1379         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1380
1381         HWRM_CHECK_RESULT();
1382
1383         pmode->flags = rte_le_to_cpu_32(resp->flags);
1384         /* dflt_vnic bit doesn't exist in the _cfg command */
1385         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1386         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1387         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1388         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1389
1390         HWRM_UNLOCK();
1391
1392         return rc;
1393 }
1394
1395 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1396                                        struct bnxt_vnic_info *vnic,
1397                                        struct bnxt_plcmodes_cfg *pmode)
1398 {
1399         int rc = 0;
1400         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1401         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1402
1403         HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1404
1405         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1406         req.flags = rte_cpu_to_le_32(pmode->flags);
1407         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1408         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1409         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1410         req.enables = rte_cpu_to_le_32(
1411             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1412             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1413             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1414         );
1415
1416         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1417
1418         HWRM_CHECK_RESULT();
1419         HWRM_UNLOCK();
1420
1421         return rc;
1422 }
1423
1424 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1425 {
1426         int rc = 0;
1427         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1428         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1429         uint32_t ctx_enable_flag = 0;
1430         struct bnxt_plcmodes_cfg pmodes;
1431
1432         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1433                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1434                 return rc;
1435         }
1436
1437         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1438         if (rc)
1439                 return rc;
1440
1441         HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);
1442
1443         /* Only RSS is supported for now; TODO: COS & LB contexts */
1444         req.enables =
1445             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
1446         if (vnic->lb_rule != (uint16_t)HWRM_NA_SIGNATURE)
1447                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1448         if (vnic->cos_rule != (uint16_t)HWRM_NA_SIGNATURE)
1449                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1450         if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
1451                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1452                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1453         }
1454         req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1455         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1456         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1457         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1458         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1459         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1460         req.mru = rte_cpu_to_le_16(vnic->mru);
1461         if (vnic->func_default)
1462                 req.flags |=
1463                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1464         if (vnic->vlan_strip)
1465                 req.flags |=
1466                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1467         if (vnic->bd_stall)
1468                 req.flags |=
1469                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1470         if (vnic->roce_dual)
1471                 req.flags |= rte_cpu_to_le_32(
1472                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1473         if (vnic->roce_only)
1474                 req.flags |= rte_cpu_to_le_32(
1475                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1476         if (vnic->rss_dflt_cr)
1477                 req.flags |= rte_cpu_to_le_32(
1478                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1479
1480         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1481
1482         HWRM_CHECK_RESULT();
1483         HWRM_UNLOCK();
1484
1485         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1486
1487         return rc;
1488 }
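/*
 * Editor's note: bnxt_hwrm_vnic_cfg() brackets the VNIC_CFG command
 * with a placement-mode query and re-apply (pmodes above), presumably
 * because reconfiguring the VNIC can disturb the previously programmed
 * jumbo/HDS placement settings; saving and restoring them keeps the
 * configuration idempotent.
 */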
1489
1490 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1491                 int16_t fw_vf_id)
1492 {
1493         int rc = 0;
1494         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1495         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1496
1497         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1498                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1499                 return rc;
1500         }
1501         HWRM_PREP(req, VNIC_QCFG, BNXT_USE_CHIMP_MB);
1502
1503         req.enables =
1504                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1505         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1506         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1507
1508         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1509
1510         HWRM_CHECK_RESULT();
1511
1512         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1513         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1514         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1515         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1516         vnic->mru = rte_le_to_cpu_16(resp->mru);
1517         vnic->func_default = rte_le_to_cpu_32(
1518                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1519         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1520                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1521         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1522                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1523         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1524                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1525         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1526                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1527         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1528                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1529
1530         HWRM_UNLOCK();
1531
1532         return rc;
1533 }
1534
1535 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1536 {
1537         int rc = 0;
1538         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1539         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1540                                                 bp->hwrm_cmd_resp_addr;
1541
1542         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1543
1544         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1545
1546         HWRM_CHECK_RESULT();
1547
1548         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1549         HWRM_UNLOCK();
1550         PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1551
1552         return rc;
1553 }
1554
1555 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1556 {
1557         int rc = 0;
1558         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1559         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1560                                                 bp->hwrm_cmd_resp_addr;
1561
1562         if (vnic->rss_rule == INVALID_HW_RING_ID) {
1563                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1564                 return rc;
1565         }
1566         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
1567
1568         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1569
1570         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1571
1572         HWRM_CHECK_RESULT();
1573         HWRM_UNLOCK();
1574
1575         vnic->rss_rule = INVALID_HW_RING_ID;
1576
1577         return rc;
1578 }
1579
1580 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1581 {
1582         int rc = 0;
1583         struct hwrm_vnic_free_input req = {.req_type = 0 };
1584         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1585
1586         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1587                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1588                 return rc;
1589         }
1590
1591         HWRM_PREP(req, VNIC_FREE, BNXT_USE_CHIMP_MB);
1592
1593         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1594
1595         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1596
1597         HWRM_CHECK_RESULT();
1598         HWRM_UNLOCK();
1599
1600         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1601         return rc;
1602 }
1603
1604 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1605                            struct bnxt_vnic_info *vnic)
1606 {
1607         int rc = 0;
1608         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1609         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1610
1611         HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
1612
1613         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1614         req.hash_mode_flags = vnic->hash_mode;
1615
1616         req.ring_grp_tbl_addr =
1617             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1618         req.hash_key_tbl_addr =
1619             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1620         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1621
1622         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1623
1624         HWRM_CHECK_RESULT();
1625         HWRM_UNLOCK();
1626
1627         return rc;
1628 }
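/*
 * Editor's sketch: the RSS_CFG request above only carries DMA pointers;
 * the caller is expected to have filled the indirection table and hash
 * key beforehand.  A hypothetical filler -- the 64-entry table size and
 * the round-robin policy are illustrative assumptions, not taken from
 * this file; it assumes the rss_table/fw_grp_ids members of
 * struct bnxt_vnic_info have been allocated by the caller:
 */
static void __rte_unused
example_fill_rss_table(struct bnxt_vnic_info *vnic, uint16_t nr_grps)
{
	uint16_t *tbl = (uint16_t *)vnic->rss_table;
	int i;

	/* Spread the VNIC's ring groups round-robin over the HW table. */
	for (i = 0; i < 64; i++)
		tbl[i] = rte_cpu_to_le_16(vnic->fw_grp_ids[i % nr_grps]);
}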
1629
1630 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1631                         struct bnxt_vnic_info *vnic)
1632 {
1633         int rc = 0;
1634         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1635         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1636         uint16_t size;
1637
1638         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1639                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1640                 return rc;
1641         }
1642
1643         HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1644
1645         req.flags = rte_cpu_to_le_32(
1646                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1647
1648         req.enables = rte_cpu_to_le_32(
1649                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1650
1651         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1652         size -= RTE_PKTMBUF_HEADROOM;
1653
1654         req.jumbo_thresh = rte_cpu_to_le_16(size);
1655         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1656
1657         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1658
1659         HWRM_CHECK_RESULT();
1660         HWRM_UNLOCK();
1661
1662         return rc;
1663 }
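/*
 * Editor's note: with a default DPDK mbuf pool (data room of
 * RTE_MBUF_DEFAULT_BUF_SIZE = 2048 + 128 bytes) the computation above
 * yields jumbo_thresh = 2048 after subtracting RTE_PKTMBUF_HEADROOM:
 * frames that do not fit one mbuf's usable data area are then handled
 * by jumbo placement.
 */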
1664
1665 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1666                         struct bnxt_vnic_info *vnic, bool enable)
1667 {
1668         int rc = 0;
1669         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1670         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1671
1672         HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
1673
1674         if (enable) {
1675                 req.enables = rte_cpu_to_le_32(
1676                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1677                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1678                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1679                 req.flags = rte_cpu_to_le_32(
1680                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1681                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1682                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1683                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1684                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1685                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1686                 req.max_agg_segs = rte_cpu_to_le_16(5);
1687                 req.max_aggs =
1688                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1689                 req.min_agg_len = rte_cpu_to_le_32(512);
1690         }
1691         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1692
1693         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1694
1695         HWRM_CHECK_RESULT();
1696         HWRM_UNLOCK();
1697
1698         return rc;
1699 }
1700
1701 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1702 {
1703         struct hwrm_func_cfg_input req = {0};
1704         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1705         int rc;
1706
1707         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1708         req.enables = rte_cpu_to_le_32(
1709                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1710         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1711         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1712
1713         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
1714
1715         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1716         HWRM_CHECK_RESULT();
1717         HWRM_UNLOCK();
1718
1719         bp->pf.vf_info[vf].random_mac = false;
1720
1721         return rc;
1722 }
1723
1724 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1725                                   uint64_t *dropped)
1726 {
1727         int rc = 0;
1728         struct hwrm_func_qstats_input req = {.req_type = 0};
1729         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1730
1731         HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
1732
1733         req.fid = rte_cpu_to_le_16(fid);
1734
1735         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1736
1737         HWRM_CHECK_RESULT();
1738
1739         if (dropped)
1740                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1741
1742         HWRM_UNLOCK();
1743
1744         return rc;
1745 }
1746
1747 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1748                           struct rte_eth_stats *stats)
1749 {
1750         int rc = 0;
1751         struct hwrm_func_qstats_input req = {.req_type = 0};
1752         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1753
1754         HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
1755
1756         req.fid = rte_cpu_to_le_16(fid);
1757
1758         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1759
1760         HWRM_CHECK_RESULT();
1761
1762         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1763         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1764         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1765         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1766         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1767         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1768
1769         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1770         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1771         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1772         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1773         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1774         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1775
1776         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
1777         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
1778         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
1779
1780         HWRM_UNLOCK();
1781
1782         return rc;
1783 }
1784
1785 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1786 {
1787         int rc = 0;
1788         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1789         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1790
1791         HWRM_PREP(req, FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
1792
1793         req.fid = rte_cpu_to_le_16(fid);
1794
1795         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1796
1797         HWRM_CHECK_RESULT();
1798         HWRM_UNLOCK();
1799
1800         return rc;
1801 }
1802
1803 /*
1804  * HWRM utility functions
1805  */
1806
1807 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1808 {
1809         unsigned int i;
1810         int rc = 0;
1811
1812         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1813                 struct bnxt_tx_queue *txq;
1814                 struct bnxt_rx_queue *rxq;
1815                 struct bnxt_cp_ring_info *cpr;
1816
1817                 if (i >= bp->rx_cp_nr_rings) {
1818                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1819                         cpr = txq->cp_ring;
1820                 } else {
1821                         rxq = bp->rx_queues[i];
1822                         cpr = rxq->cp_ring;
1823                 }
1824
1825                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1826                 if (rc)
1827                         return rc;
1828         }
1829         return 0;
1830 }
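/*
 * Editor's note: this loop (and its two siblings below) iterates one
 * flat index across all completion rings: [0, rx_cp_nr_rings) selects
 * RX queues, [rx_cp_nr_rings, rx + tx) selects TX queues.  For
 * example, with 4 RX and 2 TX rings, i == 5 resolves to tx_queues[1].
 */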
1831
1832 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1833 {
1834         int rc;
1835         unsigned int i;
1836         struct bnxt_cp_ring_info *cpr;
1837
1838         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1839
1840                 if (i >= bp->rx_cp_nr_rings) {
1841                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1842                 } else {
1843                         cpr = bp->rx_queues[i]->cp_ring;
1844                         bp->grp_info[i].fw_stats_ctx = -1;
1845                 }
1846                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1847                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1848                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1849                         if (rc)
1850                                 return rc;
1851                 }
1852         }
1853         return 0;
1854 }
1855
1856 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1857 {
1858         unsigned int i;
1859         int rc = 0;
1860
1861         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1862                 struct bnxt_tx_queue *txq;
1863                 struct bnxt_rx_queue *rxq;
1864                 struct bnxt_cp_ring_info *cpr;
1865
1866                 if (i >= bp->rx_cp_nr_rings) {
1867                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1868                         cpr = txq->cp_ring;
1869                 } else {
1870                         rxq = bp->rx_queues[i];
1871                         cpr = rxq->cp_ring;
1872                 }
1873
1874                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1875
1876                 if (rc)
1877                         return rc;
1878         }
1879         return rc;
1880 }
1881
1882 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1883 {
1884         uint16_t idx;
1885         uint32_t rc = 0;
1886
1887         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1888
1889                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1890                         continue;
1891
1892                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1893
1894                 if (rc)
1895                         return rc;
1896         }
1897         return rc;
1898 }
1899
1900 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1901 {
1902         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1903
1904         bnxt_hwrm_ring_free(bp, cp_ring,
1905                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1906         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1907         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1908                         sizeof(*cpr->cp_desc_ring));
1909         cpr->cp_raw_cons = 0;
1910 }
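/*
 * Editor's note: once a ring is returned to the firmware, its host
 * descriptor memory is zeroed and the raw consumer index reset,
 * presumably so a subsequent re-allocation of the queue starts clean
 * instead of re-reading stale completion records.
 */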
1911
1912 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
1913 {
1914         struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
1915         struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1916         struct bnxt_ring *ring = rxr->rx_ring_struct;
1917         struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1918
1919         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1920                 bnxt_hwrm_ring_free(bp, ring,
1921                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1922                 ring->fw_ring_id = INVALID_HW_RING_ID;
1923                 bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
1924                 memset(rxr->rx_desc_ring, 0,
1925                        rxr->rx_ring_struct->ring_size *
1926                        sizeof(*rxr->rx_desc_ring));
1927                 memset(rxr->rx_buf_ring, 0,
1928                        rxr->rx_ring_struct->ring_size *
1929                        sizeof(*rxr->rx_buf_ring));
1930                 rxr->rx_prod = 0;
1931         }
1932         ring = rxr->ag_ring_struct;
1933         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1934                 bnxt_hwrm_ring_free(bp, ring,
1935                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1936                 ring->fw_ring_id = INVALID_HW_RING_ID;
1937                 memset(rxr->ag_buf_ring, 0,
1938                        rxr->ag_ring_struct->ring_size *
1939                        sizeof(*rxr->ag_buf_ring));
1940                 rxr->ag_prod = 0;
1941                 bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
1942         }
1943         if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
1944                 bnxt_free_cp_ring(bp, cpr);
1945
1946         bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
1947 }
1948
1949 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1950 {
1951         unsigned int i;
1952
1953         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1954                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1955                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1956                 struct bnxt_ring *ring = txr->tx_ring_struct;
1957                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1958
1959                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1960                         bnxt_hwrm_ring_free(bp, ring,
1961                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1962                         ring->fw_ring_id = INVALID_HW_RING_ID;
1963                         memset(txr->tx_desc_ring, 0,
1964                                         txr->tx_ring_struct->ring_size *
1965                                         sizeof(*txr->tx_desc_ring));
1966                         memset(txr->tx_buf_ring, 0,
1967                                         txr->tx_ring_struct->ring_size *
1968                                         sizeof(*txr->tx_buf_ring));
1969                         txr->tx_prod = 0;
1970                         txr->tx_cons = 0;
1971                 }
1972                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1973                         bnxt_free_cp_ring(bp, cpr);
1974                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1975                 }
1976         }
1977
1978         for (i = 0; i < bp->rx_cp_nr_rings; i++)
1979                 bnxt_free_hwrm_rx_ring(bp, i);
1980
1981         return 0;
1982 }
1983
1984 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1985 {
1986         uint16_t i;
1987         uint32_t rc = 0;
1988
1989         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1990                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1991                 if (rc)
1992                         return rc;
1993         }
1994         return rc;
1995 }
1996
1997 void bnxt_free_hwrm_resources(struct bnxt *bp)
1998 {
1999         /* Free the HWRM command buffers allocated via rte_malloc() */
2000         rte_free(bp->hwrm_cmd_resp_addr);
2001         rte_free(bp->hwrm_short_cmd_req_addr);
2002         bp->hwrm_cmd_resp_addr = NULL;
2003         bp->hwrm_short_cmd_req_addr = NULL;
2004         bp->hwrm_cmd_resp_dma_addr = 0;
2005         bp->hwrm_short_cmd_req_dma_addr = 0;
2006 }
2007
2008 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2009 {
2010         struct rte_pci_device *pdev = bp->pdev;
2011         char type[RTE_MEMZONE_NAMESIZE];
2012
2013         snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
2014                  pdev->addr.domain, pdev->addr.bus, pdev->addr.devid,
2015                  pdev->addr.function);
2015         bp->max_resp_len = HWRM_MAX_RESP_LEN;
2016         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2017         if (bp->hwrm_cmd_resp_addr == NULL)
2018                 return -ENOMEM;
2019         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
2020         bp->hwrm_cmd_resp_dma_addr =
2021                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
2022         if (bp->hwrm_cmd_resp_dma_addr == 0) {
2023                 PMD_DRV_LOG(ERR,
2024                         "unable to map response address to physical memory\n");
2025                 return -ENOMEM;
2026         }
2027         rte_spinlock_init(&bp->hwrm_lock);
2028
2029         return 0;
2030 }
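/*
 * Editor's sketch: the two helpers above are paired across the device
 * lifecycle (simplified; error paths elided):
 *
 *     if (bnxt_alloc_hwrm_resources(bp))   // during probe/init
 *             goto init_err;
 *     ...issue HWRM commands...
 *     bnxt_free_hwrm_resources(bp);        // during close/remove
 */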
2031
2032 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2033 {
2034         struct bnxt_filter_info *filter;
2035         int rc = 0;
2036
2037         STAILQ_FOREACH(filter, &vnic->filter, next) {
2038                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2039                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2040                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2041                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2042                 else
2043                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2044                 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
2045                 /* Keep clearing the remaining filters even on error. */
2047         }
2048         return rc;
2049 }
2050
2051 static int
2052 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2053 {
2054         struct bnxt_filter_info *filter;
2055         struct rte_flow *flow;
2056         int rc = 0;
2057
2058         STAILQ_FOREACH(flow, &vnic->flow_list, next) {
2059                 filter = flow->filter;
2060                 PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
2061                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2062                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2063                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2064                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2065                 else
2066                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2067
2068                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2069                 rte_free(flow);
2070                 /* Keep clearing the remaining flows even on error. */
2072         }
2073         return rc;
2074 }
2075
2076 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2077 {
2078         struct bnxt_filter_info *filter;
2079         int rc = 0;
2080
2081         STAILQ_FOREACH(filter, &vnic->filter, next) {
2082                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2083                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2084                                                      filter);
2085                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2086                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2087                                                          filter);
2088                 else
2089                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2090                                                      filter);
2091                 if (rc)
2092                         break;
2093         }
2094         return rc;
2095 }
2096
2097 void bnxt_free_tunnel_ports(struct bnxt *bp)
2098 {
2099         if (bp->vxlan_port_cnt)
2100                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2101                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2102         bp->vxlan_port = 0;
2103         if (bp->geneve_port_cnt)
2104                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2105                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2106         bp->geneve_port = 0;
2107 }
2108
2109 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2110 {
2111         int i;
2112
2113         if (bp->vnic_info == NULL)
2114                 return;
2115
2116         /*
2117          * Cleanup VNICs in reverse order, to make sure the L2 filter
2118          * from vnic0 is last to be cleaned up.
2119          */
2120         for (i = bp->nr_vnics - 1; i >= 0; i--) {
2121                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2122
2123                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2124
2125                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2126
2127                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2128
2129                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2130
2131                 bnxt_hwrm_vnic_free(bp, vnic);
2132
2133                 rte_free(vnic->fw_grp_ids);
2134         }
2135         /* Ring resources */
2136         bnxt_free_all_hwrm_rings(bp);
2137         bnxt_free_all_hwrm_ring_grps(bp);
2138         bnxt_free_all_hwrm_stat_ctxs(bp);
2139         bnxt_free_tunnel_ports(bp);
2140 }
2141
2142 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2143 {
2144         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2145
2146         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2147                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2148
2149         switch (conf_link_speed) {
2150         case ETH_LINK_SPEED_10M_HD:
2151         case ETH_LINK_SPEED_100M_HD:
2152                 /* FALLTHROUGH */
2153                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2154         }
2155         return hw_link_duplex;
2156 }
2157
2158 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2159 {
2160         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2161 }
2162
2163 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2164 {
2165         uint16_t eth_link_speed = 0;
2166
2167         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2168                 return ETH_LINK_SPEED_AUTONEG;
2169
2170         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2171         case ETH_LINK_SPEED_100M:
2172         case ETH_LINK_SPEED_100M_HD:
2173                 /* FALLTHROUGH */
2174                 eth_link_speed =
2175                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2176                 break;
2177         case ETH_LINK_SPEED_1G:
2178                 eth_link_speed =
2179                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2180                 break;
2181         case ETH_LINK_SPEED_2_5G:
2182                 eth_link_speed =
2183                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2184                 break;
2185         case ETH_LINK_SPEED_10G:
2186                 eth_link_speed =
2187                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2188                 break;
2189         case ETH_LINK_SPEED_20G:
2190                 eth_link_speed =
2191                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2192                 break;
2193         case ETH_LINK_SPEED_25G:
2194                 eth_link_speed =
2195                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2196                 break;
2197         case ETH_LINK_SPEED_40G:
2198                 eth_link_speed =
2199                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2200                 break;
2201         case ETH_LINK_SPEED_50G:
2202                 eth_link_speed =
2203                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2204                 break;
2205         case ETH_LINK_SPEED_100G:
2206                 eth_link_speed =
2207                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2208                 break;
2209         default:
2210                 PMD_DRV_LOG(ERR,
2211                         "Unsupported link speed %d; default to AUTO\n",
2212                         conf_link_speed);
2213                 break;
2214         }
2215         return eth_link_speed;
2216 }
2217
2218 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2219                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2220                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2221                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2222
2223 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2224 {
2225         uint32_t one_speed;
2226
2227         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2228                 return 0;
2229
2230         if (link_speed & ETH_LINK_SPEED_FIXED) {
2231                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2232
2233                 if (one_speed & (one_speed - 1)) {
2234                         PMD_DRV_LOG(ERR,
2235                                 "Invalid advertised speeds (%u) for port %u\n",
2236                                 link_speed, port_id);
2237                         return -EINVAL;
2238                 }
2239                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2240                         PMD_DRV_LOG(ERR,
2241                                 "Unsupported advertised speed (%u) for port %u\n",
2242                                 link_speed, port_id);
2243                         return -EINVAL;
2244                 }
2245         } else {
2246                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2247                         PMD_DRV_LOG(ERR,
2248                                 "Unsupported advertised speeds (%u) for port %u\n",
2249                                 link_speed, port_id);
2250                         return -EINVAL;
2251                 }
2252         }
2253         return 0;
2254 }
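/*
 * Editor's note: the (one_speed & (one_speed - 1)) test above is the
 * standard single-bit (power-of-two) check: a fixed-speed request must
 * advertise exactly one speed.  E.g. 0x08 & 0x07 == 0 is accepted,
 * while 0x0C & 0x0B == 0x08 (two speed bits set) is rejected.
 */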
2255
2256 static uint16_t
2257 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2258 {
2259         uint16_t ret = 0;
2260
2261         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2262                 if (bp->link_info.support_speeds)
2263                         return bp->link_info.support_speeds;
2264                 link_speed = BNXT_SUPPORTED_SPEEDS;
2265         }
2266
2267         if (link_speed & ETH_LINK_SPEED_100M)
2268                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2269         if (link_speed & ETH_LINK_SPEED_100M_HD)
2270                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2271         if (link_speed & ETH_LINK_SPEED_1G)
2272                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2273         if (link_speed & ETH_LINK_SPEED_2_5G)
2274                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2275         if (link_speed & ETH_LINK_SPEED_10G)
2276                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2277         if (link_speed & ETH_LINK_SPEED_20G)
2278                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2279         if (link_speed & ETH_LINK_SPEED_25G)
2280                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2281         if (link_speed & ETH_LINK_SPEED_40G)
2282                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2283         if (link_speed & ETH_LINK_SPEED_50G)
2284                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2285         if (link_speed & ETH_LINK_SPEED_100G)
2286                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2287         return ret;
2288 }
2289
2290 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2291 {
2292         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2293
2294         switch (hw_link_speed) {
2295         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2296                 eth_link_speed = ETH_SPEED_NUM_100M;
2297                 break;
2298         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2299                 eth_link_speed = ETH_SPEED_NUM_1G;
2300                 break;
2301         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2302                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2303                 break;
2304         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2305                 eth_link_speed = ETH_SPEED_NUM_10G;
2306                 break;
2307         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2308                 eth_link_speed = ETH_SPEED_NUM_20G;
2309                 break;
2310         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2311                 eth_link_speed = ETH_SPEED_NUM_25G;
2312                 break;
2313         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2314                 eth_link_speed = ETH_SPEED_NUM_40G;
2315                 break;
2316         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2317                 eth_link_speed = ETH_SPEED_NUM_50G;
2318                 break;
2319         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2320                 eth_link_speed = ETH_SPEED_NUM_100G;
2321                 break;
2322         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2323         default:
2324                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2325                         hw_link_speed);
2326                 break;
2327         }
2328         return eth_link_speed;
2329 }
2330
2331 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2332 {
2333         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2334
2335         switch (hw_link_duplex) {
2336         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2337         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2338                 /* FALLTHROUGH */
2339                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2340                 break;
2341         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2342                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2343                 break;
2344         default:
2345                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2346                         hw_link_duplex);
2347                 break;
2348         }
2349         return eth_link_duplex;
2350 }
2351
2352 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2353 {
2354         int rc = 0;
2355         struct bnxt_link_info *link_info = &bp->link_info;
2356
2357         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2358         if (rc) {
2359                 PMD_DRV_LOG(ERR,
2360                         "Get link config failed with rc %d\n", rc);
2361                 goto exit;
2362         }
2363         if (link_info->link_speed)
2364                 link->link_speed =
2365                         bnxt_parse_hw_link_speed(link_info->link_speed);
2366         else
2367                 link->link_speed = ETH_SPEED_NUM_NONE;
2368         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2369         link->link_status = link_info->link_up;
2370         link->link_autoneg = link_info->auto_mode ==
2371                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2372                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2373 exit:
2374         return rc;
2375 }
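/*
 * Editor's sketch: a typical caller of bnxt_get_hwrm_link_config() is
 * the ethdev link_update hook.  A hypothetical, simplified caller
 * (rte_eth_linkstatus_set() is the generic ethdev helper, not part of
 * this file):
 *
 *     struct rte_eth_link link = { 0 };
 *
 *     if (bnxt_get_hwrm_link_config(bp, &link) == 0)
 *             rte_eth_linkstatus_set(bp->eth_dev, &link);
 */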
2376
2377 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2378 {
2379         int rc = 0;
2380         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2381         struct bnxt_link_info link_req;
2382         uint16_t speed, autoneg;
2383
2384         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2385                 return 0;
2386
2387         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2388                         bp->eth_dev->data->port_id);
2389         if (rc)
2390                 goto error;
2391
2392         memset(&link_req, 0, sizeof(link_req));
2393         link_req.link_up = link_up;
2394         if (!link_up)
2395                 goto port_phy_cfg;
2396
2397         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2398         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2399         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2400         /* Autonegotiation can be used only when the firmware allows it */
2401         if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2402                                 bp->link_info.force_link_speed)) {
2403                 link_req.phy_flags |=
2404                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2405                 link_req.auto_link_speed_mask =
2406                         bnxt_parse_eth_link_speed_mask(bp,
2407                                                        dev_conf->link_speeds);
2408         } else {
2409                 if (bp->link_info.phy_type ==
2410                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2411                     bp->link_info.phy_type ==
2412                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2413                     bp->link_info.media_type ==
2414                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2415                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2416                         return -EINVAL;
2417                 }
2418
2419                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2420                 /* If user wants a particular speed try that first. */
2421                 if (speed)
2422                         link_req.link_speed = speed;
2423                 else if (bp->link_info.force_link_speed)
2424                         link_req.link_speed = bp->link_info.force_link_speed;
2425                 else
2426                         link_req.link_speed = bp->link_info.auto_link_speed;
2427         }
2428         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2429         link_req.auto_pause = bp->link_info.auto_pause;
2430         link_req.force_pause = bp->link_info.force_pause;
2431
2432 port_phy_cfg:
2433         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2434         if (rc) {
2435                 PMD_DRV_LOG(ERR,
2436                         "Set link config failed with rc %d\n", rc);
2437         }
2438
2439 error:
2440         return rc;
2441 }
2442
2443 /* JIRA 22088 */
2444 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2445 {
2446         struct hwrm_func_qcfg_input req = {0};
2447         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2448         uint16_t flags;
2449         int rc = 0;
2450
2451         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2452         req.fid = rte_cpu_to_le_16(0xffff);
2453
2454         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2455
2456         HWRM_CHECK_RESULT();
2457
2458         /* Hard-coded 0xfff VLAN ID mask */
2459         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2460         flags = rte_le_to_cpu_16(resp->flags);
2461         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2462                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2463
2464         if (BNXT_VF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
2465                 bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
2466                 PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
2467         }
2468
2469         switch (resp->port_partition_type) {
2470         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2471         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2472         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2473                 /* FALLTHROUGH */
2474                 bp->port_partition_type = resp->port_partition_type;
2475                 break;
2476         default:
2477                 bp->port_partition_type = 0;
2478                 break;
2479         }
2480
2481         HWRM_UNLOCK();
2482
2483         return rc;
2484 }
2485
2486 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2487                                    struct hwrm_func_qcaps_output *qcaps)
2488 {
2489         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2490         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2491                sizeof(qcaps->mac_address));
2492         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2493         qcaps->max_rx_rings = fcfg->num_rx_rings;
2494         qcaps->max_tx_rings = fcfg->num_tx_rings;
2495         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2496         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2497         qcaps->max_vfs = 0;
2498         qcaps->first_vf_id = 0;
2499         qcaps->max_vnics = fcfg->num_vnics;
2500         qcaps->max_decap_records = 0;
2501         qcaps->max_encap_records = 0;
2502         qcaps->max_tx_wm_flows = 0;
2503         qcaps->max_tx_em_flows = 0;
2504         qcaps->max_rx_wm_flows = 0;
2505         qcaps->max_rx_em_flows = 0;
2506         qcaps->max_flow_id = 0;
2507         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2508         qcaps->max_sp_tx_rings = 0;
2509         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2510 }
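/*
 * Editor's note: copy_func_cfg_to_qcaps() is a fallback for when the
 * per-VF FUNC_QCAPS query fails -- the resources we just asked the
 * firmware to configure are then treated as the VF's actual
 * allocation, and fields the cfg request does not carry (VF counts,
 * flow counts, etc.) are zeroed.
 */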
2511
2512 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2513 {
2514         struct hwrm_func_cfg_input req = {0};
2515         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2516         int rc;
2517
2518         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2519                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2520                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2521                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2522                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2523                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2524                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2525                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2526                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2527                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2528         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2529         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2530         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2531                                    ETHER_CRC_LEN + VLAN_TAG_SIZE *
2532                                    BNXT_NUM_VLANS);
2533         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2534         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2535         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2536         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2537         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2538         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2539         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2540         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2541         req.fid = rte_cpu_to_le_16(0xffff);
2542
2543         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2544
2545         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2546
2547         HWRM_CHECK_RESULT();
2548         HWRM_UNLOCK();
2549
2550         return rc;
2551 }
2552
2553 static void populate_vf_func_cfg_req(struct bnxt *bp,
2554                                      struct hwrm_func_cfg_input *req,
2555                                      int num_vfs)
2556 {
2557         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2558                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2559                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2560                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2561                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2562                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2563                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2564                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2565                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2566                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2567
2568         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2569                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2570                                     BNXT_NUM_VLANS);
2571         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2572                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2573                                     BNXT_NUM_VLANS);
2574         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2575                                                 (num_vfs + 1));
2576         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2577         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2578                                                (num_vfs + 1));
2579         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2580         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2581         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2582         /* TODO: For now, do not support VMDq/RFS on VFs. */
2583         req->num_vnics = rte_cpu_to_le_16(1);
2584         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2585                                                  (num_vfs + 1));
2586 }
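/*
 * Editor's note: the (num_vfs + 1) divisor splits each resource pool
 * evenly between the PF and its VFs.  For example, with
 * max_tx_rings = 64 and num_vfs = 7, each of the eight functions is
 * offered 64 / 8 = 8 TX rings.
 */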
2587
2588 static void add_random_mac_if_needed(struct bnxt *bp,
2589                                      struct hwrm_func_cfg_input *cfg_req,
2590                                      int vf)
2591 {
2592         struct ether_addr mac;
2593
2594         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2595                 return;
2596
2597         if (is_zero_ether_addr(&mac)) {
2598                 cfg_req->enables |=
2599                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2600                 eth_random_addr(cfg_req->dflt_mac_addr);
2601                 bp->pf.vf_info[vf].random_mac = true;
2602         } else {
2603                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2604         }
2605 }
2606
2607 static void reserve_resources_from_vf(struct bnxt *bp,
2608                                       struct hwrm_func_cfg_input *cfg_req,
2609                                       int vf)
2610 {
2611         struct hwrm_func_qcaps_input req = {0};
2612         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2613         int rc;
2614
2615         /* Get the actual allocated values now */
2616         HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
2617         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2618         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2619
2620         if (rc) {
2621                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2622                 copy_func_cfg_to_qcaps(cfg_req, resp);
2623         } else if (resp->error_code) {
2624                 rc = rte_le_to_cpu_16(resp->error_code);
2625                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2626                 copy_func_cfg_to_qcaps(cfg_req, resp);
2627         }
2628
2629         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2630         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2631         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2632         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2633         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2634         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2635         /*
2636          * TODO: While not supporting VMDq with VFs, max_vnics is always
2637          * forced to 1 in this case
2638          */
2639         //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2640         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2641
2642         HWRM_UNLOCK();
2643 }
2644
2645 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2646 {
2647         struct hwrm_func_qcfg_input req = {0};
2648         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2649         int rc;
2650
2651         /* Query the function's default VLAN */
2652         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2653         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2654         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2655         if (rc) {
2656                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
2657                 return -1;
2658         } else if (resp->error_code) {
2659                 rc = rte_le_to_cpu_16(resp->error_code);
2660                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
2661                 return -1;
2662         }
2663         rc = rte_le_to_cpu_16(resp->vlan);
2664
2665         HWRM_UNLOCK();
2666
2667         return rc;
2668 }
2669
2670 static int update_pf_resource_max(struct bnxt *bp)
2671 {
2672         struct hwrm_func_qcfg_input req = {0};
2673         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2674         int rc;
2675
2676         /* And copy the allocated numbers into the pf struct */
2677         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2678         req.fid = rte_cpu_to_le_16(0xffff);
2679         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2680         HWRM_CHECK_RESULT();
2681
2682         /* Only TX ring value reflects actual allocation? TODO */
2683         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2684         bp->pf.evb_mode = resp->evb_mode;
2685
2686         HWRM_UNLOCK();
2687
2688         return rc;
2689 }
2690
2691 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2692 {
2693         int rc;
2694
2695         if (!BNXT_PF(bp)) {
2696                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2697                 return -1;
2698         }
2699
2700         rc = bnxt_hwrm_func_qcaps(bp);
2701         if (rc)
2702                 return rc;
2703
2704         bp->pf.func_cfg_flags &=
2705                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2706                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2707         bp->pf.func_cfg_flags |=
2708                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2709         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2710         return rc;
2711 }
2712
2713 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2714 {
2715         struct hwrm_func_cfg_input req = {0};
2716         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2717         int i;
2718         size_t sz;
2719         int rc = 0;
2720         size_t req_buf_sz;
2721
2722         if (!BNXT_PF(bp)) {
2723                 PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
2724                 return -1;
2725         }
2726
2727         rc = bnxt_hwrm_func_qcaps(bp);
2728
2729         if (rc)
2730                 return rc;
2731
2732         bp->pf.active_vfs = num_vfs;
2733
2734         /*
2735          * First, configure the PF to only use one TX ring.  This ensures that
2736          * there are enough rings for all VFs.
2737          *
2738          * If we don't do this, when we call func_alloc() later, we will lock
2739          * extra rings to the PF that won't be available during func_cfg() of
2740          * the VFs.
2741          *
2742          * This has been fixed with firmware versions above 20.6.54
2743          */
2744         bp->pf.func_cfg_flags &=
2745                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2746                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2747         bp->pf.func_cfg_flags |=
2748                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2749         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2750         if (rc)
2751                 return rc;
2752
2753         /*
2754          * Now, create and register a buffer to hold forwarded VF requests
2755          */
2756         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2757         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2758                 page_roundup(req_buf_sz));
2759         if (bp->pf.vf_req_buf == NULL) {
2760                 rc = -ENOMEM;
2761                 goto error_free;
2762         }
2763         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2764                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2765         for (i = 0; i < num_vfs; i++)
2766                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2767                                         (i * HWRM_MAX_REQ_LEN);
2768
2769         rc = bnxt_hwrm_func_buf_rgtr(bp);
2770         if (rc)
2771                 goto error_free;
2772
2773         populate_vf_func_cfg_req(bp, &req, num_vfs);
2774
2775         bp->pf.active_vfs = 0;
2776         for (i = 0; i < num_vfs; i++) {
2777                 add_random_mac_if_needed(bp, &req, i);
2778
2779                 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2780                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2781                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2782                 rc = bnxt_hwrm_send_message(bp,
2783                                             &req,
2784                                             sizeof(req),
2785                                             BNXT_USE_CHIMP_MB);
2786
2787                 /* Clear enable flag for next pass */
2788                 req.enables &= ~rte_cpu_to_le_32(
2789                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2790
2791                 if (rc || resp->error_code) {
2792                         PMD_DRV_LOG(ERR,
2793                                 "Failed to initizlie VF %d\n", i);
2794                         PMD_DRV_LOG(ERR,
2795                                 "Not all VFs available. (%d, %d)\n",
2796                                 rc, resp->error_code);
2797                         HWRM_UNLOCK();
2798                         break;
2799                 }
2800
2801                 HWRM_UNLOCK();
2802
2803                 reserve_resources_from_vf(bp, &req, i);
2804                 bp->pf.active_vfs++;
2805                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2806         }
2807
2808         /*
2809          * Now configure the PF to use "the rest" of the resources
2810          * We're using STD_TX_RING_MODE here though which will limit the TX
2811          * rings.  This will allow QoS to function properly.  Not setting this
2812          * will cause PF rings to break bandwidth settings.
2813          */
2814         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2815         if (rc)
2816                 goto error_free;
2817
2818         rc = update_pf_resource_max(bp);
2819         if (rc)
2820                 goto error_free;
2821
2822         return rc;
2823
2824 error_free:
2825         bnxt_hwrm_func_buf_unrgtr(bp);
2826         return rc;
2827 }
2828
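/* Program the PF's current EVB (Edge Virtual Bridging) mode into firmware. */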
2829 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2830 {
2831         struct hwrm_func_cfg_input req = {0};
2832         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2833         int rc;
2834
2835         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2836
2837         req.fid = rte_cpu_to_le_16(0xffff);
2838         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2839         req.evb_mode = bp->pf.evb_mode;
2840
2841         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2842         HWRM_CHECK_RESULT();
2843         HWRM_UNLOCK();
2844
2845         return rc;
2846 }
2847
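/*
 * Allocate a tunnel destination UDP port (VXLAN or GENEVE) in firmware and
 * cache the returned firmware port id.  tunnel_dst_port_val is copied as-is
 * here, so the caller presumably supplies the port already in the byte order
 * firmware expects (bnxt_hwrm_tunnel_dst_port_free() below converts
 * internally instead).  Illustrative call, not made in this file, for the
 * IANA-assigned VXLAN port:
 *
 *     rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, rte_cpu_to_be_16(4789),
 *              HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
 */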
2848 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2849                                 uint8_t tunnel_type)
2850 {
2851         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2852         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2853         int rc = 0;
2854
2855         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
2856         req.tunnel_type = tunnel_type;
2857         req.tunnel_dst_port_val = port;
2858         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2859         HWRM_CHECK_RESULT();
2860
2861         switch (tunnel_type) {
2862         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2863                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2864                 bp->vxlan_port = port;
2865                 break;
2866         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2867                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2868                 bp->geneve_port = port;
2869                 break;
2870         default:
2871                 break;
2872         }
2873
2874         HWRM_UNLOCK();
2875
2876         return rc;
2877 }
2878
2879 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2880                                 uint8_t tunnel_type)
2881 {
2882         struct hwrm_tunnel_dst_port_free_input req = {0};
2883         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2884         int rc = 0;
2885
2886         HWRM_PREP(req, TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
2887
2888         req.tunnel_type = tunnel_type;
2889         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2890         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2891
2892         HWRM_CHECK_RESULT();
2893         HWRM_UNLOCK();
2894
2895         return rc;
2896 }
2897
2898 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2899                                         uint32_t flags)
2900 {
2901         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2902         struct hwrm_func_cfg_input req = {0};
2903         int rc;
2904
2905         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2906
2907         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2908         req.flags = rte_cpu_to_le_32(flags);
2909         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2910
2911         HWRM_CHECK_RESULT();
2912         HWRM_UNLOCK();
2913
2914         return rc;
2915 }
2916
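/*
 * Callback for bnxt_hwrm_func_vf_vnic_query_and_config(): copy the rx-mask
 * flags from cbdata into each of the VF's VNICs before they are reprogrammed.
 */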
2917 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2918 {
2919         uint32_t *flag = flagp;
2920
2921         vnic->flags = *flag;
2922 }
2923
2924 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2925 {
2926         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2927 }
2928
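/*
 * Register the buffer that firmware uses to forward VF HWRM requests to the
 * PF driver.  Only req_buf_page_addr0 is programmed, so the whole buffer
 * must fit within the single page whose size is encoded in req_buf_page_size.
 */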
2929 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2930 {
2931         int rc = 0;
2932         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2933         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2934
2935         HWRM_PREP(req, FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
2936
2937         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2938         req.req_buf_page_size = rte_cpu_to_le_16(
2939                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2940         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2941         req.req_buf_page_addr0 =
2942                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2943         if (req.req_buf_page_addr0 == 0) {
2944                 PMD_DRV_LOG(ERR,
2945                         "unable to map buffer address to physical memory\n");
                     HWRM_UNLOCK();
2946                 return -ENOMEM;
2947         }
2948
2949         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2950
2951         HWRM_CHECK_RESULT();
2952         HWRM_UNLOCK();
2953
2954         return rc;
2955 }
2956
2957 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2958 {
2959         int rc = 0;
2960         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2961         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2962
2963         HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
2964
2965         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2966
2967         HWRM_CHECK_RESULT();
2968         HWRM_UNLOCK();
2969
2970         return rc;
2971 }
2972
2973 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2974 {
2975         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2976         struct hwrm_func_cfg_input req = {0};
2977         int rc;
2978
2979         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2980
2981         req.fid = rte_cpu_to_le_16(0xffff);
2982         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2983         req.enables = rte_cpu_to_le_32(
2984                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2985         req.async_event_cr = rte_cpu_to_le_16(
2986                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2987         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2988
2989         HWRM_CHECK_RESULT();
2990         HWRM_UNLOCK();
2991
2992         return rc;
2993 }
2994
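/*
 * VF counterpart of bnxt_hwrm_func_cfg_def_cp(): a VF registers its async
 * event completion ring through HWRM_FUNC_VF_CFG rather than HWRM_FUNC_CFG.
 * bnxt_hwrm_set_async_event_cr() below dispatches to the right variant.
 */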
2995 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2996 {
2997         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2998         struct hwrm_func_vf_cfg_input req = {0};
2999         int rc;
3000
3001         HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
3002
3003         req.enables = rte_cpu_to_le_32(
3004                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3005         req.async_event_cr = rte_cpu_to_le_16(
3006                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
3007         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3008
3009         HWRM_CHECK_RESULT();
3010         HWRM_UNLOCK();
3011
3012         return rc;
3013 }
3014
3015 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
3016 {
3017         struct hwrm_func_cfg_input req = {0};
3018         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3019         uint16_t dflt_vlan, fid;
3020         uint32_t func_cfg_flags;
3021         int rc = 0;
3022
3023         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3024
3025         if (is_vf) {
3026                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
3027                 fid = bp->pf.vf_info[vf].fid;
3028                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
3029         } else {
3030                 fid = rte_cpu_to_le_16(0xffff);
3031                 func_cfg_flags = bp->pf.func_cfg_flags;
3032                 dflt_vlan = bp->vlan;
3033         }
3034
3035         req.flags = rte_cpu_to_le_32(func_cfg_flags);
3036         req.fid = rte_cpu_to_le_16(fid);
3037         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3038         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
3039
3040         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3041
3042         HWRM_CHECK_RESULT();
3043         HWRM_UNLOCK();
3044
3045         return rc;
3046 }
3047
3048 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
3049                         uint16_t max_bw, uint16_t enables)
3050 {
3051         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3052         struct hwrm_func_cfg_input req = {0};
3053         int rc;
3054
3055         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3056
3057         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3058         req.enables |= rte_cpu_to_le_32(enables);
3059         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3060         req.max_bw = rte_cpu_to_le_32(max_bw);
3061         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3062
3063         HWRM_CHECK_RESULT();
3064         HWRM_UNLOCK();
3065
3066         return rc;
3067 }
3068
3069 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
3070 {
3071         struct hwrm_func_cfg_input req = {0};
3072         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3073         int rc = 0;
3074
3075         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3076
3077         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3078         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3079         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3080         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
3081
3082         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3083
3084         HWRM_CHECK_RESULT();
3085         HWRM_UNLOCK();
3086
3087         return rc;
3088 }
3089
3090 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3091 {
3092         int rc;
3093
3094         if (BNXT_PF(bp))
3095                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3096         else
3097                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3098
3099         return rc;
3100 }
3101
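/*
 * Forwarded-request handling: REJECT_FWD_RESP tells firmware to fail a VF's
 * encapsulated HWRM request, while EXEC_FWD_RESP (further below) asks
 * firmware to execute it on the VF's behalf.
 */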
3102 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3103                               void *encaped, size_t ec_size)
3104 {
3105         int rc = 0;
3106         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3107         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3108
3109         if (ec_size > sizeof(req.encap_request))
3110                 return -1;
3111
3112         HWRM_PREP(req, REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
3113
3114         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3115         memcpy(req.encap_request, encaped, ec_size);
3116
3117         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3118
3119         HWRM_CHECK_RESULT();
3120         HWRM_UNLOCK();
3121
3122         return rc;
3123 }
3124
3125 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3126                                        struct ether_addr *mac)
3127 {
3128         struct hwrm_func_qcfg_input req = {0};
3129         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3130         int rc;
3131
3132         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
3133
3134         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3135         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3136
3137         HWRM_CHECK_RESULT();
3138
3139         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
3140
3141         HWRM_UNLOCK();
3142
3143         return rc;
3144 }
3145
3146 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3147                             void *encaped, size_t ec_size)
3148 {
3149         int rc = 0;
3150         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3151         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3152
3153         if (ec_size > sizeof(req.encap_request))
3154                 return -1;
3155
3156         HWRM_PREP(req, EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
3157
3158         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3159         memcpy(req.encap_request, encaped, ec_size);
3160
3161         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3162
3163         HWRM_CHECK_RESULT();
3164         HWRM_UNLOCK();
3165
3166         return rc;
3167 }
3168
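/*
 * Read one statistics context via HWRM_STAT_CTX_QUERY and fold the counters
 * into the per-queue fields of rte_eth_stats; 'rx' selects whether the
 * context feeds the RX (q_ipackets/q_ibytes) or TX (q_opackets/q_obytes)
 * columns for queue 'idx'.
 */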
3169 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3170                          struct rte_eth_stats *stats, uint8_t rx)
3171 {
3172         int rc = 0;
3173         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3174         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3175
3176         HWRM_PREP(req, STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
3177
3178         req.stat_ctx_id = rte_cpu_to_le_32(cid);
3179
3180         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3181
3182         HWRM_CHECK_RESULT();
3183
3184         if (rx) {
3185                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3186                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3187                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3188                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3189                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3190                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3191                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3192                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3193         } else {
3194                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3195                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3196                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3197                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3198                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3199                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3200                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
3201         }
3202
3204         HWRM_UNLOCK();
3205
3206         return rc;
3207 }
3208
3209 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3210 {
3211         struct hwrm_port_qstats_input req = {0};
3212         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3213         struct bnxt_pf_info *pf = &bp->pf;
3214         int rc;
3215
3216         HWRM_PREP(req, PORT_QSTATS, BNXT_USE_CHIMP_MB);
3217
3218         req.port_id = rte_cpu_to_le_16(pf->port_id);
3219         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3220         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3221         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3222
3223         HWRM_CHECK_RESULT();
3224         HWRM_UNLOCK();
3225
3226         return rc;
3227 }
3228
3229 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3230 {
3231         struct hwrm_port_clr_stats_input req = {0};
3232         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3233         struct bnxt_pf_info *pf = &bp->pf;
3234         int rc;
3235
3236         /* Not allowed on NS2 device, NPAR, MultiHost, VF */
3237         if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
3238             BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
3239                 return 0;
3240
3241         HWRM_PREP(req, PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
3242
3243         req.port_id = rte_cpu_to_le_16(pf->port_id);
3244         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3245
3246         HWRM_CHECK_RESULT();
3247         HWRM_UNLOCK();
3248
3249         return rc;
3250 }
3251
3252 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3253 {
3254         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3255         struct hwrm_port_led_qcaps_input req = {0};
3256         int rc;
3257
3258         if (BNXT_VF(bp))
3259                 return 0;
3260
3261         HWRM_PREP(req, PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
3262         req.port_id = bp->pf.port_id;
3263         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3264
3265         HWRM_CHECK_RESULT();
3266
3267         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3268                 unsigned int i;
3269
3270                 bp->num_leds = resp->num_leds;
3271                 memcpy(bp->leds, &resp->led0_id,
3272                         sizeof(bp->leds[0]) * bp->num_leds);
3273                 for (i = 0; i < bp->num_leds; i++) {
3274                         struct bnxt_led_info *led = &bp->leds[i];
3275
3276                         uint16_t caps = led->led_state_caps;
3277
3278                         if (!led->led_group_id ||
3279                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3280                                 bp->num_leds = 0;
3281                                 break;
3282                         }
3283                 }
3284         }
3285
3286         HWRM_UNLOCK();
3287
3288         return rc;
3289 }
3290
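/*
 * Drive the port LEDs for identification: when led_on is true every LED is
 * put into BLINKALT mode with a 500 ms on/off period, otherwise the LEDs
 * are returned to their default state.
 */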
3291 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3292 {
3293         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3294         struct hwrm_port_led_cfg_input req = {0};
3295         struct bnxt_led_cfg *led_cfg;
3296         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3297         uint16_t duration = 0;
3298         int rc, i;
3299
3300         if (!bp->num_leds || BNXT_VF(bp))
3301                 return -EOPNOTSUPP;
3302
3303         HWRM_PREP(req, PORT_LED_CFG, BNXT_USE_CHIMP_MB);
3304
3305         if (led_on) {
3306                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3307                 duration = rte_cpu_to_le_16(500);
3308         }
3309         req.port_id = bp->pf.port_id;
3310         req.num_leds = bp->num_leds;
3311         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3312         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3313                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3314                 led_cfg->led_id = bp->leds[i].led_id;
3315                 led_cfg->led_state = led_state;
3316                 led_cfg->led_blink_on = duration;
3317                 led_cfg->led_blink_off = duration;
3318                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3319         }
3320
3321         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3322
3323         HWRM_CHECK_RESULT();
3324         HWRM_UNLOCK();
3325
3326         return rc;
3327 }
3328
3329 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3330                                uint32_t *length)
3331 {
3332         int rc;
3333         struct hwrm_nvm_get_dir_info_input req = {0};
3334         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3335
3336         HWRM_PREP(req, NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
3337
3338         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3339
3340         HWRM_CHECK_RESULT();
3341         HWRM_UNLOCK();
3342
3343         if (!rc) {
3344                 *entries = rte_le_to_cpu_32(resp->entries);
3345                 *length = rte_le_to_cpu_32(resp->entry_length);
3346         }
3347         return rc;
3348 }
3349
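/*
 * Copy the NVM directory into 'data'.  The first two bytes receive the
 * entry count and entry length (note: only the low byte of each 32-bit
 * value is stored, since 'data' is a byte pointer); the remaining bytes
 * are filled from a DMA-able bounce buffer holding the raw directory.
 */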
3350 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3351 {
3352         int rc;
3353         uint32_t dir_entries;
3354         uint32_t entry_length;
3355         uint8_t *buf;
3356         size_t buflen;
3357         rte_iova_t dma_handle;
3358         struct hwrm_nvm_get_dir_entries_input req = {0};
3359         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3360
3361         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3362         if (rc != 0)
3363                 return rc;
3364
3365         *data++ = dir_entries;
3366         *data++ = entry_length;
3367         len -= 2;
3368         memset(data, 0xff, len);
3369
3370         buflen = dir_entries * entry_length;
3371         buf = rte_malloc("nvm_dir", buflen, 0);
3372         if (buf == NULL)
3373                 return -ENOMEM;
3374         rte_mem_lock_page(buf);
3375         dma_handle = rte_mem_virt2iova(buf);
3376         if (dma_handle == 0) {
3377                 PMD_DRV_LOG(ERR,
3378                         "unable to map response address to physical memory\n");
                     rte_free(buf);
3379                 return -ENOMEM;
3380         }
3381         HWRM_PREP(req, NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
3382         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3383         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3384
3385         if (rc == 0)
3386                 memcpy(data, buf, len > buflen ? buflen : len);
3387
3388         rte_free(buf);
3389         HWRM_CHECK_RESULT();
3390         HWRM_UNLOCK();
3391
3392         return rc;
3393 }
3394
3395 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3396                              uint32_t offset, uint32_t length,
3397                              uint8_t *data)
3398 {
3399         int rc;
3400         uint8_t *buf;
3401         rte_iova_t dma_handle;
3402         struct hwrm_nvm_read_input req = {0};
3403         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3404
3405         buf = rte_malloc("nvm_item", length, 0);
3406         if (!buf)
3407                 return -ENOMEM;
3408         rte_mem_lock_page(buf);
3409
3410         dma_handle = rte_mem_virt2iova(buf);
3411         if (dma_handle == 0) {
3412                 PMD_DRV_LOG(ERR,
3413                         "unable to map response address to physical memory\n");
                     rte_free(buf);
3414                 return -ENOMEM;
3415         }
3416         HWRM_PREP(req, NVM_READ, BNXT_USE_CHIMP_MB);
3417         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3418         req.dir_idx = rte_cpu_to_le_16(index);
3419         req.offset = rte_cpu_to_le_32(offset);
3420         req.len = rte_cpu_to_le_32(length);
3421         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3422         if (rc == 0)
3423                 memcpy(data, buf, length);
3424
3425         rte_free(buf);
3426         HWRM_CHECK_RESULT();
3427         HWRM_UNLOCK();
3428
3429         return rc;
3430 }
3431
3432 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3433 {
3434         int rc;
3435         struct hwrm_nvm_erase_dir_entry_input req = {0};
3436         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3437
3438         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
3439         req.dir_idx = rte_cpu_to_le_16(index);
3440         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3441         HWRM_CHECK_RESULT();
3442         HWRM_UNLOCK();
3443
3444         return rc;
3445 }
3446
3448 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3449                           uint16_t dir_ordinal, uint16_t dir_ext,
3450                           uint16_t dir_attr, const uint8_t *data,
3451                           size_t data_len)
3452 {
3453         int rc;
3454         struct hwrm_nvm_write_input req = {0};
3455         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3456         rte_iova_t dma_handle;
3457         uint8_t *buf;
3458
3459         buf = rte_malloc("nvm_write", data_len, 0);
3460         if (!buf)
3461                 return -ENOMEM;
3462         rte_mem_lock_page(buf);
3463
3464         dma_handle = rte_mem_virt2iova(buf);
3465         if (dma_handle == 0) {
3466                 PMD_DRV_LOG(ERR,
3467                         "unable to map response address to physical memory\n");
                     rte_free(buf);
3468                 return -ENOMEM;
3469         }
3470         memcpy(buf, data, data_len);
3471
3472         HWRM_PREP(req, NVM_WRITE, BNXT_USE_CHIMP_MB);
3473
3474         req.dir_type = rte_cpu_to_le_16(dir_type);
3475         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3476         req.dir_ext = rte_cpu_to_le_16(dir_ext);
3477         req.dir_attr = rte_cpu_to_le_16(dir_attr);
3478         req.dir_data_length = rte_cpu_to_le_32(data_len);
3479         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3480
3481         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3482
3483         rte_free(buf);
3484         HWRM_CHECK_RESULT();
3485         HWRM_UNLOCK();
3486
3487         return rc;
3488 }
3489
3490 static void
3491 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3492 {
3493         uint32_t *count = cbdata;
3494
3495         *count = *count + 1;
3496 }
3497
3498 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3499                                      struct bnxt_vnic_info *vnic __rte_unused)
3500 {
3501         return 0;
3502 }
3503
3504 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3505 {
3506         uint32_t count = 0;
3507
3508         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3509             &count, bnxt_vnic_count_hwrm_stub);
3510
3511         return count;
3512 }
3513
3514 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3515                                         uint16_t *vnic_ids)
3516 {
3517         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3518         struct hwrm_func_vf_vnic_ids_query_output *resp =
3519                                                 bp->hwrm_cmd_resp_addr;
3520         int rc;
3521
3522         /* First query all VNIC ids */
3523         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
3524
3525         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3526         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3527         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3528
3529         if (req.vnic_id_tbl_addr == 0) {
3530                 HWRM_UNLOCK();
3531                 PMD_DRV_LOG(ERR,
3532                 "unable to map VNIC ID table address to physical memory\n");
3533                 return -ENOMEM;
3534         }
3535         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3536         if (rc) {
3537                 HWRM_UNLOCK();
3538                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3539                 return -1;
3540         } else if (resp->error_code) {
3541                 rc = rte_le_to_cpu_16(resp->error_code);
3542                 HWRM_UNLOCK();
3543                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
3544                 return -1;
3545         }
3546         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3547
3548         HWRM_UNLOCK();
3549
3550         return rc;
3551 }
3552
3553 /*
3554  * This function queries the VNIC IDs for a specified VF. It then calls
3555  * the vnic_cb to update the necessary field in vnic_info with cbdata.
3556  * Then it calls the hwrm_cb function to program this new vnic configuration.
3557  */
3558 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3559         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3560         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3561 {
3562         struct bnxt_vnic_info vnic;
3563         int rc = 0;
3564         int i, num_vnic_ids;
3565         uint16_t *vnic_ids;
3566         size_t vnic_id_sz;
3567         size_t sz;
3568
3569         /* First query all VNIC ids */
3570         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3571         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3572                         RTE_CACHE_LINE_SIZE);
3573         if (vnic_ids == NULL) {
3574                 rc = -ENOMEM;
3575                 return rc;
3576         }
3577         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3578                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3579
3580         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3581
3582         if (num_vnic_ids < 0) {
3583                 rte_free(vnic_ids);
                     return num_vnic_ids;
             }
3584
3585         /* Retrieve each VNIC, apply vnic_cb to update it, then reprogram it via hwrm_cb */
3586
3587         for (i = 0; i < num_vnic_ids; i++) {
3588                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3589                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3590                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3591                 if (rc)
3592                         break;
3593                 if (vnic.mru <= 4)      /* Indicates unallocated */
3594                         continue;
3595
3596                 vnic_cb(&vnic, cbdata);
3597
3598                 rc = hwrm_cb(bp, &vnic);
3599                 if (rc)
3600                         break;
3601         }
3602
3603         rte_free(vnic_ids);
3604
3605         return rc;
3606 }
3607
3608 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3609                                               bool on)
3610 {
3611         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3612         struct hwrm_func_cfg_input req = {0};
3613         int rc;
3614
3615         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3616
3617         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3618         req.enables |= rte_cpu_to_le_32(
3619                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3620         req.vlan_antispoof_mode = on ?
3621                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3622                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3623         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3624
3625         HWRM_CHECK_RESULT();
3626         HWRM_UNLOCK();
3627
3628         return rc;
3629 }
3630
3631 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3632 {
3633         struct bnxt_vnic_info vnic;
3634         uint16_t *vnic_ids;
3635         size_t vnic_id_sz;
3636         int num_vnic_ids, i;
3637         size_t sz;
3638         int rc;
3639
3640         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3641         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3642                         RTE_CACHE_LINE_SIZE);
3643         if (vnic_ids == NULL) {
3644                 rc = -ENOMEM;
3645                 return rc;
3646         }
3647
3648         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3649                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3650
3651         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3652         if (rc <= 0)
3653                 goto exit;
3654         num_vnic_ids = rc;
3655
3656         /*
3657          * Loop through to find the default VNIC ID.
3658          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3659          * by sending the hwrm_func_qcfg command to the firmware.
3660          */
3661         for (i = 0; i < num_vnic_ids; i++) {
3662                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3663                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3664                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3665                                         bp->pf.first_vf_id + vf);
3666                 if (rc)
3667                         goto exit;
3668                 if (vnic.func_default) {
3669                         rte_free(vnic_ids);
3670                         return vnic.fw_vnic_id;
3671                 }
3672         }
3673         /* Could not find a default VNIC. */
3674         PMD_DRV_LOG(ERR, "No default VNIC\n");
3675 exit:
3676         rte_free(vnic_ids);
3677         return -1;
3678 }
3679
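/*
 * Allocate an exact-match (EM) flow in the CFA.  Any previously allocated
 * EM filter id is freed first, and the request is sent on the KONG mailbox
 * when BNXT_USE_KONG(bp) selects it.
 */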
3680 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3681                          uint16_t dst_id,
3682                          struct bnxt_filter_info *filter)
3683 {
3684         int rc = 0;
3685         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3686         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3687         uint32_t enables = 0;
3688
3689         if (filter->fw_em_filter_id != UINT64_MAX)
3690                 bnxt_hwrm_clear_em_filter(bp, filter);
3691
3692         HWRM_PREP(req, CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
3693
3694         req.flags = rte_cpu_to_le_32(filter->flags);
3695
3696         enables = filter->enables |
3697               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3698         req.dst_id = rte_cpu_to_le_16(dst_id);
3699
3700         if (filter->ip_addr_type) {
3701                 req.ip_addr_type = filter->ip_addr_type;
3702                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3703         }
3704         if (enables &
3705             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3706                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3707         if (enables &
3708             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3709                 memcpy(req.src_macaddr, filter->src_macaddr,
3710                        ETHER_ADDR_LEN);
3711         if (enables &
3712             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3713                 memcpy(req.dst_macaddr, filter->dst_macaddr,
3714                        ETHER_ADDR_LEN);
3715         if (enables &
3716             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3717                 req.ovlan_vid = filter->l2_ovlan;
3718         if (enables &
3719             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3720                 req.ivlan_vid = filter->l2_ivlan;
3721         if (enables &
3722             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3723                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3724         if (enables &
3725             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3726                 req.ip_protocol = filter->ip_protocol;
3727         if (enables &
3728             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3729                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3730         if (enables &
3731             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3732                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3733         if (enables &
3734             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3735                 req.src_port = rte_cpu_to_be_16(filter->src_port);
3736         if (enables &
3737             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3738                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3739         if (enables &
3740             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3741                 req.mirror_vnic_id = filter->mirror_vnic_id;
3742
3743         req.enables = rte_cpu_to_le_32(enables);
3744
3745         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
3746
3747         HWRM_CHECK_RESULT();
3748
3749         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3750         HWRM_UNLOCK();
3751
3752         return rc;
3753 }
3754
3755 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3756 {
3757         int rc = 0;
3758         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3759         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3760
3761         if (filter->fw_em_filter_id == UINT64_MAX)
3762                 return 0;
3763
3764         PMD_DRV_LOG(ERR, "Clear EM filter\n");
3765         HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
3766
3767         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3768
3769         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
3770
3771         HWRM_CHECK_RESULT();
3772         HWRM_UNLOCK();
3773
3774         filter->fw_em_filter_id = UINT64_MAX;
3775         filter->fw_l2_filter_id = UINT64_MAX;
3776
3777         return 0;
3778 }
3779
3780 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3781                          uint16_t dst_id,
3782                          struct bnxt_filter_info *filter)
3783 {
3784         int rc = 0;
3785         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3786         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3787                                                 bp->hwrm_cmd_resp_addr;
3788         uint32_t enables = 0;
3789
3790         if (filter->fw_ntuple_filter_id != UINT64_MAX)
3791                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3792
3793         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
3794
3795         req.flags = rte_cpu_to_le_32(filter->flags);
3796
3797         enables = filter->enables |
3798               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3799         req.dst_id = rte_cpu_to_le_16(dst_id);
3800
3801
3802         if (filter->ip_addr_type) {
3803                 req.ip_addr_type = filter->ip_addr_type;
3804                 enables |=
3805                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3806         }
3807         if (enables &
3808             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3809                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3810         if (enables &
3811             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3812                 memcpy(req.src_macaddr, filter->src_macaddr,
3813                        ETHER_ADDR_LEN);
3814         /* if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
3815          *         memcpy(req.dst_macaddr, filter->dst_macaddr,
3816          *                ETHER_ADDR_LEN);
3817          */
3818         if (enables &
3819             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3820                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3821         if (enables &
3822             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3823                 req.ip_protocol = filter->ip_protocol;
3824         if (enables &
3825             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3826                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3827         if (enables &
3828             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3829                 req.src_ipaddr_mask[0] =
3830                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3831         if (enables &
3832             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3833                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3834         if (enables &
3835             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
3836                 req.dst_ipaddr_mask[0] =
3837                         rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]);
3838         if (enables &
3839             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3840                 req.src_port = rte_cpu_to_le_16(filter->src_port);
3841         if (enables &
3842             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3843                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3844         if (enables &
3845             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3846                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3847         if (enables &
3848             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3849                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3850         if (enables &
3851             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3852                 req.mirror_vnic_id = filter->mirror_vnic_id;
3853
3854         req.enables = rte_cpu_to_le_32(enables);
3855
3856         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3857
3858         HWRM_CHECK_RESULT();
3859
3860         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3861         HWRM_UNLOCK();
3862
3863         return rc;
3864 }
3865
3866 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3867                                 struct bnxt_filter_info *filter)
3868 {
3869         int rc = 0;
3870         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3871         struct hwrm_cfa_ntuple_filter_free_output *resp =
3872                                                 bp->hwrm_cmd_resp_addr;
3873
3874         if (filter->fw_ntuple_filter_id == UINT64_MAX)
3875                 return 0;
3876
3877         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
3878
3879         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3880
3881         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3882
3883         HWRM_CHECK_RESULT();
3884         HWRM_UNLOCK();
3885
3886         filter->fw_ntuple_filter_id = UINT64_MAX;
3887
3888         return 0;
3889 }
3890
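/*
 * (Re)build the RSS redirection table for a VNIC: each of the
 * HW_HASH_INDEX_SIZE slots is filled with the next ring group id that is
 * not INVALID_HW_RING_ID, wrapping around the rx rings; the table is then
 * pushed to firmware with bnxt_hwrm_vnic_rss_cfg().
 */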
3891 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3892 {
3893         unsigned int rss_idx, fw_idx, i;
3894
3895         if (vnic->rss_table && vnic->hash_type) {
3896                 /*
3897                  * Fill the RSS hash & redirection table with
3898                  * ring group ids for all VNICs
3899                  */
3900                 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
3901                         rss_idx++, fw_idx++) {
3902                         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3903                                 fw_idx %= bp->rx_cp_nr_rings;
3904                                 if (vnic->fw_grp_ids[fw_idx] !=
3905                                     INVALID_HW_RING_ID)
3906                                         break;
3907                                 fw_idx++;
3908                         }
3909                         if (i == bp->rx_cp_nr_rings)
3910                                 return 0;
3911                         vnic->rss_table[rss_idx] =
3912                                 vnic->fw_grp_ids[fw_idx];
3913                 }
3914                 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
3915         }
3916         return 0;
3917 }
3918
3919 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
3920         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
3921 {
3922         uint16_t flags;
3923
3924         req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
3925
3926         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
3927         req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
3928
3929         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
3930         req->num_cmpl_dma_aggr_during_int =
3931                 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
3932
3933         req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
3934
3935         /* min timer set to 1/2 of interrupt timer */
3936         req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
3937
3938         /* buf timer set to 1/4 of interrupt timer */
3939         req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
3940
3941         req->cmpl_aggr_dma_tmr_during_int =
3942                 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
3943
3944         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
3945                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
3946         req->flags = rte_cpu_to_le_16(flags);
3947 }
3948
3949 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
3950                         struct bnxt_coal *coal, uint16_t ring_id)
3951 {
3952         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
3953         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
3954                                                 bp->hwrm_cmd_resp_addr;
3955         int rc;
3956
3957         /* Set ring coalesce parameters only for Stratus 100G NIC */
3958         if (!bnxt_stratus_device(bp))
3959                 return 0;
3960
3961         HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS, BNXT_USE_CHIMP_MB);
3962         bnxt_hwrm_set_coal_params(coal, &req);
3963         req.ring_id = rte_cpu_to_le_16(ring_id);
3964         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3965         HWRM_CHECK_RESULT();
3966         HWRM_UNLOCK();
3967         return rc;
3968 }
3969
3970 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
3971 {
3972         struct hwrm_port_qstats_ext_input req = {0};
3973         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
3974         struct bnxt_pf_info *pf = &bp->pf;
3975         int rc;
3976
3977         if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
3978               bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
3979                 return 0;
3980
3981         HWRM_PREP(req, PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
3982
3983         req.port_id = rte_cpu_to_le_16(pf->port_id);
3984         if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
3985                 req.tx_stat_host_addr =
3986                         rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3987                 req.tx_stat_size =
3988                         rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
3989         }
3990         if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
3991                 req.rx_stat_host_addr =
3992                         rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3993                 req.rx_stat_size =
3994                         rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
3995         }
3996         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3997
3998         if (rc) {
3999                 bp->fw_rx_port_stats_ext_size = 0;
4000                 bp->fw_tx_port_stats_ext_size = 0;
4001         } else {
4002                 bp->fw_rx_port_stats_ext_size =
4003                         rte_le_to_cpu_16(resp->rx_stat_size);
4004                 bp->fw_tx_port_stats_ext_size =
4005                         rte_le_to_cpu_16(resp->tx_stat_size);
4006         }
4007
4008         HWRM_CHECK_RESULT();
4009         HWRM_UNLOCK();
4010
4011         return rc;
4012 }