net/bnxt: set MAC filtering as outer for non tunnel frames
[dpdk.git] drivers/net/bnxt/bnxt_hwrm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT                10000
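/*
 * HWRM_CMD_TIMEOUT is a poll count, not a time: bnxt_hwrm_send_message()
 * below sleeps 600 us between polls, so the worst-case wait per command
 * is roughly 10000 * 600 us = 6 s.
 */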
#define HWRM_SPEC_CODE_1_8_3            0x10803
#define HWRM_VERSION_1_9_1              0x10901

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};
static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}
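
/*
 * Worked example: page_getenum(3000) returns 12, so page_roundup(3000)
 * yields 1 << 12 = 4096. Sizes above 1 GB take the error path and return
 * the widest usable shift for the platform pointer size.
 */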

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e., a timeout), and a positive non-zero HWRM error code if the
 * command is rejected by the ChiMP firmware.
 */

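/*
 * Low-level mailbox write: copy the request word by word into the ChiMP
 * (or Kong) channel in BAR0, zero-pad the rest of the request window,
 * ring the trigger doorbell, then poll the DMA'd response buffer until
 * its last byte carries HWRM_RESP_VALID_KEY or the timeout expires.
 */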
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                  uint32_t msg_len, bool use_kong_mb)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };
        uint16_t bar_offset = use_kong_mb ?
                GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
        uint16_t mb_trigger_offset = use_kong_mb ?
                GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;

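        /* Short command protocol: the full request stays in host memory
         * and only a small hwrm_short_input descriptor carrying its DMA
         * address is written to the channel.
         */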
        if (bp->flags & BNXT_FLAG_SHORT_CMD) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->max_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                /* Sync memory write before updating doorbell */
                rte_wmb();

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <=
                                bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
                        req->req_type);
                goto err_ret;
        }
        return 0;

err_ret:
        return -1;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does initial processing.
 *
 * HWRM_CHECK_RESULT() checks for errors and, on failure, releases the
 * spinlock and returns from the calling function. Because it embeds a
 * return using the regular int return codes, functions that do not use
 * those return codes should not use HWRM_CHECK_RESULT() directly; copy
 * and modify it to suit the function instead.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type, kong) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(-1); \
        req.seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
                rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT_SILENT() do {\
        if (rc) { \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc > 0) \
                        rc = -EINVAL; \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        PMD_DRV_LOG(ERR, \
                                "error %d:%d:%08x:%04x\n", \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        PMD_DRV_LOG(ERR, "error %d\n", rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc > 0) \
                        rc = -EINVAL; \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)
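
/*
 * Typical command sequence built from the pieces above (illustrative
 * sketch only; hwrm_xyz is a hypothetical command):
 *
 *        int rc;
 *        struct hwrm_xyz_input req = {.req_type = 0 };
 *        struct hwrm_xyz_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *        HWRM_PREP(req, XYZ, BNXT_USE_CHIMP_MB);
 *        ... fill req fields ...
 *        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
 *                                    BNXT_USE_CHIMP_MB);
 *        HWRM_CHECK_RESULT();
 *        ... read resp fields ...
 *        HWRM_UNLOCK();
 *        return rc;
 */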

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME: add the multicast flag when multicast adding options are
         * supported by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->flags & BNXT_VNIC_INFO_MCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the
         * set_rx_mask list was used for anti-spoof. In 1.8.0, the TX path
         * configuration was removed from the set_rx_mask call and this
         * command was added.
         *
         * The command is also present in 1.7.8.11 and higher, as well as
         * in 1.7.8.0.
         */
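        /* bp->fw_ver packs major.minor.build.revision bytes into one
         * 32-bit word, so e.g. 1.7.8.11 encodes as
         * (1 << 24) | (7 << 16) | (8 << 8) | 11 == 0x0107080b.
         */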
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = UINT64_MAX;

        return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        //TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                PMD_DRV_LOG(DEBUG,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);

        req.flags = rte_cpu_to_le_32(filter->flags);
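        /* Match on the outermost L2 header so that, for non-tunneled
         * frames, MAC filtering is applied to the outer (only) header.
         */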
        req.flags |=
        rte_cpu_to_le_32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_CFG, BNXT_USE_CHIMP_MB);

        if (ptp->rx_filter)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables = rte_cpu_to_le_32
                (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*      if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
        if (ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);

        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (!(resp->flags &
              HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) {
                HWRM_UNLOCK();
                return 0;
        }

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }

        ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
        ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
        ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
        ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
        ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
        ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
        ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
        ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
        ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

        /* All response fields consumed; release the HWRM lock. */
        HWRM_UNLOCK();

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        return 0;
}

static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Fail to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Alloc VLAN AS table for VF %d fail\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp))
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);

        HWRM_UNLOCK();

        if (BNXT_PF(bp) &&
            (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED)) {
                bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
                /* bnxt_hwrm_ptp_qcfg() issues its own HWRM command, so it
                 * must run after the lock is dropped.
                 */
                bnxt_hwrm_ptp_qcfg(bp);
        }

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc;

        rc = __bnxt_hwrm_func_qcaps(bp);
        if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
                rc = bnxt_hwrm_func_resc_qcaps(bp);
                if (!rc)
                        bp->flags |= BNXT_FLAG_NEW_RM;
        }

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET, BNXT_USE_CHIMP_MB);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));

                /*
                 * The PF can sniff HWRM API calls issued by a VF. This can be
                 * set up by the Linux driver and inherited by the DPDK PF
                 * driver. Clear this HWRM sniffer list in FW because the DPDK
                 * PF driver does not support it.
                 */
                req.flags =
                rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
        }

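        /* async_event_fwd[] is treated as a bitmap of async event IDs;
         * each bit set below asks the firmware to forward completions of
         * that event type to this function.
         */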
        req.async_event_fwd[0] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
{
        if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
                return 0;

        return bnxt_hwrm_func_reserve_vf_resc(bp, true);
}

int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
{
        int rc;
        uint32_t flags = 0;
        uint32_t enables;
        struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_vf_cfg_input req = {0};

        HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);

        req.enables = rte_cpu_to_le_32
                        (HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS |
                        HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS);

        req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
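        /* Rx is over-reserved relative to the ring count because every
         * Rx ring is paired with an aggregation ring (assumed to be what
         * AGG_RING_MULTIPLIER accounts for).
         */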
        req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
                                            AGG_RING_MULTIPLIER);
        req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
                                              bp->tx_nr_rings);
        req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
        req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
        if (bp->vf_resv_strategy ==
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
                                HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
                                HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
                req.enables |= rte_cpu_to_le_32(enables);
                req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
                req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
                req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
        }

        if (test)
                flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;

        req.flags = rte_cpu_to_le_32(flags);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (test)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_resource_qcaps_input req = {0};

        HWRM_PREP(req, FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (BNXT_VF(bp)) {
                bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
                bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
                bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
                bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
                bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
                bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        }
        bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
        if (bp->vf_resv_strategy >
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
                bp->vf_resv_strategy =
                HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
                resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
                resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
        bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
                     (resp->hwrm_fw_min_8b << 16) |
                     (resp->hwrm_fw_bld_8b << 8) |
                     resp->hwrm_fw_rsvd_8b;
        PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj_8b << 16;
        fw_version |= resp->hwrm_intf_min_8b << 8;
        fw_version |= resp->hwrm_intf_upd_8b;
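        /* The interface spec code packs maj.min.upd as
         * maj << 16 | min << 8 | upd, e.g. spec 1.9.1 == 0x10901, which is
         * what HWRM_VERSION_1_9_1 above encodes.
         */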
        bp->hwrm_spec_code = fw_version;

        if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        PMD_DRV_LOG(INFO,
                                "Firmware API version is newer than driver.\n");
                        PMD_DRV_LOG(INFO,
                                "The driver may be missing features.\n");
                } else {
                        PMD_DRV_LOG(INFO,
                                "Firmware API version is older than driver.\n");
                        PMD_DRV_LOG(INFO,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                PMD_DRV_LOG(ERR, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        max_resp_len = resp->max_resp_len;
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        /* Build the buffer name up front; it is also used for the short
         * command buffer below, which must not see it uninitialized.
         */
        sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                bp->pdev->addr.domain, bp->pdev->addr.bus,
                bp->pdev->addr.devid, bp->pdev->addr.function);

        if (bp->max_resp_len != max_resp_len) {
                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                PMD_DRV_LOG(DEBUG, "Short command supported\n");

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr = rte_malloc(type,
                                                        bp->max_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == 0) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }

                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
                bp->flags |= BNXT_FLAG_KONG_MB_EN;
                PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
                PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");

error:
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG, BNXT_USE_CHIMP_MB);

        if (conf->link_up) {
                /* Setting a fixed speed while autoneg is on, so disable
                 * autoneg.
                 */
                if (bp->link_info.auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
                }

                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        /* No speeds specified. Enable AutoNeg - all speeds */
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
                /* AutoNeg - Advertise speeds specified. */
                if (conf->auto_link_speed_mask &&
                    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
                                conf->auto_link_speed_mask;
                        enables |=
                        HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }

                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                PMD_DRV_LOG(INFO, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex_cfg;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
        link_info->phy_type = resp->phy_type;
        link_info->media_type = resp->media_type;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        HWRM_UNLOCK();

        PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
        PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
                    link_info->auto_link_speed_mask);
        PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
                    link_info->force_link_speed);

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int i;

        HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);

        req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
        /* HWRM Version >= 1.9.1 */
        if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
                req.drv_qmap_cap =
                        HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
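
/*
 * Token pasting expands, e.g., GET_QUEUE_INFO(0) into:
 *        bp->cos_queue[0].id = resp->queue_id0;
 *        bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */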

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        HWRM_UNLOCK();

        if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
                bp->tx_cosq_id = bp->cos_queue[0].id;
        } else {
                /* iterate and find the COSq profile to use for Tx */
                for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
                        if (bp->cos_queue[i].profile ==
                                HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
                                bp->tx_cosq_id = bp->cos_queue[i].id;
                                break;
                        }
                }
        }
        PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);

        return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
        int rc = 0;
        uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);
        req.length = rte_cpu_to_le_32(ring->ring_size);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
        default:
                PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                HWRM_UNLOCK();
                return -1;
        }
        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                default:
                        PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE, BNXT_USE_CHIMP_MB);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                HWRM_UNLOCK();

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        HWRM_UNLOCK();
        return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE, BNXT_USE_CHIMP_MB);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);

        req.update_period_ms = rte_cpu_to_le_32(0);

        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
                vnic->start_grp_id, vnic->end_grp_id);
        for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;

        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                ETHER_CRC_LEN + VLAN_TAG_SIZE;
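        /* The MRU above works out to 1500 + 14 + 4 + 4 = 1522 bytes for
         * the default 1500-byte MTU (L2 header, CRC, one VLAN tag).
         */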
        HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);

        if (vnic->func_default)
                req.flags =
                        rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        HWRM_UNLOCK();
        PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
                                        struct bnxt_vnic_info *vnic,
                                        struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        pmode->flags = rte_le_to_cpu_32(resp->flags);
        /* dflt_vnic bit doesn't exist in the _cfg command */
        pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
        pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
        pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
        pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

        HWRM_UNLOCK();

        return rc;
}
1396
1397 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1398                                        struct bnxt_vnic_info *vnic,
1399                                        struct bnxt_plcmodes_cfg *pmode)
1400 {
1401         int rc = 0;
1402         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1403         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1404
1405         HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1406
1407         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1408         req.flags = rte_cpu_to_le_32(pmode->flags);
1409         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1410         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1411         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1412         req.enables = rte_cpu_to_le_32(
1413             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1414             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1415             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1416         );
1417
1418         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1419
1420         HWRM_CHECK_RESULT();
1421         HWRM_UNLOCK();
1422
1423         return rc;
1424 }
1425
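/*
 * The current placement modes are queried before VNIC_CFG is sent and
 * re-applied afterwards; presumably the firmware may otherwise reset
 * them as a side effect of reconfiguring the VNIC.
 */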
1426 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1427 {
1428         int rc = 0;
1429         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1430         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1431         uint32_t ctx_enable_flag = 0;
1432         struct bnxt_plcmodes_cfg pmodes;
1433
1434         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1435                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1436                 return rc;
1437         }
1438
1439         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1440         if (rc)
1441                 return rc;
1442
1443         HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);
1444
1445         /* Only RSS is supported for now. TBD: COS & LB */
1446         req.enables =
1447             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
1448         if (vnic->lb_rule != 0xffff)
1449                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1450         if (vnic->cos_rule != 0xffff)
1451                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1452         if (vnic->rss_rule != 0xffff) {
1453                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1454                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1455         }
1456         req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1457         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1458         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1459         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1460         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1461         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1462         req.mru = rte_cpu_to_le_16(vnic->mru);
1463         if (vnic->func_default)
1464                 req.flags |=
1465                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1466         if (vnic->vlan_strip)
1467                 req.flags |=
1468                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1469         if (vnic->bd_stall)
1470                 req.flags |=
1471                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1472         if (vnic->roce_dual)
1473                 req.flags |= rte_cpu_to_le_32(
1474                         HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1475         if (vnic->roce_only)
1476                 req.flags |= rte_cpu_to_le_32(
1477                         HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1478         if (vnic->rss_dflt_cr)
1479                 req.flags |= rte_cpu_to_le_32(
1480                         HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE);
1481
1482         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1483
1484         HWRM_CHECK_RESULT();
1485         HWRM_UNLOCK();
1486
1487         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1488
1489         return rc;
1490 }
1491
1492 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1493                 int16_t fw_vf_id)
1494 {
1495         int rc = 0;
1496         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1497         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1498
1499         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1500                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1501                 return rc;
1502         }
1503         HWRM_PREP(req, VNIC_QCFG, BNXT_USE_CHIMP_MB);
1504
1505         req.enables =
1506                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1507         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1508         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1509
1510         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1511
1512         HWRM_CHECK_RESULT();
1513
1514         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1515         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1516         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1517         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1518         vnic->mru = rte_le_to_cpu_16(resp->mru);
1519         vnic->func_default = rte_le_to_cpu_32(
1520                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1521         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1522                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1523         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1524                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1525         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1526                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1527         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1528                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1529         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1530                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1531
1532         HWRM_UNLOCK();
1533
1534         return rc;
1535 }
1536
1537 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1538 {
1539         int rc = 0;
1540         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1541         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1542                                                 bp->hwrm_cmd_resp_addr;
1543
1544         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1545
1546         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1547
1548         HWRM_CHECK_RESULT();
1549
1550         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1551         HWRM_UNLOCK();
1552         PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1553
1554         return rc;
1555 }
1556
1557 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1558 {
1559         int rc = 0;
1560         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1561         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1562                                                 bp->hwrm_cmd_resp_addr;
1563
1564         if (vnic->rss_rule == 0xffff) {
1565                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1566                 return rc;
1567         }
1568         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
1569
1570         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1571
1572         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1573
1574         HWRM_CHECK_RESULT();
1575         HWRM_UNLOCK();
1576
1577         vnic->rss_rule = INVALID_HW_RING_ID;
1578
1579         return rc;
1580 }
1581
1582 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1583 {
1584         int rc = 0;
1585         struct hwrm_vnic_free_input req = {.req_type = 0 };
1586         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1587
1588         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1589                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1590                 return rc;
1591         }
1592
1593         HWRM_PREP(req, VNIC_FREE, BNXT_USE_CHIMP_MB);
1594
1595         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1596
1597         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1598
1599         HWRM_CHECK_RESULT();
1600         HWRM_UNLOCK();
1601
1602         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1603         return rc;
1604 }
1605
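/*
 * Note that the RSS indirection table and hash key are not embedded in
 * the request; only their IOVA addresses are passed, so both buffers
 * must remain valid while the firmware processes the command.
 */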
1606 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1607                            struct bnxt_vnic_info *vnic)
1608 {
1609         int rc = 0;
1610         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1611         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1612
1613         HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
1614
1615         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1616         req.hash_mode_flags = vnic->hash_mode;
1617
1618         req.ring_grp_tbl_addr =
1619             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1620         req.hash_key_tbl_addr =
1621             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1622         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1623
1624         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1625
1626         HWRM_CHECK_RESULT();
1627         HWRM_UNLOCK();
1628
1629         return rc;
1630 }
1631
1632 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1633                         struct bnxt_vnic_info *vnic)
1634 {
1635         int rc = 0;
1636         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1637         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1638         uint16_t size;
1639
1640         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1641                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1642                 return rc;
1643         }
1644
1645         HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1646
1647         req.flags = rte_cpu_to_le_32(
1648                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1649
1650         req.enables = rte_cpu_to_le_32(
1651                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1652
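        /* Largest payload a single Rx buffer can hold: the mbuf data
         * room less the reserved headroom.
         */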
1653         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1654         size -= RTE_PKTMBUF_HEADROOM;
1655
1656         req.jumbo_thresh = rte_cpu_to_le_16(size);
1657         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1658
1659         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1660
1661         HWRM_CHECK_RESULT();
1662         HWRM_UNLOCK();
1663
1664         return rc;
1665 }
1666
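/*
 * TPA (hardware LRO) configuration. The aggregation limits used when
 * enabling (5 segments, 512-byte minimum aggregation length) are
 * driver-chosen defaults; disabling sends the same command with no
 * flags or enables set, only the VNIC ID.
 */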
1667 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1668                         struct bnxt_vnic_info *vnic, bool enable)
1669 {
1670         int rc = 0;
1671         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1672         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1673
1674         HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
1675
1676         if (enable) {
1677                 req.enables = rte_cpu_to_le_32(
1678                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1679                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1680                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1681                 req.flags = rte_cpu_to_le_32(
1682                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1683                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1684                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1685                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1686                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1687                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1688                 req.max_agg_segs = rte_cpu_to_le_16(5);
1689                 req.max_aggs =
1690                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1691                 req.min_agg_len = rte_cpu_to_le_32(512);
1692         }
1693         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1694
1695         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1696
1697         HWRM_CHECK_RESULT();
1698         HWRM_UNLOCK();
1699
1700         return rc;
1701 }
1702
1703 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1704 {
1705         struct hwrm_func_cfg_input req = {0};
1706         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1707         int rc;
1708
1709         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1710         req.enables = rte_cpu_to_le_32(
1711                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1712         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1713         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1714
1715         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
1716
1717         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1718         HWRM_CHECK_RESULT();
1719         HWRM_UNLOCK();
1720
1721         bp->pf.vf_info[vf].random_mac = false;
1722
1723         return rc;
1724 }
1725
1726 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1727                                   uint64_t *dropped)
1728 {
1729         int rc = 0;
1730         struct hwrm_func_qstats_input req = {.req_type = 0};
1731         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1732
1733         HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
1734
1735         req.fid = rte_cpu_to_le_16(fid);
1736
1737         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1738
1739         HWRM_CHECK_RESULT();
1740
1741         if (dropped)
1742                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1743
1744         HWRM_UNLOCK();
1745
1746         return rc;
1747 }
1748
1749 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1750                           struct rte_eth_stats *stats)
1751 {
1752         int rc = 0;
1753         struct hwrm_func_qstats_input req = {.req_type = 0};
1754         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1755
1756         HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
1757
1758         req.fid = rte_cpu_to_le_16(fid);
1759
1760         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1761
1762         HWRM_CHECK_RESULT();
1763
1764         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1765         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1766         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1767         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1768         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1769         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1770
1771         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1772         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1773         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1774         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1775         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1776         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1777
1778         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
1779         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
1780         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
1781
1782         HWRM_UNLOCK();
1783
1784         return rc;
1785 }
1786
1787 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1788 {
1789         int rc = 0;
1790         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1791         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1792
1793         HWRM_PREP(req, FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
1794
1795         req.fid = rte_cpu_to_le_16(fid);
1796
1797         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1798
1799         HWRM_CHECK_RESULT();
1800         HWRM_UNLOCK();
1801
1802         return rc;
1803 }
1804
1805 /*
1806  * HWRM utility functions
1807  */
1808
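/*
 * Note on indexing: completion rings are ordered with all Rx queues
 * first, then Tx queues. An index i therefore maps to rx_queues[i]
 * when i < rx_cp_nr_rings and to tx_queues[i - rx_cp_nr_rings]
 * otherwise; the loops below all rely on this layout.
 */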
1809 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1810 {
1811         unsigned int i;
1812         int rc = 0;
1813
1814         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1815                 struct bnxt_tx_queue *txq;
1816                 struct bnxt_rx_queue *rxq;
1817                 struct bnxt_cp_ring_info *cpr;
1818
1819                 if (i >= bp->rx_cp_nr_rings) {
1820                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1821                         cpr = txq->cp_ring;
1822                 } else {
1823                         rxq = bp->rx_queues[i];
1824                         cpr = rxq->cp_ring;
1825                 }
1826
1827                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1828                 if (rc)
1829                         return rc;
1830         }
1831         return 0;
1832 }
1833
1834 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1835 {
1836         int rc;
1837         unsigned int i;
1838         struct bnxt_cp_ring_info *cpr;
1839
1840         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1841
1842                 if (i >= bp->rx_cp_nr_rings) {
1843                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1844                 } else {
1845                         cpr = bp->rx_queues[i]->cp_ring;
1846                         bp->grp_info[i].fw_stats_ctx = -1;
1847                 }
1848                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1849                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1850                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1851                         if (rc)
1852                                 return rc;
1853                 }
1854         }
1855         return 0;
1856 }
1857
1858 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1859 {
1860         unsigned int i;
1861         int rc = 0;
1862
1863         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1864                 struct bnxt_tx_queue *txq;
1865                 struct bnxt_rx_queue *rxq;
1866                 struct bnxt_cp_ring_info *cpr;
1867
1868                 if (i >= bp->rx_cp_nr_rings) {
1869                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1870                         cpr = txq->cp_ring;
1871                 } else {
1872                         rxq = bp->rx_queues[i];
1873                         cpr = rxq->cp_ring;
1874                 }
1875
1876                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1877
1878                 if (rc)
1879                         return rc;
1880         }
1881         return rc;
1882 }
1883
1884 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1885 {
1886         uint16_t idx;
1887         uint32_t rc = 0;
1888
1889         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1890
1891                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1892                         continue;
1893
1894                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1895
1896                 if (rc)
1897                         return rc;
1898         }
1899         return rc;
1900 }
1901
1902 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1903 {
1904         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1905
1906         bnxt_hwrm_ring_free(bp, cp_ring,
1907                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1908         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1909         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1910                         sizeof(*cpr->cp_desc_ring));
1911         cpr->cp_raw_cons = 0;
1912 }
1913
1914 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
1915 {
1916         struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
1917         struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1918         struct bnxt_ring *ring = rxr->rx_ring_struct;
1919         struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1920
1921         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1922                 bnxt_hwrm_ring_free(bp, ring,
1923                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1924                 ring->fw_ring_id = INVALID_HW_RING_ID;
1925                 bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
1926                 memset(rxr->rx_desc_ring, 0,
1927                        rxr->rx_ring_struct->ring_size *
1928                        sizeof(*rxr->rx_desc_ring));
1929                 memset(rxr->rx_buf_ring, 0,
1930                        rxr->rx_ring_struct->ring_size *
1931                        sizeof(*rxr->rx_buf_ring));
1932                 rxr->rx_prod = 0;
1933         }
1934         ring = rxr->ag_ring_struct;
1935         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1936                 bnxt_hwrm_ring_free(bp, ring,
1937                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1938                 ring->fw_ring_id = INVALID_HW_RING_ID;
1939                 memset(rxr->ag_buf_ring, 0,
1940                        rxr->ag_ring_struct->ring_size *
1941                        sizeof(*rxr->ag_buf_ring));
1942                 rxr->ag_prod = 0;
1943                 bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
1944         }
1945         if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
1946                 bnxt_free_cp_ring(bp, cpr);
1947
1948         bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
1949 }
1950
1951 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1952 {
1953         unsigned int i;
1954
1955         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1956                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1957                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1958                 struct bnxt_ring *ring = txr->tx_ring_struct;
1959                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1960
1961                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1962                         bnxt_hwrm_ring_free(bp, ring,
1963                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1964                         ring->fw_ring_id = INVALID_HW_RING_ID;
1965                         memset(txr->tx_desc_ring, 0,
1966                                         txr->tx_ring_struct->ring_size *
1967                                         sizeof(*txr->tx_desc_ring));
1968                         memset(txr->tx_buf_ring, 0,
1969                                         txr->tx_ring_struct->ring_size *
1970                                         sizeof(*txr->tx_buf_ring));
1971                         txr->tx_prod = 0;
1972                         txr->tx_cons = 0;
1973                 }
1974                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1975                         bnxt_free_cp_ring(bp, cpr);
1976                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1977                 }
1978         }
1979
1980         for (i = 0; i < bp->rx_cp_nr_rings; i++)
1981                 bnxt_free_hwrm_rx_ring(bp, i);
1982
1983         return 0;
1984 }
1985
1986 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1987 {
1988         uint16_t i;
1989         uint32_t rc = 0;
1990
1991         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1992                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1993                 if (rc)
1994                         return rc;
1995         }
1996         return rc;
1997 }
1998
1999 void bnxt_free_hwrm_resources(struct bnxt *bp)
2000 {
2001         /* Release HWRM command and response buffers */
2002         rte_free(bp->hwrm_cmd_resp_addr);
2003         rte_free(bp->hwrm_short_cmd_req_addr);
2004         bp->hwrm_cmd_resp_addr = NULL;
2005         bp->hwrm_short_cmd_req_addr = NULL;
2006         bp->hwrm_cmd_resp_dma_addr = 0;
2007         bp->hwrm_short_cmd_req_dma_addr = 0;
2008 }
2009
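/*
 * Allocate the DMA-able buffer used for HWRM responses. A minimal
 * usage sketch, assuming the usual probe/remove flow (the error label
 * below is hypothetical):
 *
 *     if (bnxt_alloc_hwrm_resources(bp))
 *             goto init_err;
 *     ...
 *     bnxt_free_hwrm_resources(bp);
 */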
2010 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2011 {
2012         struct rte_pci_device *pdev = bp->pdev;
2013         char type[RTE_MEMZONE_NAMESIZE];
2014
2015         snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
2016                 pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2017         bp->max_resp_len = HWRM_MAX_RESP_LEN;
2018         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2019         if (bp->hwrm_cmd_resp_addr == NULL)
2020                 return -ENOMEM;
2021         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
2022         bp->hwrm_cmd_resp_dma_addr =
2023                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
2024         if (bp->hwrm_cmd_resp_dma_addr == 0) {
2025                 PMD_DRV_LOG(ERR,
2026                         "unable to map response address to physical memory\n");
2027                 return -ENOMEM;
2028         }
2029         rte_spinlock_init(&bp->hwrm_lock);
2030
2031         return 0;
2032 }
2033
2034 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2035 {
2036         struct bnxt_filter_info *filter;
2037         int rc = 0;
2038
2039         STAILQ_FOREACH(filter, &vnic->filter, next) {
2040                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2041                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2042                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2043                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2044                 else
2045                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2046                 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
2047                 /* Continue clearing the remaining filters even if
2048                  * one of the clear commands fails. */
2049         }
2050         return rc;
2051 }
2052
2053 static int
2054 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2055 {
2056         struct bnxt_filter_info *filter;
2057         struct rte_flow *flow;
2058         int rc = 0;
2059
2060         STAILQ_FOREACH(flow, &vnic->flow_list, next) {
2061                 filter = flow->filter;
2062                 PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
2063                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2064                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2065                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2066                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2067                 else
2068                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2069
2070                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2071                 rte_free(flow);
2072                 /* Continue clearing the remaining flows even if
2073                  * one of the clear commands fails. */
2074         }
2075         return rc;
2076 }
2077
2078 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2079 {
2080         struct bnxt_filter_info *filter;
2081         int rc = 0;
2082
2083         STAILQ_FOREACH(filter, &vnic->filter, next) {
2084                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2085                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2086                                                      filter);
2087                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2088                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2089                                                          filter);
2090                 else
2091                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2092                                                      filter);
2093                 if (rc)
2094                         break;
2095         }
2096         return rc;
2097 }
2098
2099 void bnxt_free_tunnel_ports(struct bnxt *bp)
2100 {
2101         if (bp->vxlan_port_cnt)
2102                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2103                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2104         bp->vxlan_port = 0;
2105         if (bp->geneve_port_cnt)
2106                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2107                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2108         bp->geneve_port = 0;
2109 }
2110
2111 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2112 {
2113         int i;
2114
2115         if (bp->vnic_info == NULL)
2116                 return;
2117
2118         /*
2119          * Cleanup VNICs in reverse order, to make sure the L2 filter
2120          * from vnic0 is last to be cleaned up.
2121          */
2122         for (i = bp->nr_vnics - 1; i >= 0; i--) {
2123                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2124
2125                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2126
2127                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2128
2129                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2130
2131                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2132
2133                 bnxt_hwrm_vnic_free(bp, vnic);
2134
2135                 rte_free(vnic->fw_grp_ids);
2136         }
2137         /* Ring resources */
2138         bnxt_free_all_hwrm_rings(bp);
2139         bnxt_free_all_hwrm_ring_grps(bp);
2140         bnxt_free_all_hwrm_stat_ctxs(bp);
2141         bnxt_free_tunnel_ports(bp);
2142 }
2143
2144 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2145 {
2146         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2147
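        /* ETH_LINK_SPEED_AUTONEG is 0, so this tests that the FIXED
         * bit is not set.
         */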
2148         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2149                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2150
2151         switch (conf_link_speed) {
2152         case ETH_LINK_SPEED_10M_HD:
2153         case ETH_LINK_SPEED_100M_HD:
2154                 /* FALLTHROUGH */
2155                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2156         }
2157         return hw_link_duplex;
2158 }
2159
2160 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2161 {
2162         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2163 }
2164
2165 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2166 {
2167         uint16_t eth_link_speed = 0;
2168
2169         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2170                 return ETH_LINK_SPEED_AUTONEG;
2171
2172         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2173         case ETH_LINK_SPEED_100M:
2174         case ETH_LINK_SPEED_100M_HD:
2175                 /* FALLTHROUGH */
2176                 eth_link_speed =
2177                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2178                 break;
2179         case ETH_LINK_SPEED_1G:
2180                 eth_link_speed =
2181                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2182                 break;
2183         case ETH_LINK_SPEED_2_5G:
2184                 eth_link_speed =
2185                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2186                 break;
2187         case ETH_LINK_SPEED_10G:
2188                 eth_link_speed =
2189                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2190                 break;
2191         case ETH_LINK_SPEED_20G:
2192                 eth_link_speed =
2193                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2194                 break;
2195         case ETH_LINK_SPEED_25G:
2196                 eth_link_speed =
2197                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2198                 break;
2199         case ETH_LINK_SPEED_40G:
2200                 eth_link_speed =
2201                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2202                 break;
2203         case ETH_LINK_SPEED_50G:
2204                 eth_link_speed =
2205                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2206                 break;
2207         case ETH_LINK_SPEED_100G:
2208                 eth_link_speed =
2209                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2210                 break;
2211         default:
2212                 PMD_DRV_LOG(ERR,
2213                         "Unsupported link speed %u; default to AUTO\n",
2214                         conf_link_speed);
2215                 break;
2216         }
2217         return eth_link_speed;
2218 }
2219
2220 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2221                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2222                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2223                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2224
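/*
 * A fixed-speed request must have exactly one speed bit set; the
 * (one_speed & (one_speed - 1)) test below rejects masks with more
 * than one bit. Autoneg requests may advertise any supported subset.
 */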
2225 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2226 {
2227         uint32_t one_speed;
2228
2229         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2230                 return 0;
2231
2232         if (link_speed & ETH_LINK_SPEED_FIXED) {
2233                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2234
2235                 if (one_speed & (one_speed - 1)) {
2236                         PMD_DRV_LOG(ERR,
2237                                 "Invalid advertised speeds (%u) for port %u\n",
2238                                 link_speed, port_id);
2239                         return -EINVAL;
2240                 }
2241                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2242                         PMD_DRV_LOG(ERR,
2243                                 "Unsupported advertised speed (%u) for port %u\n",
2244                                 link_speed, port_id);
2245                         return -EINVAL;
2246                 }
2247         } else {
2248                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2249                         PMD_DRV_LOG(ERR,
2250                                 "Unsupported advertised speeds (%u) for port %u\n",
2251                                 link_speed, port_id);
2252                         return -EINVAL;
2253                 }
2254         }
2255         return 0;
2256 }
2257
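/*
 * Translate an ETH_LINK_SPEED_* bitmap into the HWRM auto link speed
 * mask. 100M_HD maps onto the same 100MB bit as 100M, as the HWRM
 * mask does not appear to define half-duplex variants.
 */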
2258 static uint16_t
2259 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2260 {
2261         uint16_t ret = 0;
2262
2263         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2264                 if (bp->link_info.support_speeds)
2265                         return bp->link_info.support_speeds;
2266                 link_speed = BNXT_SUPPORTED_SPEEDS;
2267         }
2268
2269         if (link_speed & ETH_LINK_SPEED_100M)
2270                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2271         if (link_speed & ETH_LINK_SPEED_100M_HD)
2272                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2273         if (link_speed & ETH_LINK_SPEED_1G)
2274                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2275         if (link_speed & ETH_LINK_SPEED_2_5G)
2276                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2277         if (link_speed & ETH_LINK_SPEED_10G)
2278                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2279         if (link_speed & ETH_LINK_SPEED_20G)
2280                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2281         if (link_speed & ETH_LINK_SPEED_25G)
2282                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2283         if (link_speed & ETH_LINK_SPEED_40G)
2284                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2285         if (link_speed & ETH_LINK_SPEED_50G)
2286                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2287         if (link_speed & ETH_LINK_SPEED_100G)
2288                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2289         return ret;
2290 }
2291
2292 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2293 {
2294         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2295
2296         switch (hw_link_speed) {
2297         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2298                 eth_link_speed = ETH_SPEED_NUM_100M;
2299                 break;
2300         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2301                 eth_link_speed = ETH_SPEED_NUM_1G;
2302                 break;
2303         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2304                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2305                 break;
2306         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2307                 eth_link_speed = ETH_SPEED_NUM_10G;
2308                 break;
2309         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2310                 eth_link_speed = ETH_SPEED_NUM_20G;
2311                 break;
2312         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2313                 eth_link_speed = ETH_SPEED_NUM_25G;
2314                 break;
2315         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2316                 eth_link_speed = ETH_SPEED_NUM_40G;
2317                 break;
2318         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2319                 eth_link_speed = ETH_SPEED_NUM_50G;
2320                 break;
2321         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2322                 eth_link_speed = ETH_SPEED_NUM_100G;
2323                 break;
2324         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2325         default:
2326                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2327                         hw_link_speed);
2328                 break;
2329         }
2330         return eth_link_speed;
2331 }
2332
2333 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2334 {
2335         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2336
2337         switch (hw_link_duplex) {
2338         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2339         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2340                 /* FALLTHROUGH */
2341                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2342                 break;
2343         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2344                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2345                 break;
2346         default:
2347                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2348                         hw_link_duplex);
2349                 break;
2350         }
2351         return eth_link_duplex;
2352 }
2353
2354 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2355 {
2356         int rc = 0;
2357         struct bnxt_link_info *link_info = &bp->link_info;
2358
2359         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2360         if (rc) {
2361                 PMD_DRV_LOG(ERR,
2362                         "Get link config failed with rc %d\n", rc);
2363                 goto exit;
2364         }
2365         if (link_info->link_speed)
2366                 link->link_speed =
2367                         bnxt_parse_hw_link_speed(link_info->link_speed);
2368         else
2369                 link->link_speed = ETH_SPEED_NUM_NONE;
2370         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2371         link->link_status = link_info->link_up;
2372         link->link_autoneg = link_info->auto_mode ==
2373                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2374                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2375 exit:
2376         return rc;
2377 }
2378
2379 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2380 {
2381         int rc = 0;
2382         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2383         struct bnxt_link_info link_req;
2384         uint16_t speed, autoneg;
2385
2386         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2387                 return 0;
2388
2389         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2390                         bp->eth_dev->data->port_id);
2391         if (rc)
2392                 goto error;
2393
2394         memset(&link_req, 0, sizeof(link_req));
2395         link_req.link_up = link_up;
2396         if (!link_up)
2397                 goto port_phy_cfg;
2398
2399         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2400         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2401         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2402         /* Autoneg can be used only when the FW allows it */
2403         if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2404                                 bp->link_info.force_link_speed)) {
2405                 link_req.phy_flags |=
2406                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2407                 link_req.auto_link_speed_mask =
2408                         bnxt_parse_eth_link_speed_mask(bp,
2409                                                        dev_conf->link_speeds);
2410         } else {
2411                 if (bp->link_info.phy_type ==
2412                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2413                     bp->link_info.phy_type ==
2414                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2415                     bp->link_info.media_type ==
2416                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2417                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2418                         return -EINVAL;
2419                 }
2420
2421                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2422                 /* If user wants a particular speed try that first. */
2423                 if (speed)
2424                         link_req.link_speed = speed;
2425                 else if (bp->link_info.force_link_speed)
2426                         link_req.link_speed = bp->link_info.force_link_speed;
2427                 else
2428                         link_req.link_speed = bp->link_info.auto_link_speed;
2429         }
2430         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2431         link_req.auto_pause = bp->link_info.auto_pause;
2432         link_req.force_pause = bp->link_info.force_pause;
2433
2434 port_phy_cfg:
2435         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2436         if (rc) {
2437                 PMD_DRV_LOG(ERR,
2438                         "Set link config failed with rc %d\n", rc);
2439         }
2440
2441 error:
2442         return rc;
2443 }
2444
2445 /* JIRA 22088 */
2446 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2447 {
2448         struct hwrm_func_qcfg_input req = {0};
2449         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2450         uint16_t flags;
2451         int rc = 0;
2452
2453         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2454         req.fid = rte_cpu_to_le_16(0xffff);
2455
2456         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2457
2458         HWRM_CHECK_RESULT();
2459
2460         /* Hardcoded 0xfff VLAN ID mask */
2461         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2462         flags = rte_le_to_cpu_16(resp->flags);
2463         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2464                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2465
2466         if (BNXT_VF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
2467                 bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
2468                 PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
2469         }
2470
2471         switch (resp->port_partition_type) {
2472         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2473         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2474         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2475                 /* FALLTHROUGH */
2476                 bp->port_partition_type = resp->port_partition_type;
2477                 break;
2478         default:
2479                 bp->port_partition_type = 0;
2480                 break;
2481         }
2482
2483         HWRM_UNLOCK();
2484
2485         return rc;
2486 }
2487
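/*
 * Fallback used when FUNC_QCAPS fails for a VF: synthesize a qcaps
 * response from the func_cfg request that was just sent, so that the
 * PF resource accounting in reserve_resources_from_vf() can proceed.
 */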
2488 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2489                                    struct hwrm_func_qcaps_output *qcaps)
2490 {
2491         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2492         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2493                sizeof(qcaps->mac_address));
2494         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2495         qcaps->max_rx_rings = fcfg->num_rx_rings;
2496         qcaps->max_tx_rings = fcfg->num_tx_rings;
2497         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2498         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2499         qcaps->max_vfs = 0;
2500         qcaps->first_vf_id = 0;
2501         qcaps->max_vnics = fcfg->num_vnics;
2502         qcaps->max_decap_records = 0;
2503         qcaps->max_encap_records = 0;
2504         qcaps->max_tx_wm_flows = 0;
2505         qcaps->max_tx_em_flows = 0;
2506         qcaps->max_rx_wm_flows = 0;
2507         qcaps->max_rx_em_flows = 0;
2508         qcaps->max_flow_id = 0;
2509         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2510         qcaps->max_sp_tx_rings = 0;
2511         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2512 }
2513
2514 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2515 {
2516         struct hwrm_func_cfg_input req = {0};
2517         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2518         int rc;
2519
2520         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2521                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2522                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2523                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2524                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2525                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2526                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2527                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2528                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2529                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2530         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2531         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2532         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2533                                    ETHER_CRC_LEN + VLAN_TAG_SIZE *
2534                                    BNXT_NUM_VLANS);
2535         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2536         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2537         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2538         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2539         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2540         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2541         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2542         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2543         req.fid = rte_cpu_to_le_16(0xffff);
2544
2545         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2546
2547         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2548
2549         HWRM_CHECK_RESULT();
2550         HWRM_UNLOCK();
2551
2552         return rc;
2553 }
2554
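/*
 * Split the PF's resources evenly between the PF and its VFs: each of
 * the (num_vfs + 1) functions receives an equal share of the rings,
 * contexts and ring groups. VNICs stay at one per VF for now (no
 * VMDq/RFS support on VFs).
 */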
2555 static void populate_vf_func_cfg_req(struct bnxt *bp,
2556                                      struct hwrm_func_cfg_input *req,
2557                                      int num_vfs)
2558 {
2559         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2560                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2561                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2562                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2563                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2564                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2565                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2566                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2567                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2568                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2569
2570         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2571                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2572                                     BNXT_NUM_VLANS);
2573         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2574                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2575                                     BNXT_NUM_VLANS);
2576         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2577                                                 (num_vfs + 1));
2578         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2579         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2580                                                (num_vfs + 1));
2581         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2582         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2583         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2584         /* TODO: For now, do not support VMDq/RFS on VFs. */
2585         req->num_vnics = rte_cpu_to_le_16(1);
2586         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2587                                                  (num_vfs + 1));
2588 }
2589
2590 static void add_random_mac_if_needed(struct bnxt *bp,
2591                                      struct hwrm_func_cfg_input *cfg_req,
2592                                      int vf)
2593 {
2594         struct ether_addr mac;
2595
2596         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2597                 return;
2598
2599         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2600                 cfg_req->enables |=
2601                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2602                 eth_random_addr(cfg_req->dflt_mac_addr);
2603                 bp->pf.vf_info[vf].random_mac = true;
2604         } else {
2605                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2606         }
2607 }
2608
2609 static void reserve_resources_from_vf(struct bnxt *bp,
2610                                       struct hwrm_func_cfg_input *cfg_req,
2611                                       int vf)
2612 {
2613         struct hwrm_func_qcaps_input req = {0};
2614         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2615         int rc;
2616
2617         /* Get the actual allocated values now */
2618         HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
2619         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2620         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2621
2622         if (rc) {
2623                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2624                 copy_func_cfg_to_qcaps(cfg_req, resp);
2625         } else if (resp->error_code) {
2626                 rc = rte_le_to_cpu_16(resp->error_code);
2627                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2628                 copy_func_cfg_to_qcaps(cfg_req, resp);
2629         }
2630
2631         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2632         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2633         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2634         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2635         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2636         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2637         /*
2638          * TODO: While not supporting VMDq with VFs, max_vnics is always
2639          * forced to 1 in this case
2640          */
2641         //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2642         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2643
2644         HWRM_UNLOCK();
2645 }
2646
2647 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2648 {
2649         struct hwrm_func_qcfg_input req = {0};
2650         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2651         int rc;
2652
2653         /* Check for zero MAC address */
2654         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2655         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2656         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2657         if (rc) {
2658                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
2659                 return -1;
2660         } else if (resp->error_code) {
2661                 rc = rte_le_to_cpu_16(resp->error_code);
2662                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
2663                 return -1;
2664         }
2665         rc = rte_le_to_cpu_16(resp->vlan);
2666
2667         HWRM_UNLOCK();
2668
2669         return rc;
2670 }
2671
2672 static int update_pf_resource_max(struct bnxt *bp)
2673 {
2674         struct hwrm_func_qcfg_input req = {0};
2675         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2676         int rc;
2677
2678         /* And copy the allocated numbers into the pf struct */
2679         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2680         req.fid = rte_cpu_to_le_16(0xffff);
2681         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2682         HWRM_CHECK_RESULT();
2683
2684         /* Only TX ring value reflects actual allocation? TODO */
2685         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2686         bp->pf.evb_mode = resp->evb_mode;
2687
2688         HWRM_UNLOCK();
2689
2690         return rc;
2691 }
2692
2693 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2694 {
2695         int rc;
2696
2697         if (!BNXT_PF(bp)) {
2698                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2699                 return -1;
2700         }
2701
2702         rc = bnxt_hwrm_func_qcaps(bp);
2703         if (rc)
2704                 return rc;
2705
2706         bp->pf.func_cfg_flags &=
2707                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2708                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2709         bp->pf.func_cfg_flags |=
2710                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2711         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2712         return rc;
2713 }
2714
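/*
 * Provision num_vfs VFs: query capabilities, shrink the PF to a single TX
 * ring so rings remain available for the VFs, register the VF
 * request-forwarding buffer, configure each VF (assigning a random MAC
 * where needed), and finally give the remaining resources back to the PF.
 */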
2715 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2716 {
2717         struct hwrm_func_cfg_input req = {0};
2718         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2719         int i;
2720         size_t sz;
2721         int rc = 0;
2722         size_t req_buf_sz;
2723
2724         if (!BNXT_PF(bp)) {
                PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2726                 return -1;
2727         }
2728
2729         rc = bnxt_hwrm_func_qcaps(bp);
2730
2731         if (rc)
2732                 return rc;
2733
2734         bp->pf.active_vfs = num_vfs;
2735
2736         /*
2737          * First, configure the PF to only use one TX ring.  This ensures that
2738          * there are enough rings for all VFs.
2739          *
2740          * If we don't do this, when we call func_alloc() later, we will lock
2741          * extra rings to the PF that won't be available during func_cfg() of
2742          * the VFs.
2743          *
2744          * This has been fixed with firmware versions above 20.6.54
2745          */
2746         bp->pf.func_cfg_flags &=
2747                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2748                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2749         bp->pf.func_cfg_flags |=
2750                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2751         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2752         if (rc)
2753                 return rc;
2754
2755         /*
2756          * Now, create and register a buffer to hold forwarded VF requests
2757          */
        req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
        bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
                page_roundup(req_buf_sz));
2761         if (bp->pf.vf_req_buf == NULL) {
2762                 rc = -ENOMEM;
2763                 goto error_free;
2764         }
2765         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2766                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2767         for (i = 0; i < num_vfs; i++)
2768                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2769                                         (i * HWRM_MAX_REQ_LEN);
2770
2771         rc = bnxt_hwrm_func_buf_rgtr(bp);
2772         if (rc)
2773                 goto error_free;
2774
2775         populate_vf_func_cfg_req(bp, &req, num_vfs);
2776
2777         bp->pf.active_vfs = 0;
2778         for (i = 0; i < num_vfs; i++) {
2779                 add_random_mac_if_needed(bp, &req, i);
2780
2781                 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2782                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2783                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2784                 rc = bnxt_hwrm_send_message(bp,
2785                                             &req,
2786                                             sizeof(req),
2787                                             BNXT_USE_CHIMP_MB);
2788
2789                 /* Clear enable flag for next pass */
2790                 req.enables &= ~rte_cpu_to_le_32(
2791                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2792
                if (rc || resp->error_code) {
                        PMD_DRV_LOG(ERR,
                                "Failed to initialize VF %d\n", i);
                        PMD_DRV_LOG(ERR,
                                "Not all VFs available. (%d, %d)\n",
                                rc, rte_le_to_cpu_16(resp->error_code));
2799                         HWRM_UNLOCK();
2800                         break;
2801                 }
2802
2803                 HWRM_UNLOCK();
2804
2805                 reserve_resources_from_vf(bp, &req, i);
2806                 bp->pf.active_vfs++;
2807                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2808         }
2809
        /*
         * Now configure the PF to use "the rest" of the resources.
         * STD_TX_RING_MODE is used here even though it limits the number of
         * TX rings, because it allows QoS to function properly; without it,
         * PF rings would not honor the configured bandwidth settings.
         */
2816         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2817         if (rc)
2818                 goto error_free;
2819
2820         rc = update_pf_resource_max(bp);
2821         if (rc)
2822                 goto error_free;
2823
2824         return rc;
2825
2826 error_free:
2827         bnxt_hwrm_func_buf_unrgtr(bp);
2828         return rc;
2829 }
2830
2831 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2832 {
2833         struct hwrm_func_cfg_input req = {0};
2834         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2835         int rc;
2836
2837         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2838
2839         req.fid = rte_cpu_to_le_16(0xffff);
2840         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2841         req.evb_mode = bp->pf.evb_mode;
2842
2843         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2844         HWRM_CHECK_RESULT();
2845         HWRM_UNLOCK();
2846
2847         return rc;
2848 }
2849
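/*
 * Program a UDP destination port for tunnel (VXLAN/GENEVE) packet parsing
 * and cache the firmware-assigned port ID so it can be freed later.
 */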
2850 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2851                                 uint8_t tunnel_type)
2852 {
2853         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2854         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2855         int rc = 0;
2856
2857         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
2858         req.tunnel_type = tunnel_type;
2859         req.tunnel_dst_port_val = port;
2860         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2861         HWRM_CHECK_RESULT();
2862
2863         switch (tunnel_type) {
2864         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2865                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2866                 bp->vxlan_port = port;
2867                 break;
2868         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2869                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2870                 bp->geneve_port = port;
2871                 break;
2872         default:
2873                 break;
2874         }
2875
2876         HWRM_UNLOCK();
2877
2878         return rc;
2879 }
2880
2881 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2882                                 uint8_t tunnel_type)
2883 {
2884         struct hwrm_tunnel_dst_port_free_input req = {0};
2885         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2886         int rc = 0;
2887
2888         HWRM_PREP(req, TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
2889
2890         req.tunnel_type = tunnel_type;
2891         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2892         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2893
2894         HWRM_CHECK_RESULT();
2895         HWRM_UNLOCK();
2896
2897         return rc;
2898 }
2899
2900 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2901                                         uint32_t flags)
2902 {
2903         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2904         struct hwrm_func_cfg_input req = {0};
2905         int rc;
2906
2907         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2908
2909         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2910         req.flags = rte_cpu_to_le_32(flags);
2911         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2912
2913         HWRM_CHECK_RESULT();
2914         HWRM_UNLOCK();
2915
2916         return rc;
2917 }
2918
2919 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2920 {
2921         uint32_t *flag = flagp;
2922
2923         vnic->flags = *flag;
2924 }
2925
2926 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2927 {
2928         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2929 }
2930
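/*
 * Register the PF-owned buffer into which firmware copies HWRM requests
 * forwarded from the VFs. The buffer is registered as a single region via
 * its IOVA, so it must be resident and mappable.
 */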
2931 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2932 {
2933         int rc = 0;
2934         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2935         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2936
2937         HWRM_PREP(req, FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
2938
2939         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2940         req.req_buf_page_size = rte_cpu_to_le_16(
2941                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2942         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2943         req.req_buf_page_addr0 =
2944                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2945         if (req.req_buf_page_addr0 == 0) {
2946                 PMD_DRV_LOG(ERR,
2947                         "unable to map buffer address to physical memory\n");
2948                 return -ENOMEM;
2949         }
2950
2951         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2952
2953         HWRM_CHECK_RESULT();
2954         HWRM_UNLOCK();
2955
2956         return rc;
2957 }
2958
2959 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2960 {
2961         int rc = 0;
2962         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2963         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2964
2965         HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
2966
2967         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2968
2969         HWRM_CHECK_RESULT();
2970         HWRM_UNLOCK();
2971
2972         return rc;
2973 }
2974
2975 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2976 {
2977         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2978         struct hwrm_func_cfg_input req = {0};
2979         int rc;
2980
2981         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2982
2983         req.fid = rte_cpu_to_le_16(0xffff);
2984         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2985         req.enables = rte_cpu_to_le_32(
2986                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2987         req.async_event_cr = rte_cpu_to_le_16(
2988                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2989         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2990
2991         HWRM_CHECK_RESULT();
2992         HWRM_UNLOCK();
2993
2994         return rc;
2995 }
2996
2997 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2998 {
2999         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3000         struct hwrm_func_vf_cfg_input req = {0};
3001         int rc;
3002
3003         HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
3004
3005         req.enables = rte_cpu_to_le_32(
3006                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3007         req.async_event_cr = rte_cpu_to_le_16(
3008                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
3009         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3010
3011         HWRM_CHECK_RESULT();
3012         HWRM_UNLOCK();
3013
3014         return rc;
3015 }
3016
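/* Program the default VLAN for either a VF (is_vf set) or the PF itself */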
3017 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
3018 {
3019         struct hwrm_func_cfg_input req = {0};
3020         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3021         uint16_t dflt_vlan, fid;
3022         uint32_t func_cfg_flags;
3023         int rc = 0;
3024
3025         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3026
3027         if (is_vf) {
3028                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
3029                 fid = bp->pf.vf_info[vf].fid;
3030                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
3031         } else {
3032                 fid = rte_cpu_to_le_16(0xffff);
3033                 func_cfg_flags = bp->pf.func_cfg_flags;
3034                 dflt_vlan = bp->vlan;
3035         }
3036
3037         req.flags = rte_cpu_to_le_32(func_cfg_flags);
3038         req.fid = rte_cpu_to_le_16(fid);
3039         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3040         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
3041
3042         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3043
3044         HWRM_CHECK_RESULT();
3045         HWRM_UNLOCK();
3046
3047         return rc;
3048 }
3049
3050 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
3051                         uint16_t max_bw, uint16_t enables)
3052 {
3053         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3054         struct hwrm_func_cfg_input req = {0};
3055         int rc;
3056
3057         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3058
3059         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3060         req.enables |= rte_cpu_to_le_32(enables);
3061         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3062         req.max_bw = rte_cpu_to_le_32(max_bw);
3063         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3064
3065         HWRM_CHECK_RESULT();
3066         HWRM_UNLOCK();
3067
3068         return rc;
3069 }
3070
3071 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
3072 {
3073         struct hwrm_func_cfg_input req = {0};
3074         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3075         int rc = 0;
3076
3077         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3078
3079         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3080         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3081         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3082         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
3083
3084         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3085
3086         HWRM_CHECK_RESULT();
3087         HWRM_UNLOCK();
3088
3089         return rc;
3090 }
3091
3092 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3093 {
3094         int rc;
3095
3096         if (BNXT_PF(bp))
3097                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3098         else
3099                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3100
3101         return rc;
3102 }
3103
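/*
 * Tell firmware to reject a VF request that the PF driver chose not to
 * honor; the original encapsulated request is echoed back to firmware.
 */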
3104 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3105                               void *encaped, size_t ec_size)
3106 {
3107         int rc = 0;
3108         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3109         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3110
3111         if (ec_size > sizeof(req.encap_request))
3112                 return -1;
3113
3114         HWRM_PREP(req, REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
3115
3116         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3117         memcpy(req.encap_request, encaped, ec_size);
3118
3119         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3120
3121         HWRM_CHECK_RESULT();
3122         HWRM_UNLOCK();
3123
3124         return rc;
3125 }
3126
3127 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3128                                        struct ether_addr *mac)
3129 {
3130         struct hwrm_func_qcfg_input req = {0};
3131         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3132         int rc;
3133
3134         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
3135
3136         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3137         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3138
3139         HWRM_CHECK_RESULT();
3140
3141         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
3142
3143         HWRM_UNLOCK();
3144
3145         return rc;
3146 }
3147
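/*
 * Counterpart of bnxt_hwrm_reject_fwd_resp(): ask firmware to execute a
 * forwarded VF request on the VF's behalf.
 */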
3148 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3149                             void *encaped, size_t ec_size)
3150 {
3151         int rc = 0;
3152         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3153         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3154
3155         if (ec_size > sizeof(req.encap_request))
3156                 return -1;
3157
3158         HWRM_PREP(req, EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
3159
3160         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3161         memcpy(req.encap_request, encaped, ec_size);
3162
3163         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3164
3165         HWRM_CHECK_RESULT();
3166         HWRM_UNLOCK();
3167
3168         return rc;
3169 }
3170
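/*
 * Query one statistics context and fold the counters into the per-queue
 * fields of rte_eth_stats; rx selects the ingress vs. egress counter set.
 */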
3171 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3172                          struct rte_eth_stats *stats, uint8_t rx)
3173 {
3174         int rc = 0;
3175         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3176         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3177
3178         HWRM_PREP(req, STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
3179
3180         req.stat_ctx_id = rte_cpu_to_le_32(cid);
3181
3182         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3183
3184         HWRM_CHECK_RESULT();
3185
3186         if (rx) {
3187                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3188                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3189                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3190                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3191                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3192                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3193                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3194                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3195         } else {
3196                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3197                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3198                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3199                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3200                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3201                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3202                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
3203         }
3204
3206         HWRM_UNLOCK();
3207
3208         return rc;
3209 }
3210
3211 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3212 {
3213         struct hwrm_port_qstats_input req = {0};
3214         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3215         struct bnxt_pf_info *pf = &bp->pf;
3216         int rc;
3217
3218         HWRM_PREP(req, PORT_QSTATS, BNXT_USE_CHIMP_MB);
3219
3220         req.port_id = rte_cpu_to_le_16(pf->port_id);
3221         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3222         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3223         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3224
3225         HWRM_CHECK_RESULT();
3226         HWRM_UNLOCK();
3227
3228         return rc;
3229 }
3230
3231 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3232 {
3233         struct hwrm_port_clr_stats_input req = {0};
3234         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3235         struct bnxt_pf_info *pf = &bp->pf;
3236         int rc;
3237
        /* Not allowed on NS2 device, NPAR, MultiHost, VF, or when VFs exist */
3239         if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
3240             BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
3241                 return 0;
3242
3243         HWRM_PREP(req, PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
3244
3245         req.port_id = rte_cpu_to_le_16(pf->port_id);
3246         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3247
3248         HWRM_CHECK_RESULT();
3249         HWRM_UNLOCK();
3250
3251         return rc;
3252 }
3253
3254 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3255 {
3256         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3257         struct hwrm_port_led_qcaps_input req = {0};
3258         int rc;
3259
3260         if (BNXT_VF(bp))
3261                 return 0;
3262
3263         HWRM_PREP(req, PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
3264         req.port_id = bp->pf.port_id;
3265         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3266
3267         HWRM_CHECK_RESULT();
3268
3269         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3270                 unsigned int i;
3271
3272                 bp->num_leds = resp->num_leds;
3273                 memcpy(bp->leds, &resp->led0_id,
3274                         sizeof(bp->leds[0]) * bp->num_leds);
3275                 for (i = 0; i < bp->num_leds; i++) {
3276                         struct bnxt_led_info *led = &bp->leds[i];
3277
3278                         uint16_t caps = led->led_state_caps;
3279
3280                         if (!led->led_group_id ||
3281                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3282                                 bp->num_leds = 0;
3283                                 break;
3284                         }
3285                 }
3286         }
3287
3288         HWRM_UNLOCK();
3289
3290         return rc;
3291 }
3292
3293 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3294 {
3295         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3296         struct hwrm_port_led_cfg_input req = {0};
3297         struct bnxt_led_cfg *led_cfg;
3298         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3299         uint16_t duration = 0;
3300         int rc, i;
3301
3302         if (!bp->num_leds || BNXT_VF(bp))
3303                 return -EOPNOTSUPP;
3304
3305         HWRM_PREP(req, PORT_LED_CFG, BNXT_USE_CHIMP_MB);
3306
3307         if (led_on) {
3308                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3309                 duration = rte_cpu_to_le_16(500);
3310         }
3311         req.port_id = bp->pf.port_id;
3312         req.num_leds = bp->num_leds;
3313         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3314         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3315                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3316                 led_cfg->led_id = bp->leds[i].led_id;
3317                 led_cfg->led_state = led_state;
3318                 led_cfg->led_blink_on = duration;
3319                 led_cfg->led_blink_off = duration;
3320                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3321         }
3322
3323         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3324
3325         HWRM_CHECK_RESULT();
3326         HWRM_UNLOCK();
3327
3328         return rc;
3329 }
3330
3331 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3332                                uint32_t *length)
3333 {
3334         int rc;
3335         struct hwrm_nvm_get_dir_info_input req = {0};
3336         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3337
3338         HWRM_PREP(req, NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
3339
3340         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3341
3342         HWRM_CHECK_RESULT();
3343         HWRM_UNLOCK();
3344
3345         if (!rc) {
3346                 *entries = rte_le_to_cpu_32(resp->entries);
3347                 *length = rte_le_to_cpu_32(resp->entry_length);
3348         }
3349         return rc;
3350 }
3351
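/*
 * Read the NVM directory into the caller's buffer. The first two bytes are
 * the (truncated) entry count and entry length; the entries themselves are
 * fetched through a locked, DMA-able bounce buffer.
 */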
3352 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3353 {
3354         int rc;
3355         uint32_t dir_entries;
3356         uint32_t entry_length;
3357         uint8_t *buf;
3358         size_t buflen;
3359         rte_iova_t dma_handle;
3360         struct hwrm_nvm_get_dir_entries_input req = {0};
3361         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3362
3363         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3364         if (rc != 0)
3365                 return rc;
3366
3367         *data++ = dir_entries;
3368         *data++ = entry_length;
3369         len -= 2;
3370         memset(data, 0xff, len);
3371
3372         buflen = dir_entries * entry_length;
        buf = rte_malloc("nvm_dir", buflen, 0);
        if (buf == NULL)
                return -ENOMEM;
        rte_mem_lock_page(buf);
        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                rte_free(buf);
                return -ENOMEM;
        }
3383         HWRM_PREP(req, NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
3384         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3385         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3386
3387         if (rc == 0)
3388                 memcpy(data, buf, len > buflen ? buflen : len);
3389
3390         rte_free(buf);
3391         HWRM_CHECK_RESULT();
3392         HWRM_UNLOCK();
3393
3394         return rc;
3395 }
3396
3397 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3398                              uint32_t offset, uint32_t length,
3399                              uint8_t *data)
3400 {
3401         int rc;
3402         uint8_t *buf;
3403         rte_iova_t dma_handle;
3404         struct hwrm_nvm_read_input req = {0};
3405         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3406
        buf = rte_malloc("nvm_item", length, 0);
        if (!buf)
                return -ENOMEM;
        rte_mem_lock_page(buf);

        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                rte_free(buf);
                return -ENOMEM;
        }
3418         HWRM_PREP(req, NVM_READ, BNXT_USE_CHIMP_MB);
3419         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3420         req.dir_idx = rte_cpu_to_le_16(index);
3421         req.offset = rte_cpu_to_le_32(offset);
3422         req.len = rte_cpu_to_le_32(length);
3423         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3424         if (rc == 0)
3425                 memcpy(data, buf, length);
3426
3427         rte_free(buf);
3428         HWRM_CHECK_RESULT();
3429         HWRM_UNLOCK();
3430
3431         return rc;
3432 }
3433
3434 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3435 {
3436         int rc;
3437         struct hwrm_nvm_erase_dir_entry_input req = {0};
3438         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3439
3440         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
3441         req.dir_idx = rte_cpu_to_le_16(index);
3442         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3443         HWRM_CHECK_RESULT();
3444         HWRM_UNLOCK();
3445
3446         return rc;
3447 }
3448
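/*
 * Write an image to NVM: the payload is staged in a DMA-able buffer whose
 * IOVA is handed to firmware along with the directory entry attributes.
 */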
3450 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3451                           uint16_t dir_ordinal, uint16_t dir_ext,
3452                           uint16_t dir_attr, const uint8_t *data,
3453                           size_t data_len)
3454 {
3455         int rc;
3456         struct hwrm_nvm_write_input req = {0};
3457         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3458         rte_iova_t dma_handle;
3459         uint8_t *buf;
3460
        buf = rte_malloc("nvm_write", data_len, 0);
        if (!buf)
                return -ENOMEM;
        rte_mem_lock_page(buf);

        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                rte_free(buf);
                return -ENOMEM;
        }
3472         memcpy(buf, data, data_len);
3473
3474         HWRM_PREP(req, NVM_WRITE, BNXT_USE_CHIMP_MB);
3475
3476         req.dir_type = rte_cpu_to_le_16(dir_type);
3477         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3478         req.dir_ext = rte_cpu_to_le_16(dir_ext);
3479         req.dir_attr = rte_cpu_to_le_16(dir_attr);
3480         req.dir_data_length = rte_cpu_to_le_32(data_len);
3481         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3482
3483         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3484
3485         rte_free(buf);
3486         HWRM_CHECK_RESULT();
3487         HWRM_UNLOCK();
3488
3489         return rc;
3490 }
3491
3492 static void
3493 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3494 {
3495         uint32_t *count = cbdata;
3496
3497         *count = *count + 1;
3498 }
3499
3500 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3501                                      struct bnxt_vnic_info *vnic __rte_unused)
3502 {
3503         return 0;
3504 }
3505
3506 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3507 {
3508         uint32_t count = 0;
3509
3510         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3511             &count, bnxt_vnic_count_hwrm_stub);
3512
3513         return count;
3514 }
3515
3516 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3517                                         uint16_t *vnic_ids)
3518 {
3519         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3520         struct hwrm_func_vf_vnic_ids_query_output *resp =
3521                                                 bp->hwrm_cmd_resp_addr;
3522         int rc;
3523
3524         /* First query all VNIC ids */
3525         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
3526
3527         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3528         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3529         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3530
3531         if (req.vnic_id_tbl_addr == 0) {
3532                 HWRM_UNLOCK();
3533                 PMD_DRV_LOG(ERR,
3534                 "unable to map VNIC ID table address to physical memory\n");
3535                 return -ENOMEM;
3536         }
3537         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3538         if (rc) {
3539                 HWRM_UNLOCK();
3540                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3541                 return -1;
3542         } else if (resp->error_code) {
3543                 rc = rte_le_to_cpu_16(resp->error_code);
3544                 HWRM_UNLOCK();
3545                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
3546                 return -1;
3547         }
3548         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3549
3550         HWRM_UNLOCK();
3551
3552         return rc;
3553 }
3554
/*
 * This function queries the VNIC IDs for a specified VF. It then calls
 * vnic_cb to update the necessary field in vnic_info with cbdata.
 * Then it calls the hwrm_cb function to program this new vnic configuration.
 */
3560 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3561         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3562         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3563 {
3564         struct bnxt_vnic_info vnic;
3565         int rc = 0;
3566         int i, num_vnic_ids;
3567         uint16_t *vnic_ids;
3568         size_t vnic_id_sz;
3569         size_t sz;
3570
3571         /* First query all VNIC ids */
3572         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3573         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3574                         RTE_CACHE_LINE_SIZE);
3575         if (vnic_ids == NULL) {
3576                 rc = -ENOMEM;
3577                 return rc;
3578         }
3579         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3580                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3581
3582         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3583
        if (num_vnic_ids < 0) {
                rte_free(vnic_ids);
                return num_vnic_ids;
        }
3586
        /* Retrieve each VNIC, apply vnic_cb to it, then program the update */
3588
3589         for (i = 0; i < num_vnic_ids; i++) {
3590                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3591                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3592                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3593                 if (rc)
3594                         break;
3595                 if (vnic.mru <= 4)      /* Indicates unallocated */
3596                         continue;
3597
3598                 vnic_cb(&vnic, cbdata);
3599
3600                 rc = hwrm_cb(bp, &vnic);
3601                 if (rc)
3602                         break;
3603         }
3604
3605         rte_free(vnic_ids);
3606
3607         return rc;
3608 }
3609
3610 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3611                                               bool on)
3612 {
3613         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3614         struct hwrm_func_cfg_input req = {0};
3615         int rc;
3616
3617         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3618
3619         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3620         req.enables |= rte_cpu_to_le_32(
3621                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3622         req.vlan_antispoof_mode = on ?
3623                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3624                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3625         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3626
3627         HWRM_CHECK_RESULT();
3628         HWRM_UNLOCK();
3629
3630         return rc;
3631 }
3632
3633 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3634 {
3635         struct bnxt_vnic_info vnic;
3636         uint16_t *vnic_ids;
3637         size_t vnic_id_sz;
3638         int num_vnic_ids, i;
3639         size_t sz;
3640         int rc;
3641
3642         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3643         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3644                         RTE_CACHE_LINE_SIZE);
3645         if (vnic_ids == NULL) {
3646                 rc = -ENOMEM;
3647                 return rc;
3648         }
3649
3650         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3651                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3652
3653         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3654         if (rc <= 0)
3655                 goto exit;
3656         num_vnic_ids = rc;
3657
3658         /*
3659          * Loop through to find the default VNIC ID.
3660          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3661          * by sending the hwrm_func_qcfg command to the firmware.
3662          */
3663         for (i = 0; i < num_vnic_ids; i++) {
3664                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3665                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3666                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3667                                         bp->pf.first_vf_id + vf);
3668                 if (rc)
3669                         goto exit;
3670                 if (vnic.func_default) {
3671                         rte_free(vnic_ids);
3672                         return vnic.fw_vnic_id;
3673                 }
3674         }
3675         /* Could not find a default VNIC. */
3676         PMD_DRV_LOG(ERR, "No default VNIC\n");
3677 exit:
3678         rte_free(vnic_ids);
3679         return -1;
3680 }
3681
3682 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3683                          uint16_t dst_id,
3684                          struct bnxt_filter_info *filter)
3685 {
3686         int rc = 0;
3687         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3688         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3689         uint32_t enables = 0;
3690
3691         if (filter->fw_em_filter_id != UINT64_MAX)
3692                 bnxt_hwrm_clear_em_filter(bp, filter);
3693
3694         HWRM_PREP(req, CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
3695
3696         req.flags = rte_cpu_to_le_32(filter->flags);
3697
3698         enables = filter->enables |
3699               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3700         req.dst_id = rte_cpu_to_le_16(dst_id);
3701
3702         if (filter->ip_addr_type) {
3703                 req.ip_addr_type = filter->ip_addr_type;
3704                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3705         }
3706         if (enables &
3707             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3708                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3709         if (enables &
3710             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3711                 memcpy(req.src_macaddr, filter->src_macaddr,
3712                        ETHER_ADDR_LEN);
3713         if (enables &
3714             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3715                 memcpy(req.dst_macaddr, filter->dst_macaddr,
3716                        ETHER_ADDR_LEN);
3717         if (enables &
3718             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3719                 req.ovlan_vid = filter->l2_ovlan;
3720         if (enables &
3721             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3722                 req.ivlan_vid = filter->l2_ivlan;
3723         if (enables &
3724             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3725                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3726         if (enables &
3727             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3728                 req.ip_protocol = filter->ip_protocol;
3729         if (enables &
3730             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3731                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3732         if (enables &
3733             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3734                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3735         if (enables &
3736             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3737                 req.src_port = rte_cpu_to_be_16(filter->src_port);
3738         if (enables &
3739             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3740                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3741         if (enables &
3742             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3743                 req.mirror_vnic_id = filter->mirror_vnic_id;
3744
3745         req.enables = rte_cpu_to_le_32(enables);
3746
3747         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
3748
3749         HWRM_CHECK_RESULT();
3750
3751         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3752         HWRM_UNLOCK();
3753
3754         return rc;
3755 }
3756
3757 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3758 {
3759         int rc = 0;
3760         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3761         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3762
3763         if (filter->fw_em_filter_id == UINT64_MAX)
3764                 return 0;
3765
        PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
3767         HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
3768
3769         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3770
3771         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
3772
3773         HWRM_CHECK_RESULT();
3774         HWRM_UNLOCK();
3775
3776         filter->fw_em_filter_id = UINT64_MAX;
3777         filter->fw_l2_filter_id = UINT64_MAX;
3778
3779         return 0;
3780 }
3781
3782 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3783                          uint16_t dst_id,
3784                          struct bnxt_filter_info *filter)
3785 {
3786         int rc = 0;
3787         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3788         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3789                                                 bp->hwrm_cmd_resp_addr;
3790         uint32_t enables = 0;
3791
3792         if (filter->fw_ntuple_filter_id != UINT64_MAX)
3793                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3794
3795         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
3796
3797         req.flags = rte_cpu_to_le_32(filter->flags);
3798
3799         enables = filter->enables |
3800               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3801         req.dst_id = rte_cpu_to_le_16(dst_id);
3802
3804         if (filter->ip_addr_type) {
3805                 req.ip_addr_type = filter->ip_addr_type;
3806                 enables |=
3807                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3808         }
3809         if (enables &
3810             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3811                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3812         if (enables &
3813             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3814                 memcpy(req.src_macaddr, filter->src_macaddr,
3815                        ETHER_ADDR_LEN);
        /*
         * if (enables &
         *     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
         *         memcpy(req.dst_macaddr, filter->dst_macaddr,
         *                ETHER_ADDR_LEN);
         */
3820         if (enables &
3821             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3822                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3823         if (enables &
3824             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3825                 req.ip_protocol = filter->ip_protocol;
3826         if (enables &
3827             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3828                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3829         if (enables &
3830             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3831                 req.src_ipaddr_mask[0] =
3832                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3833         if (enables &
3834             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3835                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3836         if (enables &
3837             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
3838                 req.dst_ipaddr_mask[0] =
                        rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
3840         if (enables &
3841             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3842                 req.src_port = rte_cpu_to_le_16(filter->src_port);
3843         if (enables &
3844             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3845                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3846         if (enables &
3847             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3848                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3849         if (enables &
3850             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3851                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3852         if (enables &
3853             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3854                 req.mirror_vnic_id = filter->mirror_vnic_id;
3855
3856         req.enables = rte_cpu_to_le_32(enables);
3857
3858         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3859
3860         HWRM_CHECK_RESULT();
3861
3862         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3863         HWRM_UNLOCK();
3864
3865         return rc;
3866 }
3867
3868 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3869                                 struct bnxt_filter_info *filter)
3870 {
3871         int rc = 0;
3872         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3873         struct hwrm_cfa_ntuple_filter_free_output *resp =
3874                                                 bp->hwrm_cmd_resp_addr;
3875
3876         if (filter->fw_ntuple_filter_id == UINT64_MAX)
3877                 return 0;
3878
3879         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
3880
3881         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3882
3883         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3884
3885         HWRM_CHECK_RESULT();
3886         HWRM_UNLOCK();
3887
3888         filter->fw_ntuple_filter_id = UINT64_MAX;
3889
3890         return 0;
3891 }
3892
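/*
 * Populate the VNIC's RSS redirection table with valid ring group IDs,
 * skipping groups that have not been allocated, then program it via HWRM.
 */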
3893 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3894 {
3895         unsigned int rss_idx, fw_idx, i;
3896
3897         if (vnic->rss_table && vnic->hash_type) {
3898                 /*
3899                  * Fill the RSS hash & redirection table with
3900                  * ring group ids for all VNICs
3901                  */
3902                 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
3903                         rss_idx++, fw_idx++) {
3904                         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3905                                 fw_idx %= bp->rx_cp_nr_rings;
3906                                 if (vnic->fw_grp_ids[fw_idx] !=
3907                                     INVALID_HW_RING_ID)
3908                                         break;
3909                                 fw_idx++;
3910                         }
3911                         if (i == bp->rx_cp_nr_rings)
3912                                 return 0;
3913                         vnic->rss_table[rss_idx] =
3914                                 vnic->fw_grp_ids[fw_idx];
3915                 }
3916                 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
3917         }
3918         return 0;
3919 }
3920
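/*
 * Translate the driver's coalescing settings into the HWRM aggint request,
 * resetting the timer and enabling ring-idle handling via the flags field.
 */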
3921 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
3922         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
3923 {
3924         uint16_t flags;
3925
3926         req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
3927
        /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
3929         req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
3930
        /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
3932         req->num_cmpl_dma_aggr_during_int =
3933                 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
3934
3935         req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
3936
3937         /* min timer set to 1/2 of interrupt timer */
3938         req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
3939
3940         /* buf timer set to 1/4 of interrupt timer */
3941         req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
3942
3943         req->cmpl_aggr_dma_tmr_during_int =
3944                 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
3945
3946         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
3947                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
3948         req->flags = rte_cpu_to_le_16(flags);
3949 }
3950
3951 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
3952                         struct bnxt_coal *coal, uint16_t ring_id)
3953 {
3954         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
3955         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
3956                                                 bp->hwrm_cmd_resp_addr;
3957         int rc;
3958
3959         /* Set ring coalesce parameters only for Stratus 100G NIC */
3960         if (!bnxt_stratus_device(bp))
3961                 return 0;
3962
3963         HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS, BNXT_USE_CHIMP_MB);
3964         bnxt_hwrm_set_coal_params(coal, &req);
3965         req.ring_id = rte_cpu_to_le_16(ring_id);
3966         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3967         HWRM_CHECK_RESULT();
3968         HWRM_UNLOCK();
3969         return 0;
3970 }
3971
3972 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
3973 {
3974         struct hwrm_port_qstats_ext_input req = {0};
3975         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
3976         struct bnxt_pf_info *pf = &bp->pf;
3977         int rc;
3978
3979         if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
3980               bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
3981                 return 0;
3982
3983         HWRM_PREP(req, PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
3984
3985         req.port_id = rte_cpu_to_le_16(pf->port_id);
3986         if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
3987                 req.tx_stat_host_addr =
3988                         rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3989                 req.tx_stat_size =
3990                         rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
3991         }
3992         if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
3993                 req.rx_stat_host_addr =
3994                         rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3995                 req.rx_stat_size =
3996                         rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
3997         }
3998         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3999
4000         if (rc) {
4001                 bp->fw_rx_port_stats_ext_size = 0;
4002                 bp->fw_tx_port_stats_ext_size = 0;
4003         } else {
4004                 bp->fw_rx_port_stats_ext_size =
4005                         rte_le_to_cpu_16(resp->rx_stat_size);
4006                 bp->fw_tx_port_stats_ext_size =
4007                         rte_le_to_cpu_16(resp->tx_stat_size);
4008         }
4009
4010         HWRM_CHECK_RESULT();
4011         HWRM_UNLOCK();
4012
4013         return rc;
4014 }