net/bnxt: use dedicated CPR for async events
drivers/net/bnxt/bnxt_hwrm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT                6000000
#define HWRM_SPEC_CODE_1_8_3            0x10803
#define HWRM_VERSION_1_9_1              0x10901
#define HWRM_VERSION_1_9_2              0x10903

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}
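
/*
 * For example, page_getenum(3000) returns 12 (3000 <= 1 << 12), so
 * page_roundup(3000) returns 4096.  Sizes above 1 << 30 fall through to
 * the error path in page_getenum().
 */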

static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
                                  uint8_t *pg_attr,
                                  uint64_t *pg_dir)
{
        if (rmem->nr_pages > 1) {
                *pg_attr = 1;
                *pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
        } else {
                *pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
        }
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -ETIMEDOUT if
 * bnxt_hwrm_send_message() times out, or a negative errno derived from the
 * HWRM error code if the ChiMP firmware rejected the command.
 */

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                  uint32_t msg_len, bool use_kong_mb)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };
        uint16_t bar_offset = use_kong_mb ?
                GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
        uint16_t mb_trigger_offset = use_kong_mb ?
                GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;

        if (bp->flags & BNXT_FLAG_SHORT_CMD ||
            msg_len > bp->max_req_len) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                /* Sync memory write before updating doorbell */
                rte_wmb();

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(1);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                PMD_DRV_LOG(ERR, "Error(timeout) sending msg 0x%04x\n",
                            req->req_type);
                return -ETIMEDOUT;
        }
        return 0;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does initial processing.
 *
 * HWRM_CHECK_RESULT() returns from the enclosing function on failure,
 * releasing the spinlock only on that early-return path. If the function
 * does not use the regular int return-code convention, HWRM_CHECK_RESULT()
 * should not be used directly; instead it should be copied and modified to
 * suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
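/*
 * A minimal usage sketch (mirroring bnxt_hwrm_func_reset() below):
 *
 *      HWRM_PREP(req, FUNC_RESET, BNXT_USE_CHIMP_MB);
 *      rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 *      HWRM_CHECK_RESULT();    (early-returns, unlocking, on error)
 *      ... read resp fields while the lock is still held ...
 *      HWRM_UNLOCK();
 */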
#define HWRM_PREP(req, type, kong) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(-1); \
        req.seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
                rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT_SILENT() do {\
        if (rc) { \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc > 0) \
                        rc = -EINVAL; \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        PMD_DRV_LOG(ERR, \
                                "error %d:%d:%08x:%04x\n", \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        PMD_DRV_LOG(ERR, "error %d\n", rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc > 0) \
                        rc = -EINVAL; \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME: add the multicast flag once multicast address configuration
         * is supported by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->flags & BNXT_VNIC_INFO_MCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the
         * set_rx_mask list was used for anti-spoof. In 1.8.0, the TX path
         * configuration was removed from the set_rx_mask call, and this
         * command was added.
         *
         * This command is also present in 1.7.8.11 and higher, as well as
         * in 1.7.8.0.
         */
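        /*
         * bp->fw_ver (set in bnxt_hwrm_ver_get()) packs the firmware version
         * as (major << 24) | (minor << 16) | (build << 8) | patch, so e.g.
         * 1.8.0.0 encodes as (1 << 24) | (8 << 16).
         */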
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = UINT64_MAX;

        return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        //TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                PMD_DRV_LOG(DEBUG,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);

        req.flags = rte_cpu_to_le_32(filter->flags);
        req.flags |=
        rte_cpu_to_le_32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_CFG, BNXT_USE_CHIMP_MB);

        if (ptp->rx_filter)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables = rte_cpu_to_le_32
                (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*      if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
        if (ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);

        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (!(resp->flags &
              HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) {
                HWRM_UNLOCK();
                return 0;
        }

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }

        ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
        ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
        ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
        ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
        ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
        ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
        ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
        ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
        ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

        /* Done with resp; release the lock on this exit path too. */
        HWRM_UNLOCK();

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        return 0;
}

static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Fail to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Alloc VLAN AS table for VF %d fail\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
        bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
        bp->max_l2_ctx =
                rte_le_to_cpu_16(resp->max_l2_ctxs) + bp->max_rx_em_flows;
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
                        HWRM_UNLOCK();
                        bnxt_hwrm_ptp_qcfg(bp);
                }
        }

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
                bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc;

        rc = __bnxt_hwrm_func_qcaps(bp);
        if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
                rc = bnxt_alloc_ctx_mem(bp);
                if (rc)
                        return rc;

                rc = bnxt_hwrm_func_resc_qcaps(bp);
                if (!rc)
                        bp->flags |= BNXT_FLAG_NEW_RM;
        }

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET, BNXT_USE_CHIMP_MB);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));

                /*
                 * The PF can sniff HWRM API calls issued by a VF. This may be
                 * set up by the Linux driver and inherited by the DPDK PF
                 * driver. Clear the HWRM sniffer list in FW because the DPDK
                 * PF driver does not support it.
                 */
                req.flags =
                rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
        }

        req.async_event_fwd[0] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
{
        if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
                return 0;

        return bnxt_hwrm_func_reserve_vf_resc(bp, true);
}

int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
{
        int rc;
        uint32_t flags = 0;
        uint32_t enables;
        struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_vf_cfg_input req = {0};

        HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);

        enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;

        if (BNXT_HAS_RING_GRPS(bp)) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
                req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
        }

        req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
        req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
                                            AGG_RING_MULTIPLIER);
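        /*
         * The stat context and completion ring counts below each include one
         * extra entry (BNXT_NUM_ASYNC_CPR()) to account for the dedicated
         * completion ring reserved for async events, when one is in use.
         */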
        req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings +
                                             bp->tx_nr_rings +
                                             BNXT_NUM_ASYNC_CPR(bp));
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
                                              bp->tx_nr_rings +
                                              BNXT_NUM_ASYNC_CPR(bp));
        req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
        if (bp->vf_resv_strategy ==
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
                req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
                req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
                req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
        }

        if (test)
                flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;

        if (test && BNXT_HAS_RING_GRPS(bp))
                flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;

        req.flags = rte_cpu_to_le_32(flags);
        req.enables |= rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (test)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_resource_qcaps_input req = {0};

        HWRM_PREP(req, FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (BNXT_VF(bp)) {
                bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
                bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
                bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
                /* func_resource_qcaps does not return max_rx_em_flows.
                 * So use the value provided by func_qcaps.
                 */
                bp->max_l2_ctx =
                        rte_le_to_cpu_16(resp->max_l2_ctxs) +
                        bp->max_rx_em_flows;
                bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
                bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        }
        bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
        bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
        if (bp->vf_resv_strategy >
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
                bp->vf_resv_strategy =
                HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
                resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
                resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
        bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
                     (resp->hwrm_fw_min_8b << 16) |
                     (resp->hwrm_fw_bld_8b << 8) |
                     resp->hwrm_fw_rsvd_8b;
        PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        fw_version = resp->hwrm_intf_maj_8b << 16;
        fw_version |= resp->hwrm_intf_min_8b << 8;
        fw_version |= resp->hwrm_intf_upd_8b;
        bp->hwrm_spec_code = fw_version;

        if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                PMD_DRV_LOG(ERR, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
        if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
                bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;

        max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                PMD_DRV_LOG(DEBUG, "Short command supported\n");
                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

        if (((dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
             (dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
            bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
                sprintf(type, "bnxt_hwrm_short_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr =
                                rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
                bp->flags |= BNXT_FLAG_KONG_MB_EN;
                PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
                PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");

error:
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG, BNXT_USE_CHIMP_MB);

        if (conf->link_up) {
                /* Setting a fixed speed while autoneg is on, so disable it */
                if (bp->link_info.auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
                }

                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        /* No speeds specified. Enable AutoNeg - all speeds */
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
                /* AutoNeg - Advertise speeds specified. */
                if (conf->auto_link_speed_mask &&
                    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
                                conf->auto_link_speed_mask;
                        enables |=
                        HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }

                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                PMD_DRV_LOG(INFO, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex_cfg;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
        link_info->phy_type = resp->phy_type;
        link_info->media_type = resp->media_type;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        HWRM_UNLOCK();

        PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
        PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
                    link_info->auto_link_speed_mask);
        PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
                    link_info->force_link_speed);

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int i;

        HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);

        req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
        /* HWRM Version >= 1.9.1 */
        if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
                req.drv_qmap_cap =
                        HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
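
/*
 * GET_QUEUE_INFO(0), for example, expands to assignments from
 * resp->queue_id0 and resp->queue_id0_service_profile.
 */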
1105
1106         GET_QUEUE_INFO(0);
1107         GET_QUEUE_INFO(1);
1108         GET_QUEUE_INFO(2);
1109         GET_QUEUE_INFO(3);
1110         GET_QUEUE_INFO(4);
1111         GET_QUEUE_INFO(5);
1112         GET_QUEUE_INFO(6);
1113         GET_QUEUE_INFO(7);
1114
1115         HWRM_UNLOCK();
1116
1117         if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
1118                 bp->tx_cosq_id = bp->cos_queue[0].id;
1119         } else {
1120                 /* iterate and find the COSq profile to use for Tx */
1121                 for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
1122                         if (bp->cos_queue[i].profile ==
1123                                 HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
1124                                 bp->tx_cosq_id = bp->cos_queue[i].id;
1125                                 break;
1126                         }
1127                 }
1128         }
1129
1130         bp->max_tc = resp->max_configurable_queues;
1131         bp->max_lltc = resp->max_configurable_lossless_queues;
1132         if (bp->max_tc > BNXT_MAX_QUEUE)
1133                 bp->max_tc = BNXT_MAX_QUEUE;
1134         bp->max_q = bp->max_tc;
1135
1136         PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);
1137
1138         return rc;
1139 }
1140
1141 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
1142                          struct bnxt_ring *ring,
1143                          uint32_t ring_type, uint32_t map_index,
1144                          uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
1145 {
1146         int rc = 0;
1147         uint32_t enables = 0;
1148         struct hwrm_ring_alloc_input req = {.req_type = 0 };
1149         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1150         struct rte_mempool *mb_pool;
1151         uint16_t rx_buf_size;
1152
1153         HWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB);
1154
1155         req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
1156         req.fbo = rte_cpu_to_le_32(0);
1157         /* Association of ring index with doorbell index */
1158         req.logical_id = rte_cpu_to_le_16(map_index);
1159         req.length = rte_cpu_to_le_32(ring->ring_size);
1160
1161         switch (ring_type) {
1162         case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1163                 req.ring_type = ring_type;
1164                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1165                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1166                 req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
1167                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1168                         enables |=
1169                         HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1170                 break;
1171         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1172                 req.ring_type = ring_type;
1173                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1174                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1175                 if (BNXT_CHIP_THOR(bp)) {
1176                         mb_pool = bp->rx_queues[0]->mb_pool;
1177                         rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1178                                       RTE_PKTMBUF_HEADROOM;
1179                         req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1180                         enables |=
1181                                 HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
1182                 }
1183                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1184                         enables |=
1185                                 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1186                 break;
1187         case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1188                 req.ring_type = ring_type;
1189                 if (BNXT_HAS_NQ(bp)) {
1190                         /* Association of cp ring with nq */
1191                         req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1192                         enables |=
1193                                 HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
1194                 }
1195                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1196                 break;
1197         case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1198                 req.ring_type = ring_type;
1199                 req.page_size = BNXT_PAGE_SHFT;
1200                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1201                 break;
1202         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1203                 req.ring_type = ring_type;
1204                 req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);
1205
1206                 mb_pool = bp->rx_queues[0]->mb_pool;
1207                 rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1208                               RTE_PKTMBUF_HEADROOM;
1209                 req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1210
1211                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1212                 enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
1213                            HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
1214                            HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1215                 break;
1216         default:
1217                 PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
1218                         ring_type);
1219                 HWRM_UNLOCK();
1220                 return -EINVAL;
1221         }
1222         req.enables = rte_cpu_to_le_32(enables);
1223
1224         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1225
1226         if (rc || resp->error_code) {
1227                 if (rc == 0 && resp->error_code)
1228                         rc = rte_le_to_cpu_16(resp->error_code);
1229                 switch (ring_type) {
1230                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1231                         PMD_DRV_LOG(ERR,
1232                                 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
1233                         HWRM_UNLOCK();
1234                         return rc;
1235                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1236                         PMD_DRV_LOG(ERR,
1237                                     "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1238                         HWRM_UNLOCK();
1239                         return rc;
1240                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1241                         PMD_DRV_LOG(ERR,
1242                                     "hwrm_ring_alloc rx agg failed. rc:%d\n",
1243                                     rc);
1244                         HWRM_UNLOCK();
1245                         return rc;
1246                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1247                         PMD_DRV_LOG(ERR,
1248                                     "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1249                         HWRM_UNLOCK();
1250                         return rc;
1251                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1252                         PMD_DRV_LOG(ERR,
1253                                     "hwrm_ring_alloc nq failed. rc:%d\n", rc);
1254                         HWRM_UNLOCK();
1255                         return rc;
1256                 default:
1257                         PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1258                         HWRM_UNLOCK();
1259                         return rc;
1260                 }
1261         }
1262
1263         ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1264         HWRM_UNLOCK();
1265         return rc;
1266 }
1267
1268 int bnxt_hwrm_ring_free(struct bnxt *bp,
1269                         struct bnxt_ring *ring, uint32_t ring_type)
1270 {
1271         int rc;
1272         struct hwrm_ring_free_input req = {.req_type = 0 };
1273         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1274
1275         HWRM_PREP(req, RING_FREE, BNXT_USE_CHIMP_MB);
1276
1277         req.ring_type = ring_type;
1278         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1279
1280         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1281
1282         if (rc || resp->error_code) {
1283                 if (rc == 0 && resp->error_code)
1284                         rc = rte_le_to_cpu_16(resp->error_code);
1285                 HWRM_UNLOCK();
1286
1287                 switch (ring_type) {
1288                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1289                         PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1290                                 rc);
1291                         return rc;
1292                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1293                         PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1294                                 rc);
1295                         return rc;
1296                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1297                         PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1298                                 rc);
1299                         return rc;
1300                 case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
1301                         PMD_DRV_LOG(ERR,
1302                                     "hwrm_ring_free nq failed. rc:%d\n", rc);
1303                         return rc;
1304                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
1305                         PMD_DRV_LOG(ERR,
1306                                     "hwrm_ring_free agg failed. rc:%d\n", rc);
1307                         return rc;
1308                 default:
1309                         PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1310                         return rc;
1311                 }
1312         }
1313         HWRM_UNLOCK();
1314         return 0;
1315 }
1316
1317 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1318 {
1319         int rc = 0;
1320         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1321         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1322
1323         HWRM_PREP(req, RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
1324
1325         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1326         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1327         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1328         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1329
1330         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1331
1332         HWRM_CHECK_RESULT();
1333
1334         bp->grp_info[idx].fw_grp_id =
1335             rte_le_to_cpu_16(resp->ring_group_id);
1336
1337         HWRM_UNLOCK();
1338
1339         return rc;
1340 }
1341
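/* Free the HW ring group for queue 'idx' and invalidate its cached group ID. */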
1342 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1343 {
1344         int rc;
1345         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1346         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1347
1348         HWRM_PREP(req, RING_GRP_FREE, BNXT_USE_CHIMP_MB);
1349
1350         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1351
1352         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1353
1354         HWRM_CHECK_RESULT();
1355         HWRM_UNLOCK();
1356
1357         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1358         return rc;
1359 }
1360
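/* Clear the HW counters behind a completion ring's stats context (no-op if unallocated). */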
1361 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1362 {
1363         int rc = 0;
1364         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1365         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1366
1367         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1368                 return rc;
1369
1370         HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
1371
1372         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1373
1374         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1375
1376         HWRM_CHECK_RESULT();
1377         HWRM_UNLOCK();
1378
1379         return rc;
1380 }
1381
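/* Allocate a stats context for a completion ring, pointing the FW at its stats DMA area. */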
1382 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1383                                 unsigned int idx __rte_unused)
1384 {
1385         int rc;
1386         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1387         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1388
1389         HWRM_PREP(req, STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1390
1391         req.update_period_ms = rte_cpu_to_le_32(0);
1392
1393         req.stats_dma_addr =
1394             rte_cpu_to_le_64(cpr->hw_stats_map);
1395
1396         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1397
1398         HWRM_CHECK_RESULT();
1399
1400         cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
1401
1402         HWRM_UNLOCK();
1403
1404         return rc;
1405 }
1406
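/* Free the stats context associated with a completion ring. */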
1407 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1408                                 unsigned int idx __rte_unused)
1409 {
1410         int rc;
1411         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1412         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1413
1414         HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
1415
1416         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1417
1418         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1419
1420         HWRM_CHECK_RESULT();
1421         HWRM_UNLOCK();
1422
1423         return rc;
1424 }
1425
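/*
 * Allocate a VNIC. On chips with ring groups, first map the group range
 * [start_grp_id, end_grp_id) into fw_grp_ids and reset the RSS/COS/LB rules.
 */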
1426 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1427 {
1428         int rc = 0, i, j;
1429         struct hwrm_vnic_alloc_input req = { 0 };
1430         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1431
1432         if (!BNXT_HAS_RING_GRPS(bp))
1433                 goto skip_ring_grps;
1434
1435         /* map ring groups to this vnic */
1436         PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1437                 vnic->start_grp_id, vnic->end_grp_id);
1438         for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
1439                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1440
1441         vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1442         vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1443         vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1444         vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1445
1446 skip_ring_grps:
1447         vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
1448                                 RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
1449         HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);
1450
1451         if (vnic->func_default)
1452                 req.flags =
1453                         rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1454         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1455
1456         HWRM_CHECK_RESULT();
1457
1458         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1459         HWRM_UNLOCK();
1460         PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1461         return rc;
1462 }
1463
1464 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1465                                         struct bnxt_vnic_info *vnic,
1466                                         struct bnxt_plcmodes_cfg *pmode)
1467 {
1468         int rc = 0;
1469         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1470         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1471
1472         HWRM_PREP(req, VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
1473
1474         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1475
1476         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1477
1478         HWRM_CHECK_RESULT();
1479
1480         pmode->flags = rte_le_to_cpu_32(resp->flags);
1481         /* dflt_vnic bit doesn't exist in the _cfg command */
1482         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1483         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1484         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1485         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1486
1487         HWRM_UNLOCK();
1488
1489         return rc;
1490 }
1491
1492 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1493                                        struct bnxt_vnic_info *vnic,
1494                                        struct bnxt_plcmodes_cfg *pmode)
1495 {
1496         int rc = 0;
1497         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1498         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1499
1500         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1501                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1502                 return rc;
1503         }
1504
1505         HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1506
1507         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1508         req.flags = rte_cpu_to_le_32(pmode->flags);
1509         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1510         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1511         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1512         req.enables = rte_cpu_to_le_32(
1513             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1514             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1515             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1516         );
1517
1518         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1519
1520         HWRM_CHECK_RESULT();
1521         HWRM_UNLOCK();
1522
1523         return rc;
1524 }
1525
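/*
 * Configure a VNIC: MRU, default ring group, RSS/COS/LB rules and mode flags.
 * On Thor the default Rx and completion ring IDs are taken from Rx queue 0.
 * Placement modes are queried first and restored after the VNIC_CFG call.
 */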
1526 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1527 {
1528         int rc = 0;
1529         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1530         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1531         struct bnxt_plcmodes_cfg pmodes = { 0 };
1532         uint32_t ctx_enable_flag = 0;
1533         uint32_t enables = 0;
1534
1535         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1536                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1537                 return rc;
1538         }
1539
1540         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1541         if (rc)
1542                 return rc;
1543
1544         HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);
1545
1546         if (BNXT_CHIP_THOR(bp)) {
1547                 struct bnxt_rx_queue *rxq = bp->eth_dev->data->rx_queues[0];
1548                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1549                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1550
1551                 req.default_rx_ring_id =
1552                         rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
1553                 req.default_cmpl_ring_id =
1554                         rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
1555                 enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
1556                           HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
1557                 goto config_mru;
1558         }
1559
1560         /* Only RSS is supported for now; COS and LB are TBD. */
1561         enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
1562         if (vnic->lb_rule != (uint16_t)HWRM_NA_SIGNATURE)
1563                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1564         if (vnic->cos_rule != (uint16_t)HWRM_NA_SIGNATURE)
1565                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1566         if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
1567                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1568                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1569         }
1570         enables |= ctx_enable_flag;
1571         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1572         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1573         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1574         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1575
1576 config_mru:
1577         req.enables = rte_cpu_to_le_32(enables);
1578         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1579         req.mru = rte_cpu_to_le_16(vnic->mru);
1580         /* Configure default VNIC only once. */
1581         if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
1582                 req.flags |=
1583                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1584                 bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
1585         }
1586         if (vnic->vlan_strip)
1587                 req.flags |=
1588                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1589         if (vnic->bd_stall)
1590                 req.flags |=
1591                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1592         if (vnic->roce_dual)
1593                 req.flags |= rte_cpu_to_le_32(
1594                         HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1595         if (vnic->roce_only)
1596                 req.flags |= rte_cpu_to_le_32(
1597                         HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1598         if (vnic->rss_dflt_cr)
1599                 req.flags |= rte_cpu_to_le_32(
1600                         HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE);
1601
1602         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1603
1604         HWRM_CHECK_RESULT();
1605         HWRM_UNLOCK();
1606
1607         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1608
1609         return rc;
1610 }
1611
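/* Query the current VNIC configuration from the FW back into the driver state. */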
1612 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1613                 int16_t fw_vf_id)
1614 {
1615         int rc = 0;
1616         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1617         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1618
1619         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1620                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1621                 return rc;
1622         }
1623         HWRM_PREP(req, VNIC_QCFG, BNXT_USE_CHIMP_MB);
1624
1625         req.enables =
1626                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1627         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1628         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1629
1630         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1631
1632         HWRM_CHECK_RESULT();
1633
1634         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1635         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1636         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1637         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1638         vnic->mru = rte_le_to_cpu_16(resp->mru);
1639         vnic->func_default = rte_le_to_cpu_32(
1640                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1641         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1642                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1643         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1644                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1645         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1646                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1647         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1648                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1649         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1650                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1651
1652         HWRM_UNLOCK();
1653
1654         return rc;
1655 }
1656
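/*
 * Allocate an RSS/COS/LB context. Without ring groups (Thor) the context ID
 * is stored per-index in fw_grp_ids; otherwise context 0 becomes the RSS rule.
 */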
1657 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
1658                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
1659 {
1660         int rc = 0;
1661         uint16_t ctx_id;
1662         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1663         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1664                                                 bp->hwrm_cmd_resp_addr;
1665
1666         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1667
1668         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1669         HWRM_CHECK_RESULT();
1670
1671         ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1672         if (!BNXT_HAS_RING_GRPS(bp))
1673                 vnic->fw_grp_ids[ctx_idx] = ctx_id;
1674         else if (ctx_idx == 0)
1675                 vnic->rss_rule = ctx_id;
1676
1677         HWRM_UNLOCK();
1678
1679         return rc;
1680 }
1681
1682 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
1683                             struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
1684 {
1685         int rc = 0;
1686         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1687         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1688                                                 bp->hwrm_cmd_resp_addr;
1689
1690         if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
1691                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1692                 return rc;
1693         }
1694         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
1695
1696         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
1697
1698         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1699
1700         HWRM_CHECK_RESULT();
1701         HWRM_UNLOCK();
1702
1703         return rc;
1704 }
1705
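/* Free the VNIC and allow a default VNIC to be configured again if needed. */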
1706 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1707 {
1708         int rc = 0;
1709         struct hwrm_vnic_free_input req = {.req_type = 0 };
1710         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1711
1712         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1713                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1714                 return rc;
1715         }
1716
1717         HWRM_PREP(req, VNIC_FREE, BNXT_USE_CHIMP_MB);
1718
1719         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1720
1721         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1722
1723         HWRM_CHECK_RESULT();
1724         HWRM_UNLOCK();
1725
1726         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1727         /* Configure default VNIC again if necessary. */
1728         if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
1729                 bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;
1730
1731         return rc;
1732 }
1733
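/*
 * Thor RSS: issue one VNIC_RSS_CFG per context, each covering a ring-table
 * pair and its slice of the RSS indirection table.
 */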
1734 static int
1735 bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1736 {
1737         int i;
1738         int rc = 0;
1739         int nr_ctxs = vnic->num_lb_ctxts;
1740         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1741         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1742
1743         for (i = 0; i < nr_ctxs; i++) {
1744                 HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
1745
1746                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1747                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1748                 req.hash_mode_flags = vnic->hash_mode;
1749
1750                 req.hash_key_tbl_addr =
1751                         rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1752
1753                 req.ring_grp_tbl_addr =
1754                         rte_cpu_to_le_64(vnic->rss_table_dma_addr +
1755                                          i * HW_HASH_INDEX_SIZE);
1756                 req.ring_table_pair_index = i;
1757                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
1758
1759                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
1760                                             BNXT_USE_CHIMP_MB);
1761
1762                 HWRM_CHECK_RESULT();
1763                 HWRM_UNLOCK();
1764         }
1765
1766         return rc;
1767 }
1768
1769 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1770                            struct bnxt_vnic_info *vnic)
1771 {
1772         int rc = 0;
1773         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1774         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1775
1776         if (!vnic->rss_table)
1777                 return 0;
1778
1779         if (BNXT_CHIP_THOR(bp))
1780                 return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
1781
1782         HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
1783
1784         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1785         req.hash_mode_flags = vnic->hash_mode;
1786
1787         req.ring_grp_tbl_addr =
1788             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1789         req.hash_key_tbl_addr =
1790             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1791         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1792         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1793
1794         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1795
1796         HWRM_CHECK_RESULT();
1797         HWRM_UNLOCK();
1798
1799         return rc;
1800 }
1801
1802 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1803                         struct bnxt_vnic_info *vnic)
1804 {
1805         int rc = 0;
1806         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1807         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1808         uint16_t size;
1809
1810         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1811                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1812                 return rc;
1813         }
1814
1815         HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1816
1817         req.flags = rte_cpu_to_le_32(
1818                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1819
1820         req.enables = rte_cpu_to_le_32(
1821                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1822
1823         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1824         size -= RTE_PKTMBUF_HEADROOM;
1825
1826         req.jumbo_thresh = rte_cpu_to_le_16(size);
1827         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1828
1829         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1830
1831         HWRM_CHECK_RESULT();
1832         HWRM_UNLOCK();
1833
1834         return rc;
1835 }
1836
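/* Enable or disable TPA (HW receive aggregation) for the VNIC; skipped on Thor. */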
1837 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1838                         struct bnxt_vnic_info *vnic, bool enable)
1839 {
1840         int rc = 0;
1841         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1842         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1843
1844         if (BNXT_CHIP_THOR(bp))
1845                 return 0;
1846
1847         HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
1848
1849         if (enable) {
1850                 req.enables = rte_cpu_to_le_32(
1851                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1852                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1853                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1854                 req.flags = rte_cpu_to_le_32(
1855                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1856                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1857                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1858                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1859                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1860                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1861                 req.max_agg_segs = rte_cpu_to_le_16(5);
1862                 req.max_aggs =
1863                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1864                 req.min_agg_len = rte_cpu_to_le_32(512);
1865         }
1866         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1867
1868         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1869
1870         HWRM_CHECK_RESULT();
1871         HWRM_UNLOCK();
1872
1873         return rc;
1874 }
1875
1876 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1877 {
1878         struct hwrm_func_cfg_input req = {0};
1879         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1880         int rc;
1881
1882         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1883         req.enables = rte_cpu_to_le_32(
1884                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1885         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1886         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1887
1888         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
1889
1890         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1891         HWRM_CHECK_RESULT();
1892         HWRM_UNLOCK();
1893
1894         bp->pf.vf_info[vf].random_mac = false;
1895
1896         return rc;
1897 }
1898
1899 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1900                                   uint64_t *dropped)
1901 {
1902         int rc = 0;
1903         struct hwrm_func_qstats_input req = {.req_type = 0};
1904         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1905
1906         HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
1907
1908         req.fid = rte_cpu_to_le_16(fid);
1909
1910         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1911
1912         HWRM_CHECK_RESULT();
1913
1914         if (dropped)
1915                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1916
1917         HWRM_UNLOCK();
1918
1919         return rc;
1920 }
1921
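/* Query per-function HW counters and translate them into rte_eth_stats. */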
1922 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1923                           struct rte_eth_stats *stats)
1924 {
1925         int rc = 0;
1926         struct hwrm_func_qstats_input req = {.req_type = 0};
1927         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1928
1929         HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
1930
1931         req.fid = rte_cpu_to_le_16(fid);
1932
1933         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1934
1935         HWRM_CHECK_RESULT();
1936
1937         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1938         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1939         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1940         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1941         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1942         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1943
1944         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1945         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1946         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1947         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1948         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1949         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1950
1951         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
1952         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
1953         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
1954
1955         HWRM_UNLOCK();
1956
1957         return rc;
1958 }
1959
1960 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1961 {
1962         int rc = 0;
1963         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1964         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1965
1966         HWRM_PREP(req, FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
1967
1968         req.fid = rte_cpu_to_le_16(fid);
1969
1970         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1971
1972         HWRM_CHECK_RESULT();
1973         HWRM_UNLOCK();
1974
1975         return rc;
1976 }
1977
1978 /*
1979  * HWRM utility functions
1980  */
1981
1982 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1983 {
1984         unsigned int i;
1985         int rc = 0;
1986
1987         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1988                 struct bnxt_tx_queue *txq;
1989                 struct bnxt_rx_queue *rxq;
1990                 struct bnxt_cp_ring_info *cpr;
1991
1992                 if (i >= bp->rx_cp_nr_rings) {
1993                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1994                         cpr = txq->cp_ring;
1995                 } else {
1996                         rxq = bp->rx_queues[i];
1997                         cpr = rxq->cp_ring;
1998                 }
1999
2000                 rc = bnxt_hwrm_stat_clear(bp, cpr);
2001                 if (rc)
2002                         return rc;
2003         }
2004         return 0;
2005 }
2006
2007 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
2008 {
2009         int rc;
2010         unsigned int i;
2011         struct bnxt_cp_ring_info *cpr;
2012
2013         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2014
2015                 if (i >= bp->rx_cp_nr_rings) {
2016                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
2017                 } else {
2018                         cpr = bp->rx_queues[i]->cp_ring;
2019                         if (BNXT_HAS_RING_GRPS(bp))
2020                                 bp->grp_info[i].fw_stats_ctx = -1;
2021                 }
2022                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
2023                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
2024                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
2025                         if (rc)
2026                                 return rc;
2027                 }
2028         }
2029         return 0;
2030 }
2031
2032 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
2033 {
2034         unsigned int i;
2035         int rc = 0;
2036
2037         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2038                 struct bnxt_tx_queue *txq;
2039                 struct bnxt_rx_queue *rxq;
2040                 struct bnxt_cp_ring_info *cpr;
2041
2042                 if (i >= bp->rx_cp_nr_rings) {
2043                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2044                         cpr = txq->cp_ring;
2045                 } else {
2046                         rxq = bp->rx_queues[i];
2047                         cpr = rxq->cp_ring;
2048                 }
2049
2050                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
2051
2052                 if (rc)
2053                         return rc;
2054         }
2055         return rc;
2056 }
2057
2058 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
2059 {
2060         uint16_t idx;
2061         int rc = 0;
2062
2063         if (!BNXT_HAS_RING_GRPS(bp))
2064                 return 0;
2065
2066         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
2067
2068                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
2069                         continue;
2070
2071                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
2072
2073                 if (rc)
2074                         return rc;
2075         }
2076         return rc;
2077 }
2078
2079 void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2080 {
2081         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2082
2083         bnxt_hwrm_ring_free(bp, cp_ring,
2084                             HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
2085         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2086         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2087                                      sizeof(*cpr->cp_desc_ring));
2088         cpr->cp_raw_cons = 0;
2089         cpr->valid = 0;
2090 }
2091
2092 void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2093 {
2094         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2095
2096         bnxt_hwrm_ring_free(bp, cp_ring,
2097                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
2098         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2099         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2100                         sizeof(*cpr->cp_desc_ring));
2101         cpr->cp_raw_cons = 0;
2102         cpr->valid = 0;
2103 }
2104
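/*
 * Free the Rx, aggregation and completion (plus NQ) rings of one Rx queue,
 * reset the ring state and invalidate the queue's grp_info entries.
 */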
2105 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
2106 {
2107         struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
2108         struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
2109         struct bnxt_ring *ring = rxr->rx_ring_struct;
2110         struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
2111
2112         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2113                 bnxt_hwrm_ring_free(bp, ring,
2114                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2115                 ring->fw_ring_id = INVALID_HW_RING_ID;
2116                 if (BNXT_HAS_RING_GRPS(bp))
2117                         bp->grp_info[queue_index].rx_fw_ring_id =
2118                                                         INVALID_HW_RING_ID;
2119                 memset(rxr->rx_desc_ring, 0,
2120                        rxr->rx_ring_struct->ring_size *
2121                        sizeof(*rxr->rx_desc_ring));
2122                 memset(rxr->rx_buf_ring, 0,
2123                        rxr->rx_ring_struct->ring_size *
2124                        sizeof(*rxr->rx_buf_ring));
2125                 rxr->rx_prod = 0;
2126         }
2127         ring = rxr->ag_ring_struct;
2128         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2129                 bnxt_hwrm_ring_free(bp, ring,
2130                                     BNXT_CHIP_THOR(bp) ?
2131                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
2132                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2133                 ring->fw_ring_id = INVALID_HW_RING_ID;
2134                 memset(rxr->ag_buf_ring, 0,
2135                        rxr->ag_ring_struct->ring_size *
2136                        sizeof(*rxr->ag_buf_ring));
2137                 rxr->ag_prod = 0;
2138                 if (BNXT_HAS_RING_GRPS(bp))
2139                         bp->grp_info[queue_index].ag_fw_ring_id =
2140                                                         INVALID_HW_RING_ID;
2141         }
2142         if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
2143                 bnxt_free_cp_ring(bp, cpr);
2144                 if (rxq->nq_ring)
2145                         bnxt_free_nq_ring(bp, rxq->nq_ring);
2146         }
2147
2148         if (BNXT_HAS_RING_GRPS(bp))
2149                 bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
2150 }
2151
2152 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
2153 {
2154         unsigned int i;
2155
2156         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
2157                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
2158                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
2159                 struct bnxt_ring *ring = txr->tx_ring_struct;
2160                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
2161
2162                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2163                         bnxt_hwrm_ring_free(bp, ring,
2164                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
2165                         ring->fw_ring_id = INVALID_HW_RING_ID;
2166                         memset(txr->tx_desc_ring, 0,
2167                                         txr->tx_ring_struct->ring_size *
2168                                         sizeof(*txr->tx_desc_ring));
2169                         memset(txr->tx_buf_ring, 0,
2170                                         txr->tx_ring_struct->ring_size *
2171                                         sizeof(*txr->tx_buf_ring));
2172                         txr->tx_prod = 0;
2173                         txr->tx_cons = 0;
2174                 }
2175                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
2176                         bnxt_free_cp_ring(bp, cpr);
2177                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
2178                         if (txq->nq_ring)
2179                                 bnxt_free_nq_ring(bp, txq->nq_ring);
2180                 }
2181         }
2182
2183         for (i = 0; i < bp->rx_cp_nr_rings; i++)
2184                 bnxt_free_hwrm_rx_ring(bp, i);
2185
2186         return 0;
2187 }
2188
2189 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
2190 {
2191         uint16_t i;
2192         int rc = 0;
2193
2194         if (!BNXT_HAS_RING_GRPS(bp))
2195                 return 0;
2196
2197         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
2198                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
2199                 if (rc)
2200                         return rc;
2201         }
2202         return rc;
2203 }
2204
2205 void bnxt_free_hwrm_resources(struct bnxt *bp)
2206 {
2207         /* Free the rte_malloc()'ed HWRM request/response buffers. */
2208         rte_free(bp->hwrm_cmd_resp_addr);
2209         rte_free(bp->hwrm_short_cmd_req_addr);
2210         bp->hwrm_cmd_resp_addr = NULL;
2211         bp->hwrm_short_cmd_req_addr = NULL;
2212         bp->hwrm_cmd_resp_dma_addr = 0;
2213         bp->hwrm_short_cmd_req_dma_addr = 0;
2214 }
2215
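/* Allocate the DMA-able buffer used for HWRM responses and init the HWRM lock. */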
2216 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2217 {
2218         struct rte_pci_device *pdev = bp->pdev;
2219         char type[RTE_MEMZONE_NAMESIZE];
2220
2221         snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
2222                 pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2223         bp->max_resp_len = HWRM_MAX_RESP_LEN;
2224         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2225         if (bp->hwrm_cmd_resp_addr == NULL)
2226                 return -ENOMEM;
2227         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
2228         bp->hwrm_cmd_resp_dma_addr =
2229                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
2230         if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
2231                 PMD_DRV_LOG(ERR,
2232                         "unable to map response address to physical memory\n");
2233                 return -ENOMEM;
2234         }
2235         rte_spinlock_init(&bp->hwrm_lock);
2236
2237         return 0;
2238 }
2239
2240 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2241 {
2242         struct bnxt_filter_info *filter;
2243         int rc = 0;
2244
2245         STAILQ_FOREACH(filter, &vnic->filter, next) {
2246                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2247                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2248                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2249                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2250                 else
2251                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2252                 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
2253                 /* Best effort: continue clearing the remaining filters on error. */
2255         }
2256         return rc;
2257 }
2258
2259 static int
2260 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2261 {
2262         struct bnxt_filter_info *filter;
2263         struct rte_flow *flow;
2264         int rc = 0;
2265
2266         STAILQ_FOREACH(flow, &vnic->flow_list, next) {
2267                 filter = flow->filter;
2268                 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
2269                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2270                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2271                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2272                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2273                 else
2274                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2275
2276                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2277                 rte_free(flow);
2278                 /* Best effort: continue clearing the remaining flows on error. */
2280         }
2281         return rc;
2282 }
2283
2284 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2285 {
2286         struct bnxt_filter_info *filter;
2287         int rc = 0;
2288
2289         STAILQ_FOREACH(filter, &vnic->filter, next) {
2290                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2291                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2292                                                      filter);
2293                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2294                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2295                                                          filter);
2296                 else
2297                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2298                                                      filter);
2299                 if (rc)
2300                         break;
2301         }
2302         return rc;
2303 }
2304
2305 void bnxt_free_tunnel_ports(struct bnxt *bp)
2306 {
2307         if (bp->vxlan_port_cnt)
2308                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2309                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2310         bp->vxlan_port = 0;
2311         if (bp->geneve_port_cnt)
2312                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2313                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2314         bp->geneve_port = 0;
2315 }
2316
2317 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2318 {
2319         int i, j;
2320
2321         if (bp->vnic_info == NULL)
2322                 return;
2323
2324         /*
2325          * Cleanup VNICs in reverse order, to make sure the L2 filter
2326          * from vnic0 is last to be cleaned up.
2327          */
2328         for (i = bp->nr_vnics - 1; i >= 0; i--) {
2329                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2330
2331                 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2332                         PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
2333                         return;
2334                 }
2335
2336                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2337
2338                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2339
2340                 if (BNXT_CHIP_THOR(bp)) {
2341                         for (j = 0; j < vnic->num_lb_ctxts; j++) {
2342                                 bnxt_hwrm_vnic_ctx_free(bp, vnic,
2343                                                         vnic->fw_grp_ids[j]);
2344                                 vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
2345                         }
2346                         vnic->num_lb_ctxts = 0;
2347                 } else {
2348                         bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
2349                         vnic->rss_rule = INVALID_HW_RING_ID;
2350                 }
2351
2352                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2353
2354                 bnxt_hwrm_vnic_free(bp, vnic);
2355
2356                 rte_free(vnic->fw_grp_ids);
2357         }
2358         /* Ring resources */
2359         bnxt_free_all_hwrm_rings(bp);
2360         bnxt_free_all_hwrm_ring_grps(bp);
2361         bnxt_free_all_hwrm_stat_ctxs(bp);
2362         bnxt_free_tunnel_ports(bp);
2363 }
2364
2365 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2366 {
2367         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2368
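        /* ETH_LINK_SPEED_AUTONEG is 0: no FIXED bit means autoneg was requested. */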
2369         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2370                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2371
2372         switch (conf_link_speed) {
2373         case ETH_LINK_SPEED_10M_HD:
2374         case ETH_LINK_SPEED_100M_HD:
2375                 /* FALLTHROUGH */
2376                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2377         }
2378         return hw_link_duplex;
2379 }
2380
2381 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2382 {
2383         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2384 }
2385
2386 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2387 {
2388         uint16_t eth_link_speed = 0;
2389
2390         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2391                 return ETH_LINK_SPEED_AUTONEG;
2392
2393         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2394         case ETH_LINK_SPEED_100M:
2395         case ETH_LINK_SPEED_100M_HD:
2396                 /* FALLTHROUGH */
2397                 eth_link_speed =
2398                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2399                 break;
2400         case ETH_LINK_SPEED_1G:
2401                 eth_link_speed =
2402                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2403                 break;
2404         case ETH_LINK_SPEED_2_5G:
2405                 eth_link_speed =
2406                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2407                 break;
2408         case ETH_LINK_SPEED_10G:
2409                 eth_link_speed =
2410                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2411                 break;
2412         case ETH_LINK_SPEED_20G:
2413                 eth_link_speed =
2414                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2415                 break;
2416         case ETH_LINK_SPEED_25G:
2417                 eth_link_speed =
2418                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2419                 break;
2420         case ETH_LINK_SPEED_40G:
2421                 eth_link_speed =
2422                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2423                 break;
2424         case ETH_LINK_SPEED_50G:
2425                 eth_link_speed =
2426                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2427                 break;
2428         case ETH_LINK_SPEED_100G:
2429                 eth_link_speed =
2430                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2431                 break;
2432         default:
2433                 PMD_DRV_LOG(ERR,
2434                         "Unsupported link speed %u; defaulting to AUTO\n",
2435                         conf_link_speed);
2436                 break;
2437         }
2438         return eth_link_speed;
2439 }
2440
2441 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2442                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2443                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2444                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2445
2446 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2447 {
2448         uint32_t one_speed;
2449
2450         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2451                 return 0;
2452
2453         if (link_speed & ETH_LINK_SPEED_FIXED) {
2454                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2455
2456                 if (one_speed & (one_speed - 1)) {
2457                         PMD_DRV_LOG(ERR,
2458                                 "Invalid advertised speeds (%u) for port %u\n",
2459                                 link_speed, port_id);
2460                         return -EINVAL;
2461                 }
2462                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2463                         PMD_DRV_LOG(ERR,
2464                                 "Unsupported advertised speed (%u) for port %u\n",
2465                                 link_speed, port_id);
2466                         return -EINVAL;
2467                 }
2468         } else {
2469                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2470                         PMD_DRV_LOG(ERR,
2471                                 "Unsupported advertised speeds (%u) for port %u\n",
2472                                 link_speed, port_id);
2473                         return -EINVAL;
2474                 }
2475         }
2476         return 0;
2477 }
2478
2479 static uint16_t
2480 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2481 {
2482         uint16_t ret = 0;
2483
2484         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2485                 if (bp->link_info.support_speeds)
2486                         return bp->link_info.support_speeds;
2487                 link_speed = BNXT_SUPPORTED_SPEEDS;
2488         }
2489
2490         if (link_speed & ETH_LINK_SPEED_100M)
2491                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2492         if (link_speed & ETH_LINK_SPEED_100M_HD)
2493                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2494         if (link_speed & ETH_LINK_SPEED_1G)
2495                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2496         if (link_speed & ETH_LINK_SPEED_2_5G)
2497                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2498         if (link_speed & ETH_LINK_SPEED_10G)
2499                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2500         if (link_speed & ETH_LINK_SPEED_20G)
2501                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2502         if (link_speed & ETH_LINK_SPEED_25G)
2503                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2504         if (link_speed & ETH_LINK_SPEED_40G)
2505                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2506         if (link_speed & ETH_LINK_SPEED_50G)
2507                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2508         if (link_speed & ETH_LINK_SPEED_100G)
2509                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2510         return ret;
2511 }
2512
2513 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2514 {
2515         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2516
2517         switch (hw_link_speed) {
2518         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2519                 eth_link_speed = ETH_SPEED_NUM_100M;
2520                 break;
2521         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2522                 eth_link_speed = ETH_SPEED_NUM_1G;
2523                 break;
2524         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2525                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2526                 break;
2527         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2528                 eth_link_speed = ETH_SPEED_NUM_10G;
2529                 break;
2530         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2531                 eth_link_speed = ETH_SPEED_NUM_20G;
2532                 break;
2533         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2534                 eth_link_speed = ETH_SPEED_NUM_25G;
2535                 break;
2536         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2537                 eth_link_speed = ETH_SPEED_NUM_40G;
2538                 break;
2539         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2540                 eth_link_speed = ETH_SPEED_NUM_50G;
2541                 break;
2542         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2543                 eth_link_speed = ETH_SPEED_NUM_100G;
2544                 break;
2545         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2546         default:
2547                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2548                         hw_link_speed);
2549                 break;
2550         }
2551         return eth_link_speed;
2552 }
2553
2554 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2555 {
2556         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2557
2558         switch (hw_link_duplex) {
2559         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2560         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2561                 /* FALLTHROUGH */
2562                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2563                 break;
2564         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2565                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2566                 break;
2567         default:
2568                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2569                         hw_link_duplex);
2570                 break;
2571         }
2572         return eth_link_duplex;
2573 }
2574
2575 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2576 {
2577         int rc = 0;
2578         struct bnxt_link_info *link_info = &bp->link_info;
2579
2580         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2581         if (rc) {
2582                 PMD_DRV_LOG(ERR,
2583                         "Get link config failed with rc %d\n", rc);
2584                 goto exit;
2585         }
2586         if (link_info->link_speed)
2587                 link->link_speed =
2588                         bnxt_parse_hw_link_speed(link_info->link_speed);
2589         else
2590                 link->link_speed = ETH_SPEED_NUM_NONE;
2591         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2592         link->link_status = link_info->link_up;
2593         link->link_autoneg = link_info->auto_mode ==
2594                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2595                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2596 exit:
2597         return rc;
2598 }
2599
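/*
 * Validate the configured link speeds and program the PHY: restart autoneg
 * with a speed mask when the FW allows it, otherwise force the requested
 * speed. Only a single-function PF may change the link.
 */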
2600 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2601 {
2602         int rc = 0;
2603         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2604         struct bnxt_link_info link_req;
2605         uint16_t speed, autoneg;
2606
2607         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2608                 return 0;
2609
2610         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2611                         bp->eth_dev->data->port_id);
2612         if (rc)
2613                 goto error;
2614
2615         memset(&link_req, 0, sizeof(link_req));
2616         link_req.link_up = link_up;
2617         if (!link_up)
2618                 goto port_phy_cfg;
2619
2620         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2621         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2622         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2623         /* Autoneg can be done only when the FW allows it. */
2624         if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2625                                 bp->link_info.force_link_speed)) {
2626                 link_req.phy_flags |=
2627                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2628                 link_req.auto_link_speed_mask =
2629                         bnxt_parse_eth_link_speed_mask(bp,
2630                                                        dev_conf->link_speeds);
2631         } else {
2632                 if (bp->link_info.phy_type ==
2633                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2634                     bp->link_info.phy_type ==
2635                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2636                     bp->link_info.media_type ==
2637                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2638                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2639                         return -EINVAL;
2640                 }
2641
2642                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2643                 /* If the user wants a particular speed, try that first. */
2644                 if (speed)
2645                         link_req.link_speed = speed;
2646                 else if (bp->link_info.force_link_speed)
2647                         link_req.link_speed = bp->link_info.force_link_speed;
2648                 else
2649                         link_req.link_speed = bp->link_info.auto_link_speed;
2650         }
2651         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2652         link_req.auto_pause = bp->link_info.auto_pause;
2653         link_req.force_pause = bp->link_info.force_pause;
2654
2655 port_phy_cfg:
2656         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2657         if (rc) {
2658                 PMD_DRV_LOG(ERR,
2659                         "Set link config failed with rc %d\n", rc);
2660         }
2661
2662 error:
2663         return rc;
2664 }
2665
2666 /* JIRA 22088 */
2667 int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
2668 {
2669         struct hwrm_func_qcfg_input req = {0};
2670         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2671         uint16_t flags;
2672         int rc = 0;
2673
2674         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2675         req.fid = rte_cpu_to_le_16(0xffff);
2676
2677         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2678
2679         HWRM_CHECK_RESULT();
2680
2681         /* Hard-coded 0xfff VLAN ID mask */
2682         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2683         flags = rte_le_to_cpu_16(resp->flags);
2684         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2685                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2686
2687         if (BNXT_VF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
2688                 bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
2689                 PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
2690         }
2691
2692         if (mtu)
2693                 *mtu = resp->mtu;
2694
2695         switch (resp->port_partition_type) {
2696         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2697         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2698         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2699                 /* FALLTHROUGH */
2700                 bp->port_partition_type = resp->port_partition_type;
2701                 break;
2702         default:
2703                 bp->port_partition_type = 0;
2704                 break;
2705         }
2706
2707         HWRM_UNLOCK();
2708
2709         return rc;
2710 }
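
/*
 * Illustrative usage sketch (not part of the driver): a caller could use
 * bnxt_hwrm_func_qcfg() to pick up the firmware's current MTU for this
 * function (fid 0xffff above means "the calling function").  How the value
 * is consumed below is an assumption for illustration only:
 *
 *     uint16_t fw_mtu = 0;
 *
 *     if (bnxt_hwrm_func_qcfg(bp, &fw_mtu) == 0 && fw_mtu != 0)
 *             PMD_DRV_LOG(INFO, "FW MTU %u\n", fw_mtu);
 */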
2711
2712 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2713                                    struct hwrm_func_qcaps_output *qcaps)
2714 {
2715         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2716         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2717                sizeof(qcaps->mac_address));
2718         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2719         qcaps->max_rx_rings = fcfg->num_rx_rings;
2720         qcaps->max_tx_rings = fcfg->num_tx_rings;
2721         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2722         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2723         qcaps->max_vfs = 0;
2724         qcaps->first_vf_id = 0;
2725         qcaps->max_vnics = fcfg->num_vnics;
2726         qcaps->max_decap_records = 0;
2727         qcaps->max_encap_records = 0;
2728         qcaps->max_tx_wm_flows = 0;
2729         qcaps->max_tx_em_flows = 0;
2730         qcaps->max_rx_wm_flows = 0;
2731         qcaps->max_rx_em_flows = 0;
2732         qcaps->max_flow_id = 0;
2733         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2734         qcaps->max_sp_tx_rings = 0;
2735         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2736 }
2737
2738 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2739 {
2740         struct hwrm_func_cfg_input req = {0};
2741         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2742         uint32_t enables;
2743         int rc;
2744
2745         enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2746                   HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2747                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2748                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2749                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2750                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2751                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2752                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2753                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;
2754
2755         if (BNXT_HAS_RING_GRPS(bp)) {
2756                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
2757                 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2758         } else if (BNXT_HAS_NQ(bp)) {
2759                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
2760                 req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
2761         }
2762
2763         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2764         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2765         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
2766                                    RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
2767                                    BNXT_NUM_VLANS);
2768         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2769         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2770         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2771         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2772         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2773         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2774         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2775         req.fid = rte_cpu_to_le_16(0xffff);
2776         req.enables = rte_cpu_to_le_32(enables);
2777
2778         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2779
2780         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2781
2782         HWRM_CHECK_RESULT();
2783         HWRM_UNLOCK();
2784
2785         return rc;
2786 }
2787
2788 static void populate_vf_func_cfg_req(struct bnxt *bp,
2789                                      struct hwrm_func_cfg_input *req,
2790                                      int num_vfs)
2791 {
2792         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2793                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2794                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2795                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2796                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2797                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2798                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2799                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2800                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2801                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2802
2803         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
2804                                     RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
2805                                     BNXT_NUM_VLANS);
2806         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
2807                                     RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
2808                                     BNXT_NUM_VLANS);
2809         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2810                                                 (num_vfs + 1));
2811         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2812         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2813                                                (num_vfs + 1));
2814         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2815         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2816         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2817         /* TODO: For now, do not support VMDq/RFS on VFs. */
2818         req->num_vnics = rte_cpu_to_le_16(1);
2819         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2820                                                  (num_vfs + 1));
2821 }
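
/*
 * The division above splits each PF maximum evenly across the VFs plus the
 * PF itself, i.e. by (num_vfs + 1).  Worked example with illustrative
 * numbers: if max_tx_rings is 16 and num_vfs is 3, each function is offered
 * 16 / (3 + 1) = 4 TX rings; any remainder is dropped by the integer
 * division rather than redistributed.
 */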
2822
2823 static void add_random_mac_if_needed(struct bnxt *bp,
2824                                      struct hwrm_func_cfg_input *cfg_req,
2825                                      int vf)
2826 {
2827         struct rte_ether_addr mac;
2828
2829         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2830                 return;
2831
2832         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2833                 cfg_req->enables |=
2834                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2835                 rte_eth_random_addr(cfg_req->dflt_mac_addr);
2836                 bp->pf.vf_info[vf].random_mac = true;
2837         } else {
2838                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
2839                         RTE_ETHER_ADDR_LEN);
2840         }
2841 }
2842
2843 static void reserve_resources_from_vf(struct bnxt *bp,
2844                                       struct hwrm_func_cfg_input *cfg_req,
2845                                       int vf)
2846 {
2847         struct hwrm_func_qcaps_input req = {0};
2848         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2849         int rc;
2850
2851         /* Get the actual allocated values now */
2852         HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
2853         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2854         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2855
2856         if (rc) {
2857                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2858                 copy_func_cfg_to_qcaps(cfg_req, resp);
2859         } else if (resp->error_code) {
2860                 rc = rte_le_to_cpu_16(resp->error_code);
2861                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2862                 copy_func_cfg_to_qcaps(cfg_req, resp);
2863         }
2864
2865         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2866         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2867         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2868         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2869         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2870         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2871         /*
2872          * TODO: VMDq is not supported with VFs, so max_vnics is always
2873          * forced to 1 in that case.
2874          */
2875         /* bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics); */
2876         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2877
2878         HWRM_UNLOCK();
2879 }
2880
2881 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2882 {
2883         struct hwrm_func_qcfg_input req = {0};
2884         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2885         int rc;
2886
2887         /* Query the current default VLAN for this VF */
2888         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2889         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2890         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2891         HWRM_CHECK_RESULT();
2892         rc = rte_le_to_cpu_16(resp->vlan);
2893
2894         HWRM_UNLOCK();
2895
2896         return rc;
2897 }
2898
2899 static int update_pf_resource_max(struct bnxt *bp)
2900 {
2901         struct hwrm_func_qcfg_input req = {0};
2902         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2903         int rc;
2904
2905         /* And copy the allocated numbers into the pf struct */
2906         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2907         req.fid = rte_cpu_to_le_16(0xffff);
2908         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2909         HWRM_CHECK_RESULT();
2910
2911         /* Only TX ring value reflects actual allocation? TODO */
2912         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2913         bp->pf.evb_mode = resp->evb_mode;
2914
2915         HWRM_UNLOCK();
2916
2917         return rc;
2918 }
2919
2920 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2921 {
2922         int rc;
2923
2924         if (!BNXT_PF(bp)) {
2925                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2926                 return -EINVAL;
2927         }
2928
2929         rc = bnxt_hwrm_func_qcaps(bp);
2930         if (rc)
2931                 return rc;
2932
2933         bp->pf.func_cfg_flags &=
2934                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2935                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2936         bp->pf.func_cfg_flags |=
2937                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2938         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
             if (rc)
                     return rc;
2939         rc = __bnxt_hwrm_func_qcaps(bp);
2940         return rc;
2941 }
2942
2943 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2944 {
2945         struct hwrm_func_cfg_input req = {0};
2946         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2947         int i;
2948         size_t sz;
2949         int rc = 0;
2950         size_t req_buf_sz;
2951
2952         if (!BNXT_PF(bp)) {
2953                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2954                 return -EINVAL;
2955         }
2956
2957         rc = bnxt_hwrm_func_qcaps(bp);
2958
2959         if (rc)
2960                 return rc;
2961
2962         bp->pf.active_vfs = num_vfs;
2963
2964         /*
2965          * First, configure the PF to use only one TX ring.  This ensures that
2966          * there are enough rings for all VFs.
2967          *
2968          * If we don't do this, when we call func_alloc() later, we will lock
2969          * extra rings to the PF that won't be available during func_cfg() of
2970          * the VFs.
2971          *
2972          * This has been fixed in firmware versions above 20.6.54.
2973          */
2974         bp->pf.func_cfg_flags &=
2975                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2976                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2977         bp->pf.func_cfg_flags |=
2978                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2979         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2980         if (rc)
2981                 return rc;
2982
2983         /*
2984          * Now, create and register a buffer to hold forwarded VF requests
2985          */
2986         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2987         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2988                 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
2989         if (bp->pf.vf_req_buf == NULL) {
2990                 rc = -ENOMEM;
2991                 goto error_free;
2992         }
2993         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2994                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2995         for (i = 0; i < num_vfs; i++)
2996                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2997                                         (i * HWRM_MAX_REQ_LEN);
2998
2999         rc = bnxt_hwrm_func_buf_rgtr(bp);
3000         if (rc)
3001                 goto error_free;
3002
3003         populate_vf_func_cfg_req(bp, &req, num_vfs);
3004
3005         bp->pf.active_vfs = 0;
3006         for (i = 0; i < num_vfs; i++) {
3007                 add_random_mac_if_needed(bp, &req, i);
3008
3009                 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3010                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
3011                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
3012                 rc = bnxt_hwrm_send_message(bp,
3013                                             &req,
3014                                             sizeof(req),
3015                                             BNXT_USE_CHIMP_MB);
3016
3017                 /* Clear enable flag for next pass */
3018                 req.enables &= ~rte_cpu_to_le_32(
3019                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3020
3021                 if (rc || resp->error_code) {
3022                         PMD_DRV_LOG(ERR,
3023                                 "Failed to initialize VF %d\n", i);
3024                         PMD_DRV_LOG(ERR,
3025                                 "Not all VFs available. (%d, %d)\n",
3026                                 rc, resp->error_code);
3027                         HWRM_UNLOCK();
3028                         break;
3029                 }
3030
3031                 HWRM_UNLOCK();
3032
3033                 reserve_resources_from_vf(bp, &req, i);
3034                 bp->pf.active_vfs++;
3035                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
3036         }
3037
3038         /*
3039          * Now configure the PF to use "the rest" of the resources.
3040          * Note that STD_TX_RING_MODE is used here, which limits the number
3041          * of TX rings.  This allows QoS to function properly.  Without it,
3042          * the PF rings would break the bandwidth settings.
3043          */
3044         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
3045         if (rc)
3046                 goto error_free;
3047
3048         rc = update_pf_resource_max(bp);
3049         if (rc)
3050                 goto error_free;
3051
3052         return rc;
3053
3054 error_free:
3055         bnxt_hwrm_func_buf_unrgtr(bp);
3056         return rc;
3057 }
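
/*
 * Summary of the sequence implemented above (derived from the code):
 *   1. Shrink the PF to a single TX ring so rings remain for the VFs.
 *   2. Register a buffer for VF->PF forwarded HWRM requests.
 *   3. FUNC_CFG each VF and subtract its resources from the PF maxima.
 *   4. Re-expand the PF with STD_TX_RING_MODE for QoS-safe TX rings.
 *   5. Re-query the PF to learn what was actually allocated.
 * A PF PMD would typically invoke this once during device init when VFs
 * are requested (the exact call site is elsewhere and assumed here).
 */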
3058
3059 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
3060 {
3061         struct hwrm_func_cfg_input req = {0};
3062         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3063         int rc;
3064
3065         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3066
3067         req.fid = rte_cpu_to_le_16(0xffff);
3068         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
3069         req.evb_mode = bp->pf.evb_mode;
3070
3071         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3072         HWRM_CHECK_RESULT();
3073         HWRM_UNLOCK();
3074
3075         return rc;
3076 }
3077
3078 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
3079                                 uint8_t tunnel_type)
3080 {
3081         struct hwrm_tunnel_dst_port_alloc_input req = {0};
3082         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3083         int rc = 0;
3084
3085         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
3086         req.tunnel_type = tunnel_type;
3087         req.tunnel_dst_port_val = port;
3088         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3089         HWRM_CHECK_RESULT();
3090
3091         switch (tunnel_type) {
3092         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
3093                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
3094                 bp->vxlan_port = port;
3095                 break;
3096         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
3097                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
3098                 bp->geneve_port = port;
3099                 break;
3100         default:
3101                 break;
3102         }
3103
3104         HWRM_UNLOCK();
3105
3106         return rc;
3107 }
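
/*
 * Illustrative call (values are assumptions, e.g. the IANA VXLAN port):
 *
 *     rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, vxlan_port,
 *             HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
 *
 * where vxlan_port (e.g. 4789) is assumed to already be in the byte order
 * the firmware expects; the function stores resp->tunnel_dst_port_id so a
 * later bnxt_hwrm_tunnel_dst_port_free() can reference the allocation.
 */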
3108
3109 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
3110                                 uint8_t tunnel_type)
3111 {
3112         struct hwrm_tunnel_dst_port_free_input req = {0};
3113         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
3114         int rc = 0;
3115
3116         HWRM_PREP(req, TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
3117
3118         req.tunnel_type = tunnel_type;
3119         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
3120         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3121
3122         HWRM_CHECK_RESULT();
3123         HWRM_UNLOCK();
3124
3125         return rc;
3126 }
3127
3128 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
3129                                         uint32_t flags)
3130 {
3131         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3132         struct hwrm_func_cfg_input req = {0};
3133         int rc;
3134
3135         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3136
3137         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3138         req.flags = rte_cpu_to_le_32(flags);
3139         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3140
3141         HWRM_CHECK_RESULT();
3142         HWRM_UNLOCK();
3143
3144         return rc;
3145 }
3146
3147 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
3148 {
3149         uint32_t *flag = flagp;
3150
3151         vnic->flags = *flag;
3152 }
3153
3154 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3155 {
3156         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
3157 }
3158
3159 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
3160 {
3161         int rc = 0;
3162         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
3163         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
3164
3165         HWRM_PREP(req, FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
3166
3167         req.req_buf_num_pages = rte_cpu_to_le_16(1);
3168         req.req_buf_page_size = rte_cpu_to_le_16(
3169                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
3170         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
3171         req.req_buf_page_addr0 =
3172                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
3173         if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
3174                 PMD_DRV_LOG(ERR,
3175                         "unable to map buffer address to physical memory\n");
3176                 return -ENOMEM;
3177         }
3178
3179         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3180
3181         HWRM_CHECK_RESULT();
3182         HWRM_UNLOCK();
3183
3184         return rc;
3185 }
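
/*
 * Sizing note for the registration above.  The VF request buffer was
 * allocated as num_vfs * HWRM_MAX_REQ_LEN and page_getenum() rounds the
 * total up to a supported power-of-two page size.  Worked example,
 * assuming HWRM_MAX_REQ_LEN is 128 bytes: 64 VFs * 128 B = 8 KiB, and
 * page_getenum(8192) returns 13, i.e. the firmware is told the buffer
 * occupies one 2^13-byte page (req_buf_num_pages is 1 above).
 */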
3186
3187 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
3188 {
3189         int rc = 0;
3190         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
3191         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
3192
3193         if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
3194                 return 0;
3195
3196         HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
3197
3198         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3199
3200         HWRM_CHECK_RESULT();
3201         HWRM_UNLOCK();
3202
3203         return rc;
3204 }
3205
3206 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
3207 {
3208         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3209         struct hwrm_func_cfg_input req = {0};
3210         int rc;
3211
3212         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3213
3214         req.fid = rte_cpu_to_le_16(0xffff);
3215         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
3216         req.enables = rte_cpu_to_le_32(
3217                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3218         req.async_event_cr = rte_cpu_to_le_16(
3219                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3220         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3221
3222         HWRM_CHECK_RESULT();
3223         HWRM_UNLOCK();
3224
3225         return rc;
3226 }
3227
3228 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
3229 {
3230         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3231         struct hwrm_func_vf_cfg_input req = {0};
3232         int rc;
3233
3234         HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
3235
3236         req.enables = rte_cpu_to_le_32(
3237                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3238         req.async_event_cr = rte_cpu_to_le_16(
3239                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3240         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3241
3242         HWRM_CHECK_RESULT();
3243         HWRM_UNLOCK();
3244
3245         return rc;
3246 }
3247
3248 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
3249 {
3250         struct hwrm_func_cfg_input req = {0};
3251         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3252         uint16_t dflt_vlan, fid;
3253         uint32_t func_cfg_flags;
3254         int rc = 0;
3255
3256         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3257
3258         if (is_vf) {
3259                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
3260                 fid = bp->pf.vf_info[vf].fid;
3261                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
3262         } else {
3263                 fid = rte_cpu_to_le_16(0xffff);
3264                 func_cfg_flags = bp->pf.func_cfg_flags;
3265                 dflt_vlan = bp->vlan;
3266         }
3267
3268         req.flags = rte_cpu_to_le_32(func_cfg_flags);
3269         req.fid = rte_cpu_to_le_16(fid);
3270         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3271         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
3272
3273         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3274
3275         HWRM_CHECK_RESULT();
3276         HWRM_UNLOCK();
3277
3278         return rc;
3279 }
3280
3281 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
3282                         uint16_t max_bw, uint16_t enables)
3283 {
3284         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3285         struct hwrm_func_cfg_input req = {0};
3286         int rc;
3287
3288         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3289
3290         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3291         req.enables |= rte_cpu_to_le_32(enables);
3292         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3293         req.max_bw = rte_cpu_to_le_32(max_bw);
3294         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3295
3296         HWRM_CHECK_RESULT();
3297         HWRM_UNLOCK();
3298
3299         return rc;
3300 }
3301
3302 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
3303 {
3304         struct hwrm_func_cfg_input req = {0};
3305         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3306         int rc = 0;
3307
3308         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3309
3310         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3311         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3312         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3313         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
3314
3315         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3316
3317         HWRM_CHECK_RESULT();
3318         HWRM_UNLOCK();
3319
3320         return rc;
3321 }
3322
3323 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3324 {
3325         int rc;
3326
3327         if (BNXT_PF(bp))
3328                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3329         else
3330                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3331
3332         return rc;
3333 }
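
/*
 * Both branches above point the firmware's async_event_cr at the dedicated
 * bp->async_cp_ring, so asynchronous notifications (link changes, VF
 * forwarded requests, ...) are delivered on a completion ring separate
 * from the data path.  A sketch of the expected ordering during start-up
 * (the helper name below is an assumption for illustration):
 *
 *     bnxt_alloc_async_cp_ring(bp);        // ring must exist first
 *     bnxt_hwrm_set_async_event_cr(bp);    // then tell the firmware
 */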
3334
3335 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3336                               void *encaped, size_t ec_size)
3337 {
3338         int rc = 0;
3339         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3340         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3341
3342         if (ec_size > sizeof(req.encap_request))
3343                 return -1;
3344
3345         HWRM_PREP(req, REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
3346
3347         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3348         memcpy(req.encap_request, encaped, ec_size);
3349
3350         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3351
3352         HWRM_CHECK_RESULT();
3353         HWRM_UNLOCK();
3354
3355         return rc;
3356 }
3357
3358 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3359                                        struct rte_ether_addr *mac)
3360 {
3361         struct hwrm_func_qcfg_input req = {0};
3362         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3363         int rc;
3364
3365         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
3366
3367         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3368         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3369
3370         HWRM_CHECK_RESULT();
3371
3372         memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
3373
3374         HWRM_UNLOCK();
3375
3376         return rc;
3377 }
3378
3379 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3380                             void *encaped, size_t ec_size)
3381 {
3382         int rc = 0;
3383         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3384         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3385
3386         if (ec_size > sizeof(req.encap_request))
3387                 return -1;
3388
3389         HWRM_PREP(req, EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
3390
3391         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3392         memcpy(req.encap_request, encaped, ec_size);
3393
3394         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3395
3396         HWRM_CHECK_RESULT();
3397         HWRM_UNLOCK();
3398
3399         return rc;
3400 }
3401
3402 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3403                          struct rte_eth_stats *stats, uint8_t rx)
3404 {
3405         int rc = 0;
3406         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3407         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3408
3409         HWRM_PREP(req, STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
3410
3411         req.stat_ctx_id = rte_cpu_to_le_32(cid);
3412
3413         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3414
3415         HWRM_CHECK_RESULT();
3416
3417         if (rx) {
3418                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3419                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3420                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3421                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3422                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3423                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3424                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3425                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3426         } else {
3427                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3428                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3429                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3430                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3431                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3432                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3433         }
3434
3436         HWRM_UNLOCK();
3437
3438         return rc;
3439 }
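
/*
 * The per-queue counters above are sums of the firmware's unicast,
 * multicast and broadcast counts, e.g. for an RX context:
 *
 *     q_ipackets[idx] = rx_ucast_pkts + rx_mcast_pkts + rx_bcast_pkts
 *     q_errors[idx]   = rx_err_pkts + rx_drop_pkts
 */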
3440
3441 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3442 {
3443         struct hwrm_port_qstats_input req = {0};
3444         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3445         struct bnxt_pf_info *pf = &bp->pf;
3446         int rc;
3447
3448         HWRM_PREP(req, PORT_QSTATS, BNXT_USE_CHIMP_MB);
3449
3450         req.port_id = rte_cpu_to_le_16(pf->port_id);
3451         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3452         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3453         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3454
3455         HWRM_CHECK_RESULT();
3456         HWRM_UNLOCK();
3457
3458         return rc;
3459 }
3460
3461 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3462 {
3463         struct hwrm_port_clr_stats_input req = {0};
3464         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3465         struct bnxt_pf_info *pf = &bp->pf;
3466         int rc;
3467
3468         /* Not allowed on NS2 device, NPAR, MultiHost, VF */
3469         if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
3470             BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
3471                 return 0;
3472
3473         HWRM_PREP(req, PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
3474
3475         req.port_id = rte_cpu_to_le_16(pf->port_id);
3476         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3477
3478         HWRM_CHECK_RESULT();
3479         HWRM_UNLOCK();
3480
3481         return rc;
3482 }
3483
3484 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3485 {
3486         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3487         struct hwrm_port_led_qcaps_input req = {0};
3488         int rc;
3489
3490         if (BNXT_VF(bp))
3491                 return 0;
3492
3493         HWRM_PREP(req, PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
3494         req.port_id = bp->pf.port_id;
3495         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3496
3497         HWRM_CHECK_RESULT();
3498
3499         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3500                 unsigned int i;
3501
3502                 bp->num_leds = resp->num_leds;
3503                 memcpy(bp->leds, &resp->led0_id,
3504                         sizeof(bp->leds[0]) * bp->num_leds);
3505                 for (i = 0; i < bp->num_leds; i++) {
3506                         struct bnxt_led_info *led = &bp->leds[i];
3507
3508                         uint16_t caps = led->led_state_caps;
3509
3510                         if (!led->led_group_id ||
3511                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3512                                 bp->num_leds = 0;
3513                                 break;
3514                         }
3515                 }
3516         }
3517
3518         HWRM_UNLOCK();
3519
3520         return rc;
3521 }
3522
3523 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3524 {
3525         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3526         struct hwrm_port_led_cfg_input req = {0};
3527         struct bnxt_led_cfg *led_cfg;
3528         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3529         uint16_t duration = 0;
3530         int rc, i;
3531
3532         if (!bp->num_leds || BNXT_VF(bp))
3533                 return -EOPNOTSUPP;
3534
3535         HWRM_PREP(req, PORT_LED_CFG, BNXT_USE_CHIMP_MB);
3536
3537         if (led_on) {
3538                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3539                 duration = rte_cpu_to_le_16(500);
3540         }
3541         req.port_id = bp->pf.port_id;
3542         req.num_leds = bp->num_leds;
3543         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3544         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3545                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3546                 led_cfg->led_id = bp->leds[i].led_id;
3547                 led_cfg->led_state = led_state;
3548                 led_cfg->led_blink_on = duration;
3549                 led_cfg->led_blink_off = duration;
3550                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3551         }
3552
3553         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3554
3555         HWRM_CHECK_RESULT();
3556         HWRM_UNLOCK();
3557
3558         return rc;
3559 }
3560
3561 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3562                                uint32_t *length)
3563 {
3564         int rc;
3565         struct hwrm_nvm_get_dir_info_input req = {0};
3566         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3567
3568         HWRM_PREP(req, NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
3569
3570         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3571
3572         HWRM_CHECK_RESULT();
3573
3574         *entries = rte_le_to_cpu_32(resp->entries);
3575         *length = rte_le_to_cpu_32(resp->entry_length);
3576
3577         HWRM_UNLOCK();
3578         return rc;
3579 }
3580
3581 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3582 {
3583         int rc;
3584         uint32_t dir_entries;
3585         uint32_t entry_length;
3586         uint8_t *buf;
3587         size_t buflen;
3588         rte_iova_t dma_handle;
3589         struct hwrm_nvm_get_dir_entries_input req = {0};
3590         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3591
3592         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3593         if (rc != 0)
3594                 return rc;
3595
3596         *data++ = dir_entries;
3597         *data++ = entry_length;
3598         len -= 2;
3599         memset(data, 0xff, len);
3600
3601         buflen = dir_entries * entry_length;
3602         buf = rte_malloc("nvm_dir", buflen, 0);
3603         if (buf == NULL)
3604                 return -ENOMEM;
3605         rte_mem_lock_page(buf);
3606         dma_handle = rte_mem_virt2iova(buf);
3607         if (dma_handle == RTE_BAD_IOVA) {
3608                 PMD_DRV_LOG(ERR,
3609                         "unable to map response address to physical memory\n");
                     rte_free(buf);
3610                 return -ENOMEM;
3611         }
3612         HWRM_PREP(req, NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
3613         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3614         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3615
3616         if (rc == 0)
3617                 memcpy(data, buf, len > buflen ? buflen : len);
3618
3619         rte_free(buf);
3620         HWRM_CHECK_RESULT();
3621         HWRM_UNLOCK();
3622
3623         return rc;
3624 }
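
/*
 * Layout of the caller-provided buffer filled above (derived from the
 * code, for reference):
 *
 *     data[0]          number of directory entries (low 8 bits only)
 *     data[1]          size of each entry (low 8 bits only)
 *     data[2..len-1]   raw entry table, 0xff-padded if len exceeds it
 *
 * Note that dir_entries and entry_length are 32-bit values truncated to
 * one byte each here, so counts above 255 are not representable.
 */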
3625
3626 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3627                              uint32_t offset, uint32_t length,
3628                              uint8_t *data)
3629 {
3630         int rc;
3631         uint8_t *buf;
3632         rte_iova_t dma_handle;
3633         struct hwrm_nvm_read_input req = {0};
3634         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3635
3636         buf = rte_malloc("nvm_item", length, 0);
3637         if (!buf)
3638                 return -ENOMEM;
3639         rte_mem_lock_page(buf);
3640
3641         dma_handle = rte_mem_virt2iova(buf);
3642         if (dma_handle == RTE_BAD_IOVA) {
3643                 PMD_DRV_LOG(ERR,
3644                         "unable to map response address to physical memory\n");
                     rte_free(buf);
3645                 return -ENOMEM;
3646         }
3647         HWRM_PREP(req, NVM_READ, BNXT_USE_CHIMP_MB);
3648         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3649         req.dir_idx = rte_cpu_to_le_16(index);
3650         req.offset = rte_cpu_to_le_32(offset);
3651         req.len = rte_cpu_to_le_32(length);
3652         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3653         if (rc == 0)
3654                 memcpy(data, buf, length);
3655
3656         rte_free(buf);
3657         HWRM_CHECK_RESULT();
3658         HWRM_UNLOCK();
3659
3660         return rc;
3661 }
3662
3663 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3664 {
3665         int rc;
3666         struct hwrm_nvm_erase_dir_entry_input req = {0};
3667         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3668
3669         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
3670         req.dir_idx = rte_cpu_to_le_16(index);
3671         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3672         HWRM_CHECK_RESULT();
3673         HWRM_UNLOCK();
3674
3675         return rc;
3676 }
3677
3679 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3680                           uint16_t dir_ordinal, uint16_t dir_ext,
3681                           uint16_t dir_attr, const uint8_t *data,
3682                           size_t data_len)
3683 {
3684         int rc;
3685         struct hwrm_nvm_write_input req = {0};
3686         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3687         rte_iova_t dma_handle;
3688         uint8_t *buf;
3689
3690         buf = rte_malloc("nvm_write", data_len, 0);
3691         if (!buf)
3692                 return -ENOMEM;
3693         rte_mem_lock_page(buf);
3694
3695         dma_handle = rte_mem_virt2iova(buf);
3696         if (dma_handle == RTE_BAD_IOVA) {
3697                 PMD_DRV_LOG(ERR,
3698                         "unable to map response address to physical memory\n");
                     rte_free(buf);
3699                 return -ENOMEM;
3700         }
3701         memcpy(buf, data, data_len);
3702
3703         HWRM_PREP(req, NVM_WRITE, BNXT_USE_CHIMP_MB);
3704
3705         req.dir_type = rte_cpu_to_le_16(dir_type);
3706         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3707         req.dir_ext = rte_cpu_to_le_16(dir_ext);
3708         req.dir_attr = rte_cpu_to_le_16(dir_attr);
3709         req.dir_data_length = rte_cpu_to_le_32(data_len);
3710         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3711
3712         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3713
3714         rte_free(buf);
3715         HWRM_CHECK_RESULT();
3716         HWRM_UNLOCK();
3717
3718         return rc;
3719 }
3720
3721 static void
3722 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3723 {
3724         uint32_t *count = cbdata;
3725
3726         *count = *count + 1;
3727 }
3728
3729 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3730                                      struct bnxt_vnic_info *vnic __rte_unused)
3731 {
3732         return 0;
3733 }
3734
3735 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3736 {
3737         uint32_t count = 0;
3738
3739         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3740             &count, bnxt_vnic_count_hwrm_stub);
3741
3742         return count;
3743 }
3744
3745 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3746                                         uint16_t *vnic_ids)
3747 {
3748         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3749         struct hwrm_func_vf_vnic_ids_query_output *resp =
3750                                                 bp->hwrm_cmd_resp_addr;
3751         int rc;
3752
3753         /* First query all VNIC ids */
3754         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
3755
3756         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3757         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3758         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3759
3760         if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
3761                 HWRM_UNLOCK();
3762                 PMD_DRV_LOG(ERR,
3763                 "unable to map VNIC ID table address to physical memory\n");
3764                 return -ENOMEM;
3765         }
3766         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3767         HWRM_CHECK_RESULT();
3768         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3769
3770         HWRM_UNLOCK();
3771
3772         return rc;
3773 }
3774
3775 /*
3776  * This function queries the VNIC IDs for a specified VF. It then calls
3777  * vnic_cb to update the necessary fields in vnic_info with cbdata.
3778  * Finally, it calls hwrm_cb to program the new VNIC configuration.
3779  */
3780 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3781         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3782         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3783 {
3784         struct bnxt_vnic_info vnic;
3785         int rc = 0;
3786         int i, num_vnic_ids;
3787         uint16_t *vnic_ids;
3788         size_t vnic_id_sz;
3789         size_t sz;
3790
3791         /* First query all VNIC ids */
3792         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3793         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3794                         RTE_CACHE_LINE_SIZE);
3795         if (vnic_ids == NULL)
3796                 return -ENOMEM;
3797
3798         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3799                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3800
3801         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3802
3803         if (num_vnic_ids < 0) {
                     rte_free(vnic_ids);
3804                 return num_vnic_ids;
             }
3805
3806         /* Retrieve VNIC, update bd_stall then update */
3807
3808         for (i = 0; i < num_vnic_ids; i++) {
3809                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3810                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3811                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3812                 if (rc)
3813                         break;
3814                 if (vnic.mru <= 4)      /* Indicates unallocated */
3815                         continue;
3816
3817                 vnic_cb(&vnic, cbdata);
3818
3819                 rc = hwrm_cb(bp, &vnic);
3820                 if (rc)
3821                         break;
3822         }
3823
3824         rte_free(vnic_ids);
3825
3826         return rc;
3827 }
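
/*
 * Illustrative use of the callback pattern above, mirroring how the rxmask
 * helpers earlier in this file fit together (the flag value below is an
 * assumption for illustration):
 *
 *     uint32_t flags = 0;    // e.g. a BNXT_VNIC_INFO_* rxmask flag
 *
 *     bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
 *             vf_vnic_set_rxmask_cb, &flags,
 *             bnxt_set_rx_mask_no_vlan);
 */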
3828
3829 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3830                                               bool on)
3831 {
3832         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3833         struct hwrm_func_cfg_input req = {0};
3834         int rc;
3835
3836         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3837
3838         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3839         req.enables |= rte_cpu_to_le_32(
3840                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3841         req.vlan_antispoof_mode = on ?
3842                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3843                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3844         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3845
3846         HWRM_CHECK_RESULT();
3847         HWRM_UNLOCK();
3848
3849         return rc;
3850 }
3851
3852 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3853 {
3854         struct bnxt_vnic_info vnic;
3855         uint16_t *vnic_ids;
3856         size_t vnic_id_sz;
3857         int num_vnic_ids, i;
3858         size_t sz;
3859         int rc;
3860
3861         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3862         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3863                         RTE_CACHE_LINE_SIZE);
3864         if (vnic_ids == NULL)
3865                 return -ENOMEM;
3866
3867         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3868                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3869
3870         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3871         if (rc <= 0)
3872                 goto exit;
3873         num_vnic_ids = rc;
3874
3875         /*
3876          * Loop through to find the default VNIC ID.
3877          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3878          * by sending the hwrm_func_qcfg command to the firmware.
3879          */
3880         for (i = 0; i < num_vnic_ids; i++) {
3881                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3882                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3883                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3884                                         bp->pf.first_vf_id + vf);
3885                 if (rc)
3886                         goto exit;
3887                 if (vnic.func_default) {
3888                         rte_free(vnic_ids);
3889                         return vnic.fw_vnic_id;
3890                 }
3891         }
3892         /* Could not find a default VNIC. */
3893         PMD_DRV_LOG(ERR, "No default VNIC\n");
3894 exit:
3895         rte_free(vnic_ids);
3896         return rc;
3897 }
3898
3899 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3900                          uint16_t dst_id,
3901                          struct bnxt_filter_info *filter)
3902 {
3903         int rc = 0;
3904         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3905         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3906         uint32_t enables = 0;
3907
3908         if (filter->fw_em_filter_id != UINT64_MAX)
3909                 bnxt_hwrm_clear_em_filter(bp, filter);
3910
3911         HWRM_PREP(req, CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
3912
3913         req.flags = rte_cpu_to_le_32(filter->flags);
3914
3915         enables = filter->enables |
3916               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3917         req.dst_id = rte_cpu_to_le_16(dst_id);
3918
3919         if (filter->ip_addr_type) {
3920                 req.ip_addr_type = filter->ip_addr_type;
3921                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3922         }
3923         if (enables &
3924             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3925                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3926         if (enables &
3927             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3928                 memcpy(req.src_macaddr, filter->src_macaddr,
3929                        RTE_ETHER_ADDR_LEN);
3930         if (enables &
3931             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3932                 memcpy(req.dst_macaddr, filter->dst_macaddr,
3933                        RTE_ETHER_ADDR_LEN);
3934         if (enables &
3935             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3936                 req.ovlan_vid = filter->l2_ovlan;
3937         if (enables &
3938             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3939                 req.ivlan_vid = filter->l2_ivlan;
3940         if (enables &
3941             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3942                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3943         if (enables &
3944             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3945                 req.ip_protocol = filter->ip_protocol;
3946         if (enables &
3947             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3948                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3949         if (enables &
3950             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3951                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3952         if (enables &
3953             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3954                 req.src_port = rte_cpu_to_be_16(filter->src_port);
3955         if (enables &
3956             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3957                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3958         if (enables &
3959             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3960                 req.mirror_vnic_id = filter->mirror_vnic_id;
3961
3962         req.enables = rte_cpu_to_le_32(enables);
3963
3964         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
3965
3966         HWRM_CHECK_RESULT();
3967
3968         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3969         HWRM_UNLOCK();
3970
3971         return rc;
3972 }
3973
3974 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3975 {
3976         int rc = 0;
3977         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3978         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3979
3980         if (filter->fw_em_filter_id == UINT64_MAX)
3981                 return 0;
3982
3983         PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
3984         HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
3985
3986         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3987
3988         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
3989
3990         HWRM_CHECK_RESULT();
3991         HWRM_UNLOCK();
3992
3993         filter->fw_em_filter_id = UINT64_MAX;
3994         filter->fw_l2_filter_id = UINT64_MAX;
3995
3996         return 0;
3997 }
3998
3999 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
4000                          uint16_t dst_id,
4001                          struct bnxt_filter_info *filter)
4002 {
4003         int rc = 0;
4004         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
4005         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
4006                                                 bp->hwrm_cmd_resp_addr;
4007         uint32_t enables = 0;
4008
4009         if (filter->fw_ntuple_filter_id != UINT64_MAX)
4010                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
4011
4012         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
4013
4014         req.flags = rte_cpu_to_le_32(filter->flags);
4015
4016         enables = filter->enables |
4017               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
4018         req.dst_id = rte_cpu_to_le_16(dst_id);
4019
4021         if (filter->ip_addr_type) {
4022                 req.ip_addr_type = filter->ip_addr_type;
4023                 enables |=
4024                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4025         }
4026         if (enables &
4027             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4028                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4029         if (enables &
4030             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4031                 memcpy(req.src_macaddr, filter->src_macaddr,
4032                        RTE_ETHER_ADDR_LEN);
4033         /*
4034          * if (enables &
4035          *     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
4036          *         memcpy(req.dst_macaddr, filter->dst_macaddr,
              *                RTE_ETHER_ADDR_LEN);
              */
4037         if (enables &
4038             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
4039                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4040         if (enables &
4041             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4042                 req.ip_protocol = filter->ip_protocol;
4043         if (enables &
4044             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4045                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
4046         if (enables &
4047             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
4048                 req.src_ipaddr_mask[0] =
4049                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
4050         if (enables &
4051             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
4052                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
4053         if (enables &
4054             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
4055                 req.dst_ipaddr_mask[0] =
4056                         rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
4057         if (enables &
4058             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
4059                 req.src_port = rte_cpu_to_le_16(filter->src_port);
4060         if (enables &
4061             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
4062                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
4063         if (enables &
4064             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
4065                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
4066         if (enables &
4067             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
4068                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
4069         if (enables &
4070             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4071                 req.mirror_vnic_id = filter->mirror_vnic_id;
4072
4073         req.enables = rte_cpu_to_le_32(enables);
4074
4075         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4076
4077         HWRM_CHECK_RESULT();
4078
4079         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
4080         HWRM_UNLOCK();
4081
4082         return rc;
4083 }
4084
4085 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
4086                                 struct bnxt_filter_info *filter)
4087 {
4088         int rc = 0;
4089         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
4090         struct hwrm_cfa_ntuple_filter_free_output *resp =
4091                                                 bp->hwrm_cmd_resp_addr;
4092
4093         if (filter->fw_ntuple_filter_id == UINT64_MAX)
4094                 return 0;
4095
4096         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
4097
4098         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
4099
4100         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4101
4102         HWRM_CHECK_RESULT();
4103         HWRM_UNLOCK();
4104
4105         filter->fw_ntuple_filter_id = UINT64_MAX;
4106
4107         return 0;
4108 }
4109
4110 static int
4111 bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4112 {
4113         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4114         uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
4115         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
4116         struct bnxt_rx_queue **rxqs = bp->rx_queues;
4117         uint16_t *ring_tbl = vnic->rss_table;
4118         int nr_ctxs = vnic->num_lb_ctxts;
4119         int max_rings = bp->rx_nr_rings;
4120         int i, j, k, cnt;
4121         int rc = 0;
4122
4123         for (i = 0, k = 0; i < nr_ctxs; i++) {
4124                 struct bnxt_rx_ring_info *rxr;
4125                 struct bnxt_cp_ring_info *cpr;
4126
4127                 HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
4128
4129                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
4130                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
4131                 req.hash_mode_flags = vnic->hash_mode;
4132
4133                 req.ring_grp_tbl_addr =
4134                     rte_cpu_to_le_64(vnic->rss_table_dma_addr +
4135                                      i * BNXT_RSS_ENTRIES_PER_CTX_THOR *
4136                                      2 * sizeof(*ring_tbl));
4137                 req.hash_key_tbl_addr =
4138                     rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
4139
4140                 req.ring_table_pair_index = i;
4141                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
4142
4143                 for (j = 0; j < BNXT_RSS_ENTRIES_PER_CTX_THOR; j++) {
4144                         uint16_t ring_id;
4145
4146                         /* Find next active ring. */
4147                         for (cnt = 0; cnt < max_rings; cnt++) {
4148                                 if (rx_queue_state[k] !=
4149                                                 RTE_ETH_QUEUE_STATE_STOPPED)
4150                                         break;
4151                                 if (++k == max_rings)
4152                                         k = 0;
4153                         }
4154
4155                         /* No active rings: unlock and return. */
4156                         if (cnt == max_rings) {
4157                                 HWRM_UNLOCK();
                                        return 0;
                                }
4158
4159                         /* Add rx/cp ring pair to RSS table. */
4160                         rxr = rxqs[k]->rx_ring;
4161                         cpr = rxqs[k]->cp_ring;
4162
4163                         ring_id = rxr->rx_ring_struct->fw_ring_id;
4164                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4165                         ring_id = cpr->cp_ring_struct->fw_ring_id;
4166                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4167
4168                         if (++k == max_rings)
4169                                 k = 0;
4170                 }
4171                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
4172                                             BNXT_USE_CHIMP_MB);
4173
4174                 HWRM_CHECK_RESULT();
4175                 HWRM_UNLOCK();
4176         }
4177
4178         return rc;
4179 }
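
/*
 * Illustrative sketch, not part of the driver: on Thor the RSS table is
 * an array of little-endian (rx ring id, completion ring id) pairs split
 * across nr_ctxs contexts of BNXT_RSS_ENTRIES_PER_CTX_THOR pairs each.
 * That layout is where the DMA-address stride used above comes from:
 *
 *	static rte_iova_t example_ctx_tbl_addr(struct bnxt_vnic_info *vnic,
 *					       int ctx)
 *	{
 *		// Two uint16_t table entries (rx + cp ring id) per pair.
 *		return vnic->rss_table_dma_addr +
 *		       ctx * BNXT_RSS_ENTRIES_PER_CTX_THOR * 2 *
 *		       sizeof(uint16_t);
 *	}
 */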
4180
4181 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4182 {
4183         unsigned int rss_idx, fw_idx, i;
4184
4185         if (!(vnic->rss_table && vnic->hash_type))
4186                 return 0;
4187
4188         if (BNXT_CHIP_THOR(bp))
4189                 return bnxt_vnic_rss_configure_thor(bp, vnic);
4190
4191         /*
4192          * Fill the RSS hash & redirection table with
4193          * ring group ids for all VNICs
4194          */
4195         for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
4196                 rss_idx++, fw_idx++) {
4197                 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
4198                         fw_idx %= bp->rx_cp_nr_rings;
4199                         if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
4200                                 break;
4201                         fw_idx++;
4202                 }
4203                 if (i == bp->rx_cp_nr_rings)
4204                         return 0;
4205                 vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
4206         }
4207         return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
4208 }
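
/*
 * Illustrative sketch, not part of the driver: the legacy (non-Thor)
 * path above spreads ring group ids round-robin over the
 * HW_HASH_INDEX_SIZE redirection slots, skipping any group whose id is
 * INVALID_HW_RING_ID. The same fill loop in stand-alone form:
 *
 *	static void example_fill_redir_tbl(uint16_t *tbl,
 *					   const uint16_t *grp,
 *					   unsigned int nr_grps)
 *	{
 *		unsigned int slot, g, tries;
 *
 *		for (slot = 0, g = 0; slot < HW_HASH_INDEX_SIZE;
 *		     slot++, g++) {
 *			for (tries = 0; tries < nr_grps; tries++, g++)
 *				if (grp[g % nr_grps] != INVALID_HW_RING_ID)
 *					break;
 *			if (tries == nr_grps)
 *				return;	// no ring group is active
 *			tbl[slot] = grp[g % nr_grps];
 *		}
 *	}
 */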
4209
4210 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
4211         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
4212 {
4213         uint16_t flags;
4214
4215         req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
4216
4217         /* This is a 6-bit value and must not be zero, or we get non-stop IRQs. */
4218         req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
4219
4220         /* This is a 6-bit value and must not be zero, or we get non-stop IRQs. */
4221         req->num_cmpl_dma_aggr_during_int =
4222                 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
4223
4224         req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
4225
4226         /* min timer set to 1/2 of interrupt timer */
4227         req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
4228
4229         /* buf timer set to 1/4 of interrupt timer */
4230         req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
4231
4232         req->cmpl_aggr_dma_tmr_during_int =
4233                 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
4234
4235         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4236                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4237         req->flags = rte_cpu_to_le_16(flags);
4238 }
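
/*
 * Illustrative sketch, not part of the driver: a hypothetical coalescing
 * profile consistent with the ratios noted in the comments above (min
 * latency timer at half the interrupt timer, DMA buffer timer at a
 * quarter of it). The numeric values are made up for the example:
 *
 *	static void example_coal_profile(struct bnxt_coal *coal)
 *	{
 *		coal->num_cmpl_aggr_int = 36;
 *		coal->num_cmpl_dma_aggr = 36;	// 6-bit, nonzero
 *		coal->num_cmpl_dma_aggr_during_int = 36;	// 6-bit, nonzero
 *		coal->int_lat_tmr_max = 80;
 *		coal->int_lat_tmr_min = 40;	// 1/2 of int_lat_tmr_max
 *		coal->cmpl_aggr_dma_tmr = 20;	// 1/4 of int_lat_tmr_max
 *		coal->cmpl_aggr_dma_tmr_during_int = 20;
 *	}
 */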
4239
4240 static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp,
4241                 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
4242 {
4243         struct hwrm_ring_aggint_qcaps_input req = {0};
4244         struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4245         uint32_t enables;
4246         uint16_t flags;
4247         int rc;
4248
4249         HWRM_PREP(req, RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
4250         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4251         HWRM_CHECK_RESULT();
4252
4253         agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
4254         agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;
4255
4256         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4257                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4258         agg_req->flags = rte_cpu_to_le_16(flags);
4259         enables =
4260          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
4261          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
4262         agg_req->enables = rte_cpu_to_le_32(enables);
4263
4264         HWRM_UNLOCK();
4265         return rc;
4266 }
4267
4268 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
4269                         struct bnxt_coal *coal, uint16_t ring_id)
4270 {
4271         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
4272         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
4273                                                 bp->hwrm_cmd_resp_addr;
4274         int rc;
4275
4276         /* Set ring coalesce parameters only for Thor and Stratus (100G) NICs */
4277         if (BNXT_CHIP_THOR(bp)) {
4278                 if (bnxt_hwrm_set_coal_params_thor(bp, &req))
4279                         return -1;
4280         } else if (bnxt_stratus_device(bp)) {
4281                 bnxt_hwrm_set_coal_params(coal, &req);
4282         } else {
4283                 return 0;
4284         }
4285
4286         HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS, BNXT_USE_CHIMP_MB);
4287         req.ring_id = rte_cpu_to_le_16(ring_id);
4288         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4289         HWRM_CHECK_RESULT();
4290         HWRM_UNLOCK();
4291         return 0;
4292 }
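
/*
 * Illustrative usage sketch, not part of the driver: coalescing is
 * programmed per completion ring, so a hypothetical caller applies the
 * same profile to each rx completion ring's firmware ring id:
 *
 *	static void example_apply_coal(struct bnxt *bp, struct bnxt_coal *c)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < bp->rx_cp_nr_rings; i++) {
 *			struct bnxt_cp_ring_info *cpr =
 *						bp->rx_queues[i]->cp_ring;
 *
 *			bnxt_hwrm_set_ring_coal(bp, c,
 *					cpr->cp_ring_struct->fw_ring_id);
 *		}
 *	}
 */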
4293
4294 #define BNXT_RTE_MEMZONE_FLAG  (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
4295 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
4296 {
4297         struct hwrm_func_backing_store_qcaps_input req = {0};
4298         struct hwrm_func_backing_store_qcaps_output *resp =
4299                 bp->hwrm_cmd_resp_addr;
4300         int rc;
4301
4302         if (!BNXT_CHIP_THOR(bp) ||
4303             bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
4304             BNXT_VF(bp) ||
4305             bp->ctx)
4306                 return 0;
4307
4308         HWRM_PREP(req, FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
4309         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4310         HWRM_CHECK_RESULT_SILENT();
4311
4312         if (!rc) {
4313                 struct bnxt_ctx_pg_info *ctx_pg;
4314                 struct bnxt_ctx_mem_info *ctx;
4315                 int total_alloc_len;
4316                 int i;
4317
4318                 total_alloc_len = sizeof(*ctx);
4319                 ctx = rte_malloc("bnxt_ctx_mem", total_alloc_len,
4320                                  RTE_CACHE_LINE_SIZE);
4321                 if (!ctx) {
4322                         rc = -ENOMEM;
4323                         goto ctx_err;
4324                 }
4325                 memset(ctx, 0, total_alloc_len);
4326
4327                 ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
4328                                     sizeof(*ctx_pg) * BNXT_MAX_Q,
4329                                     RTE_CACHE_LINE_SIZE);
4330                 if (!ctx_pg) {
4331                         rc = -ENOMEM;
4332                         goto ctx_err;
4333                 }
4334                 for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++)
4335                         ctx->tqm_mem[i] = ctx_pg;
4336
4337                 bp->ctx = ctx;
4338                 ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
4339                 ctx->qp_min_qp1_entries =
4340                         rte_le_to_cpu_16(resp->qp_min_qp1_entries);
4341                 ctx->qp_max_l2_entries =
4342                         rte_le_to_cpu_16(resp->qp_max_l2_entries);
4343                 ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
4344                 ctx->srq_max_l2_entries =
4345                         rte_le_to_cpu_16(resp->srq_max_l2_entries);
4346                 ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
4347                 ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
4348                 ctx->cq_max_l2_entries =
4349                         rte_le_to_cpu_16(resp->cq_max_l2_entries);
4350                 ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
4351                 ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
4352                 ctx->vnic_max_vnic_entries =
4353                         rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
4354                 ctx->vnic_max_ring_table_entries =
4355                         rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
4356                 ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
4357                 ctx->stat_max_entries =
4358                         rte_le_to_cpu_32(resp->stat_max_entries);
4359                 ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
4360                 ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
4361                 ctx->tqm_min_entries_per_ring =
4362                         rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
4363                 ctx->tqm_max_entries_per_ring =
4364                         rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
4365                 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
4366                 if (!ctx->tqm_entries_multiple)
4367                         ctx->tqm_entries_multiple = 1;
4368                 ctx->mrav_max_entries =
4369                         rte_le_to_cpu_32(resp->mrav_max_entries);
4370                 ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
4371                 ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
4372                 ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
4373         } else {
4374                 rc = 0;
4375         }
4376 ctx_err:
4377         HWRM_UNLOCK();
4378         return rc;
4379 }
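
/*
 * Illustrative sketch, not part of the driver: the qcaps output cached
 * above is a sizing descriptor; the backing store needed for a context
 * type is an entry-count by entry-size product, e.g. the worst-case QP
 * context footprint:
 *
 *	static size_t example_qp_ctx_bytes(struct bnxt_ctx_mem_info *ctx)
 *	{
 *		return (size_t)ctx->qp_max_entries * ctx->qp_entry_size;
 *	}
 */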
4380
4381 int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
4382 {
4383         struct hwrm_func_backing_store_cfg_input req = {0};
4384         struct hwrm_func_backing_store_cfg_output *resp =
4385                 bp->hwrm_cmd_resp_addr;
4386         struct bnxt_ctx_mem_info *ctx = bp->ctx;
4387         struct bnxt_ctx_pg_info *ctx_pg;
4388         uint32_t *num_entries;
4389         uint64_t *pg_dir;
4390         uint8_t *pg_attr;
4391         uint32_t ena;
4392         int i, rc;
4393
4394         if (!ctx)
4395                 return 0;
4396
4397         HWRM_PREP(req, FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
4398         req.enables = rte_cpu_to_le_32(enables);
4399
4400         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
4401                 ctx_pg = &ctx->qp_mem;
4402                 req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4403                 req.qp_num_qp1_entries =
4404                         rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
4405                 req.qp_num_l2_entries =
4406                         rte_cpu_to_le_16(ctx->qp_max_l2_entries);
4407                 req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
4408                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4409                                       &req.qpc_pg_size_qpc_lvl,
4410                                       &req.qpc_page_dir);
4411         }
4412
4413         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
4414                 ctx_pg = &ctx->srq_mem;
4415                 req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4416                 req.srq_num_l2_entries =
4417                                  rte_cpu_to_le_16(ctx->srq_max_l2_entries);
4418                 req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
4419                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4420                                       &req.srq_pg_size_srq_lvl,
4421                                       &req.srq_page_dir);
4422         }
4423
4424         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
4425                 ctx_pg = &ctx->cq_mem;
4426                 req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4427                 req.cq_num_l2_entries =
4428                                 rte_cpu_to_le_16(ctx->cq_max_l2_entries);
4429                 req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
4430                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4431                                       &req.cq_pg_size_cq_lvl,
4432                                       &req.cq_page_dir);
4433         }
4434
4435         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
4436                 ctx_pg = &ctx->vnic_mem;
4437                 req.vnic_num_vnic_entries =
4438                         rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
4439                 req.vnic_num_ring_table_entries =
4440                         rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
4441                 req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
4442                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4443                                       &req.vnic_pg_size_vnic_lvl,
4444                                       &req.vnic_page_dir);
4445         }
4446
4447         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
4448                 ctx_pg = &ctx->stat_mem;
4449                 req.stat_num_entries = rte_cpu_to_le_32(ctx->stat_max_entries);
4450                 req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
4451                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4452                                       &req.stat_pg_size_stat_lvl,
4453                                       &req.stat_page_dir);
4454         }
4455
4456         req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
4457         num_entries = &req.tqm_sp_num_entries;
4458         pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
4459         pg_dir = &req.tqm_sp_page_dir;
4460         ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
4461         for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
4462                 if (!(enables & ena))
4463                         continue;
4464
4467                 ctx_pg = ctx->tqm_mem[i];
4468                 *num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4469                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
4470         }
4471
4472         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4473         HWRM_CHECK_RESULT();
4474         HWRM_UNLOCK();
4475
4476         return rc;
4477 }
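
/*
 * Illustrative sketch, not part of the driver: the TQM loop above
 * assumes the TQM_SP enable bit is followed by eight consecutive TQM
 * ring bits, and that the matching num_entries/pg_size/page_dir request
 * fields are laid out as parallel arrays (hence the pointer bumps).
 * Building the same nine-bit enables mask explicitly:
 *
 *	uint32_t ena = 0;
 *	uint32_t bit = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
 *	int i;
 *
 *	for (i = 0; i < 9; i++, bit <<= 1)
 *		ena |= bit;	// TQM_SP plus the 8 TQM ring contexts
 */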
4478
4479 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
4480 {
4481         struct hwrm_port_qstats_ext_input req = {0};
4482         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
4483         struct bnxt_pf_info *pf = &bp->pf;
4484         int rc;
4485
4486         if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
4487               bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
4488                 return 0;
4489
4490         HWRM_PREP(req, PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
4491
4492         req.port_id = rte_cpu_to_le_16(pf->port_id);
4493         if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
4494                 req.tx_stat_host_addr =
4495                         rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
4496                 req.tx_stat_size =
4497                         rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
4498         }
4499         if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
4500                 req.rx_stat_host_addr =
4501                         rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
4502                 req.rx_stat_size =
4503                         rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
4504         }
4505         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4506
4507         if (rc) {
4508                 bp->fw_rx_port_stats_ext_size = 0;
4509                 bp->fw_tx_port_stats_ext_size = 0;
4510         } else {
4511                 bp->fw_rx_port_stats_ext_size =
4512                         rte_le_to_cpu_16(resp->rx_stat_size);
4513                 bp->fw_tx_port_stats_ext_size =
4514                         rte_le_to_cpu_16(resp->tx_stat_size);
4515         }
4516
4517         HWRM_CHECK_RESULT();
4518         HWRM_UNLOCK();
4519
4520         return rc;
4521 }
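
/*
 * Illustrative sketch, not part of the driver: firmware may report fewer
 * extended-stats bytes than the host structure defines, which is why the
 * returned sizes are cached above. A hypothetical reader checks the
 * cached size before trusting a counter:
 *
 *	static int example_rx_ext_stat_valid(struct bnxt *bp, size_t offset)
 *	{
 *		// Only counters below the firmware-reported size exist.
 *		return offset + sizeof(uint64_t) <=
 *			bp->fw_rx_port_stats_ext_size;
 *	}
 */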
4522
4523 int
4524 bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
4525 {
4526         struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0};
4527         struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp =
4528                 bp->hwrm_cmd_resp_addr;
4529         int rc = 0;
4530
4531         HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_KONG(bp));
4532         req.tunnel_type = type;
4533         req.dest_fid = rte_cpu_to_le_16(bp->fw_fid);
4534         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4535         HWRM_CHECK_RESULT();
4536
4537         HWRM_UNLOCK();
4538
4539         return rc;
4540 }
4541
4542 int
4543 bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
4544 {
4545         struct hwrm_cfa_redirect_tunnel_type_free_input req = {0};
4546         struct hwrm_cfa_redirect_tunnel_type_free_output *resp =
4547                 bp->hwrm_cmd_resp_addr;
4548         int rc = 0;
4549
4550         HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_KONG(bp));
4551         req.tunnel_type = type;
4552         req.dest_fid = rte_cpu_to_le_16(bp->fw_fid);
4553         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4554         HWRM_CHECK_RESULT();
4555
4556         HWRM_UNLOCK();
4557
4558         return rc;
4559 }
4560
4561 int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
4562 {
4563         struct hwrm_cfa_redirect_query_tunnel_type_input req = {0};
4564         struct hwrm_cfa_redirect_query_tunnel_type_output *resp =
4565                 bp->hwrm_cmd_resp_addr;
4566         int rc = 0;
4567
4568         HWRM_PREP(req, CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_KONG(bp));
4569         req.src_fid = rte_cpu_to_le_16(bp->fw_fid);
4570         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4571         HWRM_CHECK_RESULT();
4572
4573         if (type)
4574                 *type = rte_le_to_cpu_32(resp->tunnel_mask);
4575
4576         HWRM_UNLOCK();
4577
4578         return rc;
4579 }
4580
4581 int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
4582                                    uint16_t *dst_fid)
4583 {
4584         struct hwrm_cfa_redirect_tunnel_type_info_input req = {0};
4585         struct hwrm_cfa_redirect_tunnel_type_info_output *resp =
4586                 bp->hwrm_cmd_resp_addr;
4587         int rc = 0;
4588
4589         HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_KONG(bp));
4590         req.src_fid = rte_cpu_to_le_16(bp->fw_fid);
4591         req.tunnel_type = tun_type;
4592         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4593         HWRM_CHECK_RESULT();
4594
4595         if (dst_fid)
4596                 *dst_fid = rte_le_to_cpu_16(resp->dest_fid);
4597
4598         PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", rte_le_to_cpu_16(resp->dest_fid));
4599
4600         HWRM_UNLOCK();
4601
4602         return rc;
4603 }
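
/*
 * Illustrative usage sketch, not part of the driver: a hypothetical
 * caller chains the query and info commands, assuming (for this example
 * only) that tunnel_mask is bit-indexed by tunnel type:
 *
 *	static int example_tunnel_dst_fid(struct bnxt *bp, uint8_t tun_type,
 *					  uint16_t *fid)
 *	{
 *		uint32_t mask = 0;
 *		int rc;
 *
 *		rc = bnxt_hwrm_tunnel_redirect_query(bp, &mask);
 *		if (rc)
 *			return rc;
 *		if (!(mask & (1U << tun_type)))
 *			return -ENOENT;
 *		return bnxt_hwrm_tunnel_redirect_info(bp, tun_type, fid);
 *	}
 */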
4604
4605 int bnxt_hwrm_set_mac(struct bnxt *bp)
4606 {
4607         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4608         struct hwrm_func_vf_cfg_input req = {0};
4609         int rc = 0;
4610
4611         if (!BNXT_VF(bp))
4612                 return 0;
4613
4614         HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
4615
4616         req.enables =
4617                 rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
4618         memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
4619
4620         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4621
4622         HWRM_CHECK_RESULT();
4623
4624         memcpy(bp->dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
4625         HWRM_UNLOCK();
4626
4627         return rc;
4628 }