net/bnxt: support FW reset
drivers/net/bnxt/bnxt_hwrm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT                6000000
#define HWRM_SHORT_CMD_TIMEOUT          50000
#define HWRM_SPEC_CODE_1_8_3            0x10803
#define HWRM_VERSION_1_9_1              0x10901
#define HWRM_VERSION_1_9_2              0x10903

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

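/*
 * Map a size in bytes to the page-size exponent (log2) used by the
 * firmware, rounding up to the next supported size; e.g. a 3KB request
 * yields 12 (4KB pages) and a 1MB request yields 21 (2MB pages).
 */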
static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}

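/*
 * Populate the page attribute/directory fields of a context-memory
 * request: a multi-page ring is referenced indirectly through its page
 * table, while a single-page ring is referenced by the page's DMA
 * address directly.
 */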
static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
                                  uint8_t *pg_attr,
                                  uint64_t *pg_dir)
{
        if (rmem->nr_pages > 1) {
                *pg_attr = 1;
                *pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
        } else {
                *pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
        }
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -ETIMEDOUT if
 * bnxt_hwrm_send_message() times out, and a negative errno (derived from
 * the HWRM error code) if the command was rejected by the ChiMP firmware.
 */

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                  uint32_t msg_len, bool use_kong_mb)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };
        uint16_t bar_offset = use_kong_mb ?
                GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
        uint16_t mb_trigger_offset = use_kong_mb ?
                GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
        uint32_t timeout;

        /* Do not send HWRM commands to firmware in error state */
        if (bp->flags & BNXT_FLAG_FATAL_ERROR)
                return 0;

        /* For VER_GET command, set timeout as 50ms */
        if (rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
                timeout = HWRM_SHORT_CMD_TIMEOUT;
        else
                timeout = HWRM_CMD_TIMEOUT;

        if (bp->flags & BNXT_FLAG_SHORT_CMD ||
            msg_len > bp->max_req_len) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                /* Sync memory write before updating doorbell */
                rte_wmb();

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < timeout; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(1);
        }

        if (i >= timeout) {
                /* Suppress VER_GET timeout messages during reset recovery */
                if (bp->flags & BNXT_FLAG_FW_RESET &&
                    rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
                        return -ETIMEDOUT;

                PMD_DRV_LOG(ERR, "Error(timeout) sending msg 0x%04x\n",
                            req->req_type);
                return -ETIMEDOUT;
        }
        return 0;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() checks for transport and firmware errors; on failure
 * it releases the spinlock and returns from the calling function, so on
 * success the lock is still held. If a function does not use the regular
 * int return convention, HWRM_CHECK_RESULT() should not be used directly;
 * copy and adapt it to suit the function instead.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type, kong) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(-1); \
        req.seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
                rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT_SILENT() do {\
        if (rc) { \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc > 0) \
                        rc = -EINVAL; \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        PMD_DRV_LOG(ERR, \
                                "error %d:%d:%08x:%04x\n", \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        PMD_DRV_LOG(ERR, "error %d\n", rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc > 0) \
                        rc = -EINVAL; \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)

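/*
 * bnxt_hwrm_cfa_l2_clear_rx_mask() below illustrates the canonical shape
 * of an HWRM command wrapper:
 *
 *	HWRM_PREP(req, <TYPE>, BNXT_USE_CHIMP_MB);
 *	... fill request fields (little-endian) ...
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 *	HWRM_CHECK_RESULT();	/ returns with the lock dropped on error /
 *	... read response fields ...
 *	HWRM_UNLOCK();
 */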
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME add multicast flag, when multicast adding options is supported
         * by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->flags & BNXT_VNIC_INFO_MCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the set_rx_mask
         * list was used for anti-spoof. In 1.8.0, the TX path configuration was
         * removed from set_rx_mask call, and this command was added.
         *
         * This command is also present from 1.7.8.11 and higher,
         * as well as 1.7.8.0
         */
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = UINT64_MAX;

        return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        /* TODO: Is there a better way to add VLANs to each VNIC
         * in case of VMDQ?
         */
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                PMD_DRV_LOG(DEBUG,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);

        req.flags = rte_cpu_to_le_32(filter->flags);
        req.flags |=
        rte_cpu_to_le_32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct hwrm_port_mac_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_CFG, BNXT_USE_CHIMP_MB);

        if (ptp->rx_filter)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables = rte_cpu_to_le_32
                (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        /* Check the firmware status; an error here was previously ignored */
        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

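/*
 * Query the PTP timestamp register offsets from the MAC and cache them in
 * bp->ptp_cfg. Only direct register access is handled here; if the
 * firmware does not report direct access, PTP is left unconfigured.
 */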
static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*      if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
        if (ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);

        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        /* Release the HWRM lock on every early return below */
        if (!(resp->flags &
              HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) {
                HWRM_UNLOCK();
                return 0;
        }

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }

        ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
        ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
        ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
        ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
        ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
        ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
        ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
        ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
        ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        HWRM_UNLOCK();

        return 0;
}

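/*
 * Query function capabilities (ring/context/VNIC limits, PTP, external
 * statistics, error recovery) and size the per-VF state on the PF.
 */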
static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        if (bp->pf.vf_info == NULL) {
                                PMD_DRV_LOG(ERR, "Failed to alloc vf info\n");
                                HWRM_UNLOCK();
                                return -ENOMEM;
                        }
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Fail to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Alloc VLAN AS table for VF %d fail\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
        bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
        bp->max_l2_ctx =
                rte_le_to_cpu_16(resp->max_l2_ctxs) + bp->max_rx_em_flows;
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
                }
        }

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
                bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
                bp->flags |= BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
                PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
        } else {
                bp->flags &= ~BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
        }

        HWRM_UNLOCK();

        /*
         * Query PTP register offsets only after the HWRM lock has been
         * released; bnxt_hwrm_ptp_qcfg() issues an HWRM command of its own,
         * and unlocking in-place would drop the lock twice.
         */
        if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
                bnxt_hwrm_ptp_qcfg(bp);

        return rc;
}


int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc;

        rc = __bnxt_hwrm_func_qcaps(bp);
        if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
                rc = bnxt_alloc_ctx_mem(bp);
                if (rc)
                        return rc;

                rc = bnxt_hwrm_func_resc_qcaps(bp);
                if (!rc)
                        bp->flags |= BNXT_FLAG_NEW_RM;
        }

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET, BNXT_USE_CHIMP_MB);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

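/*
 * Register the driver with the firmware and request forwarding of the
 * async events it handles. Hot reset support is always advertised;
 * error-recovery support is advertised only when the firmware reported
 * the capability in FUNC_QCAPS.
 */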
int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        uint32_t flags = 0;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
        if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;

        /* PFs and trusted VFs should indicate support for the master
         * capability on non-Stingray platforms.
         */
        if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;

        HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));

                /*
                 * The PF can sniff HWRM API issued by the VFs. This can be
                 * set up by the Linux driver and inherited by the DPDK PF
                 * driver. Clear the HWRM sniffer list in the firmware
                 * because the DPDK PF driver does not support it.
                 */
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE;
        }

        req.flags = rte_cpu_to_le_32(flags);

        req.async_event_fwd[0] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
        if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
                req.async_event_fwd[0] |=
                        rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        flags = rte_le_to_cpu_32(resp->flags);
        if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
                bp->flags |= BNXT_FLAG_FW_CAP_IF_CHANGE;

        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}


int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
{
        if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
                return 0;

        return bnxt_hwrm_func_reserve_vf_resc(bp, true);
}

int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
{
        int rc;
        uint32_t flags = 0;
        uint32_t enables;
        struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_vf_cfg_input req = {0};

        HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);

        enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;

        if (BNXT_HAS_RING_GRPS(bp)) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
                req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
        }

        req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
        req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
                                            AGG_RING_MULTIPLIER);
        req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings +
                                             bp->tx_nr_rings +
                                             BNXT_NUM_ASYNC_CPR(bp));
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
                                              bp->tx_nr_rings +
                                              BNXT_NUM_ASYNC_CPR(bp));
        req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
        if (bp->vf_resv_strategy ==
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
                req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
                req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
                req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
        }

        if (test)
                flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;

        /* Ring groups exist only on chips that have them */
        if (test && BNXT_HAS_RING_GRPS(bp))
                flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;

        req.flags = rte_cpu_to_le_32(flags);
        req.enables |= rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (test)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        HWRM_UNLOCK();
        return rc;
}

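/*
 * Query resource limits from the firmware resource manager. On VFs these
 * values replace the FUNC_QCAPS maximums; success also causes the caller
 * to switch the driver to the NEW_RM resource model (see
 * bnxt_hwrm_func_qcaps()).
 */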
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_resource_qcaps_input req = {0};

        HWRM_PREP(req, FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (BNXT_VF(bp)) {
                bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
                bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
                bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
                /* func_resource_qcaps does not return max_rx_em_flows.
                 * So use the value provided by func_qcaps.
                 */
                bp->max_l2_ctx =
                        rte_le_to_cpu_16(resp->max_l2_ctxs) +
                        bp->max_rx_em_flows;
                bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
                bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        }
        bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
        bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
        if (bp->vf_resv_strategy >
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
                bp->vf_resv_strategy =
                HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;

        HWRM_UNLOCK();
        return rc;
}

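/*
 * Negotiate the HWRM interface version with the firmware and (re)size the
 * response and short-command buffers accordingly. During FW reset the
 * result is checked silently, since the driver polls VER_GET while waiting
 * for the firmware to come back.
 */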
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (bp->flags & BNXT_FLAG_FW_RESET)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
                resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
                resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
        bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
                     (resp->hwrm_fw_min_8b << 16) |
                     (resp->hwrm_fw_bld_8b << 8) |
                     resp->hwrm_fw_rsvd_8b;
        PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        fw_version = resp->hwrm_intf_maj_8b << 16;
        fw_version |= resp->hwrm_intf_min_8b << 8;
        fw_version |= resp->hwrm_intf_upd_8b;
        bp->hwrm_spec_code = fw_version;

        if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                PMD_DRV_LOG(ERR, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
        if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
                bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;

        max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                PMD_DRV_LOG(DEBUG, "Short command supported\n");
                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

        if (((dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
             (dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
            bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
                sprintf(type, "bnxt_hwrm_short_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr =
                                rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
                bp->flags |= BNXT_FLAG_KONG_MB_EN;
                PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
                PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");

error:
        HWRM_UNLOCK();
        return rc;
}


int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

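/*
 * Apply link requirements to the PHY. With link up, either force a fixed
 * speed or enable autonegotiation for the advertised speed mask; with
 * link down, force the link down. Note that some ChiMP firmware (20.2.1
 * and 20.2.2) rejects any auto_mode setting, including "none".
 */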
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG, BNXT_USE_CHIMP_MB);

        if (conf->link_up) {
                /* A fixed speed is requested while autoneg is on; disable
                 * autoneg.
                 */
                if (bp->link_info.auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
                }

                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        /* No speeds specified. Enable AutoNeg - all speeds */
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
                /* AutoNeg - Advertise speeds specified. */
                if (conf->auto_link_speed_mask &&
                    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
                                conf->auto_link_speed_mask;
                        enables |=
                        HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }

                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto pause or if there is a
                 * forced pause.
                 */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                PMD_DRV_LOG(INFO, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}


static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex_cfg;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
        link_info->phy_type = resp->phy_type;
        link_info->media_type = resp->media_type;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        HWRM_UNLOCK();

        PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
        PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
                    link_info->auto_link_speed_mask);
        PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
                    link_info->force_link_speed);

        return rc;
}

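/*
 * Discover the CoS queue configuration for the Tx direction and pick the
 * queue used for Tx rings: the first lossy profile on HWRM 1.9.1+, or
 * queue 0 on older firmware.
 */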
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int i;

        HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);

        req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
        /* HWRM Version >= 1.9.1 */
        if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
                req.drv_qmap_cap =
                        HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        HWRM_UNLOCK();

        if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
                bp->tx_cosq_id = bp->cos_queue[0].id;
        } else {
                /* iterate and find the COSq profile to use for Tx */
                for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
                        if (bp->cos_queue[i].profile ==
                                HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
                                bp->tx_cosq_id = bp->cos_queue[i].id;
                                break;
                        }
                }
        }

        bp->max_tc = resp->max_configurable_queues;
        bp->max_lltc = resp->max_configurable_lossless_queues;
        if (bp->max_tc > BNXT_MAX_QUEUE)
                bp->max_tc = BNXT_MAX_QUEUE;
        bp->max_q = bp->max_tc;

        PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);

        return rc;
}

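/*
 * Allocate a hardware ring of the given type (Tx, Rx, aggregation,
 * completion, or NQ) and return its firmware ring id through
 * ring->fw_ring_id.
 */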
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
        int rc = 0;
        uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_mempool *mb_pool;
        uint16_t rx_buf_size;

        HWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);
        req.length = rte_cpu_to_le_32(ring->ring_size);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
                req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
                if (BNXT_CHIP_THOR(bp)) {
                        mb_pool = bp->rx_queues[0]->mb_pool;
                        rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
                                      RTE_PKTMBUF_HEADROOM;
                        req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
                        enables |=
                                HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
                }
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                                HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                if (BNXT_HAS_NQ(bp)) {
                        /* Association of cp ring with nq */
                        req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                        enables |=
                                HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
                }
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
                req.ring_type = ring_type;
                req.page_size = BNXT_PAGE_SHFT;
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
                req.ring_type = ring_type;
                req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);

                mb_pool = bp->rx_queues[0]->mb_pool;
                rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
                              RTE_PKTMBUF_HEADROOM;
                req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);

                req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
                enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
                           HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
                           HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        default:
                PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                HWRM_UNLOCK();
                return -EINVAL;
        }
        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                        PMD_DRV_LOG(ERR,
                                    "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
                        PMD_DRV_LOG(ERR,
                                    "hwrm_ring_alloc rx agg failed. rc:%d\n",
                                    rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                        PMD_DRV_LOG(ERR,
                                    "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
                        PMD_DRV_LOG(ERR,
                                    "hwrm_ring_alloc nq failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                default:
                        PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        HWRM_UNLOCK();
        return rc;
}

1313
1314 int bnxt_hwrm_ring_free(struct bnxt *bp,
1315                         struct bnxt_ring *ring, uint32_t ring_type)
1316 {
1317         int rc;
1318         struct hwrm_ring_free_input req = {.req_type = 0 };
1319         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1320
1321         HWRM_PREP(req, RING_FREE, BNXT_USE_CHIMP_MB);
1322
1323         req.ring_type = ring_type;
1324         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1325
1326         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1327
1328         if (rc || resp->error_code) {
1329                 if (rc == 0 && resp->error_code)
1330                         rc = rte_le_to_cpu_16(resp->error_code);
1331                 HWRM_UNLOCK();
1332
1333                 switch (ring_type) {
1334                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1335                         PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1336                                 rc);
1337                         return rc;
1338                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1339                         PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1340                                 rc);
1341                         return rc;
1342                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1343                         PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1344                                 rc);
1345                         return rc;
1346                 case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
1347                         PMD_DRV_LOG(ERR,
1348                                     "hwrm_ring_free nq failed. rc:%d\n", rc);
1349                         return rc;
1350                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
1351                         PMD_DRV_LOG(ERR,
1352                                     "hwrm_ring_free agg failed. rc:%d\n", rc);
1353                         return rc;
1354                 default:
1355                         PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1356                         return rc;
1357                 }
1358         }
1359         HWRM_UNLOCK();
1360         return 0;
1361 }
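
/*
 * Illustrative sketch (not part of the driver): callers typically guard
 * the free with the INVALID_HW_RING_ID sentinel and restore it after a
 * successful free, as the teardown helpers later in this file do:
 *
 *	if (ring->fw_ring_id != INVALID_HW_RING_ID) {
 *		bnxt_hwrm_ring_free(bp, ring,
 *				    HWRM_RING_FREE_INPUT_RING_TYPE_RX);
 *		ring->fw_ring_id = INVALID_HW_RING_ID;
 *	}
 */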
1362
1363 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1364 {
1365         int rc = 0;
1366         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1367         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1368
1369         HWRM_PREP(req, RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
1370
1371         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1372         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1373         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1374         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1375
1376         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1377
1378         HWRM_CHECK_RESULT();
1379
1380         bp->grp_info[idx].fw_grp_id =
1381             rte_le_to_cpu_16(resp->ring_group_id);
1382
1383         HWRM_UNLOCK();
1384
1385         return rc;
1386 }
1387
1388 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1389 {
1390         int rc;
1391         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1392         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1393
1394         HWRM_PREP(req, RING_GRP_FREE, BNXT_USE_CHIMP_MB);
1395
1396         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1397
1398         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1399
1400         HWRM_CHECK_RESULT();
1401         HWRM_UNLOCK();
1402
1403         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1404         return rc;
1405 }
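
/*
 * Illustrative sketch (not part of the driver): ring groups are managed
 * one per Rx completion ring, and the bulk free path later in this file
 * skips slots that were never allocated:
 *
 *	for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
 *		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
 *			continue;
 *		rc = bnxt_hwrm_ring_grp_free(bp, idx);
 *		if (rc)
 *			return rc;
 *	}
 */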
1406
1407 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1408 {
1409         int rc = 0;
1410         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1411         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1412
1413         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1414                 return rc;
1415
1416         HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
1417
1418         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1419
1420         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1421
1422         HWRM_CHECK_RESULT();
1423         HWRM_UNLOCK();
1424
1425         return rc;
1426 }
1427
1428 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1429                                 unsigned int idx __rte_unused)
1430 {
1431         int rc;
1432         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1433         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1434
1435         HWRM_PREP(req, STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1436
1437         req.update_period_ms = rte_cpu_to_le_32(0);
1438
1439         req.stats_dma_addr =
1440             rte_cpu_to_le_64(cpr->hw_stats_map);
1441
1442         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1443
1444         HWRM_CHECK_RESULT();
1445
1446         cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
1447
1448         HWRM_UNLOCK();
1449
1450         return rc;
1451 }
1452
1453 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1454                                 unsigned int idx __rte_unused)
1455 {
1456         int rc;
1457         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1458         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1459
1460         HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
1461
1462         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1463
1464         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1465
1466         HWRM_CHECK_RESULT();
1467         HWRM_UNLOCK();
1468
1469         return rc;
1470 }
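
/*
 * Illustrative sketch (not part of the driver): a stats context id of
 * HWRM_NA_SIGNATURE means "not allocated", so teardown paths free a
 * context only when the id is valid and then put the sentinel back:
 *
 *	if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
 *		rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
 *		cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
 *	}
 */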
1471
1472 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1473 {
1474         int rc = 0, i, j;
1475         struct hwrm_vnic_alloc_input req = { 0 };
1476         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1477
1478         if (!BNXT_HAS_RING_GRPS(bp))
1479                 goto skip_ring_grps;
1480
1481         /* map ring groups to this vnic */
1482         PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1483                 vnic->start_grp_id, vnic->end_grp_id);
1484         for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
1485                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1486
1487         vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1488         vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1489         vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1490         vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1491
1492 skip_ring_grps:
1493         vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
1494                                 RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
1495         HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);
1496
1497         if (vnic->func_default)
1498                 req.flags =
1499                         rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1500         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1501
1502         HWRM_CHECK_RESULT();
1503
1504         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1505         HWRM_UNLOCK();
1506         PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1507         return rc;
1508 }
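
/*
 * Worked example for the MRU computed above, assuming the default
 * Ethernet MTU of 1500:
 *
 *	mru = 1500 + RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4)
 *	      + VLAN_TAG_SIZE (4) = 1522
 *
 * i.e. room for one maximally sized single-tagged frame. A hypothetical
 * caller pairs the alloc with a cfg and an eventual free:
 *
 *	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
 *	if (!rc)
 *		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
 *	...
 *	bnxt_hwrm_vnic_free(bp, vnic);
 */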
1509
1510 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1511                                         struct bnxt_vnic_info *vnic,
1512                                         struct bnxt_plcmodes_cfg *pmode)
1513 {
1514         int rc = 0;
1515         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1516         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1517
1518         HWRM_PREP(req, VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
1519
1520         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1521
1522         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1523
1524         HWRM_CHECK_RESULT();
1525
1526         pmode->flags = rte_le_to_cpu_32(resp->flags);
1527         /* dflt_vnic bit doesn't exist in the _cfg command */
1528         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1529         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1530         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1531         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1532
1533         HWRM_UNLOCK();
1534
1535         return rc;
1536 }
1537
1538 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1539                                        struct bnxt_vnic_info *vnic,
1540                                        struct bnxt_plcmodes_cfg *pmode)
1541 {
1542         int rc = 0;
1543         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1544         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1545
1546         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1547                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1548                 return rc;
1549         }
1550
1551         HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1552
1553         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1554         req.flags = rte_cpu_to_le_32(pmode->flags);
1555         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1556         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1557         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1558         req.enables = rte_cpu_to_le_32(
1559             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1560             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1561             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1562         );
1563
1564         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1565
1566         HWRM_CHECK_RESULT();
1567         HWRM_UNLOCK();
1568
1569         return rc;
1570 }
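
/*
 * Illustrative sketch (not part of the driver): this qcfg/cfg pair acts
 * as a save/restore bracket; bnxt_hwrm_vnic_cfg() below snapshots the
 * placement modes before reconfiguring the VNIC and writes them back
 * afterwards:
 *
 *	struct bnxt_plcmodes_cfg pmodes = { 0 };
 *
 *	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
 *	if (rc)
 *		return rc;
 *	... issue HWRM_VNIC_CFG ...
 *	rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
 */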
1571
1572 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1573 {
1574         int rc = 0;
1575         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1576         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1577         struct bnxt_plcmodes_cfg pmodes = { 0 };
1578         uint32_t ctx_enable_flag = 0;
1579         uint32_t enables = 0;
1580
1581         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1582                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1583                 return rc;
1584         }
1585
1586         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1587         if (rc)
1588                 return rc;
1589
1590         HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);
1591
1592         if (BNXT_CHIP_THOR(bp)) {
1593                 struct bnxt_rx_queue *rxq = bp->eth_dev->data->rx_queues[0];
1594                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1595                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1596
1597                 req.default_rx_ring_id =
1598                         rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
1599                 req.default_cmpl_ring_id =
1600                         rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
1601                 enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
1602                           HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
1603                 goto config_mru;
1604         }
1605
1606         /* Only RSS is supported for now; COS and LB are TBD */
1607         enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
1608         if (vnic->lb_rule != (uint16_t)HWRM_NA_SIGNATURE)
1609                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1610         if (vnic->cos_rule != (uint16_t)HWRM_NA_SIGNATURE)
1611                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1612         if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
1613                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1614                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1615         }
1616         enables |= ctx_enable_flag;
1617         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1618         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1619         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1620         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1621
1622 config_mru:
1623         req.enables = rte_cpu_to_le_32(enables);
1624         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1625         req.mru = rte_cpu_to_le_16(vnic->mru);
1626         /* Configure default VNIC only once. */
1627         if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
1628                 req.flags |=
1629                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1630                 bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
1631         }
1632         if (vnic->vlan_strip)
1633                 req.flags |=
1634                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1635         if (vnic->bd_stall)
1636                 req.flags |=
1637                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1638         if (vnic->roce_dual)
1639                 req.flags |= rte_cpu_to_le_32(
1640                         HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1641         if (vnic->roce_only)
1642                 req.flags |= rte_cpu_to_le_32(
1643                         HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1644         if (vnic->rss_dflt_cr)
1645                 req.flags |= rte_cpu_to_le_32(
1646                         HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE);
1647
1648         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1649
1650         HWRM_CHECK_RESULT();
1651         HWRM_UNLOCK();
1652
1653         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1654
1655         return rc;
1656 }
1657
1658 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1659                 int16_t fw_vf_id)
1660 {
1661         int rc = 0;
1662         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1663         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1664
1665         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1666                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1667                 return rc;
1668         }
1669         HWRM_PREP(req, VNIC_QCFG, BNXT_USE_CHIMP_MB);
1670
1671         req.enables =
1672                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1673         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1674         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1675
1676         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1677
1678         HWRM_CHECK_RESULT();
1679
1680         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1681         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1682         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1683         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1684         vnic->mru = rte_le_to_cpu_16(resp->mru);
1685         vnic->func_default = rte_le_to_cpu_32(
1686                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1687         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1688                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1689         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1690                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1691         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1692                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1693         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1694                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1695         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1696                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1697
1698         HWRM_UNLOCK();
1699
1700         return rc;
1701 }
1702
1703 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
1704                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
1705 {
1706         int rc = 0;
1707         uint16_t ctx_id;
1708         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1709         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1710                                                 bp->hwrm_cmd_resp_addr;
1711
1712         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1713
1714         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1715         HWRM_CHECK_RESULT();
1716
1717         ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1718         if (!BNXT_HAS_RING_GRPS(bp))
1719                 vnic->fw_grp_ids[ctx_idx] = ctx_id;
1720         else if (ctx_idx == 0)
1721                 vnic->rss_rule = ctx_id;
1722
1723         HWRM_UNLOCK();
1724
1725         return rc;
1726 }
1727
1728 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
1729                             struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
1730 {
1731         int rc = 0;
1732         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1733         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1734                                                 bp->hwrm_cmd_resp_addr;
1735
1736         if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
1737                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1738                 return rc;
1739         }
1740         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
1741
1742         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
1743
1744         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1745
1746         HWRM_CHECK_RESULT();
1747         HWRM_UNLOCK();
1748
1749         return rc;
1750 }
1751
1752 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1753 {
1754         int rc = 0;
1755         struct hwrm_vnic_free_input req = {.req_type = 0 };
1756         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1757
1758         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1759                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1760                 return rc;
1761         }
1762
1763         HWRM_PREP(req, VNIC_FREE, BNXT_USE_CHIMP_MB);
1764
1765         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1766
1767         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1768
1769         HWRM_CHECK_RESULT();
1770         HWRM_UNLOCK();
1771
1772         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1773         /* Configure default VNIC again if necessary. */
1774         if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
1775                 bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;
1776
1777         return rc;
1778 }
1779
1780 static int
1781 bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1782 {
1783         int i;
1784         int rc = 0;
1785         int nr_ctxs = vnic->num_lb_ctxts;
1786         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1787         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1788
1789         for (i = 0; i < nr_ctxs; i++) {
1790                 HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
1791
1792                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1793                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1794                 req.hash_mode_flags = vnic->hash_mode;
1795
1796                 req.hash_key_tbl_addr =
1797                         rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1798
1799                 req.ring_grp_tbl_addr =
1800                         rte_cpu_to_le_64(vnic->rss_table_dma_addr +
1801                                          i * HW_HASH_INDEX_SIZE);
1802                 req.ring_table_pair_index = i;
1803                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
1804
1805                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
1806                                             BNXT_USE_CHIMP_MB);
1807
1808                 HWRM_CHECK_RESULT();
1809                 HWRM_UNLOCK();
1810         }
1811
1812         return rc;
1813 }
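
/*
 * Worked example for the table slicing above: each context i hands the
 * firmware a disjoint slice of the DMA'd RSS table,
 *
 *	slice_base(i) = rss_table_dma_addr + i * HW_HASH_INDEX_SIZE
 *
 * paired with its own rss_ctx_idx taken from fw_grp_ids[i], so Thor
 * programs one ring-table/context pair per iteration.
 */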
1814
1815 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1816                            struct bnxt_vnic_info *vnic)
1817 {
1818         int rc = 0;
1819         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1820         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1821
1822         if (!vnic->rss_table)
1823                 return 0;
1824
1825         if (BNXT_CHIP_THOR(bp))
1826                 return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
1827
1828         HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
1829
1830         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1831         req.hash_mode_flags = vnic->hash_mode;
1832
1833         req.ring_grp_tbl_addr =
1834             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1835         req.hash_key_tbl_addr =
1836             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1837         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1838         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1839
1840         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1841
1842         HWRM_CHECK_RESULT();
1843         HWRM_UNLOCK();
1844
1845         return rc;
1846 }
1847
1848 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1849                         struct bnxt_vnic_info *vnic)
1850 {
1851         int rc = 0;
1852         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1853         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1854         uint16_t size;
1855
1856         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1857                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1858                 return rc;
1859         }
1860
1861         HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1862
1863         req.flags = rte_cpu_to_le_32(
1864                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1865
1866         req.enables = rte_cpu_to_le_32(
1867                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1868
1869         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1870         size -= RTE_PKTMBUF_HEADROOM;
1871
1872         req.jumbo_thresh = rte_cpu_to_le_16(size);
1873         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1874
1875         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1876
1877         HWRM_CHECK_RESULT();
1878         HWRM_UNLOCK();
1879
1880         return rc;
1881 }
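
/*
 * Worked example for the jumbo threshold above, assuming a pool created
 * with RTE_MBUF_DEFAULT_BUF_SIZE: the data room is 2048 +
 * RTE_PKTMBUF_HEADROOM (128) = 2176 bytes, so
 *
 *	jumbo_thresh = 2176 - 128 = 2048
 *
 * and any packet larger than one mbuf's usable data area is placed via
 * the aggregation ring.
 */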
1882
1883 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1884                         struct bnxt_vnic_info *vnic, bool enable)
1885 {
1886         int rc = 0;
1887         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1888         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1889
1890         if (BNXT_CHIP_THOR(bp))
1891                 return 0;
1892
1893         HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
1894
1895         if (enable) {
1896                 req.enables = rte_cpu_to_le_32(
1897                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1898                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1899                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1900                 req.flags = rte_cpu_to_le_32(
1901                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1902                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1903                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1904                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1905                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1906                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1907                 req.max_agg_segs = rte_cpu_to_le_16(5);
1908                 req.max_aggs =
1909                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1910                 req.min_agg_len = rte_cpu_to_le_32(512);
1911         }
1912         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1913
1914         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1915
1916         HWRM_CHECK_RESULT();
1917         HWRM_UNLOCK();
1918
1919         return rc;
1920 }
1921
1922 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1923 {
1924         struct hwrm_func_cfg_input req = {0};
1925         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1926         int rc;
1927
1928         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
1929
1930         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1931         req.enables = rte_cpu_to_le_32(
1932                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1933         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1934         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1935
1936         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1937         HWRM_CHECK_RESULT();
1938         HWRM_UNLOCK();
1939
1940         bp->pf.vf_info[vf].random_mac = false;
1941
1942         return rc;
1943 }
1944
1945 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1946                                   uint64_t *dropped)
1947 {
1948         int rc = 0;
1949         struct hwrm_func_qstats_input req = {.req_type = 0};
1950         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1951
1952         HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
1953
1954         req.fid = rte_cpu_to_le_16(fid);
1955
1956         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1957
1958         HWRM_CHECK_RESULT();
1959
1960         if (dropped)
1961                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1962
1963         HWRM_UNLOCK();
1964
1965         return rc;
1966 }
1967
1968 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1969                           struct rte_eth_stats *stats)
1970 {
1971         int rc = 0;
1972         struct hwrm_func_qstats_input req = {.req_type = 0};
1973         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1974
1975         HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
1976
1977         req.fid = rte_cpu_to_le_16(fid);
1978
1979         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1980
1981         HWRM_CHECK_RESULT();
1982
1983         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1984         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1985         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1986         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1987         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1988         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1989
1990         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1991         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1992         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1993         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1994         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1995         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1996
1997         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
1998         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
1999         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
2000
2001         HWRM_UNLOCK();
2002
2003         return rc;
2004 }
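
/*
 * Illustrative sketch (not part of the driver; the 0xffff "self" fid is
 * an assumption borrowed from bnxt_hwrm_func_qcfg() below): a function
 * could query its own counters with
 *
 *	struct rte_eth_stats stats = { 0 };
 *
 *	rc = bnxt_hwrm_func_qstats(bp, 0xffff, &stats);
 */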
2005
2006 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
2007 {
2008         int rc = 0;
2009         struct hwrm_func_clr_stats_input req = {.req_type = 0};
2010         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2011
2012         HWRM_PREP(req, FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
2013
2014         req.fid = rte_cpu_to_le_16(fid);
2015
2016         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2017
2018         HWRM_CHECK_RESULT();
2019         HWRM_UNLOCK();
2020
2021         return rc;
2022 }
2023
2024 /*
2025  * HWRM utility functions
2026  */
2027
2028 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
2029 {
2030         unsigned int i;
2031         int rc = 0;
2032
2033         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2034                 struct bnxt_tx_queue *txq;
2035                 struct bnxt_rx_queue *rxq;
2036                 struct bnxt_cp_ring_info *cpr;
2037
2038                 if (i >= bp->rx_cp_nr_rings) {
2039                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2040                         cpr = txq->cp_ring;
2041                 } else {
2042                         rxq = bp->rx_queues[i];
2043                         cpr = rxq->cp_ring;
2044                 }
2045
2046                 rc = bnxt_hwrm_stat_clear(bp, cpr);
2047                 if (rc)
2048                         return rc;
2049         }
2050         return 0;
2051 }
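
/*
 * Worked example for the Rx/Tx indexing used above and in the helpers
 * below: with rx_cp_nr_rings = 4 and tx_cp_nr_rings = 2, the loop index
 * maps as
 *
 *	i = 0..3 -> rx_queues[0..3]
 *	i = 4..5 -> tx_queues[i - 4] = tx_queues[0..1]
 *
 * so every completion ring is visited exactly once.
 */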
2052
2053 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
2054 {
2055         int rc;
2056         unsigned int i;
2057         struct bnxt_cp_ring_info *cpr;
2058
2059         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2060
2061                 if (i >= bp->rx_cp_nr_rings) {
2062                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
2063                 } else {
2064                         cpr = bp->rx_queues[i]->cp_ring;
2065                         if (BNXT_HAS_RING_GRPS(bp))
2066                                 bp->grp_info[i].fw_stats_ctx = -1;
2067                 }
2068                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
2069                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
2070                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
2071                         if (rc)
2072                                 return rc;
2073                 }
2074         }
2075         return 0;
2076 }
2077
2078 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
2079 {
2080         unsigned int i;
2081         int rc = 0;
2082
2083         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2084                 struct bnxt_tx_queue *txq;
2085                 struct bnxt_rx_queue *rxq;
2086                 struct bnxt_cp_ring_info *cpr;
2087
2088                 if (i >= bp->rx_cp_nr_rings) {
2089                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2090                         cpr = txq->cp_ring;
2091                 } else {
2092                         rxq = bp->rx_queues[i];
2093                         cpr = rxq->cp_ring;
2094                 }
2095
2096                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
2097
2098                 if (rc)
2099                         return rc;
2100         }
2101         return rc;
2102 }
2103
2104 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
2105 {
2106         uint16_t idx;
2107         int rc = 0;
2108
2109         if (!BNXT_HAS_RING_GRPS(bp))
2110                 return 0;
2111
2112         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
2113
2114                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
2115                         continue;
2116
2117                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
2118
2119                 if (rc)
2120                         return rc;
2121         }
2122         return rc;
2123 }
2124
2125 void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2126 {
2127         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2128
2129         bnxt_hwrm_ring_free(bp, cp_ring,
2130                             HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
2131         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2132         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2133                                      sizeof(*cpr->cp_desc_ring));
2134         cpr->cp_raw_cons = 0;
2135         cpr->valid = 0;
2136 }
2137
2138 void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2139 {
2140         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2141
2142         bnxt_hwrm_ring_free(bp, cp_ring,
2143                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
2144         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2145         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2146                         sizeof(*cpr->cp_desc_ring));
2147         cpr->cp_raw_cons = 0;
2148         cpr->valid = 0;
2149 }
2150
2151 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
2152 {
2153         struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
2154         struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
2155         struct bnxt_ring *ring = rxr->rx_ring_struct;
2156         struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
2157
2158         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2159                 bnxt_hwrm_ring_free(bp, ring,
2160                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2161                 ring->fw_ring_id = INVALID_HW_RING_ID;
2162                 if (BNXT_HAS_RING_GRPS(bp))
2163                         bp->grp_info[queue_index].rx_fw_ring_id =
2164                                                         INVALID_HW_RING_ID;
2165                 memset(rxr->rx_desc_ring, 0,
2166                        rxr->rx_ring_struct->ring_size *
2167                        sizeof(*rxr->rx_desc_ring));
2168                 memset(rxr->rx_buf_ring, 0,
2169                        rxr->rx_ring_struct->ring_size *
2170                        sizeof(*rxr->rx_buf_ring));
2171                 rxr->rx_prod = 0;
2172         }
2173         ring = rxr->ag_ring_struct;
2174         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2175                 bnxt_hwrm_ring_free(bp, ring,
2176                                     BNXT_CHIP_THOR(bp) ?
2177                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
2178                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2179                 ring->fw_ring_id = INVALID_HW_RING_ID;
2180                 memset(rxr->ag_buf_ring, 0,
2181                        rxr->ag_ring_struct->ring_size *
2182                        sizeof(*rxr->ag_buf_ring));
2183                 rxr->ag_prod = 0;
2184                 if (BNXT_HAS_RING_GRPS(bp))
2185                         bp->grp_info[queue_index].ag_fw_ring_id =
2186                                                         INVALID_HW_RING_ID;
2187         }
2188         if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
2189                 bnxt_free_cp_ring(bp, cpr);
2190                 if (rxq->nq_ring)
2191                         bnxt_free_nq_ring(bp, rxq->nq_ring);
2192         }
2193
2194         if (BNXT_HAS_RING_GRPS(bp))
2195                 bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
2196 }
2197
2198 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
2199 {
2200         unsigned int i;
2201
2202         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
2203                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
2204                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
2205                 struct bnxt_ring *ring = txr->tx_ring_struct;
2206                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
2207
2208                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2209                         bnxt_hwrm_ring_free(bp, ring,
2210                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
2211                         ring->fw_ring_id = INVALID_HW_RING_ID;
2212                         memset(txr->tx_desc_ring, 0,
2213                                         txr->tx_ring_struct->ring_size *
2214                                         sizeof(*txr->tx_desc_ring));
2215                         memset(txr->tx_buf_ring, 0,
2216                                         txr->tx_ring_struct->ring_size *
2217                                         sizeof(*txr->tx_buf_ring));
2218                         txr->tx_prod = 0;
2219                         txr->tx_cons = 0;
2220                 }
2221                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
2222                         bnxt_free_cp_ring(bp, cpr);
2223                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
2224                         if (txq->nq_ring)
2225                                 bnxt_free_nq_ring(bp, txq->nq_ring);
2226                 }
2227         }
2228
2229         for (i = 0; i < bp->rx_cp_nr_rings; i++)
2230                 bnxt_free_hwrm_rx_ring(bp, i);
2231
2232         return 0;
2233 }
2234
2235 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
2236 {
2237         uint16_t i;
2238         int rc = 0;
2239
2240         if (!BNXT_HAS_RING_GRPS(bp))
2241                 return 0;
2242
2243         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
2244                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
2245                 if (rc)
2246                         return rc;
2247         }
2248         return rc;
2249 }
2250
2251 void bnxt_free_hwrm_resources(struct bnxt *bp)
2252 {
2253         /* Release the HWRM command/response and short command buffers */
2254         rte_free(bp->hwrm_cmd_resp_addr);
2255         rte_free(bp->hwrm_short_cmd_req_addr);
2256         bp->hwrm_cmd_resp_addr = NULL;
2257         bp->hwrm_short_cmd_req_addr = NULL;
2258         bp->hwrm_cmd_resp_dma_addr = 0;
2259         bp->hwrm_short_cmd_req_dma_addr = 0;
2260 }
2261
2262 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2263 {
2264         struct rte_pci_device *pdev = bp->pdev;
2265         char type[RTE_MEMZONE_NAMESIZE];
2266
2267         snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
2268                  pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2269         bp->max_resp_len = HWRM_MAX_RESP_LEN;
2270         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2271         if (bp->hwrm_cmd_resp_addr == NULL)
2272                 return -ENOMEM;
2273         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
2274         bp->hwrm_cmd_resp_dma_addr =
2275                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
2276         if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
2277                 PMD_DRV_LOG(ERR,
2278                         "unable to map response address to physical memory\n");
2279                 return -ENOMEM;
2280         }
2281         rte_spinlock_init(&bp->hwrm_lock);
2282
2283         return 0;
2284 }
2285
2286 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2287 {
2288         struct bnxt_filter_info *filter;
2289         int rc = 0;
2290
2291         STAILQ_FOREACH(filter, &vnic->filter, next) {
2292                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2293                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2294                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2295                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2296                 else
2297                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2298                 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
2299                 /* Keep going on failure so that the remaining
2300                  * filters are still cleared and removed. */
2301         }
2302         return rc;
2303 }
2304
2305 static int
2306 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2307 {
2308         struct bnxt_filter_info *filter;
2309         struct rte_flow *flow;
2310         int rc = 0;
2311
2312         STAILQ_FOREACH(flow, &vnic->flow_list, next) {
2313                 filter = flow->filter;
2314                 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
2315                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2316                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2317                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2318                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2319                 else
2320                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2321
2322                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2323                 rte_free(flow);
2324                 /* Keep going on failure so that the remaining
2325                  * flows are still cleared and freed. */
2326         }
2327         return rc;
2328 }
2329
2330 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2331 {
2332         struct bnxt_filter_info *filter;
2333         int rc = 0;
2334
2335         STAILQ_FOREACH(filter, &vnic->filter, next) {
2336                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2337                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2338                                                      filter);
2339                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2340                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2341                                                          filter);
2342                 else
2343                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2344                                                      filter);
2345                 if (rc)
2346                         break;
2347         }
2348         return rc;
2349 }
2350
2351 void bnxt_free_tunnel_ports(struct bnxt *bp)
2352 {
2353         if (bp->vxlan_port_cnt)
2354                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2355                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2356         bp->vxlan_port = 0;
2357         if (bp->geneve_port_cnt)
2358                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2359                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2360         bp->geneve_port = 0;
2361 }
2362
2363 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2364 {
2365         int i, j;
2366
2367         if (bp->vnic_info == NULL)
2368                 return;
2369
2370         /*
2371          * Cleanup VNICs in reverse order, to make sure the L2 filter
2372          * from vnic0 is last to be cleaned up.
2373          */
2374         for (i = bp->nr_vnics - 1; i >= 0; i--) {
2375                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2376
2377                 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2378                         PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
2379                         return;
2380                 }
2381
2382                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2383
2384                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2385
2386                 if (BNXT_CHIP_THOR(bp)) {
2387                         for (j = 0; j < vnic->num_lb_ctxts; j++) {
2388                                 bnxt_hwrm_vnic_ctx_free(bp, vnic,
2389                                                         vnic->fw_grp_ids[j]);
2390                                 vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
2391                         }
2392                         vnic->num_lb_ctxts = 0;
2393                 } else {
2394                         bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
2395                         vnic->rss_rule = INVALID_HW_RING_ID;
2396                 }
2397
2398                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2399
2400                 bnxt_hwrm_vnic_free(bp, vnic);
2401
2402                 rte_free(vnic->fw_grp_ids);
2403         }
2404         /* Ring resources */
2405         bnxt_free_all_hwrm_rings(bp);
2406         bnxt_free_all_hwrm_ring_grps(bp);
2407         bnxt_free_all_hwrm_stat_ctxs(bp);
2408         bnxt_free_tunnel_ports(bp);
2409 }
2410
2411 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2412 {
2413         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2414
2415         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2416                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2417
2418         switch (conf_link_speed) {
2419         case ETH_LINK_SPEED_10M_HD:
2420         case ETH_LINK_SPEED_100M_HD:
2422                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2423         }
2424         return hw_link_duplex;
2425 }
2426
2427 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2428 {
2429         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2430 }
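
/*
 * Worked example (hypothetical configs): ETH_LINK_SPEED_AUTONEG is 0,
 * so its FIXED bit is clear and autoneg is reported as 1, while
 * (ETH_LINK_SPEED_10G | ETH_LINK_SPEED_FIXED) has the bit set and
 * yields 0, forcing the configured speed.
 */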
2431
2432 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2433 {
2434         uint16_t eth_link_speed = 0;
2435
2436         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2437                 return ETH_LINK_SPEED_AUTONEG;
2438
2439         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2440         case ETH_LINK_SPEED_100M:
2441         case ETH_LINK_SPEED_100M_HD:
2443                 eth_link_speed =
2444                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2445                 break;
2446         case ETH_LINK_SPEED_1G:
2447                 eth_link_speed =
2448                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2449                 break;
2450         case ETH_LINK_SPEED_2_5G:
2451                 eth_link_speed =
2452                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2453                 break;
2454         case ETH_LINK_SPEED_10G:
2455                 eth_link_speed =
2456                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2457                 break;
2458         case ETH_LINK_SPEED_20G:
2459                 eth_link_speed =
2460                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2461                 break;
2462         case ETH_LINK_SPEED_25G:
2463                 eth_link_speed =
2464                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2465                 break;
2466         case ETH_LINK_SPEED_40G:
2467                 eth_link_speed =
2468                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2469                 break;
2470         case ETH_LINK_SPEED_50G:
2471                 eth_link_speed =
2472                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2473                 break;
2474         case ETH_LINK_SPEED_100G:
2475                 eth_link_speed =
2476                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2477                 break;
2478         default:
2479                 PMD_DRV_LOG(ERR,
2480                         "Unsupported link speed %d; default to AUTO\n",
2481                         conf_link_speed);
2482                 break;
2483         }
2484         return eth_link_speed;
2485 }
2486
2487 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2488                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2489                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2490                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2491
2492 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2493 {
2494         uint32_t one_speed;
2495
2496         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2497                 return 0;
2498
2499         if (link_speed & ETH_LINK_SPEED_FIXED) {
2500                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2501
2502                 if (one_speed & (one_speed - 1)) {
2503                         PMD_DRV_LOG(ERR,
2504                                 "Invalid advertised speeds (%u) for port %u\n",
2505                                 link_speed, port_id);
2506                         return -EINVAL;
2507                 }
2508                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2509                         PMD_DRV_LOG(ERR,
2510                                 "Unsupported advertised speed (%u) for port %u\n",
2511                                 link_speed, port_id);
2512                         return -EINVAL;
2513                 }
2514         } else {
2515                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2516                         PMD_DRV_LOG(ERR,
2517                                 "Unsupported advertised speeds (%u) for port %u\n",
2518                                 link_speed, port_id);
2519                         return -EINVAL;
2520                 }
2521         }
2522         return 0;
2523 }
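
/*
 * Worked example for the power-of-two test above: a single fixed speed
 * such as ETH_LINK_SPEED_10G leaves exactly one bit set, so
 * one_speed & (one_speed - 1) == 0 and the request is accepted; two
 * fixed speeds OR'd together leave several bits set, the expression is
 * non-zero, and -EINVAL is returned.
 */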
2524
2525 static uint16_t
2526 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2527 {
2528         uint16_t ret = 0;
2529
2530         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2531                 if (bp->link_info.support_speeds)
2532                         return bp->link_info.support_speeds;
2533                 link_speed = BNXT_SUPPORTED_SPEEDS;
2534         }
2535
2536         if (link_speed & ETH_LINK_SPEED_100M)
2537                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2538         if (link_speed & ETH_LINK_SPEED_100M_HD)
2539                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2540         if (link_speed & ETH_LINK_SPEED_1G)
2541                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2542         if (link_speed & ETH_LINK_SPEED_2_5G)
2543                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2544         if (link_speed & ETH_LINK_SPEED_10G)
2545                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2546         if (link_speed & ETH_LINK_SPEED_20G)
2547                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2548         if (link_speed & ETH_LINK_SPEED_25G)
2549                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2550         if (link_speed & ETH_LINK_SPEED_40G)
2551                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2552         if (link_speed & ETH_LINK_SPEED_50G)
2553                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2554         if (link_speed & ETH_LINK_SPEED_100G)
2555                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2556         return ret;
2557 }
2558
2559 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2560 {
2561         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2562
2563         switch (hw_link_speed) {
2564         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2565                 eth_link_speed = ETH_SPEED_NUM_100M;
2566                 break;
2567         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2568                 eth_link_speed = ETH_SPEED_NUM_1G;
2569                 break;
2570         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2571                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2572                 break;
2573         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2574                 eth_link_speed = ETH_SPEED_NUM_10G;
2575                 break;
2576         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2577                 eth_link_speed = ETH_SPEED_NUM_20G;
2578                 break;
2579         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2580                 eth_link_speed = ETH_SPEED_NUM_25G;
2581                 break;
2582         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2583                 eth_link_speed = ETH_SPEED_NUM_40G;
2584                 break;
2585         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2586                 eth_link_speed = ETH_SPEED_NUM_50G;
2587                 break;
2588         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2589                 eth_link_speed = ETH_SPEED_NUM_100G;
2590                 break;
2591         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2592         default:
2593                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2594                         hw_link_speed);
2595                 break;
2596         }
2597         return eth_link_speed;
2598 }
2599
2600 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2601 {
2602         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2603
2604         switch (hw_link_duplex) {
2605         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2606         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2608                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2609                 break;
2610         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2611                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2612                 break;
2613         default:
2614                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2615                         hw_link_duplex);
2616                 break;
2617         }
2618         return eth_link_duplex;
2619 }
2620
2621 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2622 {
2623         int rc = 0;
2624         struct bnxt_link_info *link_info = &bp->link_info;
2625
2626         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2627         if (rc) {
2628                 PMD_DRV_LOG(ERR,
2629                         "Get link config failed with rc %d\n", rc);
2630                 goto exit;
2631         }
2632         if (link_info->link_speed)
2633                 link->link_speed =
2634                         bnxt_parse_hw_link_speed(link_info->link_speed);
2635         else
2636                 link->link_speed = ETH_SPEED_NUM_NONE;
2637         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2638         link->link_status = link_info->link_up;
2639         link->link_autoneg = link_info->auto_mode ==
2640                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2641                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2642 exit:
2643         return rc;
2644 }
2645
2646 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2647 {
2648         int rc = 0;
2649         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2650         struct bnxt_link_info link_req;
2651         uint16_t speed, autoneg;
2652
2653         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2654                 return 0;
2655
2656         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2657                         bp->eth_dev->data->port_id);
2658         if (rc)
2659                 goto error;
2660
2661         memset(&link_req, 0, sizeof(link_req));
2662         link_req.link_up = link_up;
2663         if (!link_up)
2664                 goto port_phy_cfg;
2665
2666         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2667         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2668         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2669         /* Autoneg can be done only when the FW allows */
2670         if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2671                                 bp->link_info.force_link_speed)) {
2672                 link_req.phy_flags |=
2673                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2674                 link_req.auto_link_speed_mask =
2675                         bnxt_parse_eth_link_speed_mask(bp,
2676                                                        dev_conf->link_speeds);
2677         } else {
2678                 if (bp->link_info.phy_type ==
2679                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2680                     bp->link_info.phy_type ==
2681                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2682                     bp->link_info.media_type ==
2683                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2684                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2685                         return -EINVAL;
2686                 }
2687
2688                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2689                 /* If user wants a particular speed try that first. */
2690                 if (speed)
2691                         link_req.link_speed = speed;
2692                 else if (bp->link_info.force_link_speed)
2693                         link_req.link_speed = bp->link_info.force_link_speed;
2694                 else
2695                         link_req.link_speed = bp->link_info.auto_link_speed;
2696         }
2697         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2698         link_req.auto_pause = bp->link_info.auto_pause;
2699         link_req.force_pause = bp->link_info.force_pause;
2700
2701 port_phy_cfg:
2702         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2703         if (rc) {
2704                 PMD_DRV_LOG(ERR,
2705                         "Set link config failed with rc %d\n", rc);
2706         }
2707
2708 error:
2709         return rc;
2710 }
2711
2712 /* JIRA 22088 */
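/*
 * FUNC_QCFG: query the current configuration of this function. Fills in
 * the default VLAN, the multi-host and trusted-VF flags, and the NPAR
 * partition type; when 'mtu' is non-NULL the firmware-configured MTU is
 * returned as well.
 */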
2713 int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
2714 {
2715         struct hwrm_func_qcfg_input req = {0};
2716         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2717         uint16_t flags;
2718         int rc = 0;
2719
2720         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2721         req.fid = rte_cpu_to_le_16(0xffff);
2722
2723         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2724
2725         HWRM_CHECK_RESULT();
2726
2727         /* Hard-coded 0xfff VLAN ID mask */
2728         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2729         flags = rte_le_to_cpu_16(resp->flags);
2730         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2731                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2732
2733         if (BNXT_VF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
2734                 bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
2735                 PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
2736         } else if (BNXT_VF(bp) &&
2737                    !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
2738                 bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
2739                 PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
2740         }
2741
2742         if (mtu)
2743                 *mtu = rte_le_to_cpu_16(resp->mtu);
2744
2745         switch (resp->port_partition_type) {
2746         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2747         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2748         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2749                 /* FALLTHROUGH */
2750                 bp->port_partition_type = resp->port_partition_type;
2751                 break;
2752         default:
2753                 bp->port_partition_type = 0;
2754                 break;
2755         }
2756
2757         HWRM_UNLOCK();
2758
2759         return rc;
2760 }
2761
2762 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2763                                    struct hwrm_func_qcaps_output *qcaps)
2764 {
2765         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2766         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2767                sizeof(qcaps->mac_address));
2768         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2769         qcaps->max_rx_rings = fcfg->num_rx_rings;
2770         qcaps->max_tx_rings = fcfg->num_tx_rings;
2771         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2772         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2773         qcaps->max_vfs = 0;
2774         qcaps->first_vf_id = 0;
2775         qcaps->max_vnics = fcfg->num_vnics;
2776         qcaps->max_decap_records = 0;
2777         qcaps->max_encap_records = 0;
2778         qcaps->max_tx_wm_flows = 0;
2779         qcaps->max_tx_em_flows = 0;
2780         qcaps->max_rx_wm_flows = 0;
2781         qcaps->max_rx_em_flows = 0;
2782         qcaps->max_flow_id = 0;
2783         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2784         qcaps->max_sp_tx_rings = 0;
2785         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2786 }
2787
2788 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2789 {
2790         struct hwrm_func_cfg_input req = {0};
2791         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2792         uint32_t enables;
2793         int rc;
2794
2795         enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2796                   HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2797                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2798                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2799                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2800                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2801                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2802                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2803                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;
2804
2805         if (BNXT_HAS_RING_GRPS(bp)) {
2806                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
2807                 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2808         } else if (BNXT_HAS_NQ(bp)) {
2809                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
2810                 req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
2811         }
2812
2813         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2814         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2815         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
2816                                    RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
2817                                    BNXT_NUM_VLANS);
2818         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2819         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2820         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2821         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2822         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2823         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2824         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2825         req.fid = rte_cpu_to_le_16(0xffff);
2826         req.enables = rte_cpu_to_le_32(enables);
2827
2828         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2829
2830         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2831
2832         HWRM_CHECK_RESULT();
2833         HWRM_UNLOCK();
2834
2835         return rc;
2836 }
2837
2838 static void populate_vf_func_cfg_req(struct bnxt *bp,
2839                                      struct hwrm_func_cfg_input *req,
2840                                      int num_vfs)
2841 {
2842         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2843                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2844                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2845                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2846                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2847                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2848                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2849                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2850                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2851                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2852
2853         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
2854                                     RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
2855                                     BNXT_NUM_VLANS);
2856         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
2857                                     RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
2858                                     BNXT_NUM_VLANS);
2859         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2860                                                 (num_vfs + 1));
2861         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2862         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2863                                                (num_vfs + 1));
2864         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2865         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2866         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2867         /* TODO: For now, do not support VMDq/RFS on VFs. */
2868         req->num_vnics = rte_cpu_to_le_16(1);
2869         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2870                                                  (num_vfs + 1));
2871 }
2872
2873 static void add_random_mac_if_needed(struct bnxt *bp,
2874                                      struct hwrm_func_cfg_input *cfg_req,
2875                                      int vf)
2876 {
2877         struct rte_ether_addr mac;
2878
2879         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2880                 return;
2881
2882         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2883                 cfg_req->enables |=
2884                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2885                 rte_eth_random_addr(cfg_req->dflt_mac_addr);
2886                 bp->pf.vf_info[vf].random_mac = true;
2887         } else {
2888                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
2889                         RTE_ETHER_ADDR_LEN);
2890         }
2891 }
2892
2893 static void reserve_resources_from_vf(struct bnxt *bp,
2894                                       struct hwrm_func_cfg_input *cfg_req,
2895                                       int vf)
2896 {
2897         struct hwrm_func_qcaps_input req = {0};
2898         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2899         int rc;
2900
2901         /* Get the actual allocated values now */
2902         HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
2903         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2904         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2905
2906         if (rc) {
2907                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2908                 copy_func_cfg_to_qcaps(cfg_req, resp);
2909         } else if (resp->error_code) {
2910                 rc = rte_le_to_cpu_16(resp->error_code);
2911                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2912                 copy_func_cfg_to_qcaps(cfg_req, resp);
2913         }
2914
2915         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2916         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2917         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2918         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2919         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2920         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2921         /*
2922          * TODO: While not supporting VMDq with VFs, max_vnics is always
2923          * forced to 1 in this case
2924          */
2925         //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2926         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2927
2928         HWRM_UNLOCK();
2929 }
2930
2931 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2932 {
2933         struct hwrm_func_qcfg_input req = {0};
2934         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2935         int rc;
2936
2937         /* Query the VF's currently configured VLAN */
2938         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2939         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2940         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2941         HWRM_CHECK_RESULT();
2942         rc = rte_le_to_cpu_16(resp->vlan);
2943
2944         HWRM_UNLOCK();
2945
2946         return rc;
2947 }
2948
2949 static int update_pf_resource_max(struct bnxt *bp)
2950 {
2951         struct hwrm_func_qcfg_input req = {0};
2952         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2953         int rc;
2954
2955         /* And copy the allocated numbers into the pf struct */
2956         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2957         req.fid = rte_cpu_to_le_16(0xffff);
2958         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2959         HWRM_CHECK_RESULT();
2960
2961         /* Only TX ring value reflects actual allocation? TODO */
2962         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2963         bp->pf.evb_mode = resp->evb_mode;
2964
2965         HWRM_UNLOCK();
2966
2967         return rc;
2968 }
2969
2970 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2971 {
2972         int rc;
2973
2974         if (!BNXT_PF(bp)) {
2975                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2976                 return -EINVAL;
2977         }
2978
2979         rc = bnxt_hwrm_func_qcaps(bp);
2980         if (rc)
2981                 return rc;
2982
2983         bp->pf.func_cfg_flags &=
2984                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2985                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2986         bp->pf.func_cfg_flags |=
2987                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2988         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2989         rc = __bnxt_hwrm_func_qcaps(bp);
2990         return rc;
2991 }
2992
2993 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2994 {
2995         struct hwrm_func_cfg_input req = {0};
2996         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2997         int i;
2998         size_t sz;
2999         int rc = 0;
3000         size_t req_buf_sz;
3001
3002         if (!BNXT_PF(bp)) {
3003                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3004                 return -EINVAL;
3005         }
3006
3007         rc = bnxt_hwrm_func_qcaps(bp);
3008
3009         if (rc)
3010                 return rc;
3011
3012         bp->pf.active_vfs = num_vfs;
3013
3014         /*
3015          * First, configure the PF to only use one TX ring.  This ensures that
3016          * there are enough rings for all VFs.
3017          *
3018          * If we don't do this, when we call func_alloc() later, we will lock
3019          * extra rings to the PF that won't be available during func_cfg() of
3020          * the VFs.
3021          *
3022          * This has been fixed with firmware versions above 20.6.54.
3023          */
3024         bp->pf.func_cfg_flags &=
3025                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3026                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3027         bp->pf.func_cfg_flags |=
3028                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
3029         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
3030         if (rc)
3031                 return rc;
3032
3033         /*
3034          * Now, create and register a buffer to hold forwarded VF requests
3035          */
3036         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
3037         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
3038                 page_roundup(req_buf_sz));
3039         if (bp->pf.vf_req_buf == NULL) {
3040                 rc = -ENOMEM;
3041                 goto error_free;
3042         }
3043         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
3044                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
3045         for (i = 0; i < num_vfs; i++)
3046                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
3047                                         (i * HWRM_MAX_REQ_LEN);
3048
3049         rc = bnxt_hwrm_func_buf_rgtr(bp);
3050         if (rc)
3051                 goto error_free;
3052
3053         populate_vf_func_cfg_req(bp, &req, num_vfs);
3054
3055         bp->pf.active_vfs = 0;
3056         for (i = 0; i < num_vfs; i++) {
3057                 add_random_mac_if_needed(bp, &req, i);
3058
3059                 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3060                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
3061                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
3062                 rc = bnxt_hwrm_send_message(bp,
3063                                             &req,
3064                                             sizeof(req),
3065                                             BNXT_USE_CHIMP_MB);
3066
3067                 /* Clear enable flag for next pass */
3068                 req.enables &= ~rte_cpu_to_le_32(
3069                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3070
3071                 if (rc || resp->error_code) {
3072                         PMD_DRV_LOG(ERR,
3073                                 "Failed to initialize VF %d\n", i);
3074                         PMD_DRV_LOG(ERR,
3075                                 "Not all VFs available. (%d, %d)\n",
3076                                 rc, resp->error_code);
3077                         HWRM_UNLOCK();
3078                         break;
3079                 }
3080
3081                 HWRM_UNLOCK();
3082
3083                 reserve_resources_from_vf(bp, &req, i);
3084                 bp->pf.active_vfs++;
3085                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
3086         }
3087
3088         /*
3089          * Now configure the PF to use "the rest" of the resources.
3090          * We use STD_TX_RING_MODE here, which limits the number of TX
3091          * rings but allows QoS to function properly. Without it, the PF
3092          * rings would not honor bandwidth settings.
3093          */
3094         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
3095         if (rc)
3096                 goto error_free;
3097
3098         rc = update_pf_resource_max(bp);
3099         if (rc)
3100                 goto error_free;
3101
3102         return rc;
3103
3104 error_free:
3105         bnxt_hwrm_func_buf_unrgtr(bp);
3106         return rc;
3107 }
3108
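/*
 * Usage sketch (illustrative only): the expected SR-IOV bring-up order
 * under the constraints described above. The caller name is hypothetical.
 */
static int __rte_unused example_sriov_configure(struct bnxt *bp, int num_vfs)
{
	/* No VFs requested: reclaim every resource for the PF alone. */
	if (num_vfs == 0)
		return bnxt_hwrm_allocate_pf_only(bp);

	/* Otherwise carve resources across the PF and 'num_vfs' VFs. */
	return bnxt_hwrm_allocate_vfs(bp, num_vfs);
}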
3109 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
3110 {
3111         struct hwrm_func_cfg_input req = {0};
3112         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3113         int rc;
3114
3115         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3116
3117         req.fid = rte_cpu_to_le_16(0xffff);
3118         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
3119         req.evb_mode = bp->pf.evb_mode;
3120
3121         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3122         HWRM_CHECK_RESULT();
3123         HWRM_UNLOCK();
3124
3125         return rc;
3126 }
3127
3128 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
3129                                 uint8_t tunnel_type)
3130 {
3131         struct hwrm_tunnel_dst_port_alloc_input req = {0};
3132         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3133         int rc = 0;
3134
3135         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
3136         req.tunnel_type = tunnel_type;
3137         req.tunnel_dst_port_val = port;
3138         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3139         HWRM_CHECK_RESULT();
3140
3141         switch (tunnel_type) {
3142         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
3143                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
3144                 bp->vxlan_port = port;
3145                 break;
3146         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
3147                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
3148                 bp->geneve_port = port;
3149                 break;
3150         default:
3151                 break;
3152         }
3153
3154         HWRM_UNLOCK();
3155
3156         return rc;
3157 }
3158
3159 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
3160                                 uint8_t tunnel_type)
3161 {
3162         struct hwrm_tunnel_dst_port_free_input req = {0};
3163         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
3164         int rc = 0;
3165
3166         HWRM_PREP(req, TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
3167
3168         req.tunnel_type = tunnel_type;
3169         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
3170         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3171
3172         HWRM_CHECK_RESULT();
3173         HWRM_UNLOCK();
3174
3175         return rc;
3176 }
3177
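/*
 * Usage sketch (illustrative only): programming a VXLAN UDP destination
 * port. The byte swap mirrors how the ethdev-level callers in this driver
 * pass the port value; the wrapper name is an assumption.
 */
static int __rte_unused example_add_vxlan_port(struct bnxt *bp,
					       uint16_t udp_port)
{
	return bnxt_hwrm_tunnel_dst_port_alloc(bp,
			rte_cpu_to_be_16(udp_port),
			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
}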
3178 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
3179                                         uint32_t flags)
3180 {
3181         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3182         struct hwrm_func_cfg_input req = {0};
3183         int rc;
3184
3185         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3186
3187         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3188         req.flags = rte_cpu_to_le_32(flags);
3189         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3190
3191         HWRM_CHECK_RESULT();
3192         HWRM_UNLOCK();
3193
3194         return rc;
3195 }
3196
3197 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
3198 {
3199         uint32_t *flag = flagp;
3200
3201         vnic->flags = *flag;
3202 }
3203
3204 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3205 {
3206         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
3207 }
3208
3209 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
3210 {
3211         int rc = 0;
3212         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
3213         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
3214
3215         HWRM_PREP(req, FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
3216
3217         req.req_buf_num_pages = rte_cpu_to_le_16(1);
3218         req.req_buf_page_size = rte_cpu_to_le_16(
3219                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
3220         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
3221         req.req_buf_page_addr0 =
3222                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
3223         if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
3224                 PMD_DRV_LOG(ERR,
3225                         "unable to map buffer address to physical memory\n");
3226                 return -ENOMEM;
3227         }
3228
3229         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3230
3231         HWRM_CHECK_RESULT();
3232         HWRM_UNLOCK();
3233
3234         return rc;
3235 }
3236
3237 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
3238 {
3239         int rc = 0;
3240         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
3241         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
3242
3243         if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
3244                 return 0;
3245
3246         HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
3247
3248         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3249
3250         HWRM_CHECK_RESULT();
3251         HWRM_UNLOCK();
3252
3253         return rc;
3254 }
3255
3256 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
3257 {
3258         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3259         struct hwrm_func_cfg_input req = {0};
3260         int rc;
3261
3262         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3263
3264         req.fid = rte_cpu_to_le_16(0xffff);
3265         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
3266         req.enables = rte_cpu_to_le_32(
3267                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3268         req.async_event_cr = rte_cpu_to_le_16(
3269                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3270         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3271
3272         HWRM_CHECK_RESULT();
3273         HWRM_UNLOCK();
3274
3275         return rc;
3276 }
3277
3278 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
3279 {
3280         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3281         struct hwrm_func_vf_cfg_input req = {0};
3282         int rc;
3283
3284         HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
3285
3286         req.enables = rte_cpu_to_le_32(
3287                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3288         req.async_event_cr = rte_cpu_to_le_16(
3289                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3290         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3291
3292         HWRM_CHECK_RESULT();
3293         HWRM_UNLOCK();
3294
3295         return rc;
3296 }
3297
3298 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
3299 {
3300         struct hwrm_func_cfg_input req = {0};
3301         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3302         uint16_t dflt_vlan, fid;
3303         uint32_t func_cfg_flags;
3304         int rc = 0;
3305
3306         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3307
3308         if (is_vf) {
3309                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
3310                 fid = bp->pf.vf_info[vf].fid;
3311                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
3312         } else {
3313                 fid = 0xffff;
3314                 func_cfg_flags = bp->pf.func_cfg_flags;
3315                 dflt_vlan = bp->vlan;
3316         }
3317
3318         req.flags = rte_cpu_to_le_32(func_cfg_flags);
3319         req.fid = rte_cpu_to_le_16(fid);
3320         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3321         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
3322
3323         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3324
3325         HWRM_CHECK_RESULT();
3326         HWRM_UNLOCK();
3327
3328         return rc;
3329 }
3330
3331 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
3332                         uint16_t max_bw, uint16_t enables)
3333 {
3334         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3335         struct hwrm_func_cfg_input req = {0};
3336         int rc;
3337
3338         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3339
3340         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3341         req.enables |= rte_cpu_to_le_32(enables);
3342         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3343         req.max_bw = rte_cpu_to_le_32(max_bw);
3344         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3345
3346         HWRM_CHECK_RESULT();
3347         HWRM_UNLOCK();
3348
3349         return rc;
3350 }
3351
3352 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
3353 {
3354         struct hwrm_func_cfg_input req = {0};
3355         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3356         int rc = 0;
3357
3358         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3359
3360         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3361         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3362         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3363         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
3364
3365         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3366
3367         HWRM_CHECK_RESULT();
3368         HWRM_UNLOCK();
3369
3370         return rc;
3371 }
3372
3373 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3374 {
3375         int rc;
3376
3377         if (BNXT_PF(bp))
3378                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3379         else
3380                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3381
3382         return rc;
3383 }
3384
3385 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3386                               void *encaped, size_t ec_size)
3387 {
3388         int rc = 0;
3389         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3390         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3391
3392         if (ec_size > sizeof(req.encap_request))
3393                 return -1;
3394
3395         HWRM_PREP(req, REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
3396
3397         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3398         memcpy(req.encap_request, encaped, ec_size);
3399
3400         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3401
3402         HWRM_CHECK_RESULT();
3403         HWRM_UNLOCK();
3404
3405         return rc;
3406 }
3407
3408 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3409                                        struct rte_ether_addr *mac)
3410 {
3411         struct hwrm_func_qcfg_input req = {0};
3412         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3413         int rc;
3414
3415         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
3416
3417         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3418         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3419
3420         HWRM_CHECK_RESULT();
3421
3422         memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
3423
3424         HWRM_UNLOCK();
3425
3426         return rc;
3427 }
3428
3429 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3430                             void *encaped, size_t ec_size)
3431 {
3432         int rc = 0;
3433         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3434         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3435
3436         if (ec_size > sizeof(req.encap_request))
3437                 return -1;
3438
3439         HWRM_PREP(req, EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
3440
3441         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3442         memcpy(req.encap_request, encaped, ec_size);
3443
3444         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3445
3446         HWRM_CHECK_RESULT();
3447         HWRM_UNLOCK();
3448
3449         return rc;
3450 }
3451
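/*
 * Usage sketch (illustrative only): a PF servicing a forwarded VF request
 * either executes it or rejects it, based on some validation policy. The
 * 'allowed' predicate is hypothetical.
 */
static int __rte_unused example_handle_fwd_req(struct bnxt *bp,
					       uint16_t vf_target_id,
					       void *fwd_cmd, size_t len,
					       bool allowed)
{
	if (allowed)
		return bnxt_hwrm_exec_fwd_resp(bp, vf_target_id, fwd_cmd, len);
	return bnxt_hwrm_reject_fwd_resp(bp, vf_target_id, fwd_cmd, len);
}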
3452 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3453                          struct rte_eth_stats *stats, uint8_t rx)
3454 {
3455         int rc = 0;
3456         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3457         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3458
3459         HWRM_PREP(req, STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
3460
3461         req.stat_ctx_id = rte_cpu_to_le_32(cid);
3462
3463         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3464
3465         HWRM_CHECK_RESULT();
3466
3467         if (rx) {
3468                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3469                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3470                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3471                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3472                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3473                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3474                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3475                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3476         } else {
3477                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3478                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3479                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3480                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3481                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3482                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3483         }
3484
3486         HWRM_UNLOCK();
3487
3488         return rc;
3489 }
3490
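/*
 * Usage sketch (illustrative only): filling one RX queue's slot in an
 * rte_eth_stats structure. The stat context id would normally come from
 * the queue's completion ring; it is passed in here to keep the sketch
 * self-contained.
 */
static int __rte_unused example_query_rxq_stats(struct bnxt *bp,
						uint32_t stat_ctx_id,
						int qidx,
						struct rte_eth_stats *stats)
{
	/* rx=1 fills q_ipackets/q_ibytes/q_errors for queue 'qidx'. */
	return bnxt_hwrm_ctx_qstats(bp, stat_ctx_id, qidx, stats, 1);
}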
3491 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3492 {
3493         struct hwrm_port_qstats_input req = {0};
3494         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3495         struct bnxt_pf_info *pf = &bp->pf;
3496         int rc;
3497
3498         HWRM_PREP(req, PORT_QSTATS, BNXT_USE_CHIMP_MB);
3499
3500         req.port_id = rte_cpu_to_le_16(pf->port_id);
3501         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3502         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3503         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3504
3505         HWRM_CHECK_RESULT();
3506         HWRM_UNLOCK();
3507
3508         return rc;
3509 }
3510
3511 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3512 {
3513         struct hwrm_port_clr_stats_input req = {0};
3514         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3515         struct bnxt_pf_info *pf = &bp->pf;
3516         int rc;
3517
3518         /* Not allowed on NS2 device, NPAR, MultiHost, VF */
3519         if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
3520             BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
3521                 return 0;
3522
3523         HWRM_PREP(req, PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
3524
3525         req.port_id = rte_cpu_to_le_16(pf->port_id);
3526         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3527
3528         HWRM_CHECK_RESULT();
3529         HWRM_UNLOCK();
3530
3531         return rc;
3532 }
3533
3534 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3535 {
3536         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3537         struct hwrm_port_led_qcaps_input req = {0};
3538         int rc;
3539
3540         if (BNXT_VF(bp))
3541                 return 0;
3542
3543         HWRM_PREP(req, PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
3544         req.port_id = bp->pf.port_id;
3545         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3546
3547         HWRM_CHECK_RESULT();
3548
3549         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3550                 unsigned int i;
3551
3552                 bp->num_leds = resp->num_leds;
3553                 memcpy(bp->leds, &resp->led0_id,
3554                         sizeof(bp->leds[0]) * bp->num_leds);
3555                 for (i = 0; i < bp->num_leds; i++) {
3556                         struct bnxt_led_info *led = &bp->leds[i];
3557
3558                         uint16_t caps = led->led_state_caps;
3559
3560                         if (!led->led_group_id ||
3561                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3562                                 bp->num_leds = 0;
3563                                 break;
3564                         }
3565                 }
3566         }
3567
3568         HWRM_UNLOCK();
3569
3570         return rc;
3571 }
3572
3573 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3574 {
3575         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3576         struct hwrm_port_led_cfg_input req = {0};
3577         struct bnxt_led_cfg *led_cfg;
3578         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3579         uint16_t duration = 0;
3580         int rc, i;
3581
3582         if (!bp->num_leds || BNXT_VF(bp))
3583                 return -EOPNOTSUPP;
3584
3585         HWRM_PREP(req, PORT_LED_CFG, BNXT_USE_CHIMP_MB);
3586
3587         if (led_on) {
3588                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3589                 duration = rte_cpu_to_le_16(500);
3590         }
3591         req.port_id = bp->pf.port_id;
3592         req.num_leds = bp->num_leds;
3593         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3594         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3595                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3596                 led_cfg->led_id = bp->leds[i].led_id;
3597                 led_cfg->led_state = led_state;
3598                 led_cfg->led_blink_on = duration;
3599                 led_cfg->led_blink_off = duration;
3600                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3601         }
3602
3603         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3604
3605         HWRM_CHECK_RESULT();
3606         HWRM_UNLOCK();
3607
3608         return rc;
3609 }
3610
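/*
 * Usage sketch (illustrative only): port identification by LED blink.
 * Passing true selects the 500ms BLINKALT pattern programmed above;
 * false restores the default LED state. The wrapper name is assumed.
 */
static int __rte_unused example_identify_port(struct bnxt *bp, bool blink)
{
	return bnxt_hwrm_port_led_cfg(bp, blink);
}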
3611 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3612                                uint32_t *length)
3613 {
3614         int rc;
3615         struct hwrm_nvm_get_dir_info_input req = {0};
3616         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3617
3618         HWRM_PREP(req, NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
3619
3620         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3621
3622         HWRM_CHECK_RESULT();
3623
3624         *entries = rte_le_to_cpu_32(resp->entries);
3625         *length = rte_le_to_cpu_32(resp->entry_length);
3626
3627         HWRM_UNLOCK();
3628         return rc;
3629 }
3630
3631 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3632 {
3633         int rc;
3634         uint32_t dir_entries;
3635         uint32_t entry_length;
3636         uint8_t *buf;
3637         size_t buflen;
3638         rte_iova_t dma_handle;
3639         struct hwrm_nvm_get_dir_entries_input req = {0};
3640         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3641
3642         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3643         if (rc != 0)
3644                 return rc;
3645
3646         *data++ = dir_entries;
3647         *data++ = entry_length;
3648         len -= 2;
3649         memset(data, 0xff, len);
3650
3651         buflen = dir_entries * entry_length;
3652         buf = rte_malloc("nvm_dir", buflen, 0);
3653         if (buf == NULL)
3654                 return -ENOMEM;
3655         rte_mem_lock_page(buf);
3656         dma_handle = rte_mem_virt2iova(buf);
3657         if (dma_handle == RTE_BAD_IOVA) {
                     rte_free(buf);
3658                 PMD_DRV_LOG(ERR,
3659                         "unable to map response address to physical memory\n");
3660                 return -ENOMEM;
3661         }
3662         HWRM_PREP(req, NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
3663         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3664         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3665
3666         if (rc == 0)
3667                 memcpy(data, buf, len > buflen ? buflen : len);
3668
3669         rte_free(buf);
3670         HWRM_CHECK_RESULT();
3671         HWRM_UNLOCK();
3672
3673         return rc;
3674 }
3675
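/*
 * Usage sketch (illustrative only): query the directory geometry, then
 * read the first item. Buffer management is simplified and hypothetical.
 */
static int __rte_unused example_read_first_nvm_item(struct bnxt *bp,
						    uint8_t *buf,
						    uint32_t buflen)
{
	uint32_t entries, entry_len;
	int rc;

	rc = bnxt_hwrm_nvm_get_dir_info(bp, &entries, &entry_len);
	if (rc)
		return rc;
	if (entries == 0)
		return -ENOENT;

	/* Read up to 'buflen' bytes of directory index 0, offset 0. */
	return bnxt_hwrm_get_nvram_item(bp, 0, 0, buflen, buf);
}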
3676 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3677                              uint32_t offset, uint32_t length,
3678                              uint8_t *data)
3679 {
3680         int rc;
3681         uint8_t *buf;
3682         rte_iova_t dma_handle;
3683         struct hwrm_nvm_read_input req = {0};
3684         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3685
3686         buf = rte_malloc("nvm_item", length, 0);
3687         if (!buf)
3688                 return -ENOMEM;
3689         rte_mem_lock_page(buf);
3690
3691         dma_handle = rte_mem_virt2iova(buf);
3692         if (dma_handle == RTE_BAD_IOVA) {
                     rte_free(buf);
3693                 PMD_DRV_LOG(ERR,
3694                         "unable to map response address to physical memory\n");
3695                 return -ENOMEM;
3696         }
3697         HWRM_PREP(req, NVM_READ, BNXT_USE_CHIMP_MB);
3698         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3699         req.dir_idx = rte_cpu_to_le_16(index);
3700         req.offset = rte_cpu_to_le_32(offset);
3701         req.len = rte_cpu_to_le_32(length);
3702         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3703         if (rc == 0)
3704                 memcpy(data, buf, length);
3705
3706         rte_free(buf);
3707         HWRM_CHECK_RESULT();
3708         HWRM_UNLOCK();
3709
3710         return rc;
3711 }
3712
3713 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3714 {
3715         int rc;
3716         struct hwrm_nvm_erase_dir_entry_input req = {0};
3717         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3718
3719         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
3720         req.dir_idx = rte_cpu_to_le_16(index);
3721         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3722         HWRM_CHECK_RESULT();
3723         HWRM_UNLOCK();
3724
3725         return rc;
3726 }
3727
3728
3729 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3730                           uint16_t dir_ordinal, uint16_t dir_ext,
3731                           uint16_t dir_attr, const uint8_t *data,
3732                           size_t data_len)
3733 {
3734         int rc;
3735         struct hwrm_nvm_write_input req = {0};
3736         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3737         rte_iova_t dma_handle;
3738         uint8_t *buf;
3739
3740         buf = rte_malloc("nvm_write", data_len, 0);
3741         if (!buf)
3742                 return -ENOMEM;
3743         rte_mem_lock_page(buf);
3744
3745         dma_handle = rte_mem_virt2iova(buf);
3746         if (dma_handle == RTE_BAD_IOVA) {
                     rte_free(buf);
3747                 PMD_DRV_LOG(ERR,
3748                         "unable to map response address to physical memory\n");
3749                 return -ENOMEM;
3750         }
3751         memcpy(buf, data, data_len);
3752
3753         HWRM_PREP(req, NVM_WRITE, BNXT_USE_CHIMP_MB);
3754
3755         req.dir_type = rte_cpu_to_le_16(dir_type);
3756         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3757         req.dir_ext = rte_cpu_to_le_16(dir_ext);
3758         req.dir_attr = rte_cpu_to_le_16(dir_attr);
3759         req.dir_data_length = rte_cpu_to_le_32(data_len);
3760         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3761
3762         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3763
3764         rte_free(buf);
3765         HWRM_CHECK_RESULT();
3766         HWRM_UNLOCK();
3767
3768         return rc;
3769 }
3770
3771 static void
3772 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3773 {
3774         uint32_t *count = cbdata;
3775
3776         *count = *count + 1;
3777 }
3778
3779 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3780                                      struct bnxt_vnic_info *vnic __rte_unused)
3781 {
3782         return 0;
3783 }
3784
3785 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3786 {
3787         uint32_t count = 0;
3788
3789         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3790             &count, bnxt_vnic_count_hwrm_stub);
3791
3792         return count;
3793 }
3794
3795 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3796                                         uint16_t *vnic_ids)
3797 {
3798         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3799         struct hwrm_func_vf_vnic_ids_query_output *resp =
3800                                                 bp->hwrm_cmd_resp_addr;
3801         int rc;
3802
3803         /* First query all VNIC ids */
3804         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
3805
3806         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3807         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3808         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3809
3810         if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
3811                 HWRM_UNLOCK();
3812                 PMD_DRV_LOG(ERR,
3813                 "unable to map VNIC ID table address to physical memory\n");
3814                 return -ENOMEM;
3815         }
3816         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3817         HWRM_CHECK_RESULT();
3818         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3819
3820         HWRM_UNLOCK();
3821
3822         return rc;
3823 }
3824
3825 /*
3826  * This function queries the VNIC IDs for a specified VF. It then calls
3827  * the vnic_cb to update the necessary field in vnic_info with cbdata.
3828  * Then it calls the hwrm_cb function to program this new VNIC configuration.
3829  */
3830 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3831         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3832         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3833 {
3834         struct bnxt_vnic_info vnic;
3835         int rc = 0;
3836         int i, num_vnic_ids;
3837         uint16_t *vnic_ids;
3838         size_t vnic_id_sz;
3839         size_t sz;
3840
3841         /* First query all VNIC ids */
3842         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3843         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3844                         RTE_CACHE_LINE_SIZE);
3845         if (vnic_ids == NULL)
3846                 return -ENOMEM;
3847
3848         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3849                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3850
3851         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3852
3853         if (num_vnic_ids < 0) {
                     rte_free(vnic_ids);
3854                 return num_vnic_ids;
             }
3855
3856         /* Retrieve each VNIC, apply the callback, then reprogram it */
3857
3858         for (i = 0; i < num_vnic_ids; i++) {
3859                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3860                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3861                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3862                 if (rc)
3863                         break;
3864                 if (vnic.mru <= 4)      /* Indicates unallocated */
3865                         continue;
3866
3867                 vnic_cb(&vnic, cbdata);
3868
3869                 rc = hwrm_cb(bp, &vnic);
3870                 if (rc)
3871                         break;
3872         }
3873
3874         rte_free(vnic_ids);
3875
3876         return rc;
3877 }
3878
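/*
 * Usage sketch (illustrative only): combining the callback defined above
 * with the per-VNIC programming helper to apply a new rx-mask flag set to
 * every VNIC owned by a VF. The flag value itself is hypothetical.
 */
static int __rte_unused example_set_vf_rx_flags(struct bnxt *bp, uint16_t vf,
						uint32_t flags)
{
	return bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
			vf_vnic_set_rxmask_cb, &flags,
			bnxt_set_rx_mask_no_vlan);
}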
3879 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3880                                               bool on)
3881 {
3882         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3883         struct hwrm_func_cfg_input req = {0};
3884         int rc;
3885
3886         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3887
3888         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3889         req.enables |= rte_cpu_to_le_32(
3890                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3891         req.vlan_antispoof_mode = on ?
3892                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3893                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3894         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3895
3896         HWRM_CHECK_RESULT();
3897         HWRM_UNLOCK();
3898
3899         return rc;
3900 }
3901
3902 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3903 {
3904         struct bnxt_vnic_info vnic;
3905         uint16_t *vnic_ids;
3906         size_t vnic_id_sz;
3907         int num_vnic_ids, i;
3908         size_t sz;
3909         int rc;
3910
3911         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3912         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3913                         RTE_CACHE_LINE_SIZE);
3914         if (vnic_ids == NULL)
3915                 return -ENOMEM;
3916
3917         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3918                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3919
3920         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3921         if (rc <= 0)
3922                 goto exit;
3923         num_vnic_ids = rc;
3924
3925         /*
3926          * Loop through to find the default VNIC ID.
3927          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3928          * by sending the hwrm_func_qcfg command to the firmware.
3929          */
3930         for (i = 0; i < num_vnic_ids; i++) {
3931                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3932                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3933                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3934                                         bp->pf.first_vf_id + vf);
3935                 if (rc)
3936                         goto exit;
3937                 if (vnic.func_default) {
3938                         rte_free(vnic_ids);
3939                         return vnic.fw_vnic_id;
3940                 }
3941         }
3942         /* Could not find a default VNIC. */
3943         PMD_DRV_LOG(ERR, "No default VNIC\n");
             rc = -EINVAL;
3944 exit:
3945         rte_free(vnic_ids);
3946         return rc;
3947 }
3948
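/*
 * Program an exact-match (EM) flow for 'filter', directing matches to
 * 'dst_id'. UINT64_MAX is the "unallocated" sentinel for firmware filter
 * ids throughout this file, so any previously programmed EM filter is
 * freed first. EM flow commands are steered to the Kong mailbox when the
 * device provides one (BNXT_USE_KONG).
 */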
3949 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3950                          uint16_t dst_id,
3951                          struct bnxt_filter_info *filter)
3952 {
3953         int rc = 0;
3954         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3955         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3956         uint32_t enables = 0;
3957
3958         if (filter->fw_em_filter_id != UINT64_MAX)
3959                 bnxt_hwrm_clear_em_filter(bp, filter);
3960
3961         HWRM_PREP(req, CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
3962
3963         req.flags = rte_cpu_to_le_32(filter->flags);
3964
3965         enables = filter->enables |
3966               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3967         req.dst_id = rte_cpu_to_le_16(dst_id);
3968
3969         if (filter->ip_addr_type) {
3970                 req.ip_addr_type = filter->ip_addr_type;
3971                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3972         }
3973         if (enables &
3974             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3975                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3976         if (enables &
3977             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3978                 memcpy(req.src_macaddr, filter->src_macaddr,
3979                        RTE_ETHER_ADDR_LEN);
3980         if (enables &
3981             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3982                 memcpy(req.dst_macaddr, filter->dst_macaddr,
3983                        RTE_ETHER_ADDR_LEN);
3984         if (enables &
3985             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3986                 req.ovlan_vid = filter->l2_ovlan;
3987         if (enables &
3988             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3989                 req.ivlan_vid = filter->l2_ivlan;
3990         if (enables &
3991             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3992                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3993         if (enables &
3994             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3995                 req.ip_protocol = filter->ip_protocol;
3996         if (enables &
3997             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3998                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3999         if (enables &
4000             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
4001                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
4002         if (enables &
4003             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
4004                 req.src_port = rte_cpu_to_be_16(filter->src_port);
4005         if (enables &
4006             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
4007                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
4008         if (enables &
4009             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4010                 req.mirror_vnic_id = filter->mirror_vnic_id;
4011
4012         req.enables = rte_cpu_to_le_32(enables);
4013
4014         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4015
4016         HWRM_CHECK_RESULT();
4017
4018         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
4019         HWRM_UNLOCK();
4020
4021         return rc;
4022 }
4023
4024 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
4025 {
4026         int rc = 0;
4027         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
4028         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
4029
4030         if (filter->fw_em_filter_id == UINT64_MAX)
4031                 return 0;
4032
4033         PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
4034         HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
4035
4036         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
4037
4038         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4039
4040         HWRM_CHECK_RESULT();
4041         HWRM_UNLOCK();
4042
4043         filter->fw_em_filter_id = UINT64_MAX;
4044         filter->fw_l2_filter_id = UINT64_MAX;
4045
4046         return 0;
4047 }
4048
4049 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
4050                          uint16_t dst_id,
4051                          struct bnxt_filter_info *filter)
4052 {
4053         int rc = 0;
4054         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
4055         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
4056                                                 bp->hwrm_cmd_resp_addr;
4057         uint32_t enables = 0;
4058
4059         if (filter->fw_ntuple_filter_id != UINT64_MAX)
4060                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
4061
4062         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
4063
4064         req.flags = rte_cpu_to_le_32(filter->flags);
4065
4066         enables = filter->enables |
4067               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
4068         req.dst_id = rte_cpu_to_le_16(dst_id);
4069
4070
4071         if (filter->ip_addr_type) {
4072                 req.ip_addr_type = filter->ip_addr_type;
4073                 enables |=
4074                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4075         }
4076         if (enables &
4077             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4078                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4079         if (enables &
4080             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4081                 memcpy(req.src_macaddr, filter->src_macaddr,
4082                        RTE_ETHER_ADDR_LEN);
4083         //if (enables &
4084         //    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
4085         //        memcpy(req.dst_macaddr, filter->dst_macaddr,
4086         //               RTE_ETHER_ADDR_LEN);
4087         if (enables &
4088             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
4089                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4090         if (enables &
4091             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4092                 req.ip_protocol = filter->ip_protocol;
4093         if (enables &
4094             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4095                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
4096         if (enables &
4097             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
4098                 req.src_ipaddr_mask[0] =
4099                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
4100         if (enables &
4101             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
4102                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
4103         if (enables &
4104             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
4105                 req.dst_ipaddr_mask[0] =
4106                         rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
4107         if (enables &
4108             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
4109                 req.src_port = rte_cpu_to_le_16(filter->src_port);
4110         if (enables &
4111             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
4112                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
4113         if (enables &
4114             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
4115                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
4116         if (enables &
4117             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
4118                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
4119         if (enables &
4120             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4121                 req.mirror_vnic_id = rte_cpu_to_le_16(filter->mirror_vnic_id);
4122
4123         req.enables = rte_cpu_to_le_32(enables);
4124
4125         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4126
4127         HWRM_CHECK_RESULT();
4128
4129         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
4130         HWRM_UNLOCK();
4131
4132         return rc;
4133 }
4134
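/*
 * Free a previously allocated ntuple filter in the FW. This is a no-op when
 * no FW filter handle is held (fw_ntuple_filter_id == UINT64_MAX); on success
 * the cached handle is invalidated so the filter can safely be re-allocated.
 */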
4135 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
4136                                 struct bnxt_filter_info *filter)
4137 {
4138         int rc = 0;
4139         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
4140         struct hwrm_cfa_ntuple_filter_free_output *resp =
4141                                                 bp->hwrm_cmd_resp_addr;
4142
4143         if (filter->fw_ntuple_filter_id == UINT64_MAX)
4144                 return 0;
4145
4146         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
4147
4148         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
4149
4150         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4151
4152         HWRM_CHECK_RESULT();
4153         HWRM_UNLOCK();
4154
4155         filter->fw_ntuple_filter_id = UINT64_MAX;
4156
4157         return 0;
4158 }
4159
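/*
 * Thor RSS redirection tables are programmed per ring-table-pair context:
 * each slot in a context holds an (Rx ring, completion ring) FW ring ID
 * pair, and stopped queues are skipped so traffic is steered only to
 * active rings.
 */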
4160 static int
4161 bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4162 {
4163         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4164         uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
4165         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
4166         struct bnxt_rx_queue **rxqs = bp->rx_queues;
4167         uint16_t *ring_tbl = vnic->rss_table;
4168         int nr_ctxs = vnic->num_lb_ctxts;
4169         int max_rings = bp->rx_nr_rings;
4170         int i, j, k, cnt;
4171         int rc = 0;
4172
4173         for (i = 0, k = 0; i < nr_ctxs; i++) {
4174                 struct bnxt_rx_ring_info *rxr;
4175                 struct bnxt_cp_ring_info *cpr;
4176
4177                 HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
4178
4179                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
4180                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
4181                 req.hash_mode_flags = vnic->hash_mode;
4182
4183                 req.ring_grp_tbl_addr =
4184                     rte_cpu_to_le_64(vnic->rss_table_dma_addr +
4185                                      i * BNXT_RSS_ENTRIES_PER_CTX_THOR *
4186                                      2 * sizeof(*ring_tbl));
4187                 req.hash_key_tbl_addr =
4188                     rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
4189
4190                 req.ring_table_pair_index = i;
4191                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
4192
4193                 for (j = 0; j < BNXT_RSS_ENTRIES_PER_CTX_THOR; j++) {
4194                         uint16_t ring_id;
4195
4196                         /* Find next active ring. */
4197                         for (cnt = 0; cnt < max_rings; cnt++) {
4198                                 if (rx_queue_state[k] !=
4199                                                 RTE_ETH_QUEUE_STATE_STOPPED)
4200                                         break;
4201                                 if (++k == max_rings)
4202                                         k = 0;
4203                         }
4204
4205                         /* Return if no rings are active. */
4206                         if (cnt == max_rings)
4207                                 return 0;
4208
4209                         /* Add rx/cp ring pair to RSS table. */
4210                         rxr = rxqs[k]->rx_ring;
4211                         cpr = rxqs[k]->cp_ring;
4212
4213                         ring_id = rxr->rx_ring_struct->fw_ring_id;
4214                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4215                         ring_id = cpr->cp_ring_struct->fw_ring_id;
4216                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4217
4218                         if (++k == max_rings)
4219                                 k = 0;
4220                 }
4221                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
4222                                             BNXT_USE_CHIMP_MB);
4223
4224                 HWRM_CHECK_RESULT();
4225                 HWRM_UNLOCK();
4226         }
4227
4228         return rc;
4229 }
4230
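/*
 * Populate the VNIC RSS redirection table. Thor chips take the
 * ring-table-pair path above; older chips fill HW_HASH_INDEX_SIZE entries
 * with ring group IDs, skipping groups marked INVALID_HW_RING_ID.
 */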
4231 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4232 {
4233         unsigned int rss_idx, fw_idx, i;
4234
4235         if (!(vnic->rss_table && vnic->hash_type))
4236                 return 0;
4237
4238         if (BNXT_CHIP_THOR(bp))
4239                 return bnxt_vnic_rss_configure_thor(bp, vnic);
4240
4241         /*
4242          * Fill the RSS hash & redirection table with
4243          * ring group ids for all VNICs
4244          */
4245         for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
4246                 rss_idx++, fw_idx++) {
4247                 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
4248                         fw_idx %= bp->rx_cp_nr_rings;
4249                         if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
4250                                 break;
4251                         fw_idx++;
4252                 }
4253                 if (i == bp->rx_cp_nr_rings)
4254                         return 0;
4255                 vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
4256         }
4257         return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
4258 }
4259
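/*
 * Translate the cached interrupt coalescing parameters in struct bnxt_coal
 * into a RING_CMPL_RING_CFG_AGGINT_PARAMS request. All multi-byte values are
 * converted to little-endian as required by the HWRM interface.
 */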
4260 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
4261         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
4262 {
4263         uint16_t flags;
4264
4265         req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
4266
4267         /* This is a 6-bit value and must not be 0, or we will get a non-stop IRQ */
4268         req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
4269
4270         /* This is a 6-bit value and must not be 0, or we will get a non-stop IRQ */
4271         req->num_cmpl_dma_aggr_during_int =
4272                 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
4273
4274         req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
4275
4276         /* min timer set to 1/2 of interrupt timer */
4277         req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
4278
4279         /* buf timer set to 1/4 of interrupt timer */
4280         req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
4281
4282         req->cmpl_aggr_dma_tmr_during_int =
4283                 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
4284
4285         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4286                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4287         req->flags = rte_cpu_to_le_16(flags);
4288 }
4289
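/*
 * On Thor, query the FW for the supported aggregation/interrupt limits
 * (RING_AGGINT_QCAPS) and build the coalescing request from the reported
 * maximum DMA aggregation count and minimum DMA aggregation timer.
 */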
4290 static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp,
4291                 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
4292 {
4293         struct hwrm_ring_aggint_qcaps_input req = {0};
4294         struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4295         uint32_t enables;
4296         uint16_t flags;
4297         int rc;
4298
4299         HWRM_PREP(req, RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
4300         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4301         HWRM_CHECK_RESULT();
4302
4303         agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
4304         agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;
4305
4306         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4307                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4308         agg_req->flags = rte_cpu_to_le_16(flags);
4309         enables =
4310          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
4311          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
4312         agg_req->enables = rte_cpu_to_le_32(enables);
4313
4314         HWRM_UNLOCK();
4315         return rc;
4316 }
4317
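/*
 * Apply interrupt coalescing settings to a completion ring. Illustrative
 * usage only (the field value below is a made-up placeholder, not a
 * recommended default):
 *
 *	struct bnxt_coal coal = { .num_cmpl_aggr_int = 2 };
 *	rc = bnxt_hwrm_set_ring_coal(bp, &coal,
 *				     cpr->cp_ring_struct->fw_ring_id);
 */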
4318 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
4319                         struct bnxt_coal *coal, uint16_t ring_id)
4320 {
4321         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
4322         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
4323                                                 bp->hwrm_cmd_resp_addr;
4324         int rc;
4325
4326         /* Set ring coalesce parameters only for Thor-based and Stratus devices */
4327         if (BNXT_CHIP_THOR(bp)) {
4328                 if (bnxt_hwrm_set_coal_params_thor(bp, &req))
4329                         return -1;
4330         } else if (bnxt_stratus_device(bp)) {
4331                 bnxt_hwrm_set_coal_params(coal, &req);
4332         } else {
4333                 return 0;
4334         }
4335
4336         HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS, BNXT_USE_CHIMP_MB);
4337         req.ring_id = rte_cpu_to_le_16(ring_id);
4338         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4339         HWRM_CHECK_RESULT();
4340         HWRM_UNLOCK();
4341         return 0;
4342 }
4343
4344 #define BNXT_RTE_MEMZONE_FLAG  (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
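/*
 * Query the FW backing store requirements (Thor only, HWRM 1.9.2+, PF only)
 * and cache them in bp->ctx. Entry counts and entry sizes come back in the
 * QCAPS response; one TQM context-page struct is set up per queue
 * (BNXT_MAX_Q).
 */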
4345 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
4346 {
4347         struct hwrm_func_backing_store_qcaps_input req = {0};
4348         struct hwrm_func_backing_store_qcaps_output *resp =
4349                 bp->hwrm_cmd_resp_addr;
4350         int rc;
4351
4352         if (!BNXT_CHIP_THOR(bp) ||
4353             bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
4354             BNXT_VF(bp) ||
4355             bp->ctx)
4356                 return 0;
4357
4358         HWRM_PREP(req, FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
4359         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4360         HWRM_CHECK_RESULT_SILENT();
4361
4362         if (!rc) {
4363                 struct bnxt_ctx_pg_info *ctx_pg;
4364                 struct bnxt_ctx_mem_info *ctx;
4365                 int total_alloc_len;
4366                 int i;
4367
4368                 total_alloc_len = sizeof(*ctx);
4369                 ctx = rte_malloc("bnxt_ctx_mem", total_alloc_len,
4370                                  RTE_CACHE_LINE_SIZE);
4371                 if (!ctx) {
4372                         rc = -ENOMEM;
4373                         goto ctx_err;
4374                 }
4375                 memset(ctx, 0, total_alloc_len);
4376
4377                 ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
4378                                     sizeof(*ctx_pg) * BNXT_MAX_Q,
4379                                     RTE_CACHE_LINE_SIZE);
4380                 if (!ctx_pg) {
4381                         rc = -ENOMEM;
4382                         goto ctx_err;
4383                 }
4384                 for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++)
4385                         ctx->tqm_mem[i] = ctx_pg;
4386
4387                 bp->ctx = ctx;
4388                 ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
4389                 ctx->qp_min_qp1_entries =
4390                         rte_le_to_cpu_16(resp->qp_min_qp1_entries);
4391                 ctx->qp_max_l2_entries =
4392                         rte_le_to_cpu_16(resp->qp_max_l2_entries);
4393                 ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
4394                 ctx->srq_max_l2_entries =
4395                         rte_le_to_cpu_16(resp->srq_max_l2_entries);
4396                 ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
4397                 ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
4398                 ctx->cq_max_l2_entries =
4399                         rte_le_to_cpu_16(resp->cq_max_l2_entries);
4400                 ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
4401                 ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
4402                 ctx->vnic_max_vnic_entries =
4403                         rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
4404                 ctx->vnic_max_ring_table_entries =
4405                         rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
4406                 ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
4407                 ctx->stat_max_entries =
4408                         rte_le_to_cpu_32(resp->stat_max_entries);
4409                 ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
4410                 ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
4411                 ctx->tqm_min_entries_per_ring =
4412                         rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
4413                 ctx->tqm_max_entries_per_ring =
4414                         rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
4415                 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
4416                 if (!ctx->tqm_entries_multiple)
4417                         ctx->tqm_entries_multiple = 1;
4418                 ctx->mrav_max_entries =
4419                         rte_le_to_cpu_32(resp->mrav_max_entries);
4420                 ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
4421                 ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
4422                 ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
4423         } else {
4424                 rc = 0;
4425         }
4426 ctx_err:
4427         HWRM_UNLOCK();
4428         return rc;
4429 }
4430
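/*
 * Program the FW backing store from bp->ctx. Only the regions selected in
 * 'enables' are configured; the TQM loop below walks the per-ring request
 * fields (num_entries/pg_attr/pg_dir) as parallel arrays, shifting the
 * enable bit once per ring.
 */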
4431 int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
4432 {
4433         struct hwrm_func_backing_store_cfg_input req = {0};
4434         struct hwrm_func_backing_store_cfg_output *resp =
4435                 bp->hwrm_cmd_resp_addr;
4436         struct bnxt_ctx_mem_info *ctx = bp->ctx;
4437         struct bnxt_ctx_pg_info *ctx_pg;
4438         uint32_t *num_entries;
4439         uint64_t *pg_dir;
4440         uint8_t *pg_attr;
4441         uint32_t ena;
4442         int i, rc;
4443
4444         if (!ctx)
4445                 return 0;
4446
4447         HWRM_PREP(req, FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
4448         req.enables = rte_cpu_to_le_32(enables);
4449
4450         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
4451                 ctx_pg = &ctx->qp_mem;
4452                 req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4453                 req.qp_num_qp1_entries =
4454                         rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
4455                 req.qp_num_l2_entries =
4456                         rte_cpu_to_le_16(ctx->qp_max_l2_entries);
4457                 req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
4458                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4459                                       &req.qpc_pg_size_qpc_lvl,
4460                                       &req.qpc_page_dir);
4461         }
4462
4463         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
4464                 ctx_pg = &ctx->srq_mem;
4465                 req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4466                 req.srq_num_l2_entries =
4467                                  rte_cpu_to_le_16(ctx->srq_max_l2_entries);
4468                 req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
4469                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4470                                       &req.srq_pg_size_srq_lvl,
4471                                       &req.srq_page_dir);
4472         }
4473
4474         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
4475                 ctx_pg = &ctx->cq_mem;
4476                 req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4477                 req.cq_num_l2_entries =
4478                                 rte_cpu_to_le_16(ctx->cq_max_l2_entries);
4479                 req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
4480                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4481                                       &req.cq_pg_size_cq_lvl,
4482                                       &req.cq_page_dir);
4483         }
4484
4485         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
4486                 ctx_pg = &ctx->vnic_mem;
4487                 req.vnic_num_vnic_entries =
4488                         rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
4489                 req.vnic_num_ring_table_entries =
4490                         rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
4491                 req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
4492                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4493                                       &req.vnic_pg_size_vnic_lvl,
4494                                       &req.vnic_page_dir);
4495         }
4496
4497         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
4498                 ctx_pg = &ctx->stat_mem;
4499                 req.stat_num_entries = rte_cpu_to_le_32(ctx->stat_max_entries);
4500                 req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
4501                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4502                                       &req.stat_pg_size_stat_lvl,
4503                                       &req.stat_page_dir);
4504         }
4505
4506         req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
4507         num_entries = &req.tqm_sp_num_entries;
4508         pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
4509         pg_dir = &req.tqm_sp_page_dir;
4510         ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
4511         for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
4512                 if (!(enables & ena))
4513                         continue;
4516
4517                 ctx_pg = ctx->tqm_mem[i];
4518                 *num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4519                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
4520         }
4521
4522         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4523         HWRM_CHECK_RESULT();
4524         HWRM_UNLOCK();
4525
4526         return rc;
4527 }
4528
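/*
 * Fetch extended port statistics into the pre-mapped DMA buffers. The
 * actual sizes returned by the FW are cached so stats readers know how much
 * of each buffer is valid; both sizes are zeroed on failure.
 */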
4529 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
4530 {
4531         struct hwrm_port_qstats_ext_input req = {0};
4532         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
4533         struct bnxt_pf_info *pf = &bp->pf;
4534         int rc;
4535
4536         if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
4537               bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
4538                 return 0;
4539
4540         HWRM_PREP(req, PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
4541
4542         req.port_id = rte_cpu_to_le_16(pf->port_id);
4543         if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
4544                 req.tx_stat_host_addr =
4545                         rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
4546                 req.tx_stat_size =
4547                         rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
4548         }
4549         if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
4550                 req.rx_stat_host_addr =
4551                         rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
4552                 req.rx_stat_size =
4553                         rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
4554         }
4555         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4556
4557         if (rc) {
4558                 bp->fw_rx_port_stats_ext_size = 0;
4559                 bp->fw_tx_port_stats_ext_size = 0;
4560         } else {
4561                 bp->fw_rx_port_stats_ext_size =
4562                         rte_le_to_cpu_16(resp->rx_stat_size);
4563                 bp->fw_tx_port_stats_ext_size =
4564                         rte_le_to_cpu_16(resp->tx_stat_size);
4565         }
4566
4567         HWRM_CHECK_RESULT();
4568         HWRM_UNLOCK();
4569
4570         return rc;
4571 }
4572
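/*
 * Tunnel redirect helpers: allocate/free a CFA tunnel-type redirect to this
 * function (dest_fid = bp->fw_fid), query the redirected tunnel-type mask,
 * and look up the destination FID for a given tunnel type.
 */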
4573 int
4574 bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
4575 {
4576         struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0};
4577         struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp =
4578                 bp->hwrm_cmd_resp_addr;
4579         int rc = 0;
4580
4581         HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
4582         req.tunnel_type = type;
4583         req.dest_fid = bp->fw_fid;
4584         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4585         HWRM_CHECK_RESULT();
4586
4587         HWRM_UNLOCK();
4588
4589         return rc;
4590 }
4591
4592 int
4593 bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
4594 {
4595         struct hwrm_cfa_redirect_tunnel_type_free_input req = {0};
4596         struct hwrm_cfa_redirect_tunnel_type_free_output *resp =
4597                 bp->hwrm_cmd_resp_addr;
4598         int rc = 0;
4599
4600         HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
4601         req.tunnel_type = type;
4602         req.dest_fid = bp->fw_fid;
4603         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4604         HWRM_CHECK_RESULT();
4605
4606         HWRM_UNLOCK();
4607
4608         return rc;
4609 }
4610
4611 int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
4612 {
4613         struct hwrm_cfa_redirect_query_tunnel_type_input req = {0};
4614         struct hwrm_cfa_redirect_query_tunnel_type_output *resp =
4615                 bp->hwrm_cmd_resp_addr;
4616         int rc = 0;
4617
4618         HWRM_PREP(req, CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
4619         req.src_fid = bp->fw_fid;
4620         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4621         HWRM_CHECK_RESULT();
4622
4623         if (type)
4624                 *type = rte_le_to_cpu_32(resp->tunnel_mask);
4625
4626         HWRM_UNLOCK();
4627
4628         return rc;
4629 }
4630
4631 int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
4632                                    uint16_t *dst_fid)
4633 {
4634         struct hwrm_cfa_redirect_tunnel_type_info_input req = {0};
4635         struct hwrm_cfa_redirect_tunnel_type_info_output *resp =
4636                 bp->hwrm_cmd_resp_addr;
4637         int rc = 0;
4638
4639         HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
4640         req.src_fid = bp->fw_fid;
4641         req.tunnel_type = tun_type;
4642         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4643         HWRM_CHECK_RESULT();
4644
4645         if (dst_fid)
4646                 *dst_fid = rte_le_to_cpu_16(resp->dest_fid);
4647
4648         PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", rte_le_to_cpu_16(resp->dest_fid));
4649
4650         HWRM_UNLOCK();
4651
4652         return rc;
4653 }
4654
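/*
 * VF only: ask the FW to set bp->mac_addr as the default MAC address for
 * this function; on success the new address is mirrored into
 * bp->dflt_mac_addr.
 */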
4655 int bnxt_hwrm_set_mac(struct bnxt *bp)
4656 {
4657         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4658         struct hwrm_func_vf_cfg_input req = {0};
4659         int rc = 0;
4660
4661         if (!BNXT_VF(bp))
4662                 return 0;
4663
4664         HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
4665
4666         req.enables =
4667                 rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
4668         memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
4669
4670         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4671
4672         HWRM_CHECK_RESULT();
4673
4674         memcpy(bp->dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
4675         HWRM_UNLOCK();
4676
4677         return rc;
4678 }
4679
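/*
 * Notify the FW of interface state changes (FUNC_DRV_IF_CHANGE) so it can
 * manage resources while the port is down. The response flags indicate
 * whether a hot FW reset happened in the meantime, which the caller must
 * handle before reusing stale HW resources.
 */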
4680 int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
4681 {
4682         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
4683         struct hwrm_func_drv_if_change_input req = {0};
4684         uint32_t flags;
4685         int rc;
4686
4687         if (!(bp->flags & BNXT_FLAG_FW_CAP_IF_CHANGE))
4688                 return 0;
4689
4690         /* Do not issue FUNC_DRV_IF_CHANGE during reset recovery: if it is
4691          * sent with the "up" flag cleared before FUNC_DRV_UNRGTR, the FW
4692          * resets before FUNC_DRV_UNRGTR is processed.
4693          */
4694         if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
4695                 return 0;
4696
4697         HWRM_PREP(req, FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);
4698
4699         if (up)
4700                 req.flags =
4701                 rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP);
4702
4703         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4704
4705         HWRM_CHECK_RESULT();
4706         flags = rte_le_to_cpu_32(resp->flags);
4707         HWRM_UNLOCK();
4708
4709         if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
4710                 PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
4711                 bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
4712         }
4713
4714         return 0;
4715 }
4716
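/*
 * Retrieve the FW error recovery configuration: the recovery mode (host- or
 * CO-CPU-driven), polling intervals (the FW reports units of 100ms; they are
 * stored here in ms), the health/heartbeat status register locations, and
 * the reset register sequence to replay during recovery.
 */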
4717 int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
4718 {
4719         struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4720         struct bnxt_error_recovery_info *info = bp->recovery_info;
4721         struct hwrm_error_recovery_qcfg_input req = {0};
4722         uint32_t flags = 0;
4723         unsigned int i;
4724         int rc;
4725
4726         /* Older FW does not have error recovery support */
4727         if (!(bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY))
4728                 return 0;
4729
4730         if (!info) {
4731                 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
4732                                    sizeof(*info), 0);
4733                 if (info == NULL)
4734                         return -ENOMEM;
4735                 bp->recovery_info = info;
4736         } else {
4737                 memset(info, 0, sizeof(*info));
4738         }
4739
4740         HWRM_PREP(req, ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
4741
4742         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4743
4744         HWRM_CHECK_RESULT();
4745
4746         flags = rte_le_to_cpu_32(resp->flags);
4747         if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST)
4748                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST;
4749         else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
4750                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU;
4751
4752         if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) &&
4753             !(bp->flags & BNXT_FLAG_KONG_MB_EN)) {
4754                 rc = -EINVAL;
4755                 goto err;
4756         }
4757
4758         /* FW returned values are in units of 100msec */
4759         info->driver_polling_freq =
4760                 rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
4761         info->master_func_wait_period =
4762                 rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
4763         info->normal_func_wait_period =
4764                 rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
4765         info->master_func_wait_period_after_reset =
4766                 rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100;
4767         info->max_bailout_time_after_reset =
4768                 rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
4769         info->status_regs[BNXT_FW_STATUS_REG] =
4770                 rte_le_to_cpu_32(resp->fw_health_status_reg);
4771         info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] =
4772                 rte_le_to_cpu_32(resp->fw_heartbeat_reg);
4773         info->status_regs[BNXT_FW_RECOVERY_CNT_REG] =
4774                 rte_le_to_cpu_32(resp->fw_reset_cnt_reg);
4775         info->status_regs[BNXT_FW_RESET_INPROG_REG] =
4776                 rte_le_to_cpu_32(resp->reset_inprogress_reg);
4777         info->reg_array_cnt =
4778                 rte_le_to_cpu_32(resp->reg_array_cnt);
4779
4780         if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) {
4781                 rc = -EINVAL;
4782                 goto err;
4783         }
4784
4785         for (i = 0; i < info->reg_array_cnt; i++) {
4786                 info->reset_reg[i] =
4787                         rte_le_to_cpu_32(resp->reset_reg[i]);
4788                 info->reset_reg_val[i] =
4789                         rte_le_to_cpu_32(resp->reset_reg_val[i]);
4790                 info->delay_after_reset[i] =
4791                         resp->delay_after_reset[i];
4792         }
4793 err:
4794         HWRM_UNLOCK();
4795
4796         /* Map the FW status registers */
4797         if (!rc)
4798                 rc = bnxt_map_fw_health_status_regs(bp);
4799
4800         if (rc) {
4801                 rte_free(bp->recovery_info);
4802                 bp->recovery_info = NULL;
4803         }
4804         return rc;
4805 }
4806
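/*
 * Request a graceful self-reset of the whole chip (PF only). The request is
 * routed to the KONG channel when enabled (BNXT_USE_KONG), since the primary
 * (ChiMP) processor may be unresponsive during error recovery.
 */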
4807 int bnxt_hwrm_fw_reset(struct bnxt *bp)
4808 {
4809         struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr;
4810         struct hwrm_fw_reset_input req = {0};
4811         int rc;
4812
4813         if (!BNXT_PF(bp))
4814                 return -EOPNOTSUPP;
4815
4816         HWRM_PREP(req, FW_RESET, BNXT_USE_KONG(bp));
4817
4818         req.embedded_proc_type =
4819                 HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
4820         req.selfrst_status =
4821                 HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
4822         req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
4823
4824         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
4825                                     BNXT_USE_KONG(bp));
4826
4827         HWRM_CHECK_RESULT();
4828         HWRM_UNLOCK();
4829
4830         return rc;
4831 }