net/bnxt: fix VF resource allocation
drivers/net/bnxt/bnxt_hwrm.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT		10000
#define HWRM_SPEC_CODE_1_8_3		0x10803
#define HWRM_VERSION_1_9_1		0x10901

struct bnxt_plcmodes_cfg {
	uint32_t	flags;
	uint16_t	jumbo_thresh;
	uint16_t	hds_offset;
	uint16_t	hds_threshold;
};

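/*
 * Return the log2 of the smallest supported buffer/page size that can
 * hold "size" bytes; sizes above 1 GB are logged as an error and fall
 * back to the largest pointer-width exponent.
 */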
static int page_getenum(size_t size)
{
	if (size <= 1 << 4)
		return 4;
	if (size <= 1 << 12)
		return 12;
	if (size <= 1 << 13)
		return 13;
	if (size <= 1 << 16)
		return 16;
	if (size <= 1 << 21)
		return 21;
	if (size <= 1 << 22)
		return 22;
	if (size <= 1 << 30)
		return 30;
	PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
	return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
	return 1 << page_getenum(size);
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * ChiMP fails the command.
 */

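/*
 * Write a request into the HWRM channel in BAR0 (using the short
 * command format when the firmware requires it), ring the doorbell at
 * offset 0x100, and poll the response buffer for the valid byte.
 * Returns 0 on success and -1 if no valid response arrives within
 * HWRM_CMD_TIMEOUT polling intervals.
 */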
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
					uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;
	uint16_t max_req_len = bp->max_req_len;
	struct hwrm_short_input short_input = { 0 };

	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

		memset(short_cmd_req, 0, bp->max_req_len);
		memcpy(short_cmd_req, req, msg_len);

		short_input.req_type = rte_cpu_to_le_16(req->req_type);
		short_input.signature = rte_cpu_to_le_16(
					HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
		short_input.size = rte_cpu_to_le_16(msg_len);
		short_input.req_addr =
			rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

		data = (uint32_t *)&short_input;
		msg_len = sizeof(short_input);

		/* Sync memory write before updating doorbell */
		rte_wmb();

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(*data, bar);
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(0, bar);
	}

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + 0x100;
	rte_write32(1, bar);

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		rte_rmb();
		if (resp->resp_len && resp->resp_len <=
				bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
			req->req_type);
		goto err_ret;
	}
	return 0;

err_ret:
	return -1;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs
 * the spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() checks the result and returns errors on failure;
 * it releases the spinlock only when it returns (i.e. on error). If a
 * function does not use the regular int return codes, HWRM_CHECK_RESULT()
 * should not be used directly; instead it should be copied and modified
 * to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type) do { \
	rte_spinlock_lock(&bp->hwrm_lock); \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(-1); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT() do { \
	if (rc) { \
		PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
	if (resp->error_code) { \
		rc = rte_le_to_cpu_16(resp->error_code); \
		if (resp->resp_len >= 16) { \
			struct hwrm_err_output *tmp_hwrm_err_op = \
						(void *)resp; \
			PMD_DRV_LOG(ERR, \
				"error %d:%d:%08x:%04x\n", \
				rc, tmp_hwrm_err_op->cmd_err, \
				rte_le_to_cpu_32(\
					tmp_hwrm_err_op->opaque_0), \
				rte_le_to_cpu_16(\
					tmp_hwrm_err_op->opaque_1)); \
		} else { \
			PMD_DRV_LOG(ERR, "error %d\n", rc); \
		} \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
} while (0)

#define HWRM_UNLOCK()		rte_spinlock_unlock(&bp->hwrm_lock)

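/*
 * Canonical call pattern (illustrative sketch only; "xx"/XX stand in
 * for a real HWRM command name):
 *
 *	struct hwrm_xx_input req = {.req_type = 0 };
 *	struct hwrm_xx_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, XX);		-- locks bp->hwrm_lock
 *	... fill req fields ...
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT();		-- returns (and unlocks) on error
 *	... read resp fields ...
 *	HWRM_UNLOCK();
 */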
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mask = 0;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
				 struct bnxt_vnic_info *vnic,
				 uint16_t vlan_count,
				 struct bnxt_vlan_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t mask = 0;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return rc;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/* FIXME: add the multicast flag once adding multicast addresses
	 * is supported by ethtool.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
	if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	if (vnic->flags & BNXT_VNIC_INFO_MCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
	if (vnic->mc_addr_cnt) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
		req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
		req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
	}
	if (vlan_table) {
		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
		req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
			 rte_mem_virt2iova(vlan_table));
		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
	}
	req.mask = rte_cpu_to_le_32(mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
			uint16_t vlan_count,
			struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
	struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
						bp->hwrm_cmd_resp_addr;

	/*
	 * Older HWRM versions did not support this command, and the
	 * set_rx_mask list was used for anti-spoof. In 1.8.0, the TX path
	 * configuration was removed from the set_rx_mask call, and this
	 * command was added.
	 *
	 * This command is also present from 1.7.8.11 and higher,
	 * as well as in 1.7.8.0.
	 */
	if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
		if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
			if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
					(11)))
				return 0;
		}
	}
	HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
	req.fid = rte_cpu_to_le_16(fid);

	req.vlan_tag_mask_tbl_addr =
		rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
			   struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_l2_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(req, CFA_L2_FILTER_FREE);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_l2_filter_id = UINT64_MAX;

	return 0;
}

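/*
 * Allocate an L2 (MAC/VLAN) receive filter steering traffic to dst_id.
 * Any filter previously held in "filter" is freed first, and the new
 * firmware filter handle is saved in filter->fw_l2_filter_id.
 */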
int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
			 uint16_t dst_id,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		    &dev_conf->rx_adv_conf.vmdq_rx_conf;
	uint32_t enables = 0;
	uint16_t j = dst_id - 1;

	/* TODO: Is there a better way to add VLANs to each VNIC in case of
	 * VMDQ?
	 */
	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
	    conf->pool_map[j].pools & (1UL << j)) {
		PMD_DRV_LOG(DEBUG,
			"Add vlan %u to vmdq pool %u\n",
			conf->pool_map[j].vlan_id, j);

		filter->l2_ivlan = conf->pool_map[j].vlan_id;
		filter->enables |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
	}

	if (filter->fw_l2_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_l2_filter(bp, filter);

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
	      HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
		req.l2_ivlan = filter->l2_ivlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
		req.l2_ivlan_mask = filter->l2_ivlan_mask;
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
		req.src_id = rte_cpu_to_le_32(filter->src_id);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
		req.src_type = filter->src_type;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
	struct hwrm_port_mac_cfg_input req = {.req_type = 0};
	struct hwrm_port_mac_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	uint32_t flags = 0;
	int rc;

	if (!ptp)
		return 0;

	HWRM_PREP(req, PORT_MAC_CFG);

	if (ptp->rx_filter)
		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
	else
		flags |=
			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
	if (ptp->tx_tstamp_en)
		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
	else
		flags |=
			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
	req.flags = rte_cpu_to_le_32(flags);
	req.enables = rte_cpu_to_le_32
		(HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
	req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	/* Check the firmware result; previously any error code in the
	 * response was silently discarded here.
	 */
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

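/*
 * Query the PTP configuration and, when the MAC exposes the timestamp
 * registers for direct access, record their offsets in a freshly
 * allocated bp->ptp_cfg.
 */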
static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
	struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*	if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
	if (ptp)
		return 0;

	HWRM_PREP(req, PORT_MAC_PTP_QCFG);

	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (!(resp->flags &
	      HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) {
		/* Release the HWRM lock before returning; the early
		 * returns here previously leaked it.
		 */
		HWRM_UNLOCK();
		return 0;
	}

	ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
	if (!ptp) {
		HWRM_UNLOCK();
		return -ENOMEM;
	}

	ptp->rx_regs[BNXT_PTP_RX_TS_L] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
	ptp->rx_regs[BNXT_PTP_RX_TS_H] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
	ptp->rx_regs[BNXT_PTP_RX_SEQ] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
	ptp->rx_regs[BNXT_PTP_RX_FIFO] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
	ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
	ptp->tx_regs[BNXT_PTP_TX_TS_L] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
	ptp->tx_regs[BNXT_PTP_TX_TS_H] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
	ptp->tx_regs[BNXT_PTP_TX_SEQ] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
	ptp->tx_regs[BNXT_PTP_TX_FIFO] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

	HWRM_UNLOCK();

	ptp->bp = bp;
	bp->ptp_cfg = ptp;

	return 0;
}

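/*
 * Query function capabilities and cache the maximum resource counts in
 * "bp". On a PF this also re-sizes the per-VF info array and the VF
 * VLAN/anti-spoof tables whenever the configured number of VFs changes.
 */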
static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t new_max_vfs;
	uint32_t flags;
	int i;

	HWRM_PREP(req, FUNC_QCAPS);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	flags = rte_le_to_cpu_32(resp->flags);
	if (BNXT_PF(bp)) {
		bp->pf.port_id = resp->port_id;
		bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		new_max_vfs = bp->pdev->max_vfs;
		if (new_max_vfs != bp->pf.max_vfs) {
			if (bp->pf.vf_info)
				rte_free(bp->pf.vf_info);
			bp->pf.vf_info = rte_malloc("bnxt_vf_info",
			    sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
			if (bp->pf.vf_info == NULL) {
				/* Don't walk a NULL array below. */
				PMD_DRV_LOG(ERR, "Failed to alloc vf info\n");
				HWRM_UNLOCK();
				return -ENOMEM;
			}
			bp->pf.max_vfs = new_max_vfs;
			for (i = 0; i < new_max_vfs; i++) {
				bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
				bp->pf.vf_info[i].vlan_table =
					rte_zmalloc("VF VLAN table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_table == NULL)
					PMD_DRV_LOG(ERR,
					"Fail to alloc VLAN table for VF %d\n",
					i);
				else
					rte_mem_lock_page(
						bp->pf.vf_info[i].vlan_table);
				bp->pf.vf_info[i].vlan_as_table =
					rte_zmalloc("VF VLAN AS table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_as_table == NULL)
					PMD_DRV_LOG(ERR,
					"Alloc VLAN AS table for VF %d fail\n",
					i);
				else
					rte_mem_lock_page(
					       bp->pf.vf_info[i].vlan_as_table);
				STAILQ_INIT(&bp->pf.vf_info[i].filter);
			}
		}
	}

	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
	memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	if (BNXT_PF(bp)) {
		if (bp->pf.max_vfs)
			bp->max_vnics = 1;
		else
			bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	} else {
		bp->max_vnics = 1;
	}
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	if (BNXT_PF(bp)) {
		bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
		if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
			bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
			PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
			HWRM_UNLOCK();
			bnxt_hwrm_ptp_qcfg(bp);
			/* The lock was released above; don't fall through
			 * to the second HWRM_UNLOCK() below.
			 */
			return rc;
		}
	}

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc;

	rc = __bnxt_hwrm_func_qcaps(bp);
	if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
		rc = bnxt_hwrm_func_resc_qcaps(bp);
		if (!rc)
			bp->flags |= BNXT_FLAG_NEW_RM;
	}

	return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

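/*
 * Register the driver with firmware: advertise the DPDK version,
 * request forwarding of the async events the PMD handles and, on a PF,
 * of the VF HWRM requests that need PF attention.
 */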
int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	if (BNXT_PF(bp)) {
		req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
		memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
		       RTE_MIN(sizeof(req.vf_req_fwd),
			       sizeof(bp->pf.vf_req_fwd)));

		/*
		 * The PF can sniff HWRM API issued by a VF. This can be set
		 * up by the Linux driver and inherited by the DPDK PF
		 * driver. Clear this HWRM sniffer list in FW because the
		 * DPDK PF driver does not support it.
		 */
		req.flags =
		rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
	}

	req.async_event_fwd[0] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
				 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
	req.async_event_fwd[1] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
				 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags |= BNXT_FLAG_REGISTERED;

	return rc;
}

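/*
 * Ask firmware to reserve rings, stat contexts and ring groups for
 * this VF based on the configured ring counts. The Rx ring count is
 * scaled by AGG_RING_MULTIPLIER so that the aggregation rings that
 * pair with the Rx rings are reserved as well.
 */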
int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_vf_cfg_input req = {0};

	HWRM_PREP(req, FUNC_VF_CFG);

	req.enables = rte_cpu_to_le_32
			(HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);

	req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
	req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
					    AGG_RING_MULTIPLIER);
	req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
	req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
					      bp->tx_nr_rings);
	req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
	return rc;
}

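/*
 * Query resource reservation limits from the new resource manager
 * (HWRM spec 1.8.3 and newer). On a VF, refresh the cached maximum
 * resource counts from the response.
 */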
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_resource_qcaps_input req = {0};

	HWRM_PREP(req, FUNC_RESOURCE_QCAPS);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (BNXT_VF(bp)) {
		bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
		bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
		bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	}

	HWRM_UNLOCK();
	return rc;
}

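/*
 * Negotiate the HWRM interface version with firmware, cache the
 * firmware version and spec code, and size the response buffer (and,
 * when required, the short-command request buffer) to match.
 */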
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t my_version;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];
	uint32_t dev_caps_cfg;

	bp->max_req_len = HWRM_MAX_REQ_LEN;
	HWRM_PREP(req, VER_GET);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
		resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
		resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
	bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
		     (resp->hwrm_fw_min_8b << 16) |
		     (resp->hwrm_fw_bld_8b << 8) |
		     resp->hwrm_fw_rsvd_8b;
	PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;

	fw_version = resp->hwrm_intf_maj_8b << 16;
	fw_version |= resp->hwrm_intf_min_8b << 8;
	fw_version |= resp->hwrm_intf_upd_8b;
	bp->hwrm_spec_code = fw_version;

	if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
		PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
		rc = -EINVAL;
		goto error;
	}

	if (my_version != fw_version) {
		PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
			PMD_DRV_LOG(INFO,
				"Firmware API version is newer than driver.\n");
			PMD_DRV_LOG(INFO,
				"The driver may be missing features.\n");
		} else {
			PMD_DRV_LOG(INFO,
				"Firmware API version is older than driver.\n");
			PMD_DRV_LOG(INFO,
				"Not all driver features may be functional.\n");
		}
	}

	if (bp->max_req_len > rte_le_to_cpu_16(resp->max_req_win_len)) {
		PMD_DRV_LOG(ERR, "Unsupported request length\n");
		rc = -EINVAL;
	}
	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
	max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
	dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

	/* Build the allocation tag up front: it is also needed by the
	 * short command buffer below, which was previously allocated
	 * with "type" uninitialized when the response length matched.
	 */
	sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
		bp->pdev->addr.domain, bp->pdev->addr.bus,
		bp->pdev->addr.devid, bp->pdev->addr.function);

	if (bp->max_resp_len != max_resp_len) {
		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
		bp->hwrm_cmd_resp_dma_addr =
			rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
		if (bp->hwrm_cmd_resp_dma_addr == 0) {
			PMD_DRV_LOG(ERR,
			"Unable to map response buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}
		bp->max_resp_len = max_resp_len;
	}

	if ((dev_caps_cfg &
		HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg &
	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
		PMD_DRV_LOG(DEBUG, "Short command supported\n");

		rte_free(bp->hwrm_short_cmd_req_addr);

		bp->hwrm_short_cmd_req_addr = rte_malloc(type,
							bp->max_req_len, 0);
		if (bp->hwrm_short_cmd_req_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
		bp->hwrm_short_cmd_req_dma_addr =
			rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
		if (bp->hwrm_short_cmd_req_dma_addr == 0) {
			rte_free(bp->hwrm_short_cmd_req_addr);
			PMD_DRV_LOG(ERR,
				"Unable to map buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}

		bp->flags |= BNXT_FLAG_SHORT_CMD;
	}

error:
	HWRM_UNLOCK();
	return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR);
	/* Convert to little-endian like every other request field. */
	req.flags = rte_cpu_to_le_32(flags);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags &= ~BNXT_FLAG_REGISTERED;

	return rc;
}

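/*
 * Configure the PHY: force the link down, or program forced speed or
 * autonegotiation (speed mask, duplex, pause) according to "conf".
 */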
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG);

	if (conf->link_up) {
		/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
		if (bp->link_info.auto_mode && conf->link_speed) {
			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
			PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
		}

		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			/* No speeds specified. Enable AutoNeg - all speeds */
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		}
		/* AutoNeg - Advertise speeds specified. */
		if (conf->auto_link_speed_mask &&
		    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
			req.auto_link_speed_mask =
				conf->auto_link_speed_mask;
			enables |=
			HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
		}

		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
		PMD_DRV_LOG(INFO, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	link_info->phy_link_status = resp->link;
	link_info->link_up =
		(link_info->phy_link_status ==
		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	link_info->duplex = resp->duplex_cfg;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->phy_type = resp->phy_type;
	link_info->media_type = resp->media_type;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	HWRM_UNLOCK();

	PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
	PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
	PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
	PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
	PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
		    link_info->auto_link_speed_mask);
	PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
		    link_info->force_link_speed);

	return rc;
}

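/*
 * Query the CoS queue configuration for the Tx path and select the
 * CoS queue id used for Tx rings: the first lossy profile on HWRM
 * 1.9.1 and newer, queue 0 otherwise.
 */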
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int i;

	HWRM_PREP(req, QUEUE_QPORTCFG);

	req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
	/* HWRM Version >= 1.9.1 */
	if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
		req.drv_qmap_cap =
			HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	HWRM_UNLOCK();

	if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
		bp->tx_cosq_id = bp->cos_queue[0].id;
	} else {
		/* iterate and find the COSq profile to use for Tx */
		for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
			if (bp->cos_queue[i].profile ==
				HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
				bp->tx_cosq_id = bp->cos_queue[i].id;
				break;
			}
		}
	}
	PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);

	return rc;
}

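/*
 * Allocate a completion, Tx or Rx ring in firmware. map_index ties the
 * ring to a doorbell slot; the firmware ring id is returned in
 * ring->fw_ring_id.
 */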
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
	int rc = 0;
	uint32_t enables = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);
	req.length = rte_cpu_to_le_32(ring->ring_size);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
			enables |=
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		break;
	default:
		PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
			ring_type);
		HWRM_UNLOCK();
		return -1;
	}
	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		/* ring_type was passed in as a RING_ALLOC input type, so
		 * match on the alloc constants rather than the free ones
		 * (the values happen to be identical).
		 */
		switch (ring_type) {
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
			PMD_DRV_LOG(ERR,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
			PMD_DRV_LOG(ERR,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
			PMD_DRV_LOG(ERR,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		default:
			PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	HWRM_UNLOCK();
	return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		HWRM_UNLOCK();

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			return rc;
		default:
			PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	HWRM_UNLOCK();
	return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
	int rc = 0;
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	bp->grp_info[idx].fw_grp_id =
	    rte_le_to_cpu_16(resp->ring_group_id);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
	return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	int rc = 0;
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return rc;

	HWRM_PREP(req, STAT_CTX_CLR_STATS);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
				unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC);

	req.update_period_ms = rte_cpu_to_le_32(0);

	req.stats_dma_addr =
	    rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
				unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

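/*
 * Allocate a VNIC in firmware and map the ring groups in
 * [start_grp_id, end_grp_id] to it; the RSS/CoS/LB context handles
 * start out invalid (HWRM_NA_SIGNATURE).
 */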
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0, i, j;
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
		vnic->start_grp_id, vnic->end_grp_id);
	for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
	vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				ETHER_CRC_LEN + VLAN_TAG_SIZE;
	HWRM_PREP(req, VNIC_ALLOC);

	if (vnic->func_default)
		req.flags =
			rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
	HWRM_UNLOCK();
	PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
	return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
					struct bnxt_vnic_info *vnic,
					struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_QCFG);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	pmode->flags = rte_le_to_cpu_32(resp->flags);
	/* dflt_vnic bit doesn't exist in the _cfg command */
	pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
	pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
	pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
	pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

	HWRM_UNLOCK();

	return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
				       struct bnxt_vnic_info *vnic,
				       struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_CFG);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.flags = rte_cpu_to_le_32(pmode->flags);
	req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
	req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
	req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
	req.enables = rte_cpu_to_le_32(
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
	);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

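/*
 * Configure a VNIC. The buffer placement modes are queried first and
 * written back afterwards so the VNIC_CFG call does not clobber them.
 */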
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t ctx_enable_flag = 0;
	struct bnxt_plcmodes_cfg pmodes;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
	if (rc)
		return rc;

	HWRM_PREP(req, VNIC_CFG);

	/* Only RSS support for now TBD: COS & LB */
	req.enables =
	    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
	if (vnic->lb_rule != 0xffff)
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
	if (vnic->cos_rule != 0xffff)
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
	if (vnic->rss_rule != 0xffff) {
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
	}
	req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
	req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
	req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
	req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
	req.mru = rte_cpu_to_le_16(vnic->mru);
	if (vnic->func_default)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
	if (vnic->vlan_strip)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
	if (vnic->bd_stall)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1394         if (vnic->roce_dual)
1395                 req.flags |= rte_cpu_to_le_32(
1396                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1397         if (vnic->roce_only)
1398                 req.flags |= rte_cpu_to_le_32(
1399                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1400         if (vnic->rss_dflt_cr)
1401                 req.flags |= rte_cpu_to_le_32(
1402                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1403
1404         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1405
1406         HWRM_CHECK_RESULT();
1407         HWRM_UNLOCK();
1408
1409         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1410
1411         return rc;
1412 }
1413
1414 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1415                 int16_t fw_vf_id)
1416 {
1417         int rc = 0;
1418         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1419         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1420
1421         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1422                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1423                 return rc;
1424         }
1425         HWRM_PREP(req, VNIC_QCFG);
1426
1427         req.enables =
1428                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1429         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1430         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1431
1432         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1433
1434         HWRM_CHECK_RESULT();
1435
1436         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1437         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1438         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1439         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1440         vnic->mru = rte_le_to_cpu_16(resp->mru);
1441         vnic->func_default = rte_le_to_cpu_32(
1442                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1443         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1444                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1445         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1446                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1447         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1448                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1449         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1450                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1451         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1452                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1453
1454         HWRM_UNLOCK();
1455
1456         return rc;
1457 }
1458
1459 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1460 {
1461         int rc = 0;
1462         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1463         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1464                                                 bp->hwrm_cmd_resp_addr;
1465
1466         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
1467
1468         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1469
1470         HWRM_CHECK_RESULT();
1471
1472         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1473         HWRM_UNLOCK();
1474         PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1475
1476         return rc;
1477 }
1478
1479 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1480 {
1481         int rc = 0;
1482         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1483         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1484                                                 bp->hwrm_cmd_resp_addr;
1485
1486         if (vnic->rss_rule == 0xffff) {
1487                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1488                 return rc;
1489         }
1490         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
1491
1492         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1493
1494         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1495
1496         HWRM_CHECK_RESULT();
1497         HWRM_UNLOCK();
1498
1499         vnic->rss_rule = INVALID_HW_RING_ID;
1500
1501         return rc;
1502 }
1503
1504 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1505 {
1506         int rc = 0;
1507         struct hwrm_vnic_free_input req = {.req_type = 0 };
1508         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1509
1510         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1511                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1512                 return rc;
1513         }
1514
1515         HWRM_PREP(req, VNIC_FREE);
1516
1517         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1518
1519         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1520
1521         HWRM_CHECK_RESULT();
1522         HWRM_UNLOCK();
1523
1524         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1525         return rc;
1526 }
1527
1528 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1529                            struct bnxt_vnic_info *vnic)
1530 {
1531         int rc = 0;
1532         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1533         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1534
1535         HWRM_PREP(req, VNIC_RSS_CFG);
1536
1537         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1538         req.hash_mode_flags = vnic->hash_mode;
1539
1540         req.ring_grp_tbl_addr =
1541             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1542         req.hash_key_tbl_addr =
1543             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1544         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1545
1546         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1547
1548         HWRM_CHECK_RESULT();
1549         HWRM_UNLOCK();
1550
1551         return rc;
1552 }
1553
1554 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1555                         struct bnxt_vnic_info *vnic)
1556 {
1557         int rc = 0;
1558         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1559         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1560         uint16_t size;
1561
1562         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1563
1564         req.flags = rte_cpu_to_le_32(
1565                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1566
1567         req.enables = rte_cpu_to_le_32(
1568                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1569
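             /* The jumbo threshold is the usable data space of one mbuf.
              * For example, the default RTE_MBUF_DEFAULT_BUF_SIZE (2176
              * bytes) minus the 128-byte RTE_PKTMBUF_HEADROOM gives a
              * 2048-byte threshold.
              */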
1570         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1571         size -= RTE_PKTMBUF_HEADROOM;
1572
1573         req.jumbo_thresh = rte_cpu_to_le_16(size);
1574         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1575
1576         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1577
1578         HWRM_CHECK_RESULT();
1579         HWRM_UNLOCK();
1580
1581         return rc;
1582 }
1583
1584 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1585                         struct bnxt_vnic_info *vnic, bool enable)
1586 {
1587         int rc = 0;
1588         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1589         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1590
1591         HWRM_PREP(req, VNIC_TPA_CFG);
1592
1593         if (enable) {
1594                 req.enables = rte_cpu_to_le_32(
1595                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1596                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1597                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1598                 req.flags = rte_cpu_to_le_32(
1599                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1600                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1601                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1602                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1603                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1604                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1605                 req.max_agg_segs = rte_cpu_to_le_16(5);
1606                 req.max_aggs =
1607                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1608                 req.min_agg_len = rte_cpu_to_le_32(512);
1609         }
1610         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1611
1612         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1613
1614         HWRM_CHECK_RESULT();
1615         HWRM_UNLOCK();
1616
1617         return rc;
1618 }
1619
1620 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1621 {
1622         struct hwrm_func_cfg_input req = {0};
1623         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1624         int rc;
1625
1626         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1627         req.enables = rte_cpu_to_le_32(
1628                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1629         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1630         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1631
1632         HWRM_PREP(req, FUNC_CFG);
1633
1634         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1635         HWRM_CHECK_RESULT();
1636         HWRM_UNLOCK();
1637
1638         bp->pf.vf_info[vf].random_mac = false;
1639
1640         return rc;
1641 }
1642
1643 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1644                                   uint64_t *dropped)
1645 {
1646         int rc = 0;
1647         struct hwrm_func_qstats_input req = {.req_type = 0};
1648         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1649
1650         HWRM_PREP(req, FUNC_QSTATS);
1651
1652         req.fid = rte_cpu_to_le_16(fid);
1653
1654         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1655
1656         HWRM_CHECK_RESULT();
1657
1658         if (dropped)
1659                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1660
1661         HWRM_UNLOCK();
1662
1663         return rc;
1664 }
1665
1666 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1667                           struct rte_eth_stats *stats)
1668 {
1669         int rc = 0;
1670         struct hwrm_func_qstats_input req = {.req_type = 0};
1671         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1672
1673         HWRM_PREP(req, FUNC_QSTATS);
1674
1675         req.fid = rte_cpu_to_le_16(fid);
1676
1677         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1678
1679         HWRM_CHECK_RESULT();
1680
1681         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1682         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1683         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1684         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1685         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1686         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1687
1688         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1689         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1690         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1691         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1692         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1693         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1694
1695         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
1696         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
1697         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
1698
1699         HWRM_UNLOCK();
1700
1701         return rc;
1702 }
1703
1704 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1705 {
1706         int rc = 0;
1707         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1708         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1709
1710         HWRM_PREP(req, FUNC_CLR_STATS);
1711
1712         req.fid = rte_cpu_to_le_16(fid);
1713
1714         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1715
1716         HWRM_CHECK_RESULT();
1717         HWRM_UNLOCK();
1718
1719         return rc;
1720 }
1721
1722 /*
1723  * HWRM utility functions
1724  */
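     /*
      * Illustrative sketch (not part of the driver): every wrapper above
      * follows the same locked request/response pattern, e.g.:
      *
      *     HWRM_PREP(req, SOME_CMD);          // take hwrm_lock, fill header
      *     req.field = rte_cpu_to_le_16(val); // payload is little-endian
      *     rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
      *     HWRM_CHECK_RESULT();               // bail out on send/HWRM error
      *     HWRM_UNLOCK();                     // release hwrm_lock
      *
      * SOME_CMD and req.field are placeholders; the helpers below combine
      * such wrappers to clear, free, or allocate resources in bulk.
      */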
1725
1726 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1727 {
1728         unsigned int i;
1729         int rc = 0;
1730
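             /* Completion rings are indexed RX-first, then TX: e.g. with
              * rx_cp_nr_rings == 4, i == 5 maps to tx_queues[1].
              */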
1731         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1732                 struct bnxt_tx_queue *txq;
1733                 struct bnxt_rx_queue *rxq;
1734                 struct bnxt_cp_ring_info *cpr;
1735
1736                 if (i >= bp->rx_cp_nr_rings) {
1737                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1738                         cpr = txq->cp_ring;
1739                 } else {
1740                         rxq = bp->rx_queues[i];
1741                         cpr = rxq->cp_ring;
1742                 }
1743
1744                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1745                 if (rc)
1746                         return rc;
1747         }
1748         return 0;
1749 }
1750
1751 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1752 {
1753         int rc;
1754         unsigned int i;
1755         struct bnxt_cp_ring_info *cpr;
1756
1757         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1758
1759                 if (i >= bp->rx_cp_nr_rings) {
1760                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1761                 } else {
1762                         cpr = bp->rx_queues[i]->cp_ring;
1763                         bp->grp_info[i].fw_stats_ctx = -1;
1764                 }
1765                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1766                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1767                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1768                         if (rc)
1769                                 return rc;
1770                 }
1771         }
1772         return 0;
1773 }
1774
1775 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1776 {
1777         unsigned int i;
1778         int rc = 0;
1779
1780         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1781                 struct bnxt_tx_queue *txq;
1782                 struct bnxt_rx_queue *rxq;
1783                 struct bnxt_cp_ring_info *cpr;
1784
1785                 if (i >= bp->rx_cp_nr_rings) {
1786                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1787                         cpr = txq->cp_ring;
1788                 } else {
1789                         rxq = bp->rx_queues[i];
1790                         cpr = rxq->cp_ring;
1791                 }
1792
1793                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1794
1795                 if (rc)
1796                         return rc;
1797         }
1798         return rc;
1799 }
1800
1801 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1802 {
1803         uint16_t idx;
1804         int rc = 0;
1805
1806         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1807
1808                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1809                         continue;
1810
1811                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1812
1813                 if (rc)
1814                         return rc;
1815         }
1816         return rc;
1817 }
1818
1819 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1820                                 unsigned int idx __rte_unused)
1821 {
1822         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1823
1824         bnxt_hwrm_ring_free(bp, cp_ring,
1825                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1826         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1827         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1828                         sizeof(*cpr->cp_desc_ring));
1829         cpr->cp_raw_cons = 0;
1830 }
1831
1832 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1833 {
1834         unsigned int i;
1835         int rc = 0;
1836
1837         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1838                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1839                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1840                 struct bnxt_ring *ring = txr->tx_ring_struct;
1841                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1842                 unsigned int idx = bp->rx_cp_nr_rings + i;
1843
1844                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1845                         bnxt_hwrm_ring_free(bp, ring,
1846                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1847                         ring->fw_ring_id = INVALID_HW_RING_ID;
1848                         memset(txr->tx_desc_ring, 0,
1849                                         txr->tx_ring_struct->ring_size *
1850                                         sizeof(*txr->tx_desc_ring));
1851                         memset(txr->tx_buf_ring, 0,
1852                                         txr->tx_ring_struct->ring_size *
1853                                         sizeof(*txr->tx_buf_ring));
1854                         txr->tx_prod = 0;
1855                         txr->tx_cons = 0;
1856                 }
1857                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1858                         bnxt_free_cp_ring(bp, cpr, idx);
1859                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1860                 }
1861         }
1862
1863         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1864                 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1865                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1866                 struct bnxt_ring *ring = rxr->rx_ring_struct;
1867                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1868
1869                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1870                         bnxt_hwrm_ring_free(bp, ring,
1871                                         HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1872                         ring->fw_ring_id = INVALID_HW_RING_ID;
1873                         bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
1874                         memset(rxr->rx_desc_ring, 0,
1875                                         rxr->rx_ring_struct->ring_size *
1876                                         sizeof(*rxr->rx_desc_ring));
1877                         memset(rxr->rx_buf_ring, 0,
1878                                         rxr->rx_ring_struct->ring_size *
1879                                         sizeof(*rxr->rx_buf_ring));
1880                         rxr->rx_prod = 0;
1881                 }
1882                 ring = rxr->ag_ring_struct;
1883                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1884                         bnxt_hwrm_ring_free(bp, ring,
1885                                             HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1886                         ring->fw_ring_id = INVALID_HW_RING_ID;
1887                         memset(rxr->ag_buf_ring, 0,
1888                                rxr->ag_ring_struct->ring_size *
1889                                sizeof(*rxr->ag_buf_ring));
1890                         rxr->ag_prod = 0;
1891                         bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
1892                 }
1893                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1894                         bnxt_free_cp_ring(bp, cpr, i);
1895                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1896                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1897                 }
1898         }
1899
1900         /* Default completion ring */
1901         {
1902                 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1903
1904                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1905                         bnxt_free_cp_ring(bp, cpr, 0);
1906                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1907                 }
1908         }
1909
1910         return rc;
1911 }
1912
1913 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1914 {
1915         uint16_t i;
1916         int rc = 0;
1917
1918         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1919                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1920                 if (rc)
1921                         return rc;
1922         }
1923         return rc;
1924 }
1925
1926 void bnxt_free_hwrm_resources(struct bnxt *bp)
1927 {
1928         /* Release memzone */
1929         rte_free(bp->hwrm_cmd_resp_addr);
1930         rte_free(bp->hwrm_short_cmd_req_addr);
1931         bp->hwrm_cmd_resp_addr = NULL;
1932         bp->hwrm_short_cmd_req_addr = NULL;
1933         bp->hwrm_cmd_resp_dma_addr = 0;
1934         bp->hwrm_short_cmd_req_dma_addr = 0;
1935 }
1936
1937 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1938 {
1939         struct rte_pci_device *pdev = bp->pdev;
1940         char type[RTE_MEMZONE_NAMESIZE];
1941
1942         sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1943                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1944         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1945         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1946         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1947         if (bp->hwrm_cmd_resp_addr == NULL)
1948                 return -ENOMEM;
1949         bp->hwrm_cmd_resp_dma_addr =
1950                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
1951         if (bp->hwrm_cmd_resp_dma_addr == 0) {
1952                 PMD_DRV_LOG(ERR,
1953                         "unable to map response address to physical memory\n");
1954                 return -ENOMEM;
1955         }
1956         rte_spinlock_init(&bp->hwrm_lock);
1957
1958         return 0;
1959 }
1960
1961 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1962 {
1963         struct bnxt_filter_info *filter;
1964         int rc = 0;
1965
1966         STAILQ_FOREACH(filter, &vnic->filter, next) {
1967                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1968                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1969                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1970                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1971                 else
1972                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1973                 /* Continue on failure so that the remaining filters
1974                  * are still cleared. */
1975         }
1976         return rc;
1977 }
1978
1979 static int
1980 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1981 {
1982         struct bnxt_filter_info *filter;
1983         struct rte_flow *flow;
1984         int rc = 0;
1985
1986         STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1987                 filter = flow->filter;
1988                 PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
1989                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1990                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1991                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1992                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1993                 else
1994                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1995
1996                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
1997                 rte_free(flow);
1998                 /* Continue on failure so that the remaining flows
1999                  * are still cleared. */
2000         }
2001         return rc;
2002 }
2003
2004 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2005 {
2006         struct bnxt_filter_info *filter;
2007         int rc = 0;
2008
2009         STAILQ_FOREACH(filter, &vnic->filter, next) {
2010                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2011                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2012                                                      filter);
2013                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2014                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2015                                                          filter);
2016                 else
2017                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2018                                                      filter);
2019                 if (rc)
2020                         break;
2021         }
2022         return rc;
2023 }
2024
2025 void bnxt_free_tunnel_ports(struct bnxt *bp)
2026 {
2027         if (bp->vxlan_port_cnt)
2028                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2029                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2030         bp->vxlan_port = 0;
2031         if (bp->geneve_port_cnt)
2032                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2033                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2034         bp->geneve_port = 0;
2035 }
2036
2037 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2038 {
2039         int i;
2040
2041         if (bp->vnic_info == NULL)
2042                 return;
2043
2044         /*
2045          * Cleanup VNICs in reverse order, to make sure the L2 filter
2046          * from vnic0 is last to be cleaned up.
2047          */
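             /* e.g., with nr_vnics == 3 this frees vnic 2, then vnic 1,
              * and the default vnic 0 last.
              */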
2048         for (i = bp->nr_vnics - 1; i >= 0; i--) {
2049                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2050
2051                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2052
2053                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2054
2055                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2056
2057                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2058
2059                 bnxt_hwrm_vnic_free(bp, vnic);
2060         }
2061         /* Ring resources */
2062         bnxt_free_all_hwrm_rings(bp);
2063         bnxt_free_all_hwrm_ring_grps(bp);
2064         bnxt_free_all_hwrm_stat_ctxs(bp);
2065         bnxt_free_tunnel_ports(bp);
2066 }
2067
2068 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2069 {
2070         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2071
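             /* ETH_LINK_SPEED_AUTONEG is 0, so this tests that the
              * ETH_LINK_SPEED_FIXED bit is clear.
              */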
2072         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2073                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2074
2075         switch (conf_link_speed) {
2076         case ETH_LINK_SPEED_10M_HD:
2077         case ETH_LINK_SPEED_100M_HD:
2078                 /* FALLTHROUGH */
2079                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2080         }
2081         return hw_link_duplex;
2082 }
2083
2084 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2085 {
2086         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2087 }
2088
2089 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2090 {
2091         uint16_t eth_link_speed = 0;
2092
2093         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2094                 return ETH_LINK_SPEED_AUTONEG;
2095
2096         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2097         case ETH_LINK_SPEED_100M:
2098         case ETH_LINK_SPEED_100M_HD:
2099                 /* FALLTHROUGH */
2100                 eth_link_speed =
2101                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2102                 break;
2103         case ETH_LINK_SPEED_1G:
2104                 eth_link_speed =
2105                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2106                 break;
2107         case ETH_LINK_SPEED_2_5G:
2108                 eth_link_speed =
2109                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2110                 break;
2111         case ETH_LINK_SPEED_10G:
2112                 eth_link_speed =
2113                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2114                 break;
2115         case ETH_LINK_SPEED_20G:
2116                 eth_link_speed =
2117                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2118                 break;
2119         case ETH_LINK_SPEED_25G:
2120                 eth_link_speed =
2121                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2122                 break;
2123         case ETH_LINK_SPEED_40G:
2124                 eth_link_speed =
2125                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2126                 break;
2127         case ETH_LINK_SPEED_50G:
2128                 eth_link_speed =
2129                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2130                 break;
2131         case ETH_LINK_SPEED_100G:
2132                 eth_link_speed =
2133                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2134                 break;
2135         default:
2136                 PMD_DRV_LOG(ERR,
2137                         "Unsupported link speed %d; default to AUTO\n",
2138                         conf_link_speed);
2139                 break;
2140         }
2141         return eth_link_speed;
2142 }
2143
2144 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2145                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2146                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2147                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2148
2149 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2150 {
2151         uint32_t one_speed;
2152
2153         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2154                 return 0;
2155
2156         if (link_speed & ETH_LINK_SPEED_FIXED) {
2157                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2158
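                     /* x & (x - 1) clears the lowest set bit: a nonzero
                      * result means more than one speed bit is set, e.g.
                      * 0b0110 & 0b0101 == 0b0100.
                      */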
2159                 if (one_speed & (one_speed - 1)) {
2160                         PMD_DRV_LOG(ERR,
2161                                 "Invalid advertised speeds (%u) for port %u\n",
2162                                 link_speed, port_id);
2163                         return -EINVAL;
2164                 }
2165                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2166                         PMD_DRV_LOG(ERR,
2167                                 "Unsupported advertised speed (%u) for port %u\n",
2168                                 link_speed, port_id);
2169                         return -EINVAL;
2170                 }
2171         } else {
2172                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2173                         PMD_DRV_LOG(ERR,
2174                                 "Unsupported advertised speeds (%u) for port %u\n",
2175                                 link_speed, port_id);
2176                         return -EINVAL;
2177                 }
2178         }
2179         return 0;
2180 }
2181
2182 static uint16_t
2183 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2184 {
2185         uint16_t ret = 0;
2186
2187         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2188                 if (bp->link_info.support_speeds)
2189                         return bp->link_info.support_speeds;
2190                 link_speed = BNXT_SUPPORTED_SPEEDS;
2191         }
2192
2193         if (link_speed & ETH_LINK_SPEED_100M)
2194                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2195         if (link_speed & ETH_LINK_SPEED_100M_HD)
2196                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2197         if (link_speed & ETH_LINK_SPEED_1G)
2198                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2199         if (link_speed & ETH_LINK_SPEED_2_5G)
2200                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2201         if (link_speed & ETH_LINK_SPEED_10G)
2202                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2203         if (link_speed & ETH_LINK_SPEED_20G)
2204                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2205         if (link_speed & ETH_LINK_SPEED_25G)
2206                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2207         if (link_speed & ETH_LINK_SPEED_40G)
2208                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2209         if (link_speed & ETH_LINK_SPEED_50G)
2210                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2211         if (link_speed & ETH_LINK_SPEED_100G)
2212                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2213         return ret;
2214 }
2215
2216 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2217 {
2218         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2219
2220         switch (hw_link_speed) {
2221         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2222                 eth_link_speed = ETH_SPEED_NUM_100M;
2223                 break;
2224         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2225                 eth_link_speed = ETH_SPEED_NUM_1G;
2226                 break;
2227         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2228                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2229                 break;
2230         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2231                 eth_link_speed = ETH_SPEED_NUM_10G;
2232                 break;
2233         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2234                 eth_link_speed = ETH_SPEED_NUM_20G;
2235                 break;
2236         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2237                 eth_link_speed = ETH_SPEED_NUM_25G;
2238                 break;
2239         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2240                 eth_link_speed = ETH_SPEED_NUM_40G;
2241                 break;
2242         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2243                 eth_link_speed = ETH_SPEED_NUM_50G;
2244                 break;
2245         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2246                 eth_link_speed = ETH_SPEED_NUM_100G;
2247                 break;
2248         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2249         default:
2250                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2251                         hw_link_speed);
2252                 break;
2253         }
2254         return eth_link_speed;
2255 }
2256
2257 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2258 {
2259         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2260
2261         switch (hw_link_duplex) {
2262         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2263         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2264                 /* FALLTHROUGH */
2265                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2266                 break;
2267         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2268                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2269                 break;
2270         default:
2271                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2272                         hw_link_duplex);
2273                 break;
2274         }
2275         return eth_link_duplex;
2276 }
2277
2278 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2279 {
2280         int rc = 0;
2281         struct bnxt_link_info *link_info = &bp->link_info;
2282
2283         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2284         if (rc) {
2285                 PMD_DRV_LOG(ERR,
2286                         "Get link config failed with rc %d\n", rc);
2287                 goto exit;
2288         }
2289         if (link_info->link_speed)
2290                 link->link_speed =
2291                         bnxt_parse_hw_link_speed(link_info->link_speed);
2292         else
2293                 link->link_speed = ETH_SPEED_NUM_NONE;
2294         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2295         link->link_status = link_info->link_up;
2296         link->link_autoneg = link_info->auto_mode ==
2297                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2298                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2299 exit:
2300         return rc;
2301 }
2302
2303 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2304 {
2305         int rc = 0;
2306         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2307         struct bnxt_link_info link_req;
2308         uint16_t speed, autoneg;
2309
2310         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2311                 return 0;
2312
2313         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2314                         bp->eth_dev->data->port_id);
2315         if (rc)
2316                 goto error;
2317
2318         memset(&link_req, 0, sizeof(link_req));
2319         link_req.link_up = link_up;
2320         if (!link_up)
2321                 goto port_phy_cfg;
2322
2323         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2324         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2325         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2326         /* Autoneg can be done only when the FW allows */
2327         if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2328                                 bp->link_info.force_link_speed)) {
2329                 link_req.phy_flags |=
2330                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2331                 link_req.auto_link_speed_mask =
2332                         bnxt_parse_eth_link_speed_mask(bp,
2333                                                        dev_conf->link_speeds);
2334         } else {
2335                 if (bp->link_info.phy_type ==
2336                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2337                     bp->link_info.phy_type ==
2338                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2339                     bp->link_info.media_type ==
2340                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2341                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2342                         return -EINVAL;
2343                 }
2344
2345                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2346                 /* If user wants a particular speed try that first. */
2347                 if (speed)
2348                         link_req.link_speed = speed;
2349                 else if (bp->link_info.force_link_speed)
2350                         link_req.link_speed = bp->link_info.force_link_speed;
2351                 else
2352                         link_req.link_speed = bp->link_info.auto_link_speed;
2353         }
2354         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2355         link_req.auto_pause = bp->link_info.auto_pause;
2356         link_req.force_pause = bp->link_info.force_pause;
2357
2358 port_phy_cfg:
2359         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2360         if (rc) {
2361                 PMD_DRV_LOG(ERR,
2362                         "Set link config failed with rc %d\n", rc);
2363         }
2364
2365 error:
2366         return rc;
2367 }
2368
2369 /* JIRA 22088 */
2370 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2371 {
2372         struct hwrm_func_qcfg_input req = {0};
2373         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2374         uint16_t flags;
2375         int rc = 0;
2376
2377         HWRM_PREP(req, FUNC_QCFG);
2378         req.fid = rte_cpu_to_le_16(0xffff);
2379
2380         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2381
2382         HWRM_CHECK_RESULT();
2383
2384         /* Hard-coded 0xfff mask extracts the 12-bit VLAN ID. */
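             /* e.g., resp->vlan == 0x3123 yields bp->vlan == 0x123. */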
2385         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2386         flags = rte_le_to_cpu_16(resp->flags);
2387         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2388                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2389
2390         switch (resp->port_partition_type) {
2391         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2392         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2393         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2394                 /* FALLTHROUGH */
2395                 bp->port_partition_type = resp->port_partition_type;
2396                 break;
2397         default:
2398                 bp->port_partition_type = 0;
2399                 break;
2400         }
2401
2402         HWRM_UNLOCK();
2403
2404         return rc;
2405 }
2406
2407 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2408                                    struct hwrm_func_qcaps_output *qcaps)
2409 {
2410         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2411         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2412                sizeof(qcaps->mac_address));
2413         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2414         qcaps->max_rx_rings = fcfg->num_rx_rings;
2415         qcaps->max_tx_rings = fcfg->num_tx_rings;
2416         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2417         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2418         qcaps->max_vfs = 0;
2419         qcaps->first_vf_id = 0;
2420         qcaps->max_vnics = fcfg->num_vnics;
2421         qcaps->max_decap_records = 0;
2422         qcaps->max_encap_records = 0;
2423         qcaps->max_tx_wm_flows = 0;
2424         qcaps->max_tx_em_flows = 0;
2425         qcaps->max_rx_wm_flows = 0;
2426         qcaps->max_rx_em_flows = 0;
2427         qcaps->max_flow_id = 0;
2428         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2429         qcaps->max_sp_tx_rings = 0;
2430         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2431 }
2432
2433 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2434 {
2435         struct hwrm_func_cfg_input req = {0};
2436         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2437         int rc;
2438
2439         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2440                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2441                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2442                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2443                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2444                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2445                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2446                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2447                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2448                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2449         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2450         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2451         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2452                                    ETHER_CRC_LEN + VLAN_TAG_SIZE *
2453                                    BNXT_NUM_VLANS);
2454         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2455         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2456         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2457         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2458         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2459         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2460         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2461         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2462         req.fid = rte_cpu_to_le_16(0xffff);
2463
2464         HWRM_PREP(req, FUNC_CFG);
2465
2466         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2467
2468         HWRM_CHECK_RESULT();
2469         HWRM_UNLOCK();
2470
2471         return rc;
2472 }
2473
2474 static void populate_vf_func_cfg_req(struct bnxt *bp,
2475                                      struct hwrm_func_cfg_input *req,
2476                                      int num_vfs)
2477 {
2478         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2479                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2480                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2481                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2482                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2483                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2484                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2485                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2486                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2487                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2488
2489         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2490                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2491                                     BNXT_NUM_VLANS);
2492         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2493                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2494                                     BNXT_NUM_VLANS);
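             /* Resources are split evenly across the PF and all VFs; e.g.
              * max_tx_rings == 64 with num_vfs == 7 gives each of the
              * eight functions 64 / 8 == 8 TX rings.
              */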
2495         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2496                                                 (num_vfs + 1));
2497         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2498         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2499                                                (num_vfs + 1));
2500         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2501         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2502         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2503         /* TODO: For now, do not support VMDq/RFS on VFs. */
2504         req->num_vnics = rte_cpu_to_le_16(1);
2505         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2506                                                  (num_vfs + 1));
2507 }
2508
2509 static void add_random_mac_if_needed(struct bnxt *bp,
2510                                      struct hwrm_func_cfg_input *cfg_req,
2511                                      int vf)
2512 {
2513         struct ether_addr mac;
2514
2515         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2516                 return;
2517
2518         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2519                 cfg_req->enables |=
2520                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2521                 eth_random_addr(cfg_req->dflt_mac_addr);
2522                 bp->pf.vf_info[vf].random_mac = true;
2523         } else {
2524                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2525         }
2526 }
2527
2528 static void reserve_resources_from_vf(struct bnxt *bp,
2529                                       struct hwrm_func_cfg_input *cfg_req,
2530                                       int vf)
2531 {
2532         struct hwrm_func_qcaps_input req = {0};
2533         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2534         int rc;
2535
2536         /* Get the actual allocated values now */
2537         HWRM_PREP(req, FUNC_QCAPS);
2538         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2539         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2540
2541         if (rc) {
2542                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2543                 copy_func_cfg_to_qcaps(cfg_req, resp);
2544         } else if (resp->error_code) {
2545                 rc = rte_le_to_cpu_16(resp->error_code);
2546                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2547                 copy_func_cfg_to_qcaps(cfg_req, resp);
2548         }
2549
2550         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2551         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2552         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2553         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2554         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2555         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2556         /*
2557          * TODO: VMDq is not supported with VFs, so max_vnics is
2558          * always forced to 1 in this case.
2559          */
2560         /* bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics); */
2561         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2562
2563         HWRM_UNLOCK();
2564 }
2565
2566 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2567 {
2568         struct hwrm_func_qcfg_input req = {0};
2569         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2570         int rc;
2571
2572         /* Query the VLAN currently configured on this VF */
2573         HWRM_PREP(req, FUNC_QCFG);
2574         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2575         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2576         if (rc) {
2577                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
2578                 return -1;
2579         } else if (resp->error_code) {
2580                 rc = rte_le_to_cpu_16(resp->error_code);
2581                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
2582                 return -1;
2583         }
2584         rc = rte_le_to_cpu_16(resp->vlan);
2585
2586         HWRM_UNLOCK();
2587
2588         return rc;
2589 }
2590
2591 static int update_pf_resource_max(struct bnxt *bp)
2592 {
2593         struct hwrm_func_qcfg_input req = {0};
2594         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2595         int rc;
2596
2597         /* Copy the allocated numbers into the pf struct */
2598         HWRM_PREP(req, FUNC_QCFG);
2599         req.fid = rte_cpu_to_le_16(0xffff);
2600         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2601         HWRM_CHECK_RESULT();
2602
2603         /* Only TX ring value reflects actual allocation? TODO */
2604         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2605         bp->pf.evb_mode = resp->evb_mode;
2606
2607         HWRM_UNLOCK();
2608
2609         return rc;
2610 }
2611
2612 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2613 {
2614         int rc;
2615
2616         if (!BNXT_PF(bp)) {
2617                 PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
2618                 return -1;
2619         }
2620
2621         rc = bnxt_hwrm_func_qcaps(bp);
2622         if (rc)
2623                 return rc;
2624
2625         bp->pf.func_cfg_flags &=
2626                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2627                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2628         bp->pf.func_cfg_flags |=
2629                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2630         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2631         return rc;
2632 }
2633
2634 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2635 {
2636         struct hwrm_func_cfg_input req = {0};
2637         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2638         int i;
2639         size_t sz;
2640         int rc = 0;
2641         size_t req_buf_sz;
2642
2643         if (!BNXT_PF(bp)) {
2644                 PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
2645                 return -1;
2646         }
2647
2648         rc = bnxt_hwrm_func_qcaps(bp);
2649
2650         if (rc)
2651                 return rc;
2652
2653         bp->pf.active_vfs = num_vfs;
2654
2655         /*
2656          * First, configure the PF to only use one TX ring.  This ensures that
2657          * there are enough rings for all VFs.
2658          *
2659          * If we don't do this, when we call func_alloc() later, we will lock
2660          * extra rings to the PF that won't be available during func_cfg() of
2661          * the VFs.
2662          *
2663          * This has been fixed with firmware versions above 20.6.54.
2664          */
2665         bp->pf.func_cfg_flags &=
2666                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2667                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2668         bp->pf.func_cfg_flags |=
2669                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2670         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2671         if (rc)
2672                 return rc;
2673
2674         /*
2675          * Now, create and register a buffer to hold forwarded VF requests
2676          */
2677         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2678         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2679                 page_roundup(req_buf_sz));
2680         if (bp->pf.vf_req_buf == NULL) {
2681                 rc = -ENOMEM;
2682                 goto error_free;
2683         }
2684         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2685                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2686         for (i = 0; i < num_vfs; i++)
2687                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2688                                         (i * HWRM_MAX_REQ_LEN);
2689
2690         rc = bnxt_hwrm_func_buf_rgtr(bp);
2691         if (rc)
2692                 goto error_free;
2693
2694         populate_vf_func_cfg_req(bp, &req, num_vfs);
2695
2696         bp->pf.active_vfs = 0;
2697         for (i = 0; i < num_vfs; i++) {
2698                 add_random_mac_if_needed(bp, &req, i);
2699
2700                 HWRM_PREP(req, FUNC_CFG);
2701                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2702                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2703                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2704
2705                 /* Clear enable flag for next pass */
2706                 req.enables &= ~rte_cpu_to_le_32(
2707                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2708
2709                 if (rc || resp->error_code) {
2710                         PMD_DRV_LOG(ERR,
2711                                 "Failed to initizlie VF %d\n", i);
2712                         PMD_DRV_LOG(ERR,
2713                                 "Not all VFs available. (%d, %d)\n",
2714                                 rc, resp->error_code);
2715                         HWRM_UNLOCK();
2716                         break;
2717                 }
2718
2719                 HWRM_UNLOCK();
2720
2721                 reserve_resources_from_vf(bp, &req, i);
2722                 bp->pf.active_vfs++;
2723                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2724         }
2725
2726         /*
2727          * Now configure the PF to use "the rest" of the resources.
2728          * STD_TX_RING_MODE is set here even though it limits the
2729          * number of TX rings; this allows QoS to function properly.
2730          * Without it, the PF rings would break the bandwidth settings.
2731          */
2732         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2733         if (rc)
2734                 goto error_free;
2735
2736         rc = update_pf_resource_max(bp);
2737         if (rc)
2738                 goto error_free;
2739
2740         return rc;
2741
2742 error_free:
2743         bnxt_hwrm_func_buf_unrgtr(bp);
2744         return rc;
2745 }
2746
2747 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2748 {
2749         struct hwrm_func_cfg_input req = {0};
2750         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2751         int rc;
2752
2753         HWRM_PREP(req, FUNC_CFG);
2754
2755         req.fid = rte_cpu_to_le_16(0xffff);
2756         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2757         req.evb_mode = bp->pf.evb_mode;
2758
2759         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2760         HWRM_CHECK_RESULT();
2761         HWRM_UNLOCK();
2762
2763         return rc;
2764 }
2765
2766 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2767                                 uint8_t tunnel_type)
2768 {
2769         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2770         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2771         int rc = 0;
2772
2773         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2774         req.tunnel_type = tunnel_type;
2775         req.tunnel_dst_port_val = port;
2776         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2777         HWRM_CHECK_RESULT();
2778
2779         switch (tunnel_type) {
2780         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2781                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2782                 bp->vxlan_port = port;
2783                 break;
2784         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2785                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2786                 bp->geneve_port = port;
2787                 break;
2788         default:
2789                 break;
2790         }
2791
2792         HWRM_UNLOCK();
2793
2794         return rc;
2795 }
2796
2797 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2798                                 uint8_t tunnel_type)
2799 {
2800         struct hwrm_tunnel_dst_port_free_input req = {0};
2801         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2802         int rc = 0;
2803
2804         HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2805
2806         req.tunnel_type = tunnel_type;
2807         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2808         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2809
2810         HWRM_CHECK_RESULT();
2811         HWRM_UNLOCK();
2812
2813         return rc;
2814 }
2815
2816 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2817                                         uint32_t flags)
2818 {
2819         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2820         struct hwrm_func_cfg_input req = {0};
2821         int rc;
2822
2823         HWRM_PREP(req, FUNC_CFG);
2824
2825         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2826         req.flags = rte_cpu_to_le_32(flags);
2827         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2828
2829         HWRM_CHECK_RESULT();
2830         HWRM_UNLOCK();
2831
2832         return rc;
2833 }
2834
2835 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2836 {
2837         uint32_t *flag = flagp;
2838
2839         vnic->flags = *flag;
2840 }
2841
2842 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2843 {
2844         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2845 }
2846
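     /*
      * Register the PF's VF request buffer with the firmware.  HWRM commands
      * issued by VFs are DMAed by the firmware into this buffer, one
      * HWRM_MAX_REQ_LEN slot per VF (see the per-VF req_buf assignments
      * earlier in this file).  Note that req_buf_page_size carries a log2
      * exponent, which is exactly what page_getenum() returns.
      */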
2847 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2848 {
2849         int rc = 0;
2850         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2851         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2852
2853         HWRM_PREP(req, FUNC_BUF_RGTR);
2854
2855         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2856         req.req_buf_page_size = rte_cpu_to_le_16(
2857                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2858         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2859         req.req_buf_page_addr0 =
2860                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2861         if (req.req_buf_page_addr0 == 0) {
2862                 PMD_DRV_LOG(ERR,
2863                         "unable to map buffer address to physical memory\n");
                     HWRM_UNLOCK();
2864                 return -ENOMEM;
2865         }
2866
2867         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2868
2869         HWRM_CHECK_RESULT();
2870         HWRM_UNLOCK();
2871
2872         return rc;
2873 }
2874
2875 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2876 {
2877         int rc = 0;
2878         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2879         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2880
2881         HWRM_PREP(req, FUNC_BUF_UNRGTR);
2882
2883         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2884
2885         HWRM_CHECK_RESULT();
2886         HWRM_UNLOCK();
2887
2888         return rc;
2889 }
2890
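     /*
      * Direct the firmware's asynchronous event notifications at the default
      * completion ring.  The PF does this through HWRM_FUNC_CFG (below),
      * while VFs use the HWRM_FUNC_VF_CFG variant that follows, since
      * FUNC_CFG is a PF-only command.
      */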
2891 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2892 {
2893         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2894         struct hwrm_func_cfg_input req = {0};
2895         int rc;
2896
2897         HWRM_PREP(req, FUNC_CFG);
2898
2899         req.fid = rte_cpu_to_le_16(0xffff);
2900         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2901         req.enables = rte_cpu_to_le_32(
2902                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2903         req.async_event_cr = rte_cpu_to_le_16(
2904                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2905         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2906
2907         HWRM_CHECK_RESULT();
2908         HWRM_UNLOCK();
2909
2910         return rc;
2911 }
2912
2913 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2914 {
2915         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2916         struct hwrm_func_vf_cfg_input req = {0};
2917         int rc;
2918
2919         HWRM_PREP(req, FUNC_VF_CFG);
2920
2921         req.enables = rte_cpu_to_le_32(
2922                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2923         req.async_event_cr = rte_cpu_to_le_16(
2924                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2925         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2926
2927         HWRM_CHECK_RESULT();
2928         HWRM_UNLOCK();
2929
2930         return rc;
2931 }
2932
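     /*
      * Apply a default VLAN either to a VF (fid and flags taken from
      * vf_info) or to the PF itself (fid 0xffff).  Only the DFLT_VLAN enable
      * bit is set, so the remaining function attributes are preserved.
      */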
2933 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2934 {
2935         struct hwrm_func_cfg_input req = {0};
2936         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2937         uint16_t dflt_vlan, fid;
2938         uint32_t func_cfg_flags;
2939         int rc = 0;
2940
2941         HWRM_PREP(req, FUNC_CFG);
2942
2943         if (is_vf) {
2944                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2945                 fid = bp->pf.vf_info[vf].fid;
2946                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2947         } else {
2948                 fid = 0xffff;
2949                 func_cfg_flags = bp->pf.func_cfg_flags;
2950                 dflt_vlan = bp->vlan;
2951         }
2952
2953         req.flags = rte_cpu_to_le_32(func_cfg_flags);
2954         req.fid = rte_cpu_to_le_16(fid);
2955         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2956         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2957
2958         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2959
2960         HWRM_CHECK_RESULT();
2961         HWRM_UNLOCK();
2962
2963         return rc;
2964 }
2965
2966 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2967                         uint16_t max_bw, uint16_t enables)
2968 {
2969         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2970         struct hwrm_func_cfg_input req = {0};
2971         int rc;
2972
2973         HWRM_PREP(req, FUNC_CFG);
2974
2975         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2976         req.enables |= rte_cpu_to_le_32(enables);
2977         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2978         req.max_bw = rte_cpu_to_le_32(max_bw);
2979         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2980
2981         HWRM_CHECK_RESULT();
2982         HWRM_UNLOCK();
2983
2984         return rc;
2985 }
2986
2987 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2988 {
2989         struct hwrm_func_cfg_input req = {0};
2990         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2991         int rc = 0;
2992
2993         HWRM_PREP(req, FUNC_CFG);
2994
2995         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2996         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2997         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2998         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2999
3000         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3001
3002         HWRM_CHECK_RESULT();
3003         HWRM_UNLOCK();
3004
3005         return rc;
3006 }
3007
3008 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3009 {
3010         int rc;
3011
3012         if (BNXT_PF(bp))
3013                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3014         else
3015                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3016
3017         return rc;
3018 }
3019
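     /*
      * When a VF's HWRM command is forwarded to the PF for inspection, the
      * PF answers through one of two paths: HWRM_REJECT_FWD_RESP (below)
      * asks the firmware to fail the encapsulated request, while
      * HWRM_EXEC_FWD_RESP (further down) asks it to execute the request
      * unmodified.  Either way the original command is copied into
      * encap_request and targeted at the requesting function via
      * encap_resp_target_id.
      */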
3020 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3021                               void *encaped, size_t ec_size)
3022 {
3023         int rc = 0;
3024         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3025         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3026
3027         if (ec_size > sizeof(req.encap_request))
3028                 return -1;
3029
3030         HWRM_PREP(req, REJECT_FWD_RESP);
3031
3032         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3033         memcpy(req.encap_request, encaped, ec_size);
3034
3035         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3036
3037         HWRM_CHECK_RESULT();
3038         HWRM_UNLOCK();
3039
3040         return rc;
3041 }
3042
3043 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3044                                        struct ether_addr *mac)
3045 {
3046         struct hwrm_func_qcfg_input req = {0};
3047         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3048         int rc;
3049
3050         HWRM_PREP(req, FUNC_QCFG);
3051
3052         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3053         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3054
3055         HWRM_CHECK_RESULT();
3056
3057         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
3058
3059         HWRM_UNLOCK();
3060
3061         return rc;
3062 }
3063
3064 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3065                             void *encaped, size_t ec_size)
3066 {
3067         int rc = 0;
3068         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3069         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3070
3071         if (ec_size > sizeof(req.encap_request))
3072                 return -1;
3073
3074         HWRM_PREP(req, EXEC_FWD_RESP);
3075
3076         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3077         memcpy(req.encap_request, encaped, ec_size);
3078
3079         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3080
3081         HWRM_CHECK_RESULT();
3082         HWRM_UNLOCK();
3083
3084         return rc;
3085 }
3086
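     /*
      * Query a single statistics context and fold its counters into the
      * per-queue fields of rte_eth_stats.  One routine serves both
      * directions: the rx flag selects whether the ingress
      * (q_ipackets/q_ibytes/q_errors) or egress (q_opackets/q_obytes)
      * counters are filled for queue idx.
      */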
3087 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3088                          struct rte_eth_stats *stats, uint8_t rx)
3089 {
3090         int rc = 0;
3091         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3092         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3093
3094         HWRM_PREP(req, STAT_CTX_QUERY);
3095
3096         req.stat_ctx_id = rte_cpu_to_le_32(cid);
3097
3098         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3099
3100         HWRM_CHECK_RESULT();
3101
3102         if (rx) {
3103                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3104                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3105                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3106                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3107                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3108                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3109                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3110                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3111         } else {
3112                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3113                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3114                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3115                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3116                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3117                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3118                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
3119         }
3120
3122         HWRM_UNLOCK();
3123
3124         return rc;
3125 }
3126
3127 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3128 {
3129         struct hwrm_port_qstats_input req = {0};
3130         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3131         struct bnxt_pf_info *pf = &bp->pf;
3132         int rc;
3133
3134         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3135                 return 0;
3136
3137         HWRM_PREP(req, PORT_QSTATS);
3138
3139         req.port_id = rte_cpu_to_le_16(pf->port_id);
3140         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3141         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3142         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3143
3144         HWRM_CHECK_RESULT();
3145         HWRM_UNLOCK();
3146
3147         return rc;
3148 }
3149
3150 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3151 {
3152         struct hwrm_port_clr_stats_input req = {0};
3153         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3154         struct bnxt_pf_info *pf = &bp->pf;
3155         int rc;
3156
3157         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3158                 return 0;
3159
3160         HWRM_PREP(req, PORT_CLR_STATS);
3161
3162         req.port_id = rte_cpu_to_le_16(pf->port_id);
3163         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3164
3165         HWRM_CHECK_RESULT();
3166         HWRM_UNLOCK();
3167
3168         return rc;
3169 }
3170
3171 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3172 {
3173         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3174         struct hwrm_port_led_qcaps_input req = {0};
3175         int rc;
3176
3177         if (BNXT_VF(bp))
3178                 return 0;
3179
3180         HWRM_PREP(req, PORT_LED_QCAPS);
3181         req.port_id = bp->pf.port_id;
3182         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3183
3184         HWRM_CHECK_RESULT();
3185
3186         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3187                 unsigned int i;
3188
3189                 bp->num_leds = resp->num_leds;
3190                 memcpy(bp->leds, &resp->led0_id,
3191                         sizeof(bp->leds[0]) * bp->num_leds);
3192                 for (i = 0; i < bp->num_leds; i++) {
3193                         struct bnxt_led_info *led = &bp->leds[i];
3194
3195                         uint16_t caps = led->led_state_caps;
3196
3197                         if (!led->led_group_id ||
3198                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3199                                 bp->num_leds = 0;
3200                                 break;
3201                         }
3202                 }
3203         }
3204
3205         HWRM_UNLOCK();
3206
3207         return rc;
3208 }
3209
3210 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3211 {
3212         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3213         struct hwrm_port_led_cfg_input req = {0};
3214         struct bnxt_led_cfg *led_cfg;
3215         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3216         uint16_t duration = 0;
3217         int rc, i;
3218
3219         if (!bp->num_leds || BNXT_VF(bp))
3220                 return -EOPNOTSUPP;
3221
3222         HWRM_PREP(req, PORT_LED_CFG);
3223
3224         if (led_on) {
3225                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3226                 duration = rte_cpu_to_le_16(500);
3227         }
3228         req.port_id = bp->pf.port_id;
3229         req.num_leds = bp->num_leds;
3230         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3231         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3232                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3233                 led_cfg->led_id = bp->leds[i].led_id;
3234                 led_cfg->led_state = led_state;
3235                 led_cfg->led_blink_on = duration;
3236                 led_cfg->led_blink_off = duration;
3237                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3238         }
3239
3240         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3241
3242         HWRM_CHECK_RESULT();
3243         HWRM_UNLOCK();
3244
3245         return rc;
3246 }
3247
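     /*
      * NVM helpers.  bnxt_hwrm_nvm_get_dir_info() reports the directory
      * geometry (number of entries and per-entry length), which
      * bnxt_get_nvram_directory() below uses to size the DMA buffer for
      * reading the whole directory.
      */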
3248 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3249                                uint32_t *length)
3250 {
3251         int rc;
3252         struct hwrm_nvm_get_dir_info_input req = {0};
3253         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3254
3255         HWRM_PREP(req, NVM_GET_DIR_INFO);
3256
3257         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3258
3259         HWRM_CHECK_RESULT();
3260         HWRM_UNLOCK();
3261
3262         if (!rc) {
3263                 *entries = rte_le_to_cpu_32(resp->entries);
3264                 *length = rte_le_to_cpu_32(resp->entry_length);
3265         }
3266         return rc;
3267 }
3268
3269 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3270 {
3271         int rc;
3272         uint32_t dir_entries;
3273         uint32_t entry_length;
3274         uint8_t *buf;
3275         size_t buflen;
3276         rte_iova_t dma_handle;
3277         struct hwrm_nvm_get_dir_entries_input req = {0};
3278         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3279
3280         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3281         if (rc != 0)
3282                 return rc;
3283
3284         *data++ = dir_entries;
3285         *data++ = entry_length;
3286         len -= 2;
3287         memset(data, 0xff, len);
3288
3289         buflen = dir_entries * entry_length;
3290         buf = rte_malloc("nvm_dir", buflen, 0);
3291         if (buf == NULL)
3292                 return -ENOMEM;
3293         rte_mem_lock_page(buf);
3294         dma_handle = rte_mem_virt2iova(buf);
3295         if (dma_handle == 0) {
3296                 PMD_DRV_LOG(ERR,
3297                         "unable to map response address to physical memory\n");
                     rte_free(buf);
3298                 return -ENOMEM;
3299         }
3300         HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3301         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3302         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3303
3304         HWRM_CHECK_RESULT();
3305         HWRM_UNLOCK();
3306
3307         if (rc == 0)
3308                 memcpy(data, buf, len > buflen ? buflen : len);
3309
3310         rte_free(buf);
3311
3312         return rc;
3313 }
3314
3315 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3316                              uint32_t offset, uint32_t length,
3317                              uint8_t *data)
3318 {
3319         int rc;
3320         uint8_t *buf;
3321         rte_iova_t dma_handle;
3322         struct hwrm_nvm_read_input req = {0};
3323         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3324
3325         buf = rte_malloc("nvm_item", length, 0);
3326         if (!buf)
3327                 return -ENOMEM;
3328         rte_mem_lock_page(buf);
3329
3330         dma_handle = rte_mem_virt2iova(buf);
3331         if (dma_handle == 0) {
3332                 PMD_DRV_LOG(ERR,
3333                         "unable to map response address to physical memory\n");
                     rte_free(buf);
3334                 return -ENOMEM;
3335         }
3336         HWRM_PREP(req, NVM_READ);
3337         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3338         req.dir_idx = rte_cpu_to_le_16(index);
3339         req.offset = rte_cpu_to_le_32(offset);
3340         req.len = rte_cpu_to_le_32(length);
3341         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3342         HWRM_CHECK_RESULT();
3343         HWRM_UNLOCK();
3344         if (rc == 0)
3345                 memcpy(data, buf, length);
3346
3347         rte_free(buf);
3348         return rc;
3349 }
3350
3351 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3352 {
3353         int rc;
3354         struct hwrm_nvm_erase_dir_entry_input req = {0};
3355         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3356
3357         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3358         req.dir_idx = rte_cpu_to_le_16(index);
3359         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3360         HWRM_CHECK_RESULT();
3361         HWRM_UNLOCK();
3362
3363         return rc;
3364 }
3365
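     /*
      * Write one directory entry into NVM.  The payload is staged in a
      * DMA-able buffer and handed to the firmware by physical address
      * (host_src_addr); the firmware pulls the data itself, so the buffer is
      * only released after the command completes and HWRM_UNLOCK() runs.
      */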
3367 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3368                           uint16_t dir_ordinal, uint16_t dir_ext,
3369                           uint16_t dir_attr, const uint8_t *data,
3370                           size_t data_len)
3371 {
3372         int rc;
3373         struct hwrm_nvm_write_input req = {0};
3374         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3375         rte_iova_t dma_handle;
3376         uint8_t *buf;
3377
3378         buf = rte_malloc("nvm_write", data_len, 0);
3379         if (!buf)
3380                 return -ENOMEM;
3381         rte_mem_lock_page(buf);
3382         dma_handle = rte_mem_virt2iova(buf);
3383         if (dma_handle == 0) {
3384                 PMD_DRV_LOG(ERR,
3385                         "unable to map response address to physical memory\n");
3386                 rte_free(buf);
3387                 return -ENOMEM;
3388         }
3389         memcpy(buf, data, data_len);
3390
3391         HWRM_PREP(req, NVM_WRITE);
3392
3393         req.dir_type = rte_cpu_to_le_16(dir_type);
3394         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3395         req.dir_ext = rte_cpu_to_le_16(dir_ext);
3396         req.dir_attr = rte_cpu_to_le_16(dir_attr);
3397         req.dir_data_length = rte_cpu_to_le_32(data_len);
3398         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3399
3400         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3401
3402         HWRM_CHECK_RESULT();
3403         HWRM_UNLOCK();
3404
3405         rte_free(buf);
3406         return rc;
3407 }
3408
3409 static void
3410 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3411 {
3412         uint32_t *count = cbdata;
3413
3414         *count = *count + 1;
3415 }
3416
3417 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3418                                      struct bnxt_vnic_info *vnic __rte_unused)
3419 {
3420         return 0;
3421 }
3422
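     /*
      * bnxt_vf_vnic_count() reuses the generic query-and-configure walk
      * below purely as an iterator: bnxt_vnic_count() bumps the counter for
      * every allocated VNIC and bnxt_vnic_count_hwrm_stub() turns the
      * "program" step into a no-op, so nothing is actually reconfigured.
      */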
3423 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3424 {
3425         uint32_t count = 0;
3426
3427         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3428             &count, bnxt_vnic_count_hwrm_stub);
3429
3430         return count;
3431 }
3432
3433 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3434                                         uint16_t *vnic_ids)
3435 {
3436         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3437         struct hwrm_func_vf_vnic_ids_query_output *resp =
3438                                                 bp->hwrm_cmd_resp_addr;
3439         int rc;
3440
3441         /* First query all VNIC ids */
3442         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
3443
3444         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3445         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3446         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3447
3448         if (req.vnic_id_tbl_addr == 0) {
3449                 HWRM_UNLOCK();
3450                 PMD_DRV_LOG(ERR,
3451                 "unable to map VNIC ID table address to physical memory\n");
3452                 return -ENOMEM;
3453         }
3454         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3455         if (rc) {
3456                 HWRM_UNLOCK();
3457                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3458                 return -1;
3459         } else if (resp->error_code) {
3460                 rc = rte_le_to_cpu_16(resp->error_code);
3461                 HWRM_UNLOCK();
3462                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
3463                 return -1;
3464         }
3465         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3466
3467         HWRM_UNLOCK();
3468
3469         return rc;
3470 }
3471
3472 /*
3473  * This function queries the VNIC IDs for a specified VF. It then calls
3474  * vnic_cb to update the necessary field in vnic_info with cbdata, and
3475  * finally calls hwrm_cb to program the new VNIC configuration.
3476  */
3477 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3478         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3479         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3480 {
3481         struct bnxt_vnic_info vnic;
3482         int rc = 0;
3483         int i, num_vnic_ids;
3484         uint16_t *vnic_ids;
3485         size_t vnic_id_sz;
3486         size_t sz;
3487
3488         /* First query all VNIC ids */
3489         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3490         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3491                         RTE_CACHE_LINE_SIZE);
3492         if (vnic_ids == NULL) {
3493                 rc = -ENOMEM;
3494                 return rc;
3495         }
3496         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3497                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3498
3499         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3500
3501         if (num_vnic_ids < 0) {
3502                 rte_free(vnic_ids);
                     return num_vnic_ids;
             }
3503
3504         /* Retrieve each VNIC, apply the callback's update, then reprogram it */
3505
3506         for (i = 0; i < num_vnic_ids; i++) {
3507                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3508                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3509                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3510                 if (rc)
3511                         break;
3512                 if (vnic.mru <= 4)      /* Indicates unallocated */
3513                         continue;
3514
3515                 vnic_cb(&vnic, cbdata);
3516
3517                 rc = hwrm_cb(bp, &vnic);
3518                 if (rc)
3519                         break;
3520         }
3521
3522         rte_free(vnic_ids);
3523
3524         return rc;
3525 }
3526
3527 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3528                                               bool on)
3529 {
3530         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3531         struct hwrm_func_cfg_input req = {0};
3532         int rc;
3533
3534         HWRM_PREP(req, FUNC_CFG);
3535
3536         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3537         req.enables |= rte_cpu_to_le_32(
3538                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3539         req.vlan_antispoof_mode = on ?
3540                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3541                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3542         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3543
3544         HWRM_CHECK_RESULT();
3545         HWRM_UNLOCK();
3546
3547         return rc;
3548 }
3549
3550 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3551 {
3552         struct bnxt_vnic_info vnic;
3553         uint16_t *vnic_ids;
3554         size_t vnic_id_sz;
3555         int num_vnic_ids, i;
3556         size_t sz;
3557         int rc;
3558
3559         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3560         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3561                         RTE_CACHE_LINE_SIZE);
3562         if (vnic_ids == NULL) {
3563                 rc = -ENOMEM;
3564                 return rc;
3565         }
3566
3567         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3568                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3569
3570         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3571         if (rc <= 0)
3572                 goto exit;
3573         num_vnic_ids = rc;
3574
3575         /*
3576          * Loop through to find the default VNIC ID.
3577          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3578          * by sending the hwrm_func_qcfg command to the firmware.
3579          */
3580         for (i = 0; i < num_vnic_ids; i++) {
3581                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3582                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3583                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3584                                         bp->pf.first_vf_id + vf);
3585                 if (rc)
3586                         goto exit;
3587                 if (vnic.func_default) {
3588                         rte_free(vnic_ids);
3589                         return vnic.fw_vnic_id;
3590                 }
3591         }
3592         /* Could not find a default VNIC. */
3593         PMD_DRV_LOG(ERR, "No default VNIC\n");
3594 exit:
3595         rte_free(vnic_ids);
3596         return -1;
3597 }
3598
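     /*
      * Program an exact-match (EM) flow in the CFA.  Any previously
      * allocated filter is freed first; each match field is then copied into
      * the request only if the corresponding bit is set in the enables mask,
      * with DST_ID always enabled so the flow is steered to the given
      * destination.
      */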
3599 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3600                          uint16_t dst_id,
3601                          struct bnxt_filter_info *filter)
3602 {
3603         int rc = 0;
3604         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3605         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3606         uint32_t enables = 0;
3607
3608         if (filter->fw_em_filter_id != UINT64_MAX)
3609                 bnxt_hwrm_clear_em_filter(bp, filter);
3610
3611         HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3612
3613         req.flags = rte_cpu_to_le_32(filter->flags);
3614
3615         enables = filter->enables |
3616               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3617         req.dst_id = rte_cpu_to_le_16(dst_id);
3618
3619         if (filter->ip_addr_type) {
3620                 req.ip_addr_type = filter->ip_addr_type;
3621                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3622         }
3623         if (enables &
3624             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3625                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3626         if (enables &
3627             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3628                 memcpy(req.src_macaddr, filter->src_macaddr,
3629                        ETHER_ADDR_LEN);
3630         if (enables &
3631             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3632                 memcpy(req.dst_macaddr, filter->dst_macaddr,
3633                        ETHER_ADDR_LEN);
3634         if (enables &
3635             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3636                 req.ovlan_vid = filter->l2_ovlan;
3637         if (enables &
3638             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3639                 req.ivlan_vid = filter->l2_ivlan;
3640         if (enables &
3641             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3642                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3643         if (enables &
3644             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3645                 req.ip_protocol = filter->ip_protocol;
3646         if (enables &
3647             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3648                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3649         if (enables &
3650             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3651                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3652         if (enables &
3653             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3654                 req.src_port = rte_cpu_to_be_16(filter->src_port);
3655         if (enables &
3656             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3657                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3658         if (enables &
3659             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3660                 req.mirror_vnic_id = filter->mirror_vnic_id;
3661
3662         req.enables = rte_cpu_to_le_32(enables);
3663
3664         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3665
3666         HWRM_CHECK_RESULT();
3667
3668         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3669         HWRM_UNLOCK();
3670
3671         return rc;
3672 }
3673
3674 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3675 {
3676         int rc = 0;
3677         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3678         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3679
3680         if (filter->fw_em_filter_id == UINT64_MAX)
3681                 return 0;
3682
3683         PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
3684         HWRM_PREP(req, CFA_EM_FLOW_FREE);
3685
3686         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3687
3688         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3689
3690         HWRM_CHECK_RESULT();
3691         HWRM_UNLOCK();
3692
3693         filter->fw_em_filter_id = UINT64_MAX;
3694         filter->fw_l2_filter_id = UINT64_MAX;
3695
3696         return 0;
3697 }
3698
3699 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3700                          uint16_t dst_id,
3701                          struct bnxt_filter_info *filter)
3702 {
3703         int rc = 0;
3704         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3705         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3706                                                 bp->hwrm_cmd_resp_addr;
3707         uint32_t enables = 0;
3708
3709         if (filter->fw_ntuple_filter_id != UINT64_MAX)
3710                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3711
3712         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3713
3714         req.flags = rte_cpu_to_le_32(filter->flags);
3715
3716         enables = filter->enables |
3717               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3718         req.dst_id = rte_cpu_to_le_16(dst_id);
3719
3721         if (filter->ip_addr_type) {
3722                 req.ip_addr_type = filter->ip_addr_type;
3723                 enables |=
3724                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3725         }
3726         if (enables &
3727             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3728                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3729         if (enables &
3730             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3731                 memcpy(req.src_macaddr, filter->src_macaddr,
3732                        ETHER_ADDR_LEN);
3733         /* DST_MACADDR matching is currently not programmed:
3734          * if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
3735          *         memcpy(req.dst_macaddr, filter->dst_macaddr,
3736          *                ETHER_ADDR_LEN);
              */
3737         if (enables &
3738             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3739                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3740         if (enables &
3741             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3742                 req.ip_protocol = filter->ip_protocol;
3743         if (enables &
3744             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3745                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3746         if (enables &
3747             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3748                 req.src_ipaddr_mask[0] =
3749                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3750         if (enables &
3751             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3752                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3753         if (enables &
3754             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
3755                 req.dst_ipaddr_mask[0] =
3756                         rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]);
3757         if (enables &
3758             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3759                 req.src_port = rte_cpu_to_le_16(filter->src_port);
3760         if (enables &
3761             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3762                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3763         if (enables &
3764             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3765                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3766         if (enables &
3767             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3768                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3769         if (enables &
3770             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3771                 req.mirror_vnic_id = filter->mirror_vnic_id;
3772
3773         req.enables = rte_cpu_to_le_32(enables);
3774
3775         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3776
3777         HWRM_CHECK_RESULT();
3778
3779         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3780         HWRM_UNLOCK();
3781
3782         return rc;
3783 }
3784
3785 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3786                                 struct bnxt_filter_info *filter)
3787 {
3788         int rc = 0;
3789         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3790         struct hwrm_cfa_ntuple_filter_free_output *resp =
3791                                                 bp->hwrm_cmd_resp_addr;
3792
3793         if (filter->fw_ntuple_filter_id == UINT64_MAX)
3794                 return 0;
3795
3796         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3797
3798         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3799
3800         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3801
3802         HWRM_CHECK_RESULT();
3803         HWRM_UNLOCK();
3804
3805         filter->fw_ntuple_filter_id = UINT64_MAX;
3806         filter->fw_l2_filter_id = UINT64_MAX;
3807
3808         return 0;
3809 }
3810
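     /*
      * Rebuild the VNIC's RSS indirection table from the currently valid
      * ring groups: every one of the HW_HASH_INDEX_SIZE slots gets the next
      * group whose ID is not INVALID_HW_RING_ID, wrapping over
      * rx_cp_nr_rings.  If a full scan finds no valid group, the function
      * bails out without sending anything to the firmware.
      */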
3811 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3812 {
3813         unsigned int rss_idx, fw_idx, i;
3814
3815         if (vnic->rss_table && vnic->hash_type) {
3816                 /*
3817                  * Fill the RSS hash & redirection table with
3818                  * ring group ids for all VNICs
3819                  */
3820                 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
3821                         rss_idx++, fw_idx++) {
3822                         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3823                                 fw_idx %= bp->rx_cp_nr_rings;
3824                                 if (vnic->fw_grp_ids[fw_idx] !=
3825                                     INVALID_HW_RING_ID)
3826                                         break;
3827                                 fw_idx++;
3828                         }
3829                         if (i == bp->rx_cp_nr_rings)
3830                                 return 0;
3831                         vnic->rss_table[rss_idx] =
3832                                 vnic->fw_grp_ids[fw_idx];
3833                 }
3834                 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
3835         }
3836         return 0;
3837 }