HWRM_PREP(&req, HWRM_PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);
- req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
+ req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
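+ /* BNXT_USE_CHIMP_MB routes the request through the ChiMP processor mailbox rather than the auxiliary KONG channel. */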
bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
flags = rte_le_to_cpu_32(resp->flags);
if (BNXT_PF(bp)) {
- bp->pf.port_id = resp->port_id;
- bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
- bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
+ bp->pf->port_id = resp->port_id;
+ bp->pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
+ bp->pf->total_vfs = rte_le_to_cpu_16(resp->max_vfs);
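+ /* bp->pdev->max_vfs is the SR-IOV VF count the PCI layer (rte_pci_device) discovered for this device. */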
new_max_vfs = bp->pdev->max_vfs;
- if (new_max_vfs != bp->pf.max_vfs) {
- if (bp->pf.vf_info)
- rte_free(bp->pf.vf_info);
- bp->pf.vf_info = rte_malloc("bnxt_vf_info",
- sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
- bp->pf.max_vfs = new_max_vfs;
+ if (new_max_vfs != bp->pf->max_vfs) {
+ if (bp->pf->vf_info)
+ rte_free(bp->pf->vf_info);
+ bp->pf->vf_info = rte_malloc("bnxt_vf_info",
+ sizeof(bp->pf->vf_info[0]) * new_max_vfs, 0);
+ bp->pf->max_vfs = new_max_vfs;
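+ /*
+  * (Re)build the per-VF state: each VF gets its function ID plus
+  * page-sized VLAN and VLAN-antispoof tables. The tables are
+  * page-aligned and locked in memory because firmware later reads
+  * them by physical address.
+  */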
for (i = 0; i < new_max_vfs; i++) {
- bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
- bp->pf.vf_info[i].vlan_table =
+ bp->pf->vf_info[i].fid =
+ bp->pf->first_vf_id + i;
+ bp->pf->vf_info[i].vlan_table =
rte_zmalloc("VF VLAN table",
getpagesize(),
getpagesize());
- if (bp->pf.vf_info[i].vlan_table == NULL)
+ if (bp->pf->vf_info[i].vlan_table == NULL)
PMD_DRV_LOG(ERR,
"Fail to alloc VLAN table for VF %d\n",
i);
else
rte_mem_lock_page(
- bp->pf.vf_info[i].vlan_table);
- bp->pf.vf_info[i].vlan_as_table =
+ bp->pf->vf_info[i].vlan_table);
+ bp->pf->vf_info[i].vlan_as_table =
rte_zmalloc("VF VLAN AS table",
getpagesize(),
getpagesize());
- if (bp->pf.vf_info[i].vlan_as_table == NULL)
+ if (bp->pf->vf_info[i].vlan_as_table == NULL)
PMD_DRV_LOG(ERR,
"Alloc VLAN AS table for VF %d fail\n",
i);
else
rte_mem_lock_page(
- bp->pf.vf_info[i].vlan_as_table);
- STAILQ_INIT(&bp->pf.vf_info[i].filter);
+ bp->pf->vf_info[i].vlan_as_table);
+ STAILQ_INIT(&bp->pf->vf_info[i].filter);
}
}
}
bp->max_l2_ctx += bp->max_rx_em_flows;
/* TODO: For now, do not support VMDq/RFS on VFs. */
if (BNXT_PF(bp)) {
- if (bp->pf.max_vfs)
+ if (bp->pf->max_vfs)
bp->max_vnics = 1;
else
bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
bp->max_l2_ctx, bp->max_vnics);
bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
if (BNXT_PF(bp)) {
- bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
+ bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics);
if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
if (BNXT_PF(bp)) {
req.enables |= rte_cpu_to_le_32(
HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
- memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
+ memcpy(req.vf_req_fwd, bp->pf->vf_req_fwd,
RTE_MIN(sizeof(req.vf_req_fwd),
- sizeof(bp->pf.vf_req_fwd)));
+ sizeof(bp->pf->vf_req_fwd)));
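+ /* vf_req_fwd is a bitmap of HWRM command IDs that firmware should forward from VFs to the PF driver for screening. */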
/*
* PF can sniff HWRM API issued by VF. This can be set up by
struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
int rc;
- req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
+ req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
req.enables = rte_cpu_to_le_32(
HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
- req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
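+ /* A concrete fid addresses the VF's function; elsewhere fid 0xffff means "this function", i.e. the PF itself. */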
HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
- bp->pf.vf_info[vf].random_mac = false;
+ bp->pf->vf_info[vf].random_mac = false;
return rc;
}
req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
}
- req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
+ req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
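+ /* BNXT_VNIC_MRU() adds L2 header and VLAN tag overhead on top of the MTU to size the maximum receive unit. */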
req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
cfg_req->enables |=
rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
rte_eth_random_addr(cfg_req->dflt_mac_addr);
- bp->pf.vf_info[vf].random_mac = true;
+ bp->pf->vf_info[vf].random_mac = true;
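+ /* Remember the MAC is driver-generated so it can be replaced once the user assigns a real address to this VF. */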
} else {
memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
RTE_ETHER_ADDR_LEN);
/* Get the actual allocated values now */
HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
- req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
if (rc) {
/* Check for zero MAC address */
HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
- req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
rc = rte_le_to_cpu_16(resp->vlan);
/* Only TX ring value reflects actual allocation? TODO */
bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
- bp->pf.evb_mode = resp->evb_mode;
+ bp->pf->evb_mode = resp->evb_mode;
HWRM_UNLOCK();
if (rc)
return rc;
- bp->pf.func_cfg_flags &=
+ bp->pf->func_cfg_flags &=
~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
- bp->pf.func_cfg_flags |=
+ bp->pf->func_cfg_flags |=
HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
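+ /* Mask out both STD_TX_RING_MODE flags, then request DISABLE so the PF reclaims its full TX ring allocation below. */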
rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
rc = __bnxt_hwrm_func_qcaps(bp);
if (rc)
return rc;
- bp->pf.active_vfs = num_vfs;
+ bp->pf->active_vfs = num_vfs;
/*
* First, configure the PF to only use one TX ring. This ensures that
*
* This has been fixed with firmware versions above 20.6.54
*/
- bp->pf.func_cfg_flags &=
+ bp->pf->func_cfg_flags &=
~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
- bp->pf.func_cfg_flags |=
+ bp->pf->func_cfg_flags |=
HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
rc = bnxt_hwrm_pf_func_cfg(bp, 1);
if (rc)
* Now, create and register a buffer to hold forwarded VF requests
*/
req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
- bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
+ bp->pf->vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
- if (bp->pf.vf_req_buf == NULL) {
+ if (bp->pf->vf_req_buf == NULL) {
rc = -ENOMEM;
goto error_free;
}
for (sz = 0; sz < req_buf_sz; sz += getpagesize())
- rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
+ rte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz);
for (i = 0; i < num_vfs; i++)
- bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
+ bp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) +
(i * HWRM_MAX_REQ_LEN);
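+ /*
+  * Layout sketch, assuming HWRM_MAX_REQ_LEN is 128 bytes: vf_req_buf is
+  * one contiguous area of num_vfs * 128 bytes, and VF i's forwarded
+  * requests land in the 128-byte slot at offset i * 128. The pages are
+  * locked above so the addresses registered with firmware stay mapped.
+  */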
rc = bnxt_hwrm_func_buf_rgtr(bp);
populate_vf_func_cfg_req(bp, &req, num_vfs);
- bp->pf.active_vfs = 0;
+ bp->pf->active_vfs = 0;
for (i = 0; i < num_vfs; i++) {
add_random_mac_if_needed(bp, &req, i);
HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
- req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
- req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
+ req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags);
+ req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
rc = bnxt_hwrm_send_message(bp,
&req,
sizeof(req),
HWRM_UNLOCK();
reserve_resources_from_vf(bp, &req, i);
- bp->pf.active_vfs++;
- bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
+ bp->pf->active_vfs++;
+ bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
}
/*
req.fid = rte_cpu_to_le_16(0xffff);
req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
- req.evb_mode = bp->pf.evb_mode;
+ req.evb_mode = bp->pf->evb_mode;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
- req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
req.flags = rte_cpu_to_le_32(flags);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
req.req_buf_num_pages = rte_cpu_to_le_16(1);
req.req_buf_page_size = rte_cpu_to_le_16(
- page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
+ page_getenum(bp->pf->active_vfs * HWRM_MAX_REQ_LEN));
req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
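+ /*
+  * Page-size math example, assuming HWRM_MAX_REQ_LEN is 128 bytes: with
+  * 64 active VFs the buffer spans 64 * 128 = 8 KiB, which page_getenum()
+  * would encode as 13 (a 2^13-byte page).
+  */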
req.req_buf_page_addr0 =
- rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf.vf_req_buf));
+ rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));
if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map buffer address to physical memory\n");
HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(0xffff);
- req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
+ req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
req.enables = rte_cpu_to_le_32(
HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
req.async_event_cr = rte_cpu_to_le_16(
HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
if (is_vf) {
- dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
- fid = bp->pf.vf_info[vf].fid;
- func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
+ dflt_vlan = bp->pf->vf_info[vf].dflt_vlan;
+ fid = bp->pf->vf_info[vf].fid;
+ func_cfg_flags = bp->pf->vf_info[vf].func_cfg_flags;
} else {
fid = rte_cpu_to_le_16(0xffff);
- func_cfg_flags = bp->pf.func_cfg_flags;
+ func_cfg_flags = bp->pf->func_cfg_flags;
dflt_vlan = bp->vlan;
}
HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
- req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
req.enables |= rte_cpu_to_le_32(enables);
- req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
+ req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
req.max_bw = rte_cpu_to_le_32(max_bw);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
- req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
- req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
+ req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
- req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
+ req.dflt_vlan = rte_cpu_to_le_16(bp->pf->vf_info[vf].dflt_vlan);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
- req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
{
struct hwrm_port_qstats_input req = {0};
struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
- struct bnxt_pf_info *pf = &bp->pf;
+ struct bnxt_pf_info *pf = bp->pf;
int rc;
HWRM_PREP(&req, HWRM_PORT_QSTATS, BNXT_USE_CHIMP_MB);
{
struct hwrm_port_clr_stats_input req = {0};
struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
- struct bnxt_pf_info *pf = &bp->pf;
+ struct bnxt_pf_info *pf = bp->pf;
int rc;
/* Not allowed on NS2 device, NPAR, MultiHost, VF */
return 0;
HWRM_PREP(&req, HWRM_PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
- req.port_id = bp->pf.port_id;
+ req.port_id = bp->pf->port_id;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
duration = rte_cpu_to_le_16(500);
}
- req.port_id = bp->pf.port_id;
+ req.port_id = bp->pf->port_id;
req.num_leds = bp->leds->num_leds;
led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
for (i = 0; i < bp->leds->num_leds; i++, led_cfg++) {
/* First query all VNIC ids */
HWRM_PREP(&req, HWRM_FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
- req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
- req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
+ req.vf_id = rte_cpu_to_le_16(bp->pf->first_vf_id + vf);
+ req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf->total_vnics);
req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));
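+ /* Firmware DMA-writes the VF's VNIC ids straight into vnic_ids, hence the virtual-to-IOVA translation. */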
if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
size_t sz;
/* First query all VNIC ids */
- vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
+ vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
RTE_CACHE_LINE_SIZE);
if (vnic_ids == NULL)
for (i = 0; i < num_vnic_ids; i++) {
memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
- rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
+ rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf->first_vf_id + vf);
if (rc)
break;
if (vnic.mru <= 4) /* Indicates unallocated */
HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
- req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
req.enables |= rte_cpu_to_le_32(
HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
req.vlan_antispoof_mode = on ?
size_t sz;
int rc;
- vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
+ vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
RTE_CACHE_LINE_SIZE);
if (vnic_ids == NULL)
memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
- bp->pf.first_vf_id + vf);
+ bp->pf->first_vf_id + vf);
if (rc)
goto exit;
if (vnic.func_default) {
{
struct hwrm_port_qstats_ext_input req = {0};
struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
- struct bnxt_pf_info *pf = &bp->pf;
+ struct bnxt_pf_info *pf = bp->pf;
int rc;
if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
}
req.flags = rte_cpu_to_le_32(flags);
- req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
+ req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
}
if (on)
- bp->pf.evb_mode = BNXT_EVB_MODE_VEB;
+ bp->pf->evb_mode = BNXT_EVB_MODE_VEB;
else
- bp->pf.evb_mode = BNXT_EVB_MODE_VEPA;
+ bp->pf->evb_mode = BNXT_EVB_MODE_VEPA;
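+ /* VEB switches VF-to-VF traffic inside the NIC; VEPA hairpins it through the adjacent external switch. */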
rc = bnxt_hwrm_pf_evb_mode(bp);
}
/* Stall all active VFs */
- for (i = 0; i < bp->pf.active_vfs; i++) {
+ for (i = 0; i < bp->pf->active_vfs; i++) {
rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, i,
rte_pmd_bnxt_set_all_queues_drop_en_cb, &on,
bnxt_hwrm_vnic_cfg);
}
bp = eth_dev->data->dev_private;
- if (!bp->pf.active_vfs)
+ if (!bp->pf->active_vfs)
return -EINVAL;
- if (vf >= bp->pf.max_vfs)
+ if (vf >= bp->pf->max_vfs)
return -EINVAL;
/* Add up the per queue BW and configure MAX BW of the VF */
}
/* Requested BW already configured */
- if (tot_rate == bp->pf.vf_info[vf].max_tx_rate)
+ if (tot_rate == bp->pf->vf_info[vf].max_tx_rate)
return 0;
rc = bnxt_hwrm_func_bw_cfg(bp, vf, tot_rate,
HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW);
if (!rc)
- bp->pf.vf_info[vf].max_tx_rate = tot_rate;
+ bp->pf->vf_info[vf].max_tx_rate = tot_rate;
return rc;
}
return -EINVAL;
/* Prev setting same as new setting. */
- if (on == bp->pf.vf_info[vf].mac_spoof_en)
+ if (on == bp->pf->vf_info[vf].mac_spoof_en)
return 0;
- func_flags = bp->pf.vf_info[vf].func_cfg_flags;
+ func_flags = bp->pf->vf_info[vf].func_cfg_flags;
func_flags &= ~(HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE |
HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE);
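+ /* Clear both SRC_MAC_ADDR_CHECK flags before setting the single flag that matches the new state. */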
rc = bnxt_hwrm_func_cfg_vf_set_flags(bp, vf, func_flags);
if (!rc) {
- bp->pf.vf_info[vf].mac_spoof_en = on;
- bp->pf.vf_info[vf].func_cfg_flags = func_flags;
+ bp->pf->vf_info[vf].mac_spoof_en = on;
+ bp->pf->vf_info[vf].func_cfg_flags = func_flags;
}
return rc;
rc = bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(bp, vf, on);
if (!rc) {
- bp->pf.vf_info[vf].vlan_spoof_en = on;
+ bp->pf->vf_info[vf].vlan_spoof_en = on;
if (on) {
if (bnxt_hwrm_cfa_vlan_antispoof_cfg(bp,
- bp->pf.first_vf_id + vf,
- bp->pf.vf_info[vf].vlan_count,
- bp->pf.vf_info[vf].vlan_as_table))
+ bp->pf->first_vf_id + vf,
+ bp->pf->vf_info[vf].vlan_count,
+ bp->pf->vf_info[vf].vlan_as_table))
rc = -1;
}
} else {
}
bp = dev->data->dev_private;
- if (!bp->pf.vf_info)
+ if (!bp->pf->vf_info)
return -EINVAL;
if (vf >= bp->pdev->max_vfs)
flag |= BNXT_VNIC_INFO_ALLMULTI | BNXT_VNIC_INFO_MCAST;
if (on)
- bp->pf.vf_info[vf].l2_rx_mask |= flag;
+ bp->pf->vf_info[vf].l2_rx_mask |= flag;
else
- bp->pf.vf_info[vf].l2_rx_mask &= ~flag;
+ bp->pf->vf_info[vf].l2_rx_mask &= ~flag;
rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
vf_vnic_set_rxmask_cb,
- &bp->pf.vf_info[vf].l2_rx_mask,
+ &bp->pf->vf_info[vf].l2_rx_mask,
bnxt_set_rx_mask_no_vlan);
if (rc)
PMD_DRV_LOG(ERR, "bnxt_hwrm_func_vf_vnic_set_rxmask failed\n");
memset(&vnic, 0, sizeof(vnic));
vnic.fw_vnic_id = dflt_vnic;
if (bnxt_hwrm_vnic_qcfg(bp, &vnic,
- bp->pf.first_vf_id + vf) == 0) {
+ bp->pf->first_vf_id + vf) == 0) {
if (bnxt_hwrm_cfa_l2_set_rx_mask(bp, &vnic,
- bp->pf.vf_info[vf].vlan_count,
- bp->pf.vf_info[vf].vlan_table))
+ bp->pf->vf_info[vf].vlan_count,
+ bp->pf->vf_info[vf].vlan_table))
rc = -1;
} else {
rc = -1;
return -ENOTSUP;
bp = dev->data->dev_private;
- if (!bp->pf.vf_info)
+ if (!bp->pf->vf_info)
return -EINVAL;
for (i = 0; vf_mask; i++, vf_mask >>= 1) {
- cnt = bp->pf.vf_info[i].vlan_count;
+ cnt = bp->pf->vf_info[i].vlan_count;
if ((vf_mask & 1) == 0)
continue;
- if (bp->pf.vf_info[i].vlan_table == NULL) {
+ if (bp->pf->vf_info[i].vlan_table == NULL) {
rc = -1;
continue;
}
- if (bp->pf.vf_info[i].vlan_as_table == NULL) {
+ if (bp->pf->vf_info[i].vlan_as_table == NULL) {
rc = -1;
continue;
}
/* First, search for a duplicate... */
for (j = 0; j < cnt; j++) {
if (rte_be_to_cpu_16(
- bp->pf.vf_info[i].vlan_table[j].vid) == vlan)
+ bp->pf->vf_info[i].vlan_table[j].vid) ==
+ vlan)
break;
}
if (j == cnt) {
}
/* cnt is one less than vlan_count */
- cnt = bp->pf.vf_info[i].vlan_count++;
+ cnt = bp->pf->vf_info[i].vlan_count++;
/*
* And finally, add to the
* end of the table
*/
- vase = &bp->pf.vf_info[i].vlan_as_table[cnt];
+ vase = &bp->pf->vf_info[i].vlan_as_table[cnt];
/* TODO: Hardcoded TPID */
vase->tpid = rte_cpu_to_be_16(0x8100);
vase->vid = rte_cpu_to_be_16(vlan);
vase->mask = rte_cpu_to_be_16(0xfff);
- ve = &bp->pf.vf_info[i].vlan_table[cnt];
+ ve = &bp->pf->vf_info[i].vlan_table[cnt];
/* TODO: Hardcoded TPID */
ve->tpid = rte_cpu_to_be_16(0x8100);
ve->vid = rte_cpu_to_be_16(vlan);
} else {
for (j = 0; j < cnt; j++) {
if (rte_be_to_cpu_16(
- bp->pf.vf_info[i].vlan_table[j].vid) != vlan)
+ bp->pf->vf_info[i].vlan_table[j].vid) !=
+ vlan)
continue;
- memmove(&bp->pf.vf_info[i].vlan_table[j],
- &bp->pf.vf_info[i].vlan_table[j + 1],
+ memmove(&bp->pf->vf_info[i].vlan_table[j],
+ &bp->pf->vf_info[i].vlan_table[j + 1],
getpagesize() - ((j + 1) *
sizeof(struct bnxt_vlan_table_entry)));
- memmove(&bp->pf.vf_info[i].vlan_as_table[j],
- &bp->pf.vf_info[i].vlan_as_table[j + 1],
+ memmove(&bp->pf->vf_info[i].vlan_as_table[j],
+ &bp->pf->vf_info[i].vlan_as_table[j + 1],
getpagesize() - ((j + 1) * sizeof(struct
bnxt_vlan_antispoof_table_entry)));
j--;
- cnt = --bp->pf.vf_info[i].vlan_count;
+ cnt = --bp->pf->vf_info[i].vlan_count;
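+ /* Both tables are compacted in lockstep; j is decremented so the entry that slid into slot j gets re-examined. */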
}
}
bnxt_set_vf_table(bp, i);
return -ENOTSUP;
}
- return bnxt_hwrm_func_qstats(bp, bp->pf.first_vf_id + vf_id, stats,
+ return bnxt_hwrm_func_qstats(bp, bp->pf->first_vf_id + vf_id, stats,
NULL);
}
return -ENOTSUP;
}
- return bnxt_hwrm_func_clr_stats(bp, bp->pf.first_vf_id + vf_id);
+ return bnxt_hwrm_func_clr_stats(bp, bp->pf->first_vf_id + vf_id);
}
int rte_pmd_bnxt_get_vf_rx_status(uint16_t port, uint16_t vf_id)
return -ENOTSUP;
}
- return bnxt_hwrm_func_qstats_tx_drop(bp, bp->pf.first_vf_id + vf_id,
+ return bnxt_hwrm_func_qstats_tx_drop(bp, bp->pf->first_vf_id + vf_id,
count);
}
}
/* If the VF currently uses a random MAC, update default to this one */
- if (bp->pf.vf_info[vf_id].random_mac) {
+ if (bp->pf->vf_info[vf_id].random_mac) {
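+ /* Replace the driver-generated random MAC only while the VF is not actively receiving traffic. */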
if (rte_pmd_bnxt_get_vf_rx_status(port, vf_id) <= 0)
bnxt_hwrm_func_vf_mac(bp, vf_id, (uint8_t *)addr);
}
memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
vnic.fw_vnic_id = rte_le_to_cpu_16(rc);
- rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf_id);
+ rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf->first_vf_id + vf_id);
if (rc < 0)
goto exit;
- STAILQ_FOREACH(filter, &bp->pf.vf_info[vf_id].filter, next) {
+ STAILQ_FOREACH(filter, &bp->pf->vf_info[vf_id].filter, next) {
if (filter->flags ==
HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX &&
filter->enables ==
return -ENOTSUP;
}
- bp->pf.vf_info[vf].dflt_vlan = vlan_id;
+ bp->pf->vf_info[vf].dflt_vlan = vlan_id;
if (bnxt_hwrm_func_qcfg_current_vf_vlan(bp, vf) ==
- bp->pf.vf_info[vf].dflt_vlan)
+ bp->pf->vf_info[vf].dflt_vlan)
return 0;
rc = bnxt_hwrm_set_vf_vlan(bp, vf);
return -EINVAL;
/* Prev setting same as new setting. */
- if (on == bp->pf.vf_info[vf].persist_stats)
+ if (on == bp->pf->vf_info[vf].persist_stats)
return 0;
- func_flags = bp->pf.vf_info[vf].func_cfg_flags;
+ func_flags = bp->pf->vf_info[vf].func_cfg_flags;
if (on)
func_flags |=
rc = bnxt_hwrm_func_cfg_vf_set_flags(bp, vf, func_flags);
if (!rc) {
- bp->pf.vf_info[vf].persist_stats = on;
- bp->pf.vf_info[vf].func_cfg_flags = func_flags;
+ bp->pf->vf_info[vf].persist_stats = on;
+ bp->pf->vf_info[vf].func_cfg_flags = func_flags;
}
return rc;