ETH_RSS_NONFRAG_IPV6_TCP | \
ETH_RSS_NONFRAG_IPV6_UDP)
-static void bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
+static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
+static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
/***********************/
goto err_out;
}
}
+ bnxt_print_link_info(bp->eth_dev);
return 0;
return 0;
}
-static inline int
-rte_bnxt_atomic_write_link_status(struct rte_eth_dev *eth_dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &eth_dev->data->dev_link;
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return 1;
-
- return 0;
-}
-
static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
struct rte_eth_link *link = &eth_dev->data->dev_link;
if (rc)
goto error;
- bnxt_link_update_op(eth_dev, 0);
+ bnxt_link_update_op(eth_dev, 1);
if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
vlan_mask |= ETH_VLAN_FILTER_MASK;
if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
vlan_mask |= ETH_VLAN_STRIP_MASK;
- bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
+ rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
+ if (rc)
+ goto error;
return 0;
static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ int rc = 0;
- eth_dev->data->dev_link.link_status = 1;
- bnxt_set_hwrm_link_config(bp, true);
+ if (!bp->link_info.link_up)
+ rc = bnxt_set_hwrm_link_config(bp, true);
+ if (!rc)
+ eth_dev->data->dev_link.link_status = 1;
+
+ bnxt_print_link_info(eth_dev);
return 0;
}
eth_dev->data->dev_link.link_status = 0;
bnxt_set_hwrm_link_config(bp, false);
+ bp->link_info.link_up = 0;
+
return 0;
}
/* Timed out or success */
if (new.link_status != eth_dev->data->dev_link.link_status ||
new.link_speed != eth_dev->data->dev_link.link_speed) {
- rte_bnxt_atomic_write_link_status(eth_dev, &new);
+ memcpy(&eth_dev->data->dev_link, &new,
+ sizeof(struct rte_eth_link));
bnxt_print_link_info(eth_dev);
}
return bnxt_del_vlan_filter(bp, vlan_id);
}
-static void
+static int
bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
if (mask & ETH_VLAN_EXTEND_MASK)
RTE_LOG(ERR, PMD, "Extend VLAN Not supported\n");
+
+ return 0;
}
static void
RTE_LOG(ERR, PMD, "unsupported ether_type(0x%04x) in"
" ethertype filter.", efilter->ether_type);
*ret = -EINVAL;
+ goto exit;
}
if (efilter->queue >= bp->rx_nr_rings) {
RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
*ret = -EINVAL;
+ goto exit;
}
vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
if (vnic == NULL) {
RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
*ret = -EINVAL;
+ goto exit;
}
if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
if (match)
*ret = -EEXIST;
+exit:
return mfilter;
}
}
static struct bnxt_filter_info*
-bnxt_match_ntuple_filter(struct bnxt_vnic_info *vnic,
+bnxt_match_ntuple_filter(struct bnxt *bp,
struct bnxt_filter_info *bfilter)
{
struct bnxt_filter_info *mfilter = NULL;
+ int i;
- STAILQ_FOREACH(mfilter, &vnic->filter, next) {
- if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
- bfilter->src_ipaddr_mask[0] ==
- mfilter->src_ipaddr_mask[0] &&
- bfilter->src_port == mfilter->src_port &&
- bfilter->src_port_mask == mfilter->src_port_mask &&
- bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
- bfilter->dst_ipaddr_mask[0] ==
- mfilter->dst_ipaddr_mask[0] &&
- bfilter->dst_port == mfilter->dst_port &&
- bfilter->dst_port_mask == mfilter->dst_port_mask &&
- bfilter->flags == mfilter->flags &&
- bfilter->enables == mfilter->enables)
- return mfilter;
+ for (i = bp->nr_vnics - 1; i >= 0; i--) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ STAILQ_FOREACH(mfilter, &vnic->filter, next) {
+ if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
+ bfilter->src_ipaddr_mask[0] ==
+ mfilter->src_ipaddr_mask[0] &&
+ bfilter->src_port == mfilter->src_port &&
+ bfilter->src_port_mask == mfilter->src_port_mask &&
+ bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
+ bfilter->dst_ipaddr_mask[0] ==
+ mfilter->dst_ipaddr_mask[0] &&
+ bfilter->dst_port == mfilter->dst_port &&
+ bfilter->dst_port_mask == mfilter->dst_port_mask &&
+ bfilter->flags == mfilter->flags &&
+ bfilter->enables == mfilter->enables)
+ return mfilter;
+ }
}
return NULL;
}
bfilter->ethertype = 0x800;
bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
- mfilter = bnxt_match_ntuple_filter(vnic, bfilter);
+ mfilter = bnxt_match_ntuple_filter(bp, bfilter);
if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD) {
RTE_LOG(ERR, PMD, "filter exists.");
goto free_filter;
STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
} else {
+ if (mfilter == NULL) {
+ /* This should not happen. But for Coverity! */
+ ret = -ENOENT;
+ goto free_filter;
+ }
ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);
STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info,
ret = bnxt_parse_fdir_filter(bp, fdir, filter);
if (ret != 0)
goto free_filter;
+ filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
match = bnxt_match_fdir(bp, filter);
if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
if (filter_op == RTE_ETH_FILTER_ADD) {
- filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
ret = bnxt_hwrm_set_ntuple_filter(bp,
filter->dst_id,
filter);
const struct rte_memzone *mz = NULL;
static int version_printed;
uint32_t total_alloc_len;
- phys_addr_t mz_phys_addr;
+ rte_iova_t mz_phys_addr;
struct bnxt *bp;
int rc;
RTE_LOG(INFO, PMD, "%s\n", bnxt_version);
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
bp = eth_dev->data->dev_private;
return -ENOMEM;
}
memset(mz->addr, 0, mz->len);
- mz_phys_addr = mz->phys_addr;
+ mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
RTE_LOG(WARNING, PMD,
"Memzone physical address same as virtual.\n");
RTE_LOG(WARNING, PMD,
- "Using rte_mem_virt2phy()\n");
- mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ "Using rte_mem_virt2iova()\n");
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
RTE_LOG(ERR, PMD,
"unable to map address to physical memory\n");
return -ENOMEM;
}
memset(mz->addr, 0, mz->len);
- mz_phys_addr = mz->phys_addr;
+ mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
RTE_LOG(WARNING, PMD,
"Memzone physical address same as virtual.\n");
RTE_LOG(WARNING, PMD,
- "Using rte_mem_virt2phy()\n");
- mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ "Using rte_mem_virt2iova()\n");
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
RTE_LOG(ERR, PMD,
"unable to map address to physical memory\n");
ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
ALLOW_FUNC(HWRM_STAT_CTX_FREE);
+ ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
+ ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
rc = bnxt_hwrm_func_driver_register(bp);
if (rc) {
RTE_LOG(ERR, PMD,