#define DRV_MODULE_NAME "bnxt"
static const char bnxt_version[] =
- "Broadcom NetXtreme driver " DRV_MODULE_NAME "\n";
+ "Broadcom NetXtreme driver " DRV_MODULE_NAME;
int bnxt_logtype_driver;
#define PCI_VENDOR_ID_BROADCOM 0x14E4
/* disable uio/vfio intr/eventfd mapping */
rte_intr_disable(intr_handle);
- if (bp->eth_dev->data->mtu > ETHER_MTU) {
+ if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
bp->eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
bp->flags |= BNXT_FLAG_JUMBO;
/* Fast path specifics */
dev_info->min_rx_bufsize = 1;
- dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
- + VLAN_TAG_SIZE * 2;
+ dev_info->max_rx_pktlen = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
eth_dev->data->mtu =
- eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
- ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE *
- BNXT_NUM_VLANS;
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+ RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
+ BNXT_NUM_VLANS;
bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
}
return 0;
bnxt_filter_info, next);
bnxt_hwrm_clear_l2_filter(bp, filter);
filter->mac_index = INVALID_MAC_INDEX;
- memset(&filter->l2_addr, 0, ETHER_ADDR_LEN);
+ memset(&filter->l2_addr, 0, RTE_ETHER_ADDR_LEN);
STAILQ_INSERT_TAIL(&bp->free_filter_list,
filter, next);
}
}
static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
- struct ether_addr *mac_addr,
+ struct rte_ether_addr *mac_addr,
uint32_t index, uint32_t pool)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
}
STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
filter->mac_index = index;
- memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
+ memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);
return bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
}
new_filter->mac_index =
filter->mac_index;
memcpy(new_filter->l2_addr, filter->l2_addr,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
/* MAC only filter */
rc = bnxt_hwrm_set_l2_filter(bp,
vnic->fw_vnic_id,
/* Inherit MAC from the previous filter */
new_filter->mac_index = filter->mac_index;
memcpy(new_filter->l2_addr, filter->l2_addr,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
/* MAC + VLAN ID filter */
new_filter->l2_ivlan = vlan_id;
new_filter->l2_ivlan_mask = 0xF000;
}
static int
-bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
+bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
+ struct rte_ether_addr *addr)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
/* Default Filter is tied to VNIC 0 */
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
if (rc)
return rc;
- memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
- memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
+ memcpy(filter->l2_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
+ memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
filter->enables |=
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
static int
bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
- struct ether_addr *mc_addr_set,
+ struct rte_ether_addr *mc_addr_set,
uint32_t nb_mc_addr)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
/* TODO Check for Duplicate mcast addresses */
vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
for (i = 0; i < nb_mc_addr; i++) {
- memcpy(vnic->mc_list + off, &mc_addr_list[i], ETHER_ADDR_LEN);
- off += ETHER_ADDR_LEN;
+ memcpy(vnic->mc_list + off, &mc_addr_list[i],
+ RTE_ETHER_ADDR_LEN);
+ off += RTE_ETHER_ADDR_LEN;
}
vnic->mc_addr_cnt = i;
bnxt_dev_info_get_op(eth_dev, &dev_info);
- if (new_mtu < ETHER_MIN_MTU || new_mtu > BNXT_MAX_MTU) {
+ if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > BNXT_MAX_MTU) {
PMD_DRV_LOG(ERR, "MTU requested must be within (%d, %d)\n",
- ETHER_MIN_MTU, BNXT_MAX_MTU);
+ RTE_ETHER_MIN_MTU, BNXT_MAX_MTU);
return -EINVAL;
}
- if (new_mtu > ETHER_MTU) {
+ if (new_mtu > RTE_ETHER_MTU) {
bp->flags |= BNXT_FLAG_JUMBO;
bp->eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
}
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
- new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
+ new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
+ VLAN_TAG_SIZE * 2;
eth_dev->data->mtu = new_mtu;
PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu);
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
uint16_t size = 0;
- vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
- ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
+ vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc)
break;
int match = 0;
*ret = 0;
- if (efilter->ether_type == ETHER_TYPE_IPv4 ||
- efilter->ether_type == ETHER_TYPE_IPv6) {
+ if (efilter->ether_type == RTE_ETHER_TYPE_IPv4 ||
+ efilter->ether_type == RTE_ETHER_TYPE_IPv6) {
PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in"
" ethertype filter.", efilter->ether_type);
*ret = -EINVAL;
if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
if ((!memcmp(efilter->mac_addr.addr_bytes,
- mfilter->l2_addr, ETHER_ADDR_LEN) &&
+ mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
mfilter->flags ==
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
mfilter->ethertype == efilter->ether_type)) {
} else {
STAILQ_FOREACH(mfilter, &vnic->filter, next)
if ((!memcmp(efilter->mac_addr.addr_bytes,
- mfilter->l2_addr, ETHER_ADDR_LEN) &&
+ mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
mfilter->ethertype == efilter->ether_type &&
mfilter->flags ==
HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
}
bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
bfilter->ethertype = efilter->ether_type;
bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
//filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
} else {
filter->dst_id = vnic->fw_vnic_id;
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
if (filter->dst_macaddr[i] == 0x00)
filter1 = STAILQ_FIRST(&vnic0->filter);
else
mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
mf->l2_ivlan == nf->l2_ivlan &&
mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
- !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
+ !memcmp(mf->l2_addr, nf->l2_addr,
+ RTE_ETHER_ADDR_LEN) &&
!memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
- ETHER_ADDR_LEN) &&
+ RTE_ETHER_ADDR_LEN) &&
!memcmp(mf->src_macaddr, nf->src_macaddr,
- ETHER_ADDR_LEN) &&
+ RTE_ETHER_ADDR_LEN) &&
!memcmp(mf->dst_macaddr, nf->dst_macaddr,
- ETHER_ADDR_LEN) &&
+ RTE_ETHER_ADDR_LEN) &&
!memcmp(mf->src_ipaddr, nf->src_ipaddr,
sizeof(nf->src_ipaddr)) &&
!memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
return -ERANGE;
}
win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
- rte_cpu_to_le_32(rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off));
+ rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off);
return 0;
}
static void bnxt_unmap_ptp_regs(struct bnxt *bp)
{
- rte_cpu_to_le_32(rte_write32(0, (uint8_t *)bp->bar0 +
- BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16));
- rte_cpu_to_le_32(rte_write32(0, (uint8_t *)bp->bar0 +
- BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20));
+ rte_write32(0, (uint8_t *)bp->bar0 +
+ BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16);
+ rte_write32(0, (uint8_t *)bp->bar0 +
+ BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20);
}
static uint64_t bnxt_cc_read(struct bnxt *bp)
return -EAGAIN;
port_id = pf->port_id;
- rte_cpu_to_le_32(rte_write32(1 << port_id, (uint8_t *)bp->bar0 +
- ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]));
+ rte_write32(1 << port_id, (uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]);
fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
- PMD_DRV_LOG(WARNING,
- "Memzone physical address same as virtual.\n");
- PMD_DRV_LOG(WARNING,
- "Using rte_mem_virt2iova()\n");
+ PMD_DRV_LOG(INFO,
+ "Memzone physical address same as virtual using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
PMD_DRV_LOG(ERR,
goto error_free;
}
eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
- ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
+ RTE_ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_DRV_LOG(ERR,
"Failed to alloc %u bytes needed to store MAC addr tbl",
- ETHER_ADDR_LEN * bp->max_l2_ctx);
+ RTE_ETHER_ADDR_LEN * bp->max_l2_ctx);
rc = -ENOMEM;
goto error_free;
}
- if (bnxt_check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) {
+ if (bnxt_check_zero_bytes(bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN)) {
PMD_DRV_LOG(ERR,
"Invalid MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
bp->dflt_mac_addr[0], bp->dflt_mac_addr[1],
}
/* Copy the permanent MAC from the qcap response address now. */
memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
- memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
+ memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
/* 1 ring is for default completion ring */
{
bnxt_logtype_driver = rte_log_register("pmd.net.bnxt.driver");
if (bnxt_logtype_driver >= 0)
- rte_log_set_level(bnxt_logtype_driver, RTE_LOG_INFO);
+ rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE);
}
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);