static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
-static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
+static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev,
+ struct rte_intr_handle *handle);
static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
struct rte_intr_handle *handle);
static void txgbe_dev_interrupt_handler(void *param);
struct rte_kvargs *kvlist;
u16 auto_neg = 1;
u16 poll = 0;
- u16 present = 1;
+ u16 present = 0;
u16 sgmii = 0;
u16 ffe_set = 0;
u16 ffe_main = 27;
struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
const struct rte_memzone *mz;
uint32_t ctrl_ext;
uint16_t csum;
struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
struct txgbe_fdir_filter *fdir_filter;
- if (fdir_info->hash_map)
- rte_free(fdir_info->hash_map);
- if (fdir_info->hash_handle)
- rte_hash_free(fdir_info->hash_handle);
+ rte_free(fdir_info->hash_map);
+ rte_hash_free(fdir_info->hash_handle);
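/* [note] the dropped NULL guards here (and for the l2_tn tables below) are
 * safe to remove: both rte_free() and rte_hash_free() are documented no-ops
 * when passed a NULL pointer, e.g.
 *
 *     rte_free(NULL);        // returns immediately, nothing to free
 *     rte_hash_free(NULL);   // likewise a no-op
 */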
while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
TAILQ_REMOVE(&fdir_info->fdir_list,
struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
struct txgbe_l2_tn_filter *l2_tn_filter;
- if (l2_tn_info->hash_map)
- rte_free(l2_tn_info->hash_map);
- if (l2_tn_info->hash_handle)
- rte_hash_free(l2_tn_info->hash_handle);
+ rte_free(l2_tn_info->hash_map);
+ rte_hash_free(l2_tn_info->hash_handle);
while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
- if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
restart = (rxcfg & TXGBE_RXCFG_ENA) &&
!(rxcfg & TXGBE_RXCFG_VLAN);
rxcfg |= TXGBE_RXCFG_VLAN;
vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
switch (vlan_type) {
- case ETH_VLAN_TYPE_INNER:
+ case RTE_ETH_VLAN_TYPE_INNER:
if (vlan_ext) {
wr32m(hw, TXGBE_VLANCTL,
TXGBE_VLANCTL_TPID_MASK,
TXGBE_TAGTPID_LSB(tpid));
}
break;
- case ETH_VLAN_TYPE_OUTER:
+ case RTE_ETH_VLAN_TYPE_OUTER:
if (vlan_ext) {
 		/* Only the high 16 bits are valid */
wr32m(hw, TXGBE_EXTAG,
rxq = dev->data->rx_queues[queue];
if (on) {
- rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
- rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
+ rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
} else {
- rxq->vlan_flags = PKT_RX_VLAN;
- rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
+ rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
}
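/* [note] most of the macro changes in this patch are mechanical DPDK 21.11
 * namespace renames rather than behavioural changes, e.g.
 *
 *     DEV_RX_OFFLOAD_VLAN_STRIP  ->  RTE_ETH_RX_OFFLOAD_VLAN_STRIP
 *     PKT_RX_VLAN                ->  RTE_MBUF_F_RX_VLAN
 *     ETH_VLAN_STRIP_MASK        ->  RTE_ETH_VLAN_STRIP_MASK
 */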
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
txgbe_vlan_strip_queue_set(dev, i, 1);
else
txgbe_vlan_strip_queue_set(dev, i, 0);
struct rte_eth_rxmode *rxmode;
struct txgbe_rx_queue *rxq;
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
rxmode = &dev->data->dev_conf.rxmode;
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
else
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
}
}
struct rte_eth_rxmode *rxmode;
rxmode = &dev->data->dev_conf.rxmode;
- if (mask & ETH_VLAN_STRIP_MASK)
+ if (mask & RTE_ETH_VLAN_STRIP_MASK)
txgbe_vlan_hw_strip_config(dev);
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
txgbe_vlan_hw_filter_enable(dev);
else
txgbe_vlan_hw_filter_disable(dev);
}
- if (mask & ETH_VLAN_EXTEND_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
txgbe_vlan_hw_extend_enable(dev);
else
txgbe_vlan_hw_extend_disable(dev);
}
- if (mask & ETH_QINQ_STRIP_MASK) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+ if (mask & RTE_ETH_QINQ_STRIP_MASK) {
+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
txgbe_qinq_hw_strip_enable(dev);
else
txgbe_qinq_hw_strip_disable(dev);
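/* [note] a minimal application-side sketch of how this path is reached,
 * assuming a configured port_id (hypothetical snippet, not part of the patch):
 *
 *     struct rte_eth_conf conf = {0};
 *     conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
 *                             RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *     rte_eth_dev_set_vlan_offload(port_id,
 *             RTE_ETH_VLAN_STRIP_OFFLOAD | RTE_ETH_VLAN_FILTER_OFFLOAD);
 *
 * rte_eth_dev_set_vlan_offload() reaches the driver's vlan_offload_set
 * callback with the corresponding RTE_ETH_VLAN_*_MASK bits set.
 */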
switch (nb_rx_q) {
case 1:
case 2:
- RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+ RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
break;
case 4:
- RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+ RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
break;
default:
return -EINVAL;
if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
/* check multi-queue mode */
switch (dev_conf->rxmode.mq_mode) {
- case ETH_MQ_RX_VMDQ_DCB:
- PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+ case RTE_ETH_MQ_RX_VMDQ_DCB:
+ PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
break;
- case ETH_MQ_RX_VMDQ_DCB_RSS:
+ case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
 		/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
PMD_INIT_LOG(ERR, "SRIOV active,"
" unsupported mq_mode rx %d.",
dev_conf->rxmode.mq_mode);
return -EINVAL;
- case ETH_MQ_RX_RSS:
- case ETH_MQ_RX_VMDQ_RSS:
- dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+ case RTE_ETH_MQ_RX_RSS:
+ case RTE_ETH_MQ_RX_VMDQ_RSS:
+ dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
PMD_INIT_LOG(ERR, "SRIOV is active,"
return -EINVAL;
}
break;
- case ETH_MQ_RX_VMDQ_ONLY:
- case ETH_MQ_RX_NONE:
+ case RTE_ETH_MQ_RX_VMDQ_ONLY:
+ case RTE_ETH_MQ_RX_NONE:
 		/* if no mq mode is configured, use the default scheme */
dev->data->dev_conf.rxmode.mq_mode =
- ETH_MQ_RX_VMDQ_ONLY;
+ RTE_ETH_MQ_RX_VMDQ_ONLY;
break;
- default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+	default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB */
 		/* SRIOV only works with VMDq enabled */
PMD_INIT_LOG(ERR, "SRIOV is active,"
" wrong mq_mode rx %d.",
}
switch (dev_conf->txmode.mq_mode) {
- case ETH_MQ_TX_VMDQ_DCB:
- PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
- dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+ case RTE_ETH_MQ_TX_VMDQ_DCB:
+ PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+ dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
break;
- default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+ default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */
dev->data->dev_conf.txmode.mq_mode =
- ETH_MQ_TX_VMDQ_ONLY;
+ RTE_ETH_MQ_TX_VMDQ_ONLY;
break;
}
return -EINVAL;
}
} else {
- if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+ if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
" not supported.");
return -EINVAL;
}
 	/* check configuration for vmdq+dcb mode */
- if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+ if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
const struct rte_eth_vmdq_dcb_conf *conf;
if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
return -EINVAL;
}
conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
- if (!(conf->nb_queue_pools == ETH_16_POOLS ||
- conf->nb_queue_pools == ETH_32_POOLS)) {
+ if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+ conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
" nb_queue_pools must be %d or %d.",
- ETH_16_POOLS, ETH_32_POOLS);
+ RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
return -EINVAL;
}
}
- if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
const struct rte_eth_vmdq_dcb_tx_conf *conf;
if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
return -EINVAL;
}
conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
- if (!(conf->nb_queue_pools == ETH_16_POOLS ||
- conf->nb_queue_pools == ETH_32_POOLS)) {
+ if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+ conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
" nb_queue_pools != %d and"
" nb_queue_pools != %d.",
- ETH_16_POOLS, ETH_32_POOLS);
+ RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
return -EINVAL;
}
}
/* For DCB mode check our configuration before we go further */
- if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+ if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) {
const struct rte_eth_dcb_rx_conf *conf;
conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
- if (!(conf->nb_tcs == ETH_4_TCS ||
- conf->nb_tcs == ETH_8_TCS)) {
+ if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+ conf->nb_tcs == RTE_ETH_8_TCS)) {
PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
" and nb_tcs != %d.",
- ETH_4_TCS, ETH_8_TCS);
+ RTE_ETH_4_TCS, RTE_ETH_8_TCS);
return -EINVAL;
}
}
- if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
const struct rte_eth_dcb_tx_conf *conf;
conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
- if (!(conf->nb_tcs == ETH_4_TCS ||
- conf->nb_tcs == ETH_8_TCS)) {
+ if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+ conf->nb_tcs == RTE_ETH_8_TCS)) {
PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
" and nb_tcs != %d.",
- ETH_4_TCS, ETH_8_TCS);
+ RTE_ETH_4_TCS, RTE_ETH_8_TCS);
return -EINVAL;
}
}
PMD_INIT_FUNC_TRACE();
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* multiple queue mode checking */
ret = txgbe_check_mq_mode(dev);
struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
uint32_t intr_vector = 0;
int err;
bool link_up = false, negotiate = 0;
PMD_INIT_FUNC_TRACE();
- /* TXGBE devices don't support:
- * - half duplex (checked afterwards for valid speeds)
- * - fixed speed: TODO implement
- */
- if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
- PMD_INIT_LOG(ERR,
- "Invalid link_speeds for port %u, fix speed not supported",
- dev->data->port_id);
- return -EINVAL;
- }
-
/* Stop the link setup handler before resetting the HW. */
rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
return -1;
hw->mac.start_hw(hw);
hw->mac.get_link_status = true;
+ hw->dev_start = true;
/* configure PF module if SRIOV enabled */
txgbe_pf_host_configure(dev);
return -1;
}
- if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
- intr_handle->intr_vec =
- rte_zmalloc("intr_vec",
- dev->data->nb_rx_queues * sizeof(int), 0);
- if (intr_handle->intr_vec == NULL) {
+ if (rte_intr_dp_is_en(intr_handle)) {
+ if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
+ dev->data->nb_rx_queues)) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
" intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
}
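/* [note] struct rte_intr_handle is opaque as of DPDK 21.11, so the driver no
 * longer reaches into intr_handle->intr_vec; allocation, per-queue mapping and
 * release all go through accessors. The pattern, sketched minimally:
 *
 *     if (rte_intr_dp_is_en(intr_handle) &&
 *         rte_intr_vec_list_alloc(intr_handle, "intr_vec", nb_rx_queues))
 *             return -ENOMEM;                                 // one slot per Rx queue
 *     rte_intr_vec_list_index_set(intr_handle, queue_id, vec);  // queue -> vector
 *     ...
 *     rte_intr_vec_list_free(intr_handle);                    // replaces rte_free()
 */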
-
- /* confiugre msix for sleep until rx interrupt */
+ /* configure msix for sleep until rx interrupt */
txgbe_configure_msix(dev);
/* initialize transmission unit */
goto error;
}
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK;
+ mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK;
err = txgbe_vlan_offload_config(dev, mask);
if (err) {
PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
goto error;
}
- if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+ if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
/* Enable vlan filtering for VMDq */
txgbe_vmdq_vlan_hw_filter_enable(dev);
}
if (err)
goto error;
- allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_10G;
+ allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+ RTE_ETH_LINK_SPEED_10G;
link_speeds = &dev->data->dev_conf.link_speeds;
- if (*link_speeds & ~allowed_speeds) {
+ if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
PMD_INIT_LOG(ERR, "Invalid link setting");
goto error;
}
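/* [note] RTE_ETH_LINK_SPEED_FIXED is bit 0 of link_speeds (AUTONEG is 0), so
 * shifting both operands right by one drops the fixed-speed flag from the
 * comparison. An equivalent sketch:
 *
 *     uint32_t speeds = *link_speeds & ~RTE_ETH_LINK_SPEED_FIXED;
 *     if (speeds & ~allowed_speeds)      // any unsupported speed bit set?
 *             goto error;
 *
 * Fixed-speed configurations, rejected outright by the block removed earlier
 * in this patch, are thereby accepted; only the speed bits are validated.
 */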
speed = 0x0;
- if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+ if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
speed = (TXGBE_LINK_SPEED_100M_FULL |
TXGBE_LINK_SPEED_1GB_FULL |
TXGBE_LINK_SPEED_10GB_FULL);
} else {
- if (*link_speeds & ETH_LINK_SPEED_10G)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
speed |= TXGBE_LINK_SPEED_10GB_FULL;
- if (*link_speeds & ETH_LINK_SPEED_5G)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
speed |= TXGBE_LINK_SPEED_5GB_FULL;
- if (*link_speeds & ETH_LINK_SPEED_2_5G)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
- if (*link_speeds & ETH_LINK_SPEED_1G)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
speed |= TXGBE_LINK_SPEED_1GB_FULL;
- if (*link_speeds & ETH_LINK_SPEED_100M)
+ if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
speed |= TXGBE_LINK_SPEED_100M_FULL;
}
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
int vf;
struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
/* Clean datapath event and queue/vec mapping */
rte_intr_efd_disable(intr_handle);
- if (intr_handle->intr_vec != NULL) {
- rte_free(intr_handle->intr_vec);
- intr_handle->intr_vec = NULL;
- }
+ rte_intr_vec_list_free(intr_handle);
/* reset hierarchy commit */
tm_conf->committed = false;
hw->adapter_stopped = true;
dev->data->dev_started = 0;
+ hw->dev_start = false;
return 0;
}
} else {
/* Turn on the laser */
hw->mac.enable_tx_laser(hw);
+ hw->dev_start = true;
txgbe_dev_link_update(dev, 0);
}
} else {
/* Turn off the laser */
hw->mac.disable_tx_laser(hw);
+ hw->dev_start = false;
txgbe_dev_link_update(dev, 0);
}
{
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
int retries = 0;
int ret;
}
static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
- struct rte_eth_xstat_name *xstats_names,
const uint64_t *ids,
+ struct rte_eth_xstat_name *xstats_names,
unsigned int limit)
{
unsigned int i;
dev_info->max_mac_addrs = hw->mac.num_rar_entries;
dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
dev_info->max_vfs = pci_dev->max_vfs;
- dev_info->max_vmdq_pools = ETH_64_POOLS;
+ dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
dev_info->vmdq_queue_num = dev_info->max_rx_queues;
+ dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
dev_info->rx_queue_offload_capa);
dev_info->tx_desc_lim = tx_desc_lim;
dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
- dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+ dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
- dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
- dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
/* Driver-preferred Rx/Tx parameters */
dev_info->default_rxportconf.burst_size = 32;
int wait = 1;
memset(&link, 0, sizeof(link));
- link.link_status = ETH_LINK_DOWN;
- link.link_speed = ETH_SPEED_NUM_NONE;
- link.link_duplex = ETH_LINK_HALF_DUPLEX;
- link.link_autoneg = ETH_LINK_AUTONEG;
+ link.link_status = RTE_ETH_LINK_DOWN;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+ link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+ link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ RTE_ETH_LINK_SPEED_FIXED);
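/* [note] link_autoneg is now derived from the configuration instead of being
 * hard-coded: any link_speeds value with RTE_ETH_LINK_SPEED_FIXED set is
 * reported with autoneg off.
 */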
hw->mac.get_link_status = true;
err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
if (err != 0) {
- link.link_speed = ETH_SPEED_NUM_100M;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = RTE_ETH_SPEED_NUM_100M;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
return rte_eth_linkstatus_set(dev, &link);
}
txgbe_dev_setup_link_alarm_handler, dev);
}
return rte_eth_linkstatus_set(dev, &link);
+ } else if (!hw->dev_start) {
+ return rte_eth_linkstatus_set(dev, &link);
}
intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
- link.link_status = ETH_LINK_UP;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_status = RTE_ETH_LINK_UP;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
switch (link_speed) {
default:
case TXGBE_LINK_SPEED_UNKNOWN:
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
- link.link_speed = ETH_SPEED_NUM_100M;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ link.link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case TXGBE_LINK_SPEED_100M_FULL:
- link.link_speed = ETH_SPEED_NUM_100M;
+ link.link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case TXGBE_LINK_SPEED_1GB_FULL:
- link.link_speed = ETH_SPEED_NUM_1G;
+ link.link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case TXGBE_LINK_SPEED_2_5GB_FULL:
- link.link_speed = ETH_SPEED_NUM_2_5G;
+ link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
break;
case TXGBE_LINK_SPEED_5GB_FULL:
- link.link_speed = ETH_SPEED_NUM_5G;
+ link.link_speed = RTE_ETH_SPEED_NUM_5G;
break;
case TXGBE_LINK_SPEED_10GB_FULL:
- link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_speed = RTE_ETH_SPEED_NUM_10G;
break;
}
* - On failure, a negative value.
*/
static int
-txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
+txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev,
+ struct rte_intr_handle *intr_handle)
{
uint32_t eicr;
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+ if (rte_intr_type_get(intr_handle) != RTE_INTR_HANDLE_UIO &&
+ rte_intr_type_get(intr_handle) != RTE_INTR_HANDLE_VFIO_MSIX)
+ wr32(hw, TXGBE_PX_INTA, 1);
+
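/* [note] this is why txgbe_dev_interrupt_get_status() gained the handle
 * parameter: with a line-based handle (anything other than UIO or VFIO-MSIX),
 * TXGBE_PX_INTA is written to acknowledge the interrupt before the cause
 * mask is cleared.
 */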
/* clear all cause mask */
txgbe_disable_intr(hw);
PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
(int)(dev->data->port_id),
(unsigned int)link.link_speed,
- link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
"full-duplex" : "half-duplex");
} else {
PMD_INIT_LOG(INFO, " Port %d: Link Down",
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
uint32_t eicr;
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
- txgbe_dev_interrupt_get_status(dev);
+ txgbe_dev_interrupt_get_status(dev, dev->intr_handle);
txgbe_dev_interrupt_action(dev, dev->intr_handle);
}
struct txgbe_hw *hw;
hw = TXGBE_DEV_HW(dev);
- return txgbe_led_on(hw, 4) == 0 ? 0 : -ENOTSUP;
+ return txgbe_led_on(hw, TXGBE_LEDCTL_ACTIVE) == 0 ? 0 : -ENOTSUP;
}
static int
struct txgbe_hw *hw;
hw = TXGBE_DEV_HW(dev);
- return txgbe_led_off(hw, 4) == 0 ? 0 : -ENOTSUP;
+ return txgbe_led_off(hw, TXGBE_LEDCTL_ACTIVE) == 0 ? 0 : -ENOTSUP;
}
static int
tx_pause = 0;
if (rx_pause && tx_pause)
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else if (rx_pause)
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else if (tx_pause)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
return 0;
}
return -ENOTSUP;
}
- if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+ "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
return -EINVAL;
}
for (i = 0; i < reta_size; i += 4) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
if (!mask)
continue;
PMD_INIT_FUNC_TRACE();
- if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+ "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
return -EINVAL;
}
for (i = 0; i < reta_size; i += 4) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
if (!mask)
continue;
txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
- struct rte_eth_dev_info dev_info;
uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
struct rte_eth_dev_data *dev_data = dev->data;
- int ret;
-
- ret = txgbe_dev_info_get(dev, &dev_info);
- if (ret != 0)
- return ret;
-
- /* check that mtu is within the allowed range */
- if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
- return -EINVAL;
 	/* If the device is started, refuse an mtu that requires scattered-packet
 	 * support when this feature has not been enabled before.
*/
if (dev_data->dev_started && !dev_data->scattered_rx &&
- (frame_size + 2 * TXGBE_VLAN_TAG_SIZE >
+ (frame_size + 2 * RTE_VLAN_HLEN >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
PMD_INIT_LOG(ERR, "Stop port first.");
return -EINVAL;
}
- /* update max frame size */
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
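/* [note] with the 21.11 mtu rework, rxmode.max_rx_pkt_len no longer exists
 * and rte_eth_dev_set_mtu() already range-checks the mtu against dev_info
 * min_mtu/max_mtu, so the driver-level range check and the max_rx_pkt_len
 * update are dropped; frame_size is derived directly from the mtu above.
 */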
if (hw->mode)
wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
TXGBE_FRAME_SIZE_MAX);
return -ENOTSUP;
if (on) {
- for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+ for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
uta_info->uta_shadow[i] = ~0;
wr32(hw, TXGBE_UCADDRTBL(i), ~0);
}
} else {
- for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+ for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
uta_info->uta_shadow[i] = 0;
wr32(hw, TXGBE_UCADDRTBL(i), 0);
}
{
uint32_t new_val = orig_val;
- if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
new_val |= TXGBE_POOLETHCTL_UTA;
- if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
new_val |= TXGBE_POOLETHCTL_MCHA;
- if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
new_val |= TXGBE_POOLETHCTL_UCHA;
- if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
new_val |= TXGBE_POOLETHCTL_BCA;
- if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+ if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
new_val |= TXGBE_POOLETHCTL_MCP;
return new_val;
txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
uint32_t mask;
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
wr32(hw, TXGBE_IVARMISC, tmp);
} else {
/* rx or tx causes */
- /* Workround for ICR lost */
+ /* Workaround for ICR lost */
idx = ((16 * (queue & 1)) + (8 * direction));
tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
tmp &= ~(0xFF << idx);
txgbe_configure_msix(struct rte_eth_dev *dev)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
uint32_t vec = TXGBE_MISC_VEC_ID;
queue_id++) {
/* by default, 1:1 mapping */
txgbe_set_ivar_map(hw, 0, queue_id, vec);
- intr_handle->intr_vec[queue_id] = vec;
- if (vec < base + intr_handle->nb_efd - 1)
+ rte_intr_vec_list_index_set(intr_handle,
+ queue_id, vec);
+ if (vec < base + rte_intr_nb_efd_get(intr_handle)
+ - 1)
vec++;
}
rte_eth_linkstatus_get(dev, &link);
switch (link.link_speed) {
- case ETH_SPEED_NUM_100M:
+ case RTE_ETH_SPEED_NUM_100M:
incval = TXGBE_INCVAL_100;
shift = TXGBE_INCVAL_SHIFT_100;
break;
- case ETH_SPEED_NUM_1G:
+ case RTE_ETH_SPEED_NUM_1G:
incval = TXGBE_INCVAL_1GB;
shift = TXGBE_INCVAL_SHIFT_1GB;
break;
- case ETH_SPEED_NUM_10G:
+ case RTE_ETH_SPEED_NUM_10G:
default:
incval = TXGBE_INCVAL_10GB;
shift = TXGBE_INCVAL_SHIFT_10GB;
/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0);
- /* Stop incrementating the System Time registers. */
+ /* Stop incrementing the System Time registers. */
wr32(hw, TXGBE_TSTIMEINC, 0);
return 0;
uint8_t nb_tcs;
uint8_t i, j;
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
else
dcb_info->nb_tcs = 1;
if (dcb_config->vt_mode) { /* vt is enabled */
struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
for (j = 0; j < nb_tcs; j++) {
} else { /* vt is disabled */
struct rte_eth_dcb_rx_conf *rx_conf =
&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
- if (dcb_info->nb_tcs == ETH_4_TCS) {
+ if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
for (i = 0; i < dcb_info->nb_tcs; i++) {
dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
- } else if (dcb_info->nb_tcs == ETH_8_TCS) {
+ } else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
for (i = 0; i < dcb_info->nb_tcs; i++) {
dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
}
switch (l2_tunnel->l2_tunnel_type) {
- case RTE_L2_TUNNEL_TYPE_E_TAG:
+ case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
break;
default:
return ret;
switch (l2_tunnel->l2_tunnel_type) {
- case RTE_L2_TUNNEL_TYPE_E_TAG:
+ case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
break;
default:
return -EINVAL;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
if (udp_tunnel->udp_port == 0) {
PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
ret = -EINVAL;
}
wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
break;
- case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_ETH_TUNNEL_TYPE_GENEVE:
if (udp_tunnel->udp_port == 0) {
PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed.");
ret = -EINVAL;
}
wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
break;
- case RTE_TUNNEL_TYPE_TEREDO:
+ case RTE_ETH_TUNNEL_TYPE_TEREDO:
if (udp_tunnel->udp_port == 0) {
PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed.");
ret = -EINVAL;
}
wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
break;
- case RTE_TUNNEL_TYPE_VXLAN_GPE:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
if (udp_tunnel->udp_port == 0) {
PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
ret = -EINVAL;
return -EINVAL;
switch (udp_tunnel->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN:
cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
if (cur_port != udp_tunnel->udp_port) {
PMD_DRV_LOG(ERR, "Port %u does not exist.",
}
wr32(hw, TXGBE_VXLANPORT, 0);
break;
- case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_ETH_TUNNEL_TYPE_GENEVE:
cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
if (cur_port != udp_tunnel->udp_port) {
PMD_DRV_LOG(ERR, "Port %u does not exist.",
}
wr32(hw, TXGBE_GENEVEPORT, 0);
break;
- case RTE_TUNNEL_TYPE_TEREDO:
+ case RTE_ETH_TUNNEL_TYPE_TEREDO:
cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
if (cur_port != udp_tunnel->udp_port) {
PMD_DRV_LOG(ERR, "Port %u does not exist.",
}
wr32(hw, TXGBE_TEREDOPORT, 0);
break;
- case RTE_TUNNEL_TYPE_VXLAN_GPE:
+ case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORTGPE);
if (cur_port != udp_tunnel->udp_port) {
PMD_DRV_LOG(ERR, "Port %u does not exist.",