status);
/* re-enable UIO interrupt handling */
- ret = rte_intr_ack(&pci_dev->intr_handle);
+ ret = rte_intr_ack(pci_dev->intr_handle);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to re-enable UIO interrupts, ret=%d\n",
ret);
return -EINVAL;
/* enable UIO interrupt handling */
- ret = rte_intr_enable(&pci_dev->intr_handle);
+ ret = rte_intr_enable(pci_dev->intr_handle);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to enable UIO interrupts, ret=%d\n",
ret);
RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));
/* disable UIO interrupt handling */
- ret = rte_intr_disable(&pci_dev->intr_handle);
+ ret = rte_intr_disable(pci_dev->intr_handle);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to disable UIO interrupts, ret=%d\n",
ret);
int ret;
/* register a callback handler with UIO for interrupt notifications */
- ret = rte_intr_callback_register(&pci_dev->intr_handle,
+ ret = rte_intr_callback_register(pci_dev->intr_handle,
avp_dev_interrupt_handler,
(void *)eth_dev);
if (ret < 0) {
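/*
 * Editorial sketch, not part of the patch: the four hunks above track
 * the DPDK 21.11 change that turned `struct rte_pci_device`'s embedded
 * `intr_handle` member into a pointer to an opaque
 * `struct rte_intr_handle`, so callers now pass the pointer directly
 * instead of taking its address. A minimal illustration of the
 * resulting pattern; the helper name is hypothetical.
 */
#include <rte_bus_pci.h>
#include <rte_interrupts.h>

static int
avp_setup_interrupts_sketch(struct rte_pci_device *pci_dev,
			    rte_intr_callback_fn handler, void *cb_arg)
{
	int ret;

	/* register against the pointer-valued handle (no '&' anymore) */
	ret = rte_intr_callback_register(pci_dev->intr_handle,
					 handler, cb_arg);
	if (ret < 0)
		return ret;

	/* enable delivery; rte_intr_ack() later re-arms after each event */
	return rte_intr_enable(pci_dev->intr_handle);
}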
src_offset = 0;
if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
- ol_flags = PKT_RX_VLAN;
+ ol_flags = RTE_MBUF_F_RX_VLAN;
vlan_tci = pkt_buf->vlan_tci;
} else {
ol_flags = 0;
m->port = avp->port_id;
if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
- m->ol_flags = PKT_RX_VLAN;
+ m->ol_flags = RTE_MBUF_F_RX_VLAN;
m->vlan_tci = pkt_buf->vlan_tci;
}
first_buf->nb_segs = count;
first_buf->pkt_len = total_length;
- if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
first_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
first_buf->vlan_tci = mbuf->vlan_tci;
}
pkt_buf->nb_segs = 1;
pkt_buf->next = NULL;
- if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
pkt_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
pkt_buf->vlan_tci = m->vlan_tci;
}
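/*
 * Editorial sketch, not part of the patch: DPDK 21.11 moved the mbuf
 * offload flags into the RTE_MBUF_F_* namespace, so PKT_RX_VLAN became
 * RTE_MBUF_F_RX_VLAN and PKT_TX_VLAN_PKT became RTE_MBUF_F_TX_VLAN,
 * with unchanged semantics. A hedged compatibility shim, assuming the
 * driver also needs to build against pre-21.11 releases (upstream just
 * uses the new names unconditionally):
 */
#include <rte_mbuf_core.h>

#ifndef RTE_MBUF_F_RX_VLAN
#define RTE_MBUF_F_RX_VLAN	PKT_RX_VLAN
#define RTE_MBUF_F_TX_VLAN	PKT_TX_VLAN_PKT
#endif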
/* Setup required number of queues */
_avp_set_queue_counts(eth_dev);
- mask = (ETH_VLAN_STRIP_MASK |
- ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK);
+ mask = (RTE_ETH_VLAN_STRIP_MASK |
+ RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK);
ret = avp_vlan_offload_set(eth_dev, mask);
if (ret < 0) {
PMD_DRV_LOG(ERR, "VLAN offload set failed by host, ret=%d\n",
struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct rte_eth_link *link = &eth_dev->data->dev_link;
- link->link_speed = ETH_SPEED_NUM_10G;
- link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link->link_speed = RTE_ETH_SPEED_NUM_10G;
+ link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
link->link_status = !!(avp->flags & AVP_F_LINKUP);
return -1;
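/*
 * Editorial sketch, not part of the patch: for context, a minimal
 * reconstruction of the link_update callback the hunk above sits in.
 * The signature follows the standard eth_dev_ops shape; the field
 * names and the AVP_F_LINKUP flag are taken from the hunk itself.
 */
static int
avp_dev_link_update_sketch(struct rte_eth_dev *eth_dev,
			   __rte_unused int wait_to_complete)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	/* AVP reports a fixed 10G full-duplex link; only status varies */
	link->link_speed = RTE_ETH_SPEED_NUM_10G;
	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link->link_status = !!(avp->flags & AVP_F_LINKUP);

	return -1;
}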
dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+ dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
}
return 0;
struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
uint64_t offloads = dev_conf->rxmode.offloads;
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
- if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
else
avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
}
}
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
}
- if (mask & ETH_VLAN_EXTEND_MASK) {
- if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
}
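/*
 * Editorial sketch, not part of the patch: the remaining hunks apply
 * the same mechanical rename to the ethdev namespace (ETH_*,
 * DEV_RX_OFFLOAD_*, DEV_TX_OFFLOAD_* -> RTE_ETH_*). A hedged shim in
 * the same spirit as the mbuf one above, again only needed if a
 * dual-version build is wanted:
 */
#include <rte_ethdev.h>

#ifndef RTE_ETH_VLAN_STRIP_MASK
#define RTE_ETH_VLAN_STRIP_MASK		ETH_VLAN_STRIP_MASK
#define RTE_ETH_VLAN_FILTER_MASK	ETH_VLAN_FILTER_MASK
#define RTE_ETH_VLAN_EXTEND_MASK	ETH_VLAN_EXTEND_MASK
#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP	DEV_RX_OFFLOAD_VLAN_STRIP
#define RTE_ETH_RX_OFFLOAD_VLAN_FILTER	DEV_RX_OFFLOAD_VLAN_FILTER
#define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND	DEV_RX_OFFLOAD_VLAN_EXTEND
#define RTE_ETH_TX_OFFLOAD_VLAN_INSERT	DEV_TX_OFFLOAD_VLAN_INSERT
#define RTE_ETH_SPEED_NUM_10G		ETH_SPEED_NUM_10G
#define RTE_ETH_LINK_FULL_DUPLEX	ETH_LINK_FULL_DUPLEX
#endif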