diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c
index 6cb8bb4..7ac5558 100644
--- a/drivers/net/avp/avp_ethdev.c
+++ b/drivers/net/avp/avp_ethdev.c
@@ -711,7 +711,7 @@ avp_dev_interrupt_handler(void *data)
                            status);
 
        /* re-enable UIO interrupt handling */
-       ret = rte_intr_ack(&pci_dev->intr_handle);
+       ret = rte_intr_ack(pci_dev->intr_handle);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "Failed to re-enable UIO interrupts, ret=%d\n",
                            ret);
@@ -730,7 +730,7 @@ avp_dev_enable_interrupts(struct rte_eth_dev *eth_dev)
                return -EINVAL;
 
        /* enable UIO interrupt handling */
-       ret = rte_intr_enable(&pci_dev->intr_handle);
+       ret = rte_intr_enable(pci_dev->intr_handle);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "Failed to enable UIO interrupts, ret=%d\n",
                            ret);
@@ -759,7 +759,7 @@ avp_dev_disable_interrupts(struct rte_eth_dev *eth_dev)
                    RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));
 
        /* disable UIO interrupt handling */
-       ret = rte_intr_disable(&pci_dev->intr_handle);
+       ret = rte_intr_disable(pci_dev->intr_handle);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "Failed to disable UIO interrupts, ret=%d\n",
                            ret);
@@ -776,7 +776,7 @@ avp_dev_setup_interrupts(struct rte_eth_dev *eth_dev)
        int ret;
 
        /* register a callback handler with UIO for interrupt notifications */
-       ret = rte_intr_callback_register(&pci_dev->intr_handle,
+       ret = rte_intr_callback_register(pci_dev->intr_handle,
                                         avp_dev_interrupt_handler,
                                         (void *)eth_dev);
        if (ret < 0) {
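
The four hunks above all follow from the DPDK 21.11 interrupt rework: the PCI device now carries a pointer to an opaque rte_intr_handle, so the handle is passed directly instead of by address. A minimal sketch of the new calling convention, assuming 21.11-era headers (the helper name and its error handling are illustrative, not taken from this driver):

#include <rte_interrupts.h>
#include <rte_bus_pci.h>

/* Illustrative helper, assuming the 21.11+ pointer-based intr_handle. */
static int
example_enable_uio_irq(struct rte_pci_device *pci_dev,
                       rte_intr_callback_fn handler, void *cb_arg)
{
        int ret;

        /* register the callback on the device's interrupt handle */
        ret = rte_intr_callback_register(pci_dev->intr_handle, handler, cb_arg);
        if (ret < 0)
                return ret;

        /* enable delivery of interrupts on that handle */
        return rte_intr_enable(pci_dev->intr_handle);
}
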
@@ -1059,17 +1059,18 @@ static int
 avp_dev_enable_scattered(struct rte_eth_dev *eth_dev,
                         struct avp_dev *avp)
 {
-       unsigned int max_rx_pkt_len;
+       unsigned int max_rx_pktlen;
 
-       max_rx_pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+       max_rx_pktlen = eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
+               RTE_ETHER_CRC_LEN;
 
-       if ((max_rx_pkt_len > avp->guest_mbuf_size) ||
-           (max_rx_pkt_len > avp->host_mbuf_size)) {
+       if (max_rx_pktlen > avp->guest_mbuf_size ||
+           max_rx_pktlen > avp->host_mbuf_size) {
                /*
                 * If the guest MTU is greater than either the host or guest
                 * buffers then chained mbufs have to be enabled in the TX
                 * direction.  It is assumed that the application will not need
-                * to send packets larger than their max_rx_pkt_len (MRU).
+                * to send packets larger than their MTU.
                 */
                return 1;
        }
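
With rxmode.max_rx_pkt_len removed, the maximum Rx frame length is now derived from the configured MTU plus the L2 overhead, exactly as the hunk above computes it. A minimal sketch of that arithmetic (the helper name is illustrative; for a 1500-byte MTU it yields 1518 bytes):

#include <stdint.h>
#include <rte_ether.h>

/* Illustrative helper: frame length from MTU, Ethernet header + CRC,
 * no VLAN tags, matching the expression used in the hunk above. */
static inline uint32_t
example_frame_len_from_mtu(uint16_t mtu)
{
        return (uint32_t)mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
}
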
@@ -1124,7 +1125,7 @@ avp_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 
        PMD_DRV_LOG(DEBUG, "AVP max_rx_pkt_len=(%u,%u) mbuf_size=(%u,%u)\n",
                    avp->max_rx_pkt_len,
-                   eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
+                   eth_dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN,
                    avp->host_mbuf_size,
                    avp->guest_mbuf_size);
 
@@ -1310,7 +1311,7 @@ avp_dev_copy_from_buffers(struct avp_dev *avp,
        src_offset = 0;
 
        if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
-               ol_flags = PKT_RX_VLAN;
+               ol_flags = RTE_MBUF_F_RX_VLAN;
                vlan_tci = pkt_buf->vlan_tci;
        } else {
                ol_flags = 0;
@@ -1568,7 +1569,7 @@ avp_recv_pkts(void *rx_queue,
                m->port = avp->port_id;
 
                if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
-                       m->ol_flags = PKT_RX_VLAN;
+                       m->ol_flags = RTE_MBUF_F_RX_VLAN;
                        m->vlan_tci = pkt_buf->vlan_tci;
                }
 
@@ -1674,7 +1675,7 @@ avp_dev_copy_to_buffers(struct avp_dev *avp,
        first_buf->nb_segs = count;
        first_buf->pkt_len = total_length;
 
-       if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+       if (mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
                first_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
                first_buf->vlan_tci = mbuf->vlan_tci;
        }
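
The mbuf flag hunks are a pure rename: PKT_RX_VLAN and PKT_TX_VLAN_PKT became RTE_MBUF_F_RX_VLAN and RTE_MBUF_F_TX_VLAN in DPDK 21.11, with unchanged semantics. A small sketch of the renamed flags in use (the helpers are illustrative, not part of this driver):

#include <rte_mbuf.h>

/* Request VLAN tag insertion on transmit (formerly PKT_TX_VLAN_PKT). */
static inline void
example_request_tx_vlan(struct rte_mbuf *m, uint16_t vlan_tci)
{
        m->ol_flags |= RTE_MBUF_F_TX_VLAN;
        m->vlan_tci = vlan_tci;
}

/* Check whether a VLAN tag was stripped on receive (formerly PKT_RX_VLAN). */
static inline int
example_rx_vlan_stripped(const struct rte_mbuf *m)
{
        return (m->ol_flags & RTE_MBUF_F_RX_VLAN) != 0;
}
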
@@ -1889,8 +1890,8 @@ avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                         * function; send it truncated to avoid the performance
                         * hit of having to manage returning the already
                         * allocated buffer to the free list.  This should not
-                        * happen since the application should have set the
-                        * max_rx_pkt_len based on its MTU and it should be
+                        * happen since the application should not send
+                        * packets larger than its MTU and it should be
                         * policing its own packet sizes.
                         */
                        txq->errors++;
@@ -1905,7 +1906,7 @@ avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                pkt_buf->nb_segs = 1;
                pkt_buf->next = NULL;
 
-               if (m->ol_flags & PKT_TX_VLAN_PKT) {
+               if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
                        pkt_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
                        pkt_buf->vlan_tci = m->vlan_tci;
                }
@@ -1997,9 +1998,9 @@ avp_dev_configure(struct rte_eth_dev *eth_dev)
        /* Setup required number of queues */
        _avp_set_queue_counts(eth_dev);
 
-       mask = (ETH_VLAN_STRIP_MASK |
-               ETH_VLAN_FILTER_MASK |
-               ETH_VLAN_EXTEND_MASK);
+       mask = (RTE_ETH_VLAN_STRIP_MASK |
+               RTE_ETH_VLAN_FILTER_MASK |
+               RTE_ETH_VLAN_EXTEND_MASK);
        ret = avp_vlan_offload_set(eth_dev, mask);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "VLAN offload set failed by host, ret=%d\n",
@@ -2139,8 +2140,8 @@ avp_dev_link_update(struct rte_eth_dev *eth_dev,
        struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct rte_eth_link *link = &eth_dev->data->dev_link;
 
-       link->link_speed = ETH_SPEED_NUM_10G;
-       link->link_duplex = ETH_LINK_FULL_DUPLEX;
+       link->link_speed = RTE_ETH_SPEED_NUM_10G;
+       link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
        link->link_status = !!(avp->flags & AVP_F_LINKUP);
 
        return -1;
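
The link macros were renamed the same way (ETH_SPEED_NUM_10G to RTE_ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX to RTE_ETH_LINK_FULL_DUPLEX) with unchanged values. A minimal sketch of filling a fixed 10G link with the renamed constants (the helper is illustrative, not taken from this driver):

#include <rte_ethdev.h>

/* Illustrative helper: report a fixed 10G full-duplex link. */
static void
example_fill_10g_link(struct rte_eth_link *link, int up)
{
        link->link_speed = RTE_ETH_SPEED_NUM_10G;
        link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
        link->link_autoneg = RTE_ETH_LINK_FIXED;
        link->link_status = up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
}
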
@@ -2190,8 +2191,8 @@ avp_dev_info_get(struct rte_eth_dev *eth_dev,
        dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
        dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
        if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
-               dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
-               dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+               dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+               dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
        }
 
        return 0;
@@ -2204,9 +2205,9 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
        struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
        uint64_t offloads = dev_conf->rxmode.offloads;
 
-       if (mask & ETH_VLAN_STRIP_MASK) {
+       if (mask & RTE_ETH_VLAN_STRIP_MASK) {
                if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
-                       if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+                       if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                                avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
                        else
                                avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
@@ -2215,13 +2216,13 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
                }
        }
 
-       if (mask & ETH_VLAN_FILTER_MASK) {
-               if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+       if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+               if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                        PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
        }
 
-       if (mask & ETH_VLAN_EXTEND_MASK) {
-               if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+       if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+               if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
                        PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
        }