diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index dfc7f5d..5cc6d9f 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -250,7 +250,7 @@ void enic_init_vnic_resources(struct enic *enic)
                        error_interrupt_offset);
                /* Compute unsupported ol flags for enic_prep_pkts() */
                enic->wq[index].tx_offload_notsup_mask =
-                       PKT_TX_OFFLOAD_MASK ^ enic->tx_offload_mask;
+                       RTE_MBUF_F_TX_OFFLOAD_MASK ^ enic->tx_offload_mask;
 
                cq_idx = enic_cq_wq(enic, index);
                vnic_cq_init(&enic->cq[cq_idx],
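
Note: in DPDK 21.11 the mbuf offload flags moved into the RTE_MBUF_F_ namespace, hence PKT_TX_OFFLOAD_MASK becoming RTE_MBUF_F_TX_OFFLOAD_MASK. A minimal sketch of how a not-supported mask like this is typically consumed in a tx_pkt_prepare hook; the function and parameter names are illustrative, not enic's actual prep logic:

    #include <errno.h>
    #include <rte_errno.h>
    #include <rte_mbuf.h>

    /* Hedged sketch: stop at the first packet requesting an offload
     * outside the supported set; 'notsup_mask' mirrors the field
     * computed above. */
    static uint16_t
    example_prep_pkts(uint64_t notsup_mask, struct rte_mbuf **pkts,
                      uint16_t nb_pkts)
    {
            uint16_t i;

            for (i = 0; i != nb_pkts; i++) {
                    if (pkts[i]->ol_flags & notsup_mask) {
                            rte_errno = ENOTSUP; /* unsupported offload */
                            break;
                    }
            }
            return i; /* number of packets that passed the check */
    }
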
@@ -430,7 +430,7 @@ int enic_link_update(struct rte_eth_dev *eth_dev)
 
        memset(&link, 0, sizeof(link));
        link.link_status = enic_get_link_status(enic);
-       link.link_duplex = ETH_LINK_FULL_DUPLEX;
+       link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
        link.link_speed = vnic_dev_port_speed(enic->vdev);
 
        return rte_eth_linkstatus_set(eth_dev, &link);
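
Note: ETH_LINK_FULL_DUPLEX is the pre-21.11 spelling; the RTE_ETH_ prefix is the renamed ethdev namespace. A sketch of a complete link_update handler under the new names, assuming the 21.11 internal ethdev_driver.h header; the speed and autoneg values are illustrative placeholders:

    #include <string.h>
    #include <ethdev_driver.h> /* rte_eth_linkstatus_set(), PMD-internal */

    /* Hedged sketch: PMD link_update using the RTE_ETH_* constants. */
    static int
    example_link_update(struct rte_eth_dev *dev, int link_is_up)
    {
            struct rte_eth_link link;

            memset(&link, 0, sizeof(link));
            link.link_status = link_is_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
            link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
            link.link_speed = RTE_ETH_SPEED_NUM_10G; /* placeholder */
            link.link_autoneg = RTE_ETH_LINK_FIXED;

            /* Atomically publish the new link state. */
            return rte_eth_linkstatus_set(dev, &link);
    }
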
@@ -448,7 +448,7 @@ enic_intr_handler(void *arg)
        rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
        enic_log_q_error(enic);
        /* Re-enable irq in case of INTx */
-       rte_intr_ack(&enic->pdev->intr_handle);
+       rte_intr_ack(enic->pdev->intr_handle);
 }
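
Note: in 21.11 struct rte_intr_handle became an opaque, per-device heap allocation, so pci_dev->intr_handle is now itself a pointer and is passed directly instead of by address. An illustrative sketch of the changed call pattern:

    #include <rte_bus_pci.h>
    #include <rte_interrupts.h>

    /* Hedged sketch: the handle is already a pointer; pass it as-is. */
    static void
    example_ack_irq(struct rte_pci_device *pdev)
    {
            /* Before 21.11 this was: rte_intr_ack(&pdev->intr_handle); */
            rte_intr_ack(pdev->intr_handle);
    }
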
 
 static int enic_rxq_intr_init(struct enic *enic)
@@ -477,14 +477,16 @@ static int enic_rxq_intr_init(struct enic *enic)
                        " interrupts\n");
                return err;
        }
-       intr_handle->intr_vec = rte_zmalloc("enic_intr_vec",
-                                           rxq_intr_count * sizeof(int), 0);
-       if (intr_handle->intr_vec == NULL) {
+
+       if (rte_intr_vec_list_alloc(intr_handle, "enic_intr_vec",
+                                          rxq_intr_count)) {
                dev_err(enic, "Failed to allocate intr_vec\n");
                return -ENOMEM;
        }
        for (i = 0; i < rxq_intr_count; i++)
-               intr_handle->intr_vec[i] = i + ENICPMD_RXQ_INTR_OFFSET;
+               if (rte_intr_vec_list_index_set(intr_handle, i,
+                                                  i + ENICPMD_RXQ_INTR_OFFSET))
+                       return -rte_errno;
        return 0;
 }
 
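
Note: the intr_vec array is likewise private now; drivers allocate and populate it through accessors rather than rte_zmalloc() and direct indexing, and free it with a single call. A self-contained sketch of the init/deinit pair, with illustrative names for the queue count and vector offset:

    #include <errno.h>
    #include <rte_errno.h>
    #include <rte_interrupts.h>

    /* Hedged sketch: Rx-queue interrupt vector setup via the 21.11
     * accessor API, plus the matching teardown. */
    static int
    example_rxq_intr_init(struct rte_intr_handle *ih, int nb_rxq,
                          int vec_offset)
    {
            int i;

            /* Replaces rte_zmalloc() of intr_handle->intr_vec. */
            if (rte_intr_vec_list_alloc(ih, "example_intr_vec", nb_rxq))
                    return -ENOMEM;
            for (i = 0; i < nb_rxq; i++)
                    if (rte_intr_vec_list_index_set(ih, i, i + vec_offset))
                            return -rte_errno;
            return 0;
    }

    static void
    example_rxq_intr_deinit(struct rte_intr_handle *ih)
    {
            /* Frees and clears the list; the old NULL check is gone. */
            rte_intr_vec_list_free(ih);
    }
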
@@ -494,10 +496,8 @@ static void enic_rxq_intr_deinit(struct enic *enic)
 
        intr_handle = enic->rte_dev->intr_handle;
        rte_intr_efd_disable(intr_handle);
-       if (intr_handle->intr_vec != NULL) {
-               rte_free(intr_handle->intr_vec);
-               intr_handle->intr_vec = NULL;
-       }
+
+       rte_intr_vec_list_free(intr_handle);
 }
 
 static void enic_prep_wq_for_simple_tx(struct enic *enic, uint16_t queue_idx)
@@ -597,7 +597,7 @@ int enic_enable(struct enic *enic)
        }
 
        eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
-       eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+       eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
        /* vnic notification of link status has already been turned on in
         * enic_dev_init() which is called during probe time.  Here we are
@@ -638,11 +638,11 @@ int enic_enable(struct enic *enic)
         * and vlan insertion are supported.
         */
        simple_tx_offloads = enic->tx_offload_capa &
-               (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-                DEV_TX_OFFLOAD_VLAN_INSERT |
-                DEV_TX_OFFLOAD_IPV4_CKSUM |
-                DEV_TX_OFFLOAD_UDP_CKSUM |
-                DEV_TX_OFFLOAD_TCP_CKSUM);
+               (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+                RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+                RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+                RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+                RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
        if ((eth_dev->data->dev_conf.txmode.offloads &
             ~simple_tx_offloads) == 0) {
                ENICPMD_LOG(DEBUG, " use the simple tx handler");
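
Note: the test above reads as: mask the enabled offloads with the complement of what the simple path supports; a zero result means every enabled offload is in the supported set. A tiny sketch of the subset idiom, with illustrative names:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hedged sketch: true iff every bit set in 'enabled' is also set
     * in 'supported', i.e. enabled is a subset of supported. */
    static bool
    example_offloads_ok(uint64_t enabled, uint64_t supported)
    {
            return (enabled & ~supported) == 0;
    }
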
@@ -667,10 +667,10 @@ int enic_enable(struct enic *enic)
        vnic_dev_enable_wait(enic->vdev);
 
        /* Register and enable error interrupt */
-       rte_intr_callback_register(&(enic->pdev->intr_handle),
+       rte_intr_callback_register(enic->pdev->intr_handle,
                enic_intr_handler, (void *)enic->rte_dev);
 
-       rte_intr_enable(&(enic->pdev->intr_handle));
+       rte_intr_enable(enic->pdev->intr_handle);
        /* Unmask LSC interrupt */
        vnic_intr_unmask(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);
 
@@ -858,7 +858,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
        max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu);
 
        if (enic->rte_dev->data->dev_conf.rxmode.offloads &
-           DEV_RX_OFFLOAD_SCATTER) {
+           RTE_ETH_RX_OFFLOAD_SCATTER) {
                dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
                /* ceil((max pkt len)/mbuf_size) */
                mbufs_per_pkt = (max_rx_pktlen + mbuf_size - 1) / mbuf_size;
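
Note: with RTE_ETH_RX_OFFLOAD_SCATTER enabled, the receive chain is sized by rounding up; e.g. a 9000-byte max frame with 2048-byte mbufs needs (9000 + 2047) / 2048 = 5 buffers. Sketch of the ceiling-division helper:

    #include <stdint.h>

    /* Hedged sketch: mbufs per packet = ceil(pktlen / mbuf_size). */
    static uint32_t
    example_mbufs_per_pkt(uint32_t max_rx_pktlen, uint32_t mbuf_size)
    {
            return (max_rx_pktlen + mbuf_size - 1) / mbuf_size;
    }
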
@@ -1111,8 +1111,8 @@ int enic_disable(struct enic *enic)
                (void)vnic_intr_masked(&enic->intr[i]); /* flush write */
        }
        enic_rxq_intr_deinit(enic);
-       rte_intr_disable(&enic->pdev->intr_handle);
-       rte_intr_callback_unregister(&enic->pdev->intr_handle,
+       rte_intr_disable(enic->pdev->intr_handle);
+       rte_intr_callback_unregister(enic->pdev->intr_handle,
                                     enic_intr_handler,
                                     (void *)enic->rte_dev);
 
@@ -1385,15 +1385,15 @@ int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
        rss_hash_type = 0;
        rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads;
        if (enic->rq_count > 1 &&
-           (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) &&
+           (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) &&
            rss_hf != 0) {
                rss_enable = 1;
-               if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-                             ETH_RSS_NONFRAG_IPV4_OTHER))
+               if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+                             RTE_ETH_RSS_NONFRAG_IPV4_OTHER))
                        rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
-               if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+               if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
                        rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
-               if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+               if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
                        rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV4;
                        if (enic->udp_rss_weak) {
                                /*
@@ -1404,12 +1404,12 @@ int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
                                rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
                        }
                }
-               if (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_IPV6_EX |
-                             ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER))
+               if (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_IPV6_EX |
+                             RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER))
                        rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
-               if (rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX))
+               if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX))
                        rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
-               if (rss_hf & (ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX)) {
+               if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX)) {
                        rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV6;
                        if (enic->udp_rss_weak)
                                rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
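
Note: this block translates the renamed RTE_ETH_RSS_* request bits into the VIC's NIC_CFG_RSS_HASH_TYPE_* bits, with the udp_rss_weak branches also setting the corresponding TCP hash type alongside the UDP one, as shown above. A condensed sketch of the translation pattern, with generic device bits standing in for the NIC_CFG_* values:

    #include <stdint.h>
    #include <rte_ethdev.h>

    /* Hedged sketch: fold RTE_ETH_RSS_* bits into a device hash-type
     * mask; HT_IPV4 and HT_TCP_IPV4 are illustrative device bits. */
    #define HT_IPV4     0x01
    #define HT_TCP_IPV4 0x02

    static uint8_t
    example_rss_to_hw(uint64_t rss_hf)
    {
            uint8_t ht = 0;

            if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
                          RTE_ETH_RSS_NONFRAG_IPV4_OTHER))
                    ht |= HT_IPV4;
            if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
                    ht |= HT_TCP_IPV4;
            return ht;
    }
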
@@ -1745,14 +1745,14 @@ enic_enable_overlay_offload(struct enic *enic)
                return -EINVAL;
        }
        enic->tx_offload_capa |=
-               DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-               (enic->geneve ? DEV_TX_OFFLOAD_GENEVE_TNL_TSO : 0) |
-               (enic->vxlan ? DEV_TX_OFFLOAD_VXLAN_TNL_TSO : 0);
+               RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+               (enic->geneve ? RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO : 0) |
+               (enic->vxlan ? RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO : 0);
        enic->tx_offload_mask |=
-               PKT_TX_OUTER_IPV6 |
-               PKT_TX_OUTER_IPV4 |
-               PKT_TX_OUTER_IP_CKSUM |
-               PKT_TX_TUNNEL_MASK;
+               RTE_MBUF_F_TX_OUTER_IPV6 |
+               RTE_MBUF_F_TX_OUTER_IPV4 |
+               RTE_MBUF_F_TX_OUTER_IP_CKSUM |
+               RTE_MBUF_F_TX_TUNNEL_MASK;
        enic->overlay_offload = true;
 
        if (enic->vxlan && enic->geneve)
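
Note: enabling overlay offload widens both the capability mask (the tunnel TSO bits for whichever of VXLAN/Geneve the adapter reports) and the set of accepted mbuf flags, now spelled RTE_MBUF_F_TX_*. A sketch of how a TX path might test the newly accepted tunnel flags; the helper name is illustrative:

    #include <rte_mbuf.h>

    /* Hedged sketch: does this mbuf request any tunnel TX offload? */
    static inline int
    example_is_tunneled(const struct rte_mbuf *m)
    {
            return (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) != 0;
    }
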