net/ice/base: implement shared rate limiter
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index ce589b9..6cfbb58 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -34,6 +34,7 @@
 #include <rte_mbuf.h>
 #include <rte_ether.h>
 #include <rte_ethdev_driver.h>
+#include <rte_security_driver.h>
 #include <rte_prefetch.h>
 #include <rte_udp.h>
 #include <rte_tcp.h>
@@ -453,7 +454,7 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
                seqnum_seed |= tx_offload.l2_len
                               << IXGBE_ADVTXD_TUNNEL_LEN;
        }
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
        if (ol_flags & PKT_TX_SEC_OFFLOAD) {
                union ixgbe_crypto_tx_desc_md *md =
                                (union ixgbe_crypto_tx_desc_md *)mdata;
@@ -646,7 +647,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint32_t ctx = 0;
        uint32_t new_ctx;
        union ixgbe_tx_offload tx_offload;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
        uint8_t use_ipsec;
 #endif
 
@@ -676,7 +677,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                 * are needed for offload functionality.
                 */
                ol_flags = tx_pkt->ol_flags;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
                use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
 #endif
 
@@ -690,11 +691,11 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        tx_offload.tso_segsz = tx_pkt->tso_segsz;
                        tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
                        tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
                        if (use_ipsec) {
                                union ixgbe_crypto_tx_desc_md *ipsec_mdata =
                                        (union ixgbe_crypto_tx_desc_md *)
-                                                       &tx_pkt->udata64;
+                                               rte_security_dynfield(tx_pkt);
                                tx_offload.sa_idx = ipsec_mdata->sa_idx;
                                tx_offload.sec_pad_len = ipsec_mdata->pad_len;
                        }
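
The hunk above changes where the IPsec Tx metadata is read from: instead of the removed `mbuf->udata64` field, the driver overlays `union ixgbe_crypto_tx_desc_md` on the `rte_security` dynamic mbuf field. For context, a minimal sketch of the matching write side is shown below; it is illustrative only (the helper `example_set_ipsec_metadata()` and its arguments are hypothetical), assuming the driver-local `ixgbe_ipsec.h` definition of the union and the `rte_security_dynfield()` helper from `<rte_security_driver.h>`, which the first hunk of this diff starts including.

/* Illustrative sketch, not the actual ixgbe code: writing the IPsec
 * metadata that the Tx path above reads back via rte_security_dynfield().
 * example_set_ipsec_metadata() is hypothetical; the union layout comes
 * from the driver-local ixgbe_ipsec.h header.
 */
#include <rte_mbuf.h>
#include <rte_security_driver.h>

#include "ixgbe_ipsec.h"

static void
example_set_ipsec_metadata(struct rte_mbuf *m, uint32_t sa_idx,
			   uint8_t pad_len)
{
	/* The dynamic field is a per-mbuf 64-bit slot reserved at runtime;
	 * the PMD overlays its descriptor-metadata union on top of it,
	 * mirroring the cast used in the Tx hot path above.
	 */
	union ixgbe_crypto_tx_desc_md *md =
		(union ixgbe_crypto_tx_desc_md *)rte_security_dynfield(m);

	md->sa_idx = sa_idx;
	md->pad_len = pad_len;
}
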
@@ -859,7 +860,8 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                                }
 
                                ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
-                                       tx_offload, &tx_pkt->udata64);
+                                       tx_offload,
+                                       rte_security_dynfield(tx_pkt));
 
                                txe->last_id = tx_last;
                                tx_id = txe->next_id;
@@ -877,7 +879,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                }
 
                olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
                if (use_ipsec)
                        olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
 #endif
@@ -1461,7 +1463,7 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
                pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
        }
 
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
        if (rx_status & IXGBE_RXD_STAT_SECP) {
                pkt_flags |= PKT_RX_SEC_OFFLOAD;
                if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
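
On the receive side, the block above translates the hardware status bits into the generic mbuf security flags. A minimal consumer-side sketch follows, assuming the `PKT_RX_SEC_OFFLOAD`/`PKT_RX_SEC_OFFLOAD_FAILED` flag names used by this DPDK version; `example_pkt_was_decrypted()` is a hypothetical helper.

/* Illustrative sketch of an application checking the Rx security flags
 * set by rx_desc_error_to_pkt_flags() above.
 */
#include <stdbool.h>
#include <rte_mbuf.h>

static bool
example_pkt_was_decrypted(const struct rte_mbuf *m)
{
	/* Packet went through the inline security engine... */
	if (!(m->ol_flags & PKT_RX_SEC_OFFLOAD))
		return false;

	/* ...and the security processing (e.g. signature check) succeeded. */
	return !(m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED);
}
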
@@ -2401,7 +2403,7 @@ ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
 {
        struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
        if (txq->offloads == 0 &&
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
                        !(txq->using_ipsec) &&
 #endif
                        txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST) {
@@ -2498,7 +2500,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 {
        /* Use a simple Tx queue (no offloads, no multi segs) if possible */
        if ((txq->offloads == 0) &&
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
                        !(txq->using_ipsec) &&
 #endif
                        (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
@@ -2558,7 +2560,7 @@ ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
            hw->mac.type == ixgbe_mac_X550EM_a)
                tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
        if (dev->security_ctx)
                tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
 #endif
@@ -2726,7 +2728,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
        txq->offloads = offloads;
        txq->ops = &def_txq_ops;
        txq->tx_deferred_start = tx_conf->tx_deferred_start;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
        txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
                        DEV_TX_OFFLOAD_SECURITY);
 #endif
@@ -3009,7 +3011,7 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
            hw->mac.type == ixgbe_mac_X550EM_a)
                offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
        if (dev->security_ctx)
                offloads |= DEV_RX_OFFLOAD_SECURITY;
 #endif
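
The two capability hunks (Tx above, Rx here) only advertise `DEV_TX_OFFLOAD_SECURITY`/`DEV_RX_OFFLOAD_SECURITY` when a security context exists. For reference, a hedged application-side sketch of requesting these offloads at configure time follows; `example_enable_inline_security()` is hypothetical and queue setup/start are omitted.

/* Illustrative application-side sketch: enabling inline security offload
 * on a port whose PMD advertises DEV_RX/TX_OFFLOAD_SECURITY.
 */
#include <errno.h>
#include <rte_ethdev.h>

static int
example_enable_inline_security(uint16_t port_id)
{
	struct rte_eth_dev_info info;
	struct rte_eth_conf conf = { 0 };

	if (rte_eth_dev_info_get(port_id, &info) != 0)
		return -1;

	if (!(info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) ||
	    !(info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY))
		return -ENOTSUP;

	conf.rxmode.offloads |= DEV_RX_OFFLOAD_SECURITY;
	conf.txmode.offloads |= DEV_TX_OFFLOAD_SECURITY;

	/* One Rx and one Tx queue; queue setup and device start not shown. */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
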
@@ -4839,7 +4841,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
                struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
 
                rxq->rx_using_sse = rx_using_sse;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
                rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
                                DEV_RX_OFFLOAD_SECURITY);
 #endif
@@ -5351,7 +5353,7 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
                        ixgbe_setup_loopback_link_x540_x550(hw, true);
        }
 
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
        if ((dev->data->dev_conf.rxmode.offloads &
                        DEV_RX_OFFLOAD_SECURITY) ||
                (dev->data->dev_conf.txmode.offloads &
@@ -5920,7 +5922,7 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
        return 0;
 }
 
-/* Stubs needed for linkage when CONFIG_RTE_ARCH_PPC_64 is set */
+/* Stubs needed for linkage when RTE_ARCH_PPC_64 is set */
 #if defined(RTE_ARCH_PPC_64)
 int
 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
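
Background for the `udata64` to `rte_security_dynfield()` conversion in this diff: the dynamic mbuf field is only valid once it has been registered, which PMDs typically do when the first inline-crypto session is created. A minimal sketch follows, assuming the `rte_security_dynfield_register()`/`rte_security_dynfield_is_registered()` helpers exported alongside `rte_security_dynfield()`; the wrapper name is hypothetical.

/* Minimal sketch: ensure the rte_security dynamic mbuf field exists
 * before any code dereferences rte_security_dynfield(mbuf).
 * example_ensure_security_dynfield() is a hypothetical helper.
 */
#include <rte_security.h>
#include <rte_security_driver.h>

static int
example_ensure_security_dynfield(void)
{
	if (rte_security_dynfield_is_registered())
		return 0;

	/* Registers the field and returns its offset, or a negative
	 * value on failure.
	 */
	if (rte_security_dynfield_register() < 0)
		return -1;

	return 0;
}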