#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
+#include <rte_security_driver.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_net.h>
+#include <rte_vect.h>
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#define rte_ixgbe_prefetch(p) do {} while (0)
#endif
-#ifdef RTE_IXGBE_INC_VECTOR
-uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
-#endif
-
/*********************************************************************
*
* TX functions
/* update tail pointer */
rte_wmb();
- IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);
+ IXGBE_PCI_REG_WC_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);
return nb_pkts;
}
return nb_tx;
}
-#ifdef RTE_IXGBE_INC_VECTOR
static uint16_t
ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
return nb_tx;
}
-#endif
static inline void
ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
seqnum_seed |= tx_offload.l2_len
<< IXGBE_ADVTXD_TUNNEL_LEN;
}
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
if (ol_flags & PKT_TX_SEC_OFFLOAD) {
union ixgbe_crypto_tx_desc_md *md =
(union ixgbe_crypto_tx_desc_md *)mdata;
uint32_t ctx = 0;
uint32_t new_ctx;
union ixgbe_tx_offload tx_offload;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
uint8_t use_ipsec;
#endif
* are needed for offload functionality.
*/
ol_flags = tx_pkt->ol_flags;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
#endif
tx_offload.tso_segsz = tx_pkt->tso_segsz;
tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
if (use_ipsec) {
union ixgbe_crypto_tx_desc_md *ipsec_mdata =
(union ixgbe_crypto_tx_desc_md *)
- &tx_pkt->udata64;
+ rte_security_dynfield(tx_pkt);
tx_offload.sa_idx = ipsec_mdata->sa_idx;
tx_offload.sec_pad_len = ipsec_mdata->pad_len;
}
}
ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
- tx_offload, &tx_pkt->udata64);
+ tx_offload,
+ rte_security_dynfield(tx_pkt));
txe->last_id = tx_last;
tx_id = txe->next_id;
}
olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
if (use_ipsec)
olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
#endif
PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
(unsigned) txq->port_id, (unsigned) txq->queue_id,
(unsigned) tx_id, (unsigned) nb_tx);
- IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
+ IXGBE_PCI_REG_WC_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
txq->tx_tail = tx_id;
return nb_tx;
return i;
}
+	/* check the size of the packet */
+ if (m->pkt_len < IXGBE_TX_MIN_PKT_LEN) {
+ rte_errno = EINVAL;
+ return i;
+ }
+
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
ret = rte_validate_tx_offload(m);
if (ret != 0) {
pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
}
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
if (rx_status & IXGBE_RXD_STAT_SECP) {
pkt_flags |= PKT_RX_SEC_OFFLOAD;
if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
/* update tail pointer */
rte_wmb();
- IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
+ IXGBE_PCI_REG_WC_WRITE_RELAXED(rxq->rdt_reg_addr,
cur_free_trigger);
}
(unsigned) nb_rx);
rx_id = (uint16_t) ((rx_id == 0) ?
(rxq->nb_rx_desc - 1) : (rx_id - 1));
- IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+ IXGBE_PCI_REG_WC_WRITE(rxq->rdt_reg_addr, rx_id);
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
if (!ixgbe_rx_alloc_bufs(rxq, false)) {
rte_wmb();
- IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
- next_rdt);
+ IXGBE_PCI_REG_WC_WRITE_RELAXED(
+ rxq->rdt_reg_addr,
+ next_rdt);
nb_hold -= rxq->rx_free_thresh;
} else {
PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
rte_wmb();
- IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id);
+ IXGBE_PCI_REG_WC_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id);
nb_hold = 0;
}
*
**********************************************************************/
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
{
unsigned i;
{
struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
if (txq->offloads == 0 &&
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
!(txq->using_ipsec) &&
#endif
txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST) {
if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 &&
(rte_eal_process_type() != RTE_PROC_PRIMARY ||
txq->sw_ring_v != NULL)) {
return ixgbe_tx_done_cleanup_vec(txq, free_cnt);
return ixgbe_tx_done_cleanup_full(txq, free_cnt);
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
if (txq != NULL &&
rte_free(txq->sw_ring);
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
{
if (txq != NULL && txq->ops != NULL) {
}
}
-void __attribute__((cold))
+void __rte_cold
ixgbe_dev_tx_queue_release(void *txq)
{
ixgbe_tx_queue_release(txq);
}
/* (Re)set dynamic ixgbe_tx_queue fields to defaults */
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
* the queue parameters. Used in tx_queue_setup by primary process and then
* in dev_init by secondary process when attaching to an existing ethdev.
*/
-void __attribute__((cold))
+void __rte_cold
ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
{
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
if ((txq->offloads == 0) &&
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
!(txq->using_ipsec) &&
#endif
(txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
PMD_INIT_LOG(DEBUG, "Using simple tx code path");
dev->tx_pkt_prepare = NULL;
-#ifdef RTE_IXGBE_INC_VECTOR
if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 &&
(rte_eal_process_type() != RTE_PROC_PRIMARY ||
ixgbe_txq_vec_setup(txq) == 0)) {
PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
} else
-#endif
dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
} else {
PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
hw->mac.type == ixgbe_mac_X550EM_a)
tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
if (dev->security_ctx)
tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
#endif
return tx_offload_capa;
}
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
txq->offloads = offloads;
txq->ops = &def_txq_ops;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
DEV_TX_OFFLOAD_SECURITY);
#endif
*
* @m scattered cluster head
*/
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_free_sc_cluster(struct rte_mbuf *m)
{
uint16_t i, nb_segs = m->nb_segs;
}
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
{
unsigned i;
-#ifdef RTE_IXGBE_INC_VECTOR
/* SSE Vector driver has a different way of releasing mbufs. */
if (rxq->rx_using_sse) {
ixgbe_rx_queue_release_mbufs_vec(rxq);
return;
}
-#endif
if (rxq->sw_ring != NULL) {
for (i = 0; i < rxq->nb_rx_desc; i++) {
}
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
{
if (rxq != NULL) {
}
}
-void __attribute__((cold))
+void __rte_cold
ixgbe_dev_rx_queue_release(void *rxq)
{
ixgbe_rx_queue_release(rxq);
* -EINVAL: the preconditions are NOT satisfied and the default Rx burst
* function must be used.
*/
-static inline int __attribute__((cold))
+static inline int __rte_cold
check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
{
int ret = 0;
}
/* Reset dynamic ixgbe_rx_queue fields back to defaults */
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
{
static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
-#ifdef RTE_IXGBE_INC_VECTOR
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
rxq->rxrearm_start = 0;
rxq->rxrearm_nb = 0;
#endif
hw->mac.type == ixgbe_mac_X550EM_a)
offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
if (dev->security_ctx)
offloads |= DEV_RX_OFFLOAD_SECURITY;
#endif
return offloads;
}
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
if (unlikely(offset >= rxq->nb_rx_desc))
return -EINVAL;
-#ifdef RTE_IXGBE_INC_VECTOR
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
if (rxq->rx_using_sse)
nb_hold = rxq->rxrearm_nb;
else
/*
* Set up link loopback for X540/X550 mode Tx->Rx.
*/
-static inline void __attribute__((cold))
+static inline void __rte_cold
ixgbe_setup_loopback_link_x540_x550(struct ixgbe_hw *hw, bool enable)
{
uint32_t macc;
IXGBE_WRITE_REG(hw, IXGBE_MACC, macc);
}
-void __attribute__((cold))
+void __rte_cold
ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
unsigned i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
dev->data->rx_queues[i] = NULL;
+ rte_eth_dma_zone_free(dev, "rx_ring", i);
}
dev->data->nb_rx_queues = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
dev->data->tx_queues[i] = NULL;
+ rte_eth_dma_zone_free(dev, "tx_ring", i);
}
dev->data->nb_tx_queues = 0;
}
IXGBE_WRITE_FLUSH(hw);
}
-static int __attribute__((cold))
+static int __rte_cold
ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
{
struct ixgbe_rx_entry *rxe = rxq->sw_ring;
}
}
-void __attribute__((cold))
+void __rte_cold
ixgbe_set_rx_function(struct rte_eth_dev *dev)
{
uint16_t i, rx_using_sse;
* conditions to be met and Rx Bulk Allocation should be allowed.
*/
if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
- !adapter->rx_bulk_alloc_allowed) {
+ !adapter->rx_bulk_alloc_allowed ||
+ rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128) {
PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
- "preconditions or RTE_IXGBE_INC_VECTOR is "
- "not enabled",
+ "preconditions",
dev->data->port_id);
adapter->rx_vec_allowed = false;
struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
rxq->rx_using_sse = rx_using_sse;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_SECURITY);
#endif
/*
* Initializes Receive Unit.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
/*
* Initializes Transmit Unit.
*/
-void __attribute__((cold))
+void __rte_cold
ixgbe_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
/*
* Set up link for 82599 loopback mode Tx->Rx.
*/
-static inline void __attribute__((cold))
+static inline void __rte_cold
ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
{
PMD_INIT_FUNC_TRACE();
/*
* Start Transmit and Receive Units.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
ixgbe_setup_loopback_link_x540_x550(hw, true);
}
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
if ((dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_SECURITY) ||
(dev->data->dev_conf.txmode.offloads &
/*
* Start Receive Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
/*
* Stop Receive Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
/*
* Start Transmit Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
/*
* Stop Transmit Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
/*
* [VF] Initializes Receive Unit.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
/*
* [VF] Initializes Transmit Unit.
*/
-void __attribute__((cold))
+void __rte_cold
ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
/*
* [VF] Start Transmit and Receive Units.
*/
-void __attribute__((cold))
+void __rte_cold
ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
return 0;
}
-/* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
-__rte_weak int
+/* Stubs needed for linkage when RTE_ARCH_PPC_64 is set */
+#if defined(RTE_ARCH_PPC_64)
+int
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
{
return -1;
}
-__rte_weak uint16_t
+uint16_t
ixgbe_recv_pkts_vec(
void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
return 0;
}
-__rte_weak uint16_t
+uint16_t
ixgbe_recv_scattered_pkts_vec(
void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
return 0;
}
-__rte_weak int
+int
ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
{
return -1;
}
+
+uint16_t
+ixgbe_xmit_fixed_burst_vec(void __rte_unused *tx_queue,
+ struct rte_mbuf __rte_unused **tx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+int
+ixgbe_txq_vec_setup(struct ixgbe_tx_queue __rte_unused *txq)
+{
+ return -1;
+}
+
+void
+ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue __rte_unused *rxq)
+{
+ return;
+}
+#endif