#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
+#include <rte_security_driver.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_net.h>
+#include <rte_vect.h>
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#endif
/* Bit mask to indicate which bits are required for building the TX context */
#define IXGBE_TX_OFFLOAD_MASK ( \
+ PKT_TX_OUTER_IPV6 | \
+ PKT_TX_OUTER_IPV4 | \
+ PKT_TX_IPV6 | \
+ PKT_TX_IPV4 | \
PKT_TX_VLAN_PKT | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
#define rte_ixgbe_prefetch(p) do {} while (0)
#endif
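/*
 * Illustrative note (not part of this patch): the unsupported-flag mask
 * used by tx_prepare below is derived as the complement of the supported
 * mask within the generic mbuf Tx offload flag space, along the lines of:
 *
 *   #define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
 *           (PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
 *
 * Any ol_flags bit falling in IXGBE_TX_OFFLOAD_NOTSUP_MASK causes
 * tx_prepare to reject the packet with rte_errno = ENOTSUP.
 */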
-#ifdef RTE_IXGBE_INC_VECTOR
-uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
-#endif
-
/*********************************************************************
*
* TX functions
/* update tail pointer */
rte_wmb();
- IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);
+ IXGBE_PCI_REG_WC_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);
return nb_pkts;
}
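/*
 * Illustrative note (not part of this patch): the WC variants of the
 * tail-register writes map to the EAL write-combining store helpers,
 * roughly (assuming the macro definitions in ixgbe_ethdev.h):
 *
 *   #define IXGBE_PCI_REG_WC_WRITE_RELAXED(reg, value) \
 *           rte_write32_wc_relaxed((rte_cpu_to_le_32(value)), (reg))
 *
 * On platforms that map the BAR as write-combining this allows a cheaper
 * doorbell store; elsewhere rte_write32_wc_relaxed() degrades to the
 * plain relaxed MMIO write, so behavior is unchanged.
 */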
return nb_tx;
}
-#ifdef RTE_IXGBE_INC_VECTOR
static uint16_t
ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
return nb_tx;
}
-#endif
static inline void
ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
case PKT_TX_UDP_CKSUM:
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
- mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= sizeof(struct rte_udp_hdr)
+ << IXGBE_ADVTXD_L4LEN_SHIFT;
tx_offload_mask.l2_len |= ~0;
tx_offload_mask.l3_len |= ~0;
break;
case PKT_TX_TCP_CKSUM:
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
- mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= sizeof(struct rte_tcp_hdr)
+ << IXGBE_ADVTXD_L4LEN_SHIFT;
tx_offload_mask.l2_len |= ~0;
tx_offload_mask.l3_len |= ~0;
break;
case PKT_TX_SCTP_CKSUM:
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
- mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= sizeof(struct rte_sctp_hdr)
+ << IXGBE_ADVTXD_L4LEN_SHIFT;
tx_offload_mask.l2_len |= ~0;
tx_offload_mask.l3_len |= ~0;
break;
seqnum_seed |= tx_offload.l2_len
<< IXGBE_ADVTXD_TUNNEL_LEN;
}
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
if (ol_flags & PKT_TX_SEC_OFFLOAD) {
union ixgbe_crypto_tx_desc_md *md =
(union ixgbe_crypto_tx_desc_md *)mdata;
uint32_t ctx = 0;
uint32_t new_ctx;
union ixgbe_tx_offload tx_offload;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
uint8_t use_ipsec;
#endif
* are needed for offload functionality.
*/
ol_flags = tx_pkt->ol_flags;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
#endif
tx_offload.tso_segsz = tx_pkt->tso_segsz;
tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
if (use_ipsec) {
union ixgbe_crypto_tx_desc_md *ipsec_mdata =
(union ixgbe_crypto_tx_desc_md *)
- &tx_pkt->udata64;
+ rte_security_dynfield(tx_pkt);
tx_offload.sa_idx = ipsec_mdata->sa_idx;
tx_offload.sec_pad_len = ipsec_mdata->pad_len;
}
}
ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
- tx_offload, &tx_pkt->udata64);
+ tx_offload,
+ rte_security_dynfield(tx_pkt));
txe->last_id = tx_last;
tx_id = txe->next_id;
}
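/*
 * Illustrative note (not part of this patch): rte_security_dynfield()
 * returns a pointer to the security metadata registered as a dynamic
 * mbuf field, replacing the removed hard-coded mbuf udata64 slot:
 *
 *   rte_security_dynfield_t *md = rte_security_dynfield(m);
 *
 * The field must have been registered beforehand (see
 * rte_security_dynfield_register()) by the session setup code, or the
 * Tx path must not dereference it.
 */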
olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
if (use_ipsec)
olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
#endif
PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
(unsigned) txq->port_id, (unsigned) txq->queue_id,
(unsigned) tx_id, (unsigned) nb_tx);
- IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
+ IXGBE_PCI_REG_WC_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
txq->tx_tail = tx_id;
return nb_tx;
*/
if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) {
- rte_errno = -EINVAL;
+ rte_errno = EINVAL;
return i;
}
if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) {
- rte_errno = -ENOTSUP;
+ rte_errno = ENOTSUP;
+ return i;
+ }
+
+ /* check that the packet meets the minimum size */
+ if (m->pkt_len < IXGBE_TX_MIN_PKT_LEN) {
+ rte_errno = EINVAL;
return i;
}
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
ret = rte_validate_tx_offload(m);
if (ret != 0) {
- rte_errno = ret;
+ rte_errno = -ret;
return i;
}
#endif
ret = rte_net_intel_cksum_prepare(m);
if (ret != 0) {
- rte_errno = ret;
+ rte_errno = -ret;
return i;
}
}
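/*
 * Illustrative application-side sketch (not part of this patch; the
 * helper name is hypothetical): the sign fixes above make ixgbe follow
 * the ethdev convention that tx_prepare returns the number of packets
 * accepted and leaves a positive errno in rte_errno for the first
 * rejected packet.
 */
static inline uint16_t
example_prepare_and_xmit(uint16_t port, uint16_t queue,
			 struct rte_mbuf **pkts, uint16_t nb)
{
	uint16_t nb_prep = rte_eth_tx_prepare(port, queue, pkts, nb);

	/*
	 * nb_prep < nb means pkts[nb_prep] was rejected and rte_errno
	 * holds a positive errno (EINVAL, ENOTSUP, ...), as set above.
	 */
	return rte_eth_tx_burst(port, queue, pkts, nb_prep);
}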
/*
* Check only whether a VLAN tag is present.
* Do not check whether the L3/L4 Rx checksum was done by the NIC;
- * That can be found from rte_eth_rxmode.hw_ip_checksum flag
+ * that can be found from the rte_eth_rxmode.offloads flags.
*/
pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? vlan_flags : 0;
pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
}
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
if (rx_status & IXGBE_RXD_STAT_SECP) {
pkt_flags |= PKT_RX_SEC_OFFLOAD;
if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
/* update tail pointer */
rte_wmb();
- IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
+ IXGBE_PCI_REG_WC_WRITE_RELAXED(rxq->rdt_reg_addr,
cur_free_trigger);
}
(unsigned) nb_rx);
rx_id = (uint16_t) ((rx_id == 0) ?
(rxq->nb_rx_desc - 1) : (rx_id - 1));
- IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+ IXGBE_PCI_REG_WC_WRITE(rxq->rdt_reg_addr, rx_id);
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
bool eop;
struct ixgbe_rx_entry *rxe;
struct ixgbe_scattered_rx_entry *sc_entry;
- struct ixgbe_scattered_rx_entry *next_sc_entry;
+ struct ixgbe_scattered_rx_entry *next_sc_entry = NULL;
struct ixgbe_rx_entry *next_rxe = NULL;
struct rte_mbuf *first_seg;
struct rte_mbuf *rxm;
- struct rte_mbuf *nmb;
+ struct rte_mbuf *nmb = NULL;
union ixgbe_adv_rx_desc rxd;
uint16_t data_len;
uint16_t next_id;
* of the ixgbe PMD.
*
* TODO:
- * - Get rid of "volatile" crap and let the compiler do its
- * job.
+ * - Get rid of "volatile" and let the compiler do its job.
* - Use the proper memory barrier (rte_rmb()) to ensure the
* memory ordering below.
*/
if (!ixgbe_rx_alloc_bufs(rxq, false)) {
rte_wmb();
- IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
- next_rdt);
+ IXGBE_PCI_REG_WC_WRITE_RELAXED(
+ rxq->rdt_reg_addr,
+ next_rdt);
nb_hold -= rxq->rx_free_thresh;
} else {
PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
rte_wmb();
- IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id);
+ IXGBE_PCI_REG_WC_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id);
nb_hold = 0;
}
*
**********************************************************************/
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
{
unsigned i;
}
}
-static void __attribute__((cold))
+static int
+ixgbe_tx_done_cleanup_full(struct ixgbe_tx_queue *txq, uint32_t free_cnt)
+{
+ struct ixgbe_tx_entry *swr_ring = txq->sw_ring;
+ uint16_t i, tx_last, tx_id;
+ uint16_t nb_tx_free_last;
+ uint16_t nb_tx_to_clean;
+ uint32_t pkt_cnt;
+
+ /* Start freeing mbufs from the entry after tx_tail */
+ tx_last = txq->tx_tail;
+ tx_id = swr_ring[tx_last].next_id;
+
+ if (txq->nb_tx_free == 0 && ixgbe_xmit_cleanup(txq))
+ return 0;
+
+ nb_tx_to_clean = txq->nb_tx_free;
+ nb_tx_free_last = txq->nb_tx_free;
+ if (!free_cnt)
+ free_cnt = txq->nb_tx_desc;
+
+ /* Loop through swr_ring to count the number of
+ * freeable mbufs and packets.
+ */
+ for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+ for (i = 0; i < nb_tx_to_clean &&
+ pkt_cnt < free_cnt &&
+ tx_id != tx_last; i++) {
+ if (swr_ring[tx_id].mbuf != NULL) {
+ rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
+ swr_ring[tx_id].mbuf = NULL;
+
+ /*
+ * last segment in the packet,
+ * increment packet count
+ */
+ pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
+ }
+
+ tx_id = swr_ring[tx_id].next_id;
+ }
+
+ if (txq->tx_rs_thresh > txq->nb_tx_desc -
+ txq->nb_tx_free || tx_id == tx_last)
+ break;
+
+ if (pkt_cnt < free_cnt) {
+ if (ixgbe_xmit_cleanup(txq))
+ break;
+
+ nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
+ nb_tx_free_last = txq->nb_tx_free;
+ }
+ }
+
+ return (int)pkt_cnt;
+}
+
+static int
+ixgbe_tx_done_cleanup_simple(struct ixgbe_tx_queue *txq,
+ uint32_t free_cnt)
+{
+ int i, n, cnt;
+
+ if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
+ free_cnt = txq->nb_tx_desc;
+
+ cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
+
+ for (i = 0; i < cnt; i += n) {
+ if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
+ break;
+
+ n = ixgbe_tx_free_bufs(txq);
+
+ if (n == 0)
+ break;
+ }
+
+ return i;
+}
+
+static int
+ixgbe_tx_done_cleanup_vec(struct ixgbe_tx_queue *txq __rte_unused,
+ uint32_t free_cnt __rte_unused)
+{
+ return -ENOTSUP;
+}
+
+int
+ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
+{
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+ if (txq->offloads == 0 &&
+#ifdef RTE_LIB_SECURITY
+ !(txq->using_ipsec) &&
+#endif
+ txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST) {
+ if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 &&
+ (rte_eal_process_type() != RTE_PROC_PRIMARY ||
+ txq->sw_ring_v != NULL)) {
+ return ixgbe_tx_done_cleanup_vec(txq, free_cnt);
+ } else {
+ return ixgbe_tx_done_cleanup_simple(txq, free_cnt);
+ }
+ }
+
+ return ixgbe_tx_done_cleanup_full(txq, free_cnt);
+}
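/*
 * Illustrative usage sketch (not part of this patch): applications
 * reach the cleanup paths above through the generic ethdev call, e.g.
 * to reclaim mbufs from a slowly draining queue:
 *
 *   int n = rte_eth_tx_done_cleanup(port_id, queue_id, 0);
 *
 * n < 0 is an error (e.g. -ENOTSUP when the vector Tx path is in use);
 * n >= 0 is the number of packets whose mbufs were freed. Passing
 * free_cnt == 0 requests cleaning as many packets as possible, capped
 * at nb_tx_desc in ixgbe_tx_done_cleanup_full() above.
 */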
+
+static void __rte_cold
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
if (txq != NULL &&
rte_free(txq->sw_ring);
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
{
if (txq != NULL && txq->ops != NULL) {
}
}
-void __attribute__((cold))
+void __rte_cold
ixgbe_dev_tx_queue_release(void *txq)
{
ixgbe_tx_queue_release(txq);
}
/* (Re)set dynamic ixgbe_tx_queue fields to defaults */
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
* the queue parameters. Used in tx_queue_setup by primary process and then
* in dev_init by secondary process when attaching to an existing ethdev.
*/
-void __attribute__((cold))
+void __rte_cold
ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
{
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
- if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
-#ifdef RTE_LIBRTE_SECURITY
+ if ((txq->offloads == 0) &&
+#ifdef RTE_LIB_SECURITY
!(txq->using_ipsec) &&
#endif
(txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
PMD_INIT_LOG(DEBUG, "Using simple tx code path");
dev->tx_pkt_prepare = NULL;
-#ifdef RTE_IXGBE_INC_VECTOR
if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 &&
(rte_eal_process_type() != RTE_PROC_PRIMARY ||
ixgbe_txq_vec_setup(txq) == 0)) {
PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
} else
-#endif
dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
} else {
PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
PMD_INIT_LOG(DEBUG,
- " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
- (unsigned long)txq->txq_flags,
- (unsigned long)IXGBE_SIMPLE_FLAGS);
+ " - offloads = 0x%" PRIx64,
+ txq->offloads);
PMD_INIT_LOG(DEBUG,
" - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
(unsigned long)txq->tx_rs_thresh,
}
}
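/*
 * Summary of the selection above: the simple path requires
 * txq->offloads == 0, no inline IPsec, and tx_rs_thresh >=
 * RTE_PMD_IXGBE_TX_MAX_BURST; within it, the vector path additionally
 * requires tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ and at least
 * 128-bit SIMD from rte_vect_get_max_simd_bitwidth(). Everything else
 * falls back to the full-featured path with context descriptors.
 */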
-int __attribute__((cold))
+uint64_t
+ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+uint64_t
+ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t tx_offload_capa;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540)
+ tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a)
+ tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIB_SECURITY
+ if (dev->security_ctx)
+ tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+#endif
+ return tx_offload_capa;
+}
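/*
 * Illustrative application-side sketch (not part of this patch; the
 * helper name is hypothetical): the two functions above feed
 * rte_eth_dev_info_get(), which is how an application checks a
 * capability before requesting it at configure time.
 */
static inline int
example_request_tso(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info info;
	int ret = rte_eth_dev_info_get(port_id, &info);

	if (ret != 0)
		return ret;
	if (!(info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO))
		return -ENOTSUP;
	conf->txmode.offloads |= DEV_TX_OFFLOAD_TCP_TSO;
	return 0;
}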
+
+int __rte_cold
ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
struct ixgbe_tx_queue *txq;
struct ixgbe_hw *hw;
uint16_t tx_rs_thresh, tx_free_thresh;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
/*
* Validate number of transmit descriptors.
* It must not exceed hardware maximum, and must be multiple
* tx_rs_thresh must be a divisor of the ring size.
* tx_free_thresh must be greater than 0.
* tx_free_thresh must be less than the size of the ring minus 3.
+ * tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
* One descriptor in the TX ring is used as a sentinel to avoid a
* H/W race condition, hence the maximum threshold constraints.
* When set to zero use default values.
*/
- tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
- tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+ /* force tx_rs_thresh to accommodate an aggressive tx_free_thresh */
+ tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?
+ nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;
+ if (tx_conf->tx_rs_thresh > 0)
+ tx_rs_thresh = tx_conf->tx_rs_thresh;
+ if (tx_rs_thresh + tx_free_thresh > nb_desc) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
+ "exceed nb_desc. (tx_rs_thresh=%u "
+ "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
+ (unsigned int)tx_rs_thresh,
+ (unsigned int)tx_free_thresh,
+ (unsigned int)nb_desc,
+ (int)dev->data->port_id,
+ (int)queue_idx);
+ return -(EINVAL);
+ }
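/*
 * Worked example of the checks above (assuming the driver defaults
 * DEFAULT_TX_RS_THRESH = DEFAULT_TX_FREE_THRESH = 32): with
 * nb_desc = 512 and tx_free_thresh = 500, the default tx_rs_thresh
 * would overflow (32 + 500 > 512), so it is clamped to 512 - 500 = 12
 * and setup succeeds. If the user instead passes tx_rs_thresh = 96
 * explicitly, 96 + 500 > 512 and setup fails with -EINVAL.
 */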
if (tx_rs_thresh >= (nb_desc - 2)) {
PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
"of TX descriptors minus 2. (tx_rs_thresh=%u "
txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
txq->port_id = dev->data->port_id;
- txq->txq_flags = tx_conf->txq_flags;
+ txq->offloads = offloads;
txq->ops = &def_txq_ops;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
DEV_TX_OFFLOAD_SECURITY);
#endif
*
* @m scattered cluster head
*/
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_free_sc_cluster(struct rte_mbuf *m)
{
uint16_t i, nb_segs = m->nb_segs;
}
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
{
unsigned i;
-#ifdef RTE_IXGBE_INC_VECTOR
/* SSE Vector driver has a different way of releasing mbufs. */
if (rxq->rx_using_sse) {
ixgbe_rx_queue_release_mbufs_vec(rxq);
return;
}
-#endif
if (rxq->sw_ring != NULL) {
for (i = 0; i < rxq->nb_rx_desc; i++) {
}
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
{
if (rxq != NULL) {
}
}
-void __attribute__((cold))
+void __rte_cold
ixgbe_dev_rx_queue_release(void *rxq)
{
ixgbe_rx_queue_release(rxq);
* -EINVAL: the preconditions are NOT satisfied and the default Rx burst
* function must be used.
*/
-static inline int __attribute__((cold))
+static inline int __rte_cold
check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
{
int ret = 0;
}
/* Reset dynamic ixgbe_rx_queue fields back to defaults */
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
{
static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
-#ifdef RTE_IXGBE_INC_VECTOR
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
rxq->rxrearm_start = 0;
rxq->rxrearm_nb = 0;
#endif
offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_SCATTER;
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_RSS_HASH;
if (hw->mac.type == ixgbe_mac_82598EB)
offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
if (ixgbe_is_vf(dev) == 0)
- offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_VLAN_EXTEND);
+ offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
/*
* RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
* mode.
*/
if ((hw->mac.type == ixgbe_mac_82599EB ||
- hw->mac.type == ixgbe_mac_X540) &&
+ hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_X550) &&
!RTE_ETH_DEV_SRIOV(dev).active)
offloads |= DEV_RX_OFFLOAD_TCP_LRO;
hw->mac.type == ixgbe_mac_X550EM_a)
offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
if (dev->security_ctx)
offloads |= DEV_RX_OFFLOAD_SECURITY;
#endif
return offloads;
}
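/*
 * Illustrative note (not part of this patch): DEV_RX_OFFLOAD_KEEP_CRC
 * replaces the old DEV_RX_OFFLOAD_CRC_STRIP with inverted polarity --
 * CRC stripping is now the default and the flag asks to keep it:
 *
 *   if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC)
 *           conf.rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
 *
 * When set, the queue setup below records RTE_ETHER_CRC_LEN (4) in
 * rxq->crc_len so the Rx path can account for the trailing CRC bytes.
 */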
-static int
-ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
- uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
- uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
- uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
-
- if ((requested & (queue_supported | port_supported)) != requested)
- return 0;
-
- if ((port_offloads ^ requested) & port_supported)
- return 0;
-
- return 1;
-}
-
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
struct ixgbe_rx_queue *rxq;
struct ixgbe_hw *hw;
uint16_t len;
- struct ixgbe_adapter *adapter =
- (struct ixgbe_adapter *)dev->data->dev_private;
+ struct ixgbe_adapter *adapter = dev->data->dev_private;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (!ixgbe_check_rx_queue_offloads(dev, rx_conf->offloads)) {
- PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported port offloads 0x%" PRIx64
- " or supported queue offloads 0x%" PRIx64,
- (void *)dev, rx_conf->offloads,
- dev->data->dev_conf.rxmode.offloads,
- ixgbe_get_rx_port_offloads(dev),
- ixgbe_get_rx_queue_offloads(dev));
- return -ENOTSUP;
- }
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
/*
* Validate number of receive descriptors.
rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
rxq->drop_en = rx_conf->rx_drop_en;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
- rxq->offloads = rx_conf->offloads;
+ rxq->offloads = offloads;
/*
* The packet type in RX descriptor is different for different NICs.
if (unlikely(offset >= rxq->nb_rx_desc))
return -EINVAL;
-#ifdef RTE_IXGBE_INC_VECTOR
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
if (rxq->rx_using_sse)
nb_hold = rxq->rxrearm_nb;
else
return RTE_ETH_TX_DESC_FULL;
}
-void __attribute__((cold))
+/*
+ * Set up Tx->Rx link loopback for X540/X550.
+ */
+static inline void __rte_cold
+ixgbe_setup_loopback_link_x540_x550(struct ixgbe_hw *hw, bool enable)
+{
+ uint32_t macc;
+ PMD_INIT_FUNC_TRACE();
+
+ u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
+
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
+ macc = IXGBE_READ_REG(hw, IXGBE_MACC);
+
+ if (enable) {
+ /* datasheet 15.2.1: disable AUTONEG (PHY Bit 7.0.C) */
+ autoneg_reg |= IXGBE_MII_AUTONEG_ENABLE;
+ /* datasheet 15.2.1: MACC.FLU = 1 (force link up) */
+ macc |= IXGBE_MACC_FLU;
+ } else {
+ autoneg_reg &= ~IXGBE_MII_AUTONEG_ENABLE;
+ macc &= ~IXGBE_MACC_FLU;
+ }
+
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
+
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, macc);
+}
+
+void __rte_cold
ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
unsigned i;
- struct ixgbe_adapter *adapter =
- (struct ixgbe_adapter *)dev->data->dev_private;
+ struct ixgbe_adapter *adapter = dev->data->dev_private;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
ixgbe_reset_rx_queue(adapter, rxq);
}
}
+ /* If loopback mode was enabled, reconfigure the link accordingly */
+ if (dev->data->dev_conf.lpbk_mode != 0) {
+ if (hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a)
+ ixgbe_setup_loopback_link_x540_x550(hw, false);
+ }
}
void
for (i = 0; i < dev->data->nb_rx_queues; i++) {
ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
dev->data->rx_queues[i] = NULL;
+ rte_eth_dma_zone_free(dev, "rx_ring", i);
}
dev->data->nb_rx_queues = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
dev->data->tx_queues[i] = NULL;
+ rte_eth_dma_zone_free(dev, "tx_ring", i);
}
dev->data->nb_tx_queues = 0;
}
ixgbe_rss_configure(struct rte_eth_dev *dev)
{
struct rte_eth_rss_conf rss_conf;
+ struct ixgbe_adapter *adapter;
struct ixgbe_hw *hw;
uint32_t reta;
uint16_t i;
uint32_t reta_reg;
PMD_INIT_FUNC_TRACE();
+ adapter = dev->data->dev_private;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
* The byte-swap is needed because NIC registers are in
* little-endian order.
*/
- reta = 0;
- for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
- reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
-
- if (j == dev->data->nb_rx_queues)
- j = 0;
- reta = (reta << 8) | j;
- if ((i & 3) == 3)
- IXGBE_WRITE_REG(hw, reta_reg,
- rte_bswap32(reta));
+ if (adapter->rss_reta_updated == 0) {
+ reta = 0;
+ for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
+ reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+
+ if (j == dev->data->nb_rx_queues)
+ j = 0;
+ reta = (reta << 8) | j;
+ if ((i & 3) == 3)
+ IXGBE_WRITE_REG(hw, reta_reg,
+ rte_bswap32(reta));
+ }
}
/*
uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
struct ixgbe_dcb_tc_config *tc;
- uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ uint32_t max_frame = dev->data->mtu + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_bw_conf *bw_conf =
IXGBE_WRITE_FLUSH(hw);
}
-static int __attribute__((cold))
+static int __rte_cold
ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
{
struct ixgbe_rx_entry *rxe = rxq->sw_ring;
/* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
uint16_t maxdesc =
- IPV4_MAX_PKT_LEN /
+ RTE_IPV4_MAX_PKT_LEN /
(mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
if (maxdesc >= 16)
}
}
-void __attribute__((cold))
+void __rte_cold
ixgbe_set_rx_function(struct rte_eth_dev *dev)
{
uint16_t i, rx_using_sse;
- struct ixgbe_adapter *adapter =
- (struct ixgbe_adapter *)dev->data->dev_private;
+ struct ixgbe_adapter *adapter = dev->data->dev_private;
/*
* In order to allow Vector Rx there are a few configuration
* conditions to be met and Rx Bulk Allocation should be allowed.
*/
if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
- !adapter->rx_bulk_alloc_allowed) {
+ !adapter->rx_bulk_alloc_allowed ||
+ rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128) {
PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
- "preconditions or RTE_IXGBE_INC_VECTOR is "
- "not enabled",
+ "preconditions",
dev->data->port_id);
adapter->rx_vec_allowed = false;
struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
rxq->rx_using_sse = rx_using_sse;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_SECURITY);
#endif
/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
- if (!(rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) &&
+ if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
/*
* According to chapter 4.6.7.2.1 of the Spec Rev.
* at most 500us latency for a single RSC aggregation.
*/
eitr &= ~IXGBE_EITR_ITR_INT_MASK;
- eitr |= IXGBE_EITR_INTERVAL_US(500) | IXGBE_EITR_CNT_WDIS;
+ eitr |= IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
+ eitr |= IXGBE_EITR_CNT_WDIS;
IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
/*
* Initializes Receive Unit.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
* Configure CRC stripping, if any.
*/
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
- hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
- else
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
+ else
+ hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
/*
* Configure jumbo frame support, if any.
hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
/*
- * If loopback mode is configured for 82599, set LPBK bit.
+ * If loopback mode is configured, set LPBK bit.
*/
- if (hw->mac.type == ixgbe_mac_82599EB &&
- dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+ if (dev->data->dev_conf.lpbk_mode != 0) {
+ rc = ixgbe_check_supported_loopback_mode(dev);
+ if (rc < 0) {
+ PMD_INIT_LOG(ERR, "Unsupported loopback mode");
+ return rc;
+ }
hlreg0 |= IXGBE_HLREG0_LPBK;
- else
+ } else {
hlreg0 &= ~IXGBE_HLREG0_LPBK;
+ }
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
* Reset crc_len in case it was changed after queue setup by a
* call to configure.
*/
- rxq->crc_len = (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) ?
- 0 : ETHER_CRC_LEN;
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
/* Setup the Base and Length of the Rx Descriptor Rings */
bus_addr = rxq->rx_ring_phys_addr;
if (hw->mac.type == ixgbe_mac_82599EB ||
hw->mac.type == ixgbe_mac_X540) {
rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
- rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
- else
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
+ else
+ rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}
/*
* Initializes Transmit Unit.
*/
-void __attribute__((cold))
+void __rte_cold
ixgbe_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
ixgbe_dev_mq_tx_configure(dev);
}
+/*
+ * Check if the requested loopback mode is supported
+ */
+int
+ixgbe_check_supported_loopback_mode(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_TX_RX)
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a)
+ return 0;
+
+ return -ENOTSUP;
+}
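/*
 * Illustrative usage sketch (not part of this patch): loopback is
 * requested through the generic device configuration before start:
 *
 *   struct rte_eth_conf conf = { ... };
 *   conf.lpbk_mode = IXGBE_LPBK_TX_RX;
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * ixgbe_dev_rx_init() validates the mode with the helper above and
 * sets HLREG0.LPBK; ixgbe_dev_rxtx_start() then forces the link up on
 * X540/X550 through MACC.FLU.
 */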
+
/*
* Set up link for 82599 loopback mode Tx->Rx.
*/
-static inline void __attribute__((cold))
+static inline void __rte_cold
ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
{
PMD_INIT_FUNC_TRACE();
/*
* Start Transmit and Receive Units.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
rxctrl |= IXGBE_RXCTRL_RXEN;
hw->mac.ops.enable_rx_dma(hw, rxctrl);
- /* If loopback mode is enabled for 82599, set up the link accordingly */
- if (hw->mac.type == ixgbe_mac_82599EB &&
- dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
- ixgbe_setup_loopback_link_82599(hw);
+ /* If loopback mode is enabled, set up the link accordingly */
+ if (dev->data->dev_conf.lpbk_mode != 0) {
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ ixgbe_setup_loopback_link_82599(hw);
+ else if (hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a)
+ ixgbe_setup_loopback_link_x540_x550(hw, true);
+ }
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
if ((dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_SECURITY) ||
(dev->data->dev_conf.txmode.offloads &
/*
* Start Receive Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
-
- /* Allocate buffers for descriptor rings */
- if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
- PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
- rx_queue_id);
- return -1;
- }
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- rxdctl |= IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+ rxq = dev->data->rx_queues[rx_queue_id];
- /* Wait until RX Enable ready */
- poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
- do {
- rte_delay_ms(1);
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
- if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d",
- rx_queue_id);
- rte_wmb();
- IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
- IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- } else
+ /* Allocate buffers for descriptor rings */
+ if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
+ PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
+ rx_queue_id);
return -1;
+ }
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ rxdctl |= IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+
+ /* Wait until RX Enable ready */
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
+ rte_wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
}
/*
* Stop Receive Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
- struct ixgbe_adapter *adapter =
- (struct ixgbe_adapter *)dev->data->dev_private;
+ struct ixgbe_adapter *adapter = dev->data->dev_private;
struct ixgbe_rx_queue *rxq;
uint32_t rxdctl;
int poll_ms;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = dev->data->rx_queues[rx_queue_id];
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- rxdctl &= ~IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
- /* Wait until RX Enable bit clear */
- poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
- do {
- rte_delay_ms(1);
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
- if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
- rx_queue_id);
+ /* Wait until RX Enable bit clear */
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
- rte_delay_us(RTE_IXGBE_WAIT_100_US);
+ rte_delay_us(RTE_IXGBE_WAIT_100_US);
- ixgbe_rx_queue_release_mbufs(rxq);
- ixgbe_reset_rx_queue(adapter, rxq);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
- } else
- return -1;
+ ixgbe_rx_queue_release_mbufs(rxq);
+ ixgbe_reset_rx_queue(adapter, rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
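/*
 * Illustrative usage sketch (not part of this patch): the per-queue
 * bounds checks were dropped above because the ethdev layer now
 * validates queue ids before calling these handlers. They back the
 * generic start/stop API, typically paired with deferred start:
 *
 *   rxconf.rx_deferred_start = 1;
 *   rte_eth_rx_queue_setup(port_id, qid, nb_desc, socket, &rxconf, mp);
 *   rte_eth_dev_start(port_id);               queue qid stays stopped
 *   rte_eth_dev_rx_queue_start(port_id, qid); start it explicitly
 */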
/*
* Start Transmit Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
- txdctl |= IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+ txq = dev->data->tx_queues[tx_queue_id];
+ IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+ txdctl |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
- /* Wait until TX Enable ready */
- if (hw->mac.type == ixgbe_mac_82599EB) {
- poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
- do {
- rte_delay_ms(1);
- txdctl = IXGBE_READ_REG(hw,
- IXGBE_TXDCTL(txq->reg_idx));
- } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
- if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not enable "
- "Tx Queue %d", tx_queue_id);
- }
- rte_wmb();
- IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
- IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- } else
- return -1;
+ /* Wait until TX Enable ready */
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ txdctl = IXGBE_READ_REG(hw,
+ IXGBE_TXDCTL(txq->reg_idx));
+ } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
+ tx_queue_id);
+ }
+ rte_wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
}
/*
* Stop Transmit Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (tx_queue_id >= dev->data->nb_tx_queues)
- return -1;
-
txq = dev->data->tx_queues[tx_queue_id];
/* Wait until TX queue is empty */
IXGBE_TDT(txq->reg_idx));
} while (--poll_ms && (txtdh != txtdt));
if (!poll_ms)
- PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
- "when stopping.", tx_queue_id);
+ PMD_INIT_LOG(ERR,
+ "Tx Queue %d is not empty when stopping.",
+ tx_queue_id);
}
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
IXGBE_TXDCTL(txq->reg_idx));
} while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not disable "
- "Tx Queue %d", tx_queue_id);
+ PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
+ tx_queue_id);
}
if (txq->ops != NULL) {
qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
- qinfo->conf.txq_flags = txq->txq_flags;
+ qinfo->conf.offloads = txq->offloads;
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
/*
* [VF] Initializes Receive Unit.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
/*
* [VF] Initializes Transmit Unit.
*/
-void __attribute__((cold))
+void __rte_cold
ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
/*
* [VF] Start Transmit and Receive Units.
*/
-void __attribute__((cold))
+void __rte_cold
ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
}
}
+int
+ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in)
+{
+ if (in->key_len > RTE_DIM(out->key) ||
+ in->queue_num > RTE_DIM(out->queue))
+ return -EINVAL;
+ out->conf = (struct rte_flow_action_rss){
+ .func = in->func,
+ .level = in->level,
+ .types = in->types,
+ .key_len = in->key_len,
+ .queue_num = in->queue_num,
+ .key = memcpy(out->key, in->key, in->key_len),
+ .queue = memcpy(out->queue, in->queue,
+ sizeof(*in->queue) * in->queue_num),
+ };
+ return 0;
+}
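/*
 * Illustrative sketch (not part of this patch; the helper name is
 * hypothetical): ixgbe_rss_conf_init() deep-copies a flow-API RSS
 * action into driver storage so the caller's key/queue arrays need
 * not outlive the flow.
 */
static inline int
example_store_rss_action(struct ixgbe_rte_flow_rss_conf *store)
{
	static const uint8_t key[40] = { 0 };	/* 40-byte ixgbe hash key */
	static const uint16_t queues[] = { 0, 1, 2, 3 };
	const struct rte_flow_action_rss rss = {
		.types = ETH_RSS_IP,
		.key_len = sizeof(key),
		.queue_num = RTE_DIM(queues),
		.key = key,
		.queue = queues,
	};

	/* returns 0, or -EINVAL if key/queues exceed driver storage */
	return ixgbe_rss_conf_init(store, &rss);
}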
+
+int
+ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with)
+{
+ return (comp->func == with->func &&
+ comp->level == with->level &&
+ comp->types == with->types &&
+ comp->key_len == with->key_len &&
+ comp->queue_num == with->queue_num &&
+ !memcmp(comp->key, with->key, with->key_len) &&
+ !memcmp(comp->queue, with->queue,
+ sizeof(*with->queue) * with->queue_num));
+}
+
int
ixgbe_config_rss_filter(struct rte_eth_dev *dev,
struct ixgbe_rte_flow_rss_conf *conf, bool add)
uint16_t j;
uint16_t sp_reta_size;
uint32_t reta_reg;
- struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = conf->conf.key_len ?
+ (void *)(uintptr_t)conf->conf.key : NULL,
+ .rss_key_len = conf->conf.key_len,
+ .rss_hf = conf->conf.types,
+ };
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
if (!add) {
- if (memcmp(conf, &filter_info->rss_info,
- sizeof(struct ixgbe_rte_flow_rss_conf)) == 0) {
+ if (ixgbe_action_rss_same(&filter_info->rss_info.conf,
+ &conf->conf)) {
ixgbe_rss_disable(dev);
memset(&filter_info->rss_info, 0,
sizeof(struct ixgbe_rte_flow_rss_conf));
return -EINVAL;
}
- if (filter_info->rss_info.num)
+ if (filter_info->rss_info.conf.queue_num)
return -EINVAL;
/* Fill in redirection table
* The byte-swap is needed because NIC registers are in
for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
- if (j == conf->num)
+ if (j == conf->conf.queue_num)
j = 0;
- reta = (reta << 8) | conf->queue[j];
+ reta = (reta << 8) | conf->conf.queue[j];
if ((i & 3) == 3)
IXGBE_WRITE_REG(hw, reta_reg,
rte_bswap32(reta));
*/
if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
ixgbe_rss_disable(dev);
- return -EINVAL;
+ return 0;
}
if (rss_conf.rss_key == NULL)
rss_conf.rss_key = rss_intel_key; /* Default hash key */
ixgbe_hw_rss_hash_set(hw, &rss_conf);
- rte_memcpy(&filter_info->rss_info,
- conf, sizeof(struct ixgbe_rte_flow_rss_conf));
+ if (ixgbe_rss_conf_init(&filter_info->rss_info, &conf->conf))
+ return -EINVAL;
return 0;
}
-/* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
-int __attribute__((weak))
+/* Stubs needed for linkage when RTE_ARCH_PPC_64 is set */
+#if defined(RTE_ARCH_PPC_64)
+int
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
{
return -1;
}
-uint16_t __attribute__((weak))
+uint16_t
ixgbe_recv_pkts_vec(
void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
return 0;
}
-uint16_t __attribute__((weak))
+uint16_t
ixgbe_recv_scattered_pkts_vec(
void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
return 0;
}
-int __attribute__((weak))
+int
ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
{
return -1;
}
+
+uint16_t
+ixgbe_xmit_fixed_burst_vec(void __rte_unused *tx_queue,
+ struct rte_mbuf __rte_unused **tx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+int
+ixgbe_txq_vec_setup(struct ixgbe_tx_queue __rte_unused *txq)
+{
+ return -1;
+}
+
+void
+ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue __rte_unused *rxq)
+{
+ return;
+}
+#endif