#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
+#include <rte_security_driver.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_net.h>
+#include <rte_vect.h>
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
/* update tail pointer */
rte_wmb();
- IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);
+ IXGBE_PCI_REG_WC_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);
return nb_pkts;
}
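
The tail (doorbell) update above now goes through the write-combining variant of the register write. As a rough sketch of what that helper is assumed to expand to (the real macro lives in the ixgbe headers, not in this hunk), it byte-swaps the value and issues a relaxed store through rte_write32_wc_relaxed() from rte_io.h; on targets without a dedicated WC path the generic implementation falls back to an ordinary relaxed MMIO write:

#include <rte_io.h>
#include <rte_byteorder.h>

/* Hypothetical stand-in for the WC doorbell macro used above; the actual
 * definition is in the driver headers and may differ.
 */
#define SKETCH_PCI_REG_WC_WRITE_RELAXED(reg_addr, value) \
	rte_write32_wc_relaxed(rte_cpu_to_le_32(value), (reg_addr))
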
seqnum_seed |= tx_offload.l2_len
<< IXGBE_ADVTXD_TUNNEL_LEN;
}
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
if (ol_flags & PKT_TX_SEC_OFFLOAD) {
union ixgbe_crypto_tx_desc_md *md =
(union ixgbe_crypto_tx_desc_md *)mdata;
desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
status = txr[desc_to_clean_to].wb.status;
if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) {
- PMD_TX_FREE_LOG(DEBUG,
- "TX descriptor %4u is not done"
- "(port=%d queue=%d)",
- desc_to_clean_to,
- txq->port_id, txq->queue_id);
+ PMD_TX_LOG(DEBUG,
+ "TX descriptor %4u is not done"
+ "(port=%d queue=%d)",
+ desc_to_clean_to,
+ txq->port_id, txq->queue_id);
/* Failed to clean any descriptors, better luck next time */
return -(1);
}
nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
last_desc_cleaned);
- PMD_TX_FREE_LOG(DEBUG,
- "Cleaning %4u TX descriptors: %4u to %4u "
- "(port=%d queue=%d)",
- nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
- txq->port_id, txq->queue_id);
+ PMD_TX_LOG(DEBUG,
+ "Cleaning %4u TX descriptors: %4u to %4u "
+ "(port=%d queue=%d)",
+ nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
+ txq->port_id, txq->queue_id);
/*
* The last descriptor to clean is done, so that means all the
uint32_t ctx = 0;
uint32_t new_ctx;
union ixgbe_tx_offload tx_offload;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
uint8_t use_ipsec;
#endif
* are needed for offload functionality.
*/
ol_flags = tx_pkt->ol_flags;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
#endif
tx_offload.tso_segsz = tx_pkt->tso_segsz;
tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
if (use_ipsec) {
union ixgbe_crypto_tx_desc_md *ipsec_mdata =
(union ixgbe_crypto_tx_desc_md *)
- &tx_pkt->udata64;
+ rte_security_dynfield(tx_pkt);
tx_offload.sa_idx = ipsec_mdata->sa_idx;
tx_offload.sec_pad_len = ipsec_mdata->pad_len;
}
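
Since the IPsec metadata now comes from the rte_security dynamic mbuf field instead of udata64, the application side has to populate that field through the rte_security API before handing the packet to the TX burst. A minimal sketch, assuming an already established inline-crypto session and the port's security context (the helper name is illustrative, not from this patch):

#include <rte_mbuf.h>
#include <rte_security.h>

/* Hedged sketch: request inline IPsec processing on one mbuf. */
static inline int
app_prepare_inline_ipsec_pkt(struct rte_security_ctx *sec_ctx,
		struct rte_security_session *sess, struct rte_mbuf *m)
{
	/* Mark the packet for the security offload checked above. */
	m->ol_flags |= PKT_TX_SEC_OFFLOAD;

	/* Let rte_security store the driver metadata in the dynamic field
	 * that rte_security_dynfield() reads on the ixgbe TX path.
	 */
	return rte_security_set_pkt_metadata(sec_ctx, sess, m, NULL);
}
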
* nb_used better be less than or equal to txq->tx_rs_thresh
*/
if (nb_used > txq->nb_tx_free) {
- PMD_TX_FREE_LOG(DEBUG,
- "Not enough free TX descriptors "
- "nb_used=%4u nb_free=%4u "
- "(port=%d queue=%d)",
- nb_used, txq->nb_tx_free,
- txq->port_id, txq->queue_id);
+ PMD_TX_LOG(DEBUG,
+ "Not enough free TX descriptors "
+ "nb_used=%4u nb_free=%4u "
+ "(port=%d queue=%d)",
+ nb_used, txq->nb_tx_free,
+ txq->port_id, txq->queue_id);
if (ixgbe_xmit_cleanup(txq) != 0) {
/* Could not clean any descriptors */
/* nb_used better be <= txq->tx_rs_thresh */
if (unlikely(nb_used > txq->tx_rs_thresh)) {
- PMD_TX_FREE_LOG(DEBUG,
- "The number of descriptors needed to "
- "transmit the packet exceeds the "
- "RS bit threshold. This will impact "
- "performance."
- "nb_used=%4u nb_free=%4u "
- "tx_rs_thresh=%4u. "
- "(port=%d queue=%d)",
- nb_used, txq->nb_tx_free,
- txq->tx_rs_thresh,
- txq->port_id, txq->queue_id);
+ PMD_TX_LOG(DEBUG,
+ "The number of descriptors needed to "
+ "transmit the packet exceeds the "
+ "RS bit threshold. This will impact "
+ "performance."
+ "nb_used=%4u nb_free=%4u "
+ "tx_rs_thresh=%4u. "
+ "(port=%d queue=%d)",
+ nb_used, txq->nb_tx_free,
+ txq->tx_rs_thresh,
+ txq->port_id, txq->queue_id);
/*
* Loop here until there are enough TX
* descriptors or until the ring cannot be
}
ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
- tx_offload, &tx_pkt->udata64);
+ tx_offload,
+ rte_security_dynfield(tx_pkt));
txe->last_id = tx_last;
tx_id = txe->next_id;
}
olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
if (use_ipsec)
olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
#endif
/* Set RS bit only on threshold packets' last descriptor */
if (txq->nb_tx_used >= txq->tx_rs_thresh) {
- PMD_TX_FREE_LOG(DEBUG,
- "Setting RS bit on TXD id="
- "%4u (port=%d queue=%d)",
- tx_last, txq->port_id, txq->queue_id);
+ PMD_TX_LOG(DEBUG,
+ "Setting RS bit on TXD id="
+ "%4u (port=%d queue=%d)",
+ tx_last, txq->port_id, txq->queue_id);
cmd_type_len |= IXGBE_TXD_CMD_RS;
PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
(unsigned) txq->port_id, (unsigned) txq->queue_id,
(unsigned) tx_id, (unsigned) nb_tx);
- IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
+ IXGBE_PCI_REG_WC_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
txq->tx_tail = tx_id;
return nb_tx;
return i;
}
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#ifdef RTE_ETHDEV_DEBUG_TX
ret = rte_validate_tx_offload(m);
if (ret != 0) {
rte_errno = -ret;
RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
};
+static int
+ixgbe_monitor_callback(const uint64_t value,
+ const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
+{
+ const uint64_t m = rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD);
+ /*
+ * we expect the DD bit to be set to 1 if this descriptor was already
+ * written to.
+ */
+ return (value & m) == m ? -1 : 0;
+}
+
+int
+ixgbe_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
+{
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct ixgbe_rx_queue *rxq = rx_queue;
+ uint16_t desc;
+
+ desc = rxq->rx_tail;
+ rxdp = &rxq->rx_ring[desc];
+ /* watch for changes in status bit */
+ pmc->addr = &rxdp->wb.upper.status_error;
+
+ /* comparison callback */
+ pmc->fn = ixgbe_monitor_callback;
+
+ /* the registers are 32-bit */
+ pmc->size = sizeof(uint32_t);
+
+ return 0;
+}
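
ixgbe_get_monitor_addr() is the callback behind rte_eth_get_monitor_addr(), so an application (or the PMD power-management helpers) can use it to doze until the next descriptor is written back. A minimal sketch of the polling-loop side, assuming the 21.08-era rte_power_monitor() intrinsic; the call simply returns an error where monitoring is unsupported:

#include <rte_ethdev.h>
#include <rte_power_intrinsics.h>
#include <rte_cycles.h>

/* Hedged sketch: block on the RX queue's next descriptor, with a TSC bound. */
static int
app_wait_for_rx(uint16_t port_id, uint16_t queue_id, uint64_t max_tsc_wait)
{
	struct rte_power_monitor_cond pmc;
	int ret;

	/* Ask the PMD (ixgbe_get_monitor_addr() here) what to watch. */
	ret = rte_eth_get_monitor_addr(port_id, queue_id, &pmc);
	if (ret != 0)
		return ret;

	/* Sleep until the DD bit flips or the timeout expires. */
	return rte_power_monitor(&pmc, rte_get_tsc_cycles() + max_tsc_wait);
}
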
+
/* @note: fix ixgbe_dev_supported_ptypes_get() if any change here. */
static inline uint32_t
ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
}
static inline uint64_t
-rx_desc_error_to_pkt_flags(uint32_t rx_status)
+rx_desc_error_to_pkt_flags(uint32_t rx_status, uint16_t pkt_info,
+ uint8_t rx_udp_csum_zero_err)
{
uint64_t pkt_flags;
pkt_flags = error_to_pkt_flags_map[(rx_status >>
IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
+ /* Mask out the bad UDP checksum error if the hardware has the UDP
+ * zero-checksum errata, so that the software application will then
+ * have to recompute the checksum itself if needed.
+ */
+ if ((rx_status & IXGBE_RXDADV_ERR_TCPE) &&
+ (pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
+ rx_udp_csum_zero_err)
+ pkt_flags &= ~PKT_RX_L4_CKSUM_BAD;
+
if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) &&
(rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) {
- pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
+ pkt_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
}
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
if (rx_status & IXGBE_RXD_STAT_SECP) {
pkt_flags |= PKT_RX_SEC_OFFLOAD;
if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
/* convert descriptor fields to rte mbuf flags */
pkt_flags = rx_desc_status_to_pkt_flags(s[j],
vlan_flags);
- pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
+ pkt_flags |= rx_desc_error_to_pkt_flags(s[j],
+ (uint16_t)pkt_info[j],
+ rxq->rx_udp_csum_zero_err);
pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags
((uint16_t)pkt_info[j]);
mb->ol_flags = pkt_flags;
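
Because the errata handling above clears PKT_RX_L4_CKSUM_BAD for affected UDP packets, the application sees an unknown L4 checksum status and must verify it in software if it cares. A minimal sketch of that re-check, assuming an untagged Ethernet frame carrying non-tunnelled IPv4/UDP with all headers in the first segment (the helper name is illustrative):

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_mbuf.h>

/* Hedged sketch: software UDP checksum re-check for the 82599 errata case. */
static inline int
app_udp_cksum_ok(struct rte_mbuf *m)
{
	struct rte_ipv4_hdr *ip;
	struct rte_udp_hdr *udp;
	uint16_t rx_cksum, calc;

	if ((m->ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_GOOD)
		return 1; /* HW already validated it */

	ip = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
			sizeof(struct rte_ether_hdr));
	udp = (struct rte_udp_hdr *)((char *)ip +
			(ip->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
			RTE_IPV4_IHL_MULTIPLIER);

	rx_cksum = udp->dgram_cksum;
	if (rx_cksum == 0)
		return 1; /* sender did not use the UDP checksum */

	/* rte_ipv4_udptcp_cksum() expects the checksum field zeroed out. */
	udp->dgram_cksum = 0;
	calc = rte_ipv4_udptcp_cksum(ip, udp);
	udp->dgram_cksum = rx_cksum;

	return calc == rx_cksum;
}
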
/* update tail pointer */
rte_wmb();
- IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
+ IXGBE_PCI_REG_WC_WRITE_RELAXED(rxq->rdt_reg_addr,
cur_free_trigger);
}
rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags);
- pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
+ pkt_flags = pkt_flags |
+ rx_desc_error_to_pkt_flags(staterr, (uint16_t)pkt_info,
+ rxq->rx_udp_csum_zero_err);
pkt_flags = pkt_flags |
ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
rxm->ol_flags = pkt_flags;
(unsigned) nb_rx);
rx_id = (uint16_t) ((rx_id == 0) ?
(rxq->nb_rx_desc - 1) : (rx_id - 1));
- IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+ IXGBE_PCI_REG_WC_WRITE(rxq->rdt_reg_addr, rx_id);
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
- pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
+ pkt_flags |= rx_desc_error_to_pkt_flags(staterr, (uint16_t)pkt_info,
+ rxq->rx_udp_csum_zero_err);
pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
head->ol_flags = pkt_flags;
head->packet_type =
if (!ixgbe_rx_alloc_bufs(rxq, false)) {
rte_wmb();
- IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
- next_rdt);
+ IXGBE_PCI_REG_WC_WRITE_RELAXED(
+ rxq->rdt_reg_addr,
+ next_rdt);
nb_hold -= rxq->rx_free_thresh;
} else {
PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
rte_wmb();
- IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id);
+ IXGBE_PCI_REG_WC_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id);
nb_hold = 0;
}
{
struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
if (txq->offloads == 0 &&
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
!(txq->using_ipsec) &&
#endif
txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST) {
if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 &&
(rte_eal_process_type() != RTE_PROC_PRIMARY ||
txq->sw_ring_v != NULL)) {
return ixgbe_tx_done_cleanup_vec(txq, free_cnt);
{
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
if ((txq->offloads == 0) &&
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
!(txq->using_ipsec) &&
#endif
(txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
PMD_INIT_LOG(DEBUG, "Using simple tx code path");
dev->tx_pkt_prepare = NULL;
if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 &&
(rte_eal_process_type() != RTE_PROC_PRIMARY ||
ixgbe_txq_vec_setup(txq) == 0)) {
PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
hw->mac.type == ixgbe_mac_X550EM_a)
tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
if (dev->security_ctx)
tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
#endif
txq->offloads = offloads;
txq->ops = &def_txq_ops;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
DEV_TX_OFFLOAD_SECURITY);
#endif
rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
rxq->rx_tail = 0;
rxq->nb_rx_hold = 0;
+
+ if (rxq->pkt_first_seg != NULL)
+ rte_pktmbuf_free(rxq->pkt_first_seg);
+
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
hw->mac.type == ixgbe_mac_X550EM_a)
offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
if (dev->security_ctx)
offloads |= DEV_RX_OFFLOAD_SECURITY;
#endif
else
rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_82599;
+ /*
+ * 82599 errata: UDP frames with a zero checksum can be marked as
+ * checksum errors.
+ */
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ rxq->rx_udp_csum_zero_err = 1;
+
/*
* Allocate RX ring hardware descriptors. A memzone large enough to
* handle the maximum ring size is allocated in order to allow for
* conditions to be met and Rx Bulk Allocation should be allowed.
*/
if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
- !adapter->rx_bulk_alloc_allowed) {
+ !adapter->rx_bulk_alloc_allowed ||
+ rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128) {
PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
"preconditions",
dev->data->port_id);
struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
rxq->rx_using_sse = rx_using_sse;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_SECURITY);
#endif
/* RFCTL configuration */
rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
- /*
- * Since NFS packets coalescing is not supported - clear
- * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
- * enabled.
- */
- rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
- IXGBE_RFCTL_NFSR_DIS);
+ rfctl &= ~IXGBE_RFCTL_RSC_DIS;
else
rfctl |= IXGBE_RFCTL_RSC_DIS;
+ /* disable NFS filtering */
+ rfctl |= IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS;
IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
/* If LRO hasn't been requested - we are done here. */
ixgbe_setup_loopback_link_x540_x550(hw, true);
}
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
if ((dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_SECURITY) ||
(dev->data->dev_conf.txmode.offloads &
* ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
* VF packets received can work in all cases.
*/
- ixgbevf_rlpml_set_vf(hw,
- (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ if (ixgbevf_rlpml_set_vf(hw,
+ (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len)) {
+ PMD_INIT_LOG(ERR, "Set max packet length to %d failed.",
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ return -EINVAL;
+ }
/*
* Assume no header split and no VLAN strip support
return 0;
}
-/* Stubs needed for linkage when CONFIG_RTE_ARCH_PPC_64 is set */
+/* Stubs needed for linkage when RTE_ARCH_PPC_64 is set */
#if defined(RTE_ARCH_PPC_64)
int
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)