#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
+#include <rte_security_driver.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
status = txr[desc_to_clean_to].wb.status;
if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) {
- PMD_TX_FREE_LOG(DEBUG,
- "TX descriptor %4u is not done"
- "(port=%d queue=%d)",
- desc_to_clean_to,
- txq->port_id, txq->queue_id);
+ PMD_TX_LOG(DEBUG,
+			"TX descriptor %4u is not done "
+ "(port=%d queue=%d)",
+ desc_to_clean_to,
+ txq->port_id, txq->queue_id);
/* Failed to clean any descriptors, better luck next time */
return -(1);
}
nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
last_desc_cleaned);
- PMD_TX_FREE_LOG(DEBUG,
- "Cleaning %4u TX descriptors: %4u to %4u "
- "(port=%d queue=%d)",
- nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
- txq->port_id, txq->queue_id);
+ PMD_TX_LOG(DEBUG,
+ "Cleaning %4u TX descriptors: %4u to %4u "
+ "(port=%d queue=%d)",
+ nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
+ txq->port_id, txq->queue_id);
/*
* The last descriptor to clean is done, so that means all the
if (use_ipsec) {
union ixgbe_crypto_tx_desc_md *ipsec_mdata =
(union ixgbe_crypto_tx_desc_md *)
- &tx_pkt->udata64;
+ rte_security_dynfield(tx_pkt);
tx_offload.sa_idx = ipsec_mdata->sa_idx;
tx_offload.sec_pad_len = ipsec_mdata->pad_len;
}
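/*
 * Editorial sketch, not part of the patch: rte_security_dynfield() (from
 * rte_security_driver.h) replaces the removed mbuf udata64 field with a
 * dynamic mbuf field. A driver would typically confirm the field has been
 * registered before dereferencing it; ixgbe_crypto_md_of() is a
 * hypothetical helper illustrating that pattern.
 */
static inline union ixgbe_crypto_tx_desc_md *
ixgbe_crypto_md_of(struct rte_mbuf *m)
{
	/* The security layer registers the dynfield when sessions are set up. */
	if (!rte_security_dynfield_is_registered())
		return NULL;
	/* Resolve the per-mbuf offset of the 64-bit metadata slot. */
	return (union ixgbe_crypto_tx_desc_md *)rte_security_dynfield(m);
}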
* nb_used better be less than or equal to txq->tx_rs_thresh
*/
if (nb_used > txq->nb_tx_free) {
- PMD_TX_FREE_LOG(DEBUG,
- "Not enough free TX descriptors "
- "nb_used=%4u nb_free=%4u "
- "(port=%d queue=%d)",
- nb_used, txq->nb_tx_free,
- txq->port_id, txq->queue_id);
+ PMD_TX_LOG(DEBUG,
+ "Not enough free TX descriptors "
+ "nb_used=%4u nb_free=%4u "
+ "(port=%d queue=%d)",
+ nb_used, txq->nb_tx_free,
+ txq->port_id, txq->queue_id);
if (ixgbe_xmit_cleanup(txq) != 0) {
/* Could not clean any descriptors */
/* nb_used better be <= txq->tx_rs_thresh */
if (unlikely(nb_used > txq->tx_rs_thresh)) {
- PMD_TX_FREE_LOG(DEBUG,
- "The number of descriptors needed to "
- "transmit the packet exceeds the "
- "RS bit threshold. This will impact "
- "performance."
- "nb_used=%4u nb_free=%4u "
- "tx_rs_thresh=%4u. "
- "(port=%d queue=%d)",
- nb_used, txq->nb_tx_free,
- txq->tx_rs_thresh,
- txq->port_id, txq->queue_id);
+ PMD_TX_LOG(DEBUG,
+ "The number of descriptors needed to "
+ "transmit the packet exceeds the "
+ "RS bit threshold. This will impact "
+			"performance. "
+ "nb_used=%4u nb_free=%4u "
+ "tx_rs_thresh=%4u. "
+ "(port=%d queue=%d)",
+ nb_used, txq->nb_tx_free,
+ txq->tx_rs_thresh,
+ txq->port_id, txq->queue_id);
/*
* Loop here until there are enough TX
* descriptors or until the ring cannot be
}
ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
- tx_offload, &tx_pkt->udata64);
+ tx_offload,
+ rte_security_dynfield(tx_pkt));
txe->last_id = tx_last;
tx_id = txe->next_id;
/* Set RS bit only on threshold packets' last descriptor */
if (txq->nb_tx_used >= txq->tx_rs_thresh) {
- PMD_TX_FREE_LOG(DEBUG,
- "Setting RS bit on TXD id="
- "%4u (port=%d queue=%d)",
- tx_last, txq->port_id, txq->queue_id);
+ PMD_TX_LOG(DEBUG,
+ "Setting RS bit on TXD id="
+ "%4u (port=%d queue=%d)",
+ tx_last, txq->port_id, txq->queue_id);
cmd_type_len |= IXGBE_TXD_CMD_RS;
return i;
}
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#ifdef RTE_ETHDEV_DEBUG_TX
ret = rte_validate_tx_offload(m);
if (ret != 0) {
rte_errno = -ret;
RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
};
+static int
+ixgbe_monitor_callback(const uint64_t value,
+ const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
+{
+ const uint64_t m = rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD);
+	/*
+	 * The DD bit is set once the descriptor has been written back, so
+	 * return -1 (stop monitoring) when it is already set, and 0 to let
+	 * the core keep sleeping until it changes.
+	 */
+ return (value & m) == m ? -1 : 0;
+}
+
+int
+ixgbe_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
+{
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct ixgbe_rx_queue *rxq = rx_queue;
+ uint16_t desc;
+
+ desc = rxq->rx_tail;
+ rxdp = &rxq->rx_ring[desc];
+ /* watch for changes in status bit */
+ pmc->addr = &rxdp->wb.upper.status_error;
+
+ /* comparison callback */
+ pmc->fn = ixgbe_monitor_callback;
+
+ /* the registers are 32-bit */
+ pmc->size = sizeof(uint32_t);
+
+ return 0;
+}
+
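/*
 * Editorial usage sketch, not part of the patch: an application reaches the
 * callback above through the generic ethdev API. rx_wait_sketch() is a
 * hypothetical helper; rte_eth_get_monitor_addr() and rte_power_monitor()
 * are the public DPDK calls (rte_ethdev.h, rte_power_intrinsics.h) backing it.
 */
static void
rx_wait_sketch(uint16_t port_id, uint16_t queue_id)
{
	struct rte_power_monitor_cond pmc;

	/* For ixgbe ports this ends up in ixgbe_get_monitor_addr(). */
	if (rte_eth_get_monitor_addr(port_id, queue_id, &pmc) != 0)
		return;
	/* Sleep until the DD bit flips or the TSC deadline passes (~1 ms). */
	rte_power_monitor(&pmc, rte_rdtsc() + rte_get_tsc_hz() / 1000);
}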
/* @note: fix ixgbe_dev_supported_ptypes_get() if any change is made here. */
static inline uint32_t
ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
}
static inline uint64_t
-rx_desc_error_to_pkt_flags(uint32_t rx_status)
+rx_desc_error_to_pkt_flags(uint32_t rx_status, uint16_t pkt_info,
+ uint8_t rx_udp_csum_zero_err)
{
uint64_t pkt_flags;
pkt_flags = error_to_pkt_flags_map[(rx_status >>
IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
+	/* Mask out the bad UDP checksum error if the hardware has the UDP
+	 * zero-checksum errata, in which case the application must recompute
+	 * the checksum itself if it needs to verify it.
+	 */
+ if ((rx_status & IXGBE_RXDADV_ERR_TCPE) &&
+ (pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
+ rx_udp_csum_zero_err)
+ pkt_flags &= ~PKT_RX_L4_CKSUM_BAD;
+
if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) &&
(rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) {
- pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
+ pkt_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
}
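/*
 * Editorial sketch, not part of the patch: once the errata workaround clears
 * PKT_RX_L4_CKSUM_BAD, the application sees the checksum status as unknown
 * and must verify it in software. udp_cksum_ok() is a hypothetical helper
 * built on the public rte_ipv4_udptcp_cksum() routine from rte_ip.h.
 */
static int
udp_cksum_ok(struct rte_ipv4_hdr *ip, struct rte_udp_hdr *udp)
{
	uint16_t rx_cksum = udp->dgram_cksum;
	uint16_t calc;

	if (rx_cksum == 0)
		return 1;	/* RFC 768: zero means no checksum present */
	udp->dgram_cksum = 0;	/* the field must be zero while computing */
	calc = rte_ipv4_udptcp_cksum(ip, udp);
	udp->dgram_cksum = rx_cksum;	/* restore the received value */
	return rx_cksum == calc;
}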
#ifdef RTE_LIB_SECURITY
/* convert descriptor fields to rte mbuf flags */
pkt_flags = rx_desc_status_to_pkt_flags(s[j],
vlan_flags);
- pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
+ pkt_flags |= rx_desc_error_to_pkt_flags(s[j],
+ (uint16_t)pkt_info[j],
+ rxq->rx_udp_csum_zero_err);
pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags
((uint16_t)pkt_info[j]);
mb->ol_flags = pkt_flags;
rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags);
- pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
+ pkt_flags = pkt_flags |
+ rx_desc_error_to_pkt_flags(staterr, (uint16_t)pkt_info,
+ rxq->rx_udp_csum_zero_err);
pkt_flags = pkt_flags |
ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
rxm->ol_flags = pkt_flags;
head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
- pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
+ pkt_flags |= rx_desc_error_to_pkt_flags(staterr, (uint16_t)pkt_info,
+ rxq->rx_udp_csum_zero_err);
pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
head->ol_flags = pkt_flags;
head->packet_type =
rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
rxq->rx_tail = 0;
rxq->nb_rx_hold = 0;
+
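+	/* Free a partially assembled mbuf chain left over from a previous
+	 * run so that it is not leaked when the queue is reset.
+	 */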
+ if (rxq->pkt_first_seg != NULL)
+ rte_pktmbuf_free(rxq->pkt_first_seg);
+
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
else
rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_82599;
+	/*
+	 * Per the 82599 errata, UDP frames with a zero checksum can be
+	 * wrongly marked as checksum errors.
+	 */
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ rxq->rx_udp_csum_zero_err = 1;
+
/*
* Allocate RX ring hardware descriptors. A memzone large enough to
* handle the maximum ring size is allocated in order to allow for
/* RFCTL configuration */
rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
- /*
- * Since NFS packets coalescing is not supported - clear
- * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
- * enabled.
- */
- rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
- IXGBE_RFCTL_NFSR_DIS);
+ rfctl &= ~IXGBE_RFCTL_RSC_DIS;
else
rfctl |= IXGBE_RFCTL_RSC_DIS;
+ /* disable NFS filtering */
+ rfctl |= IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS;
IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
/* If LRO hasn't been requested - we are done here. */
* ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
 * reception of VF packets works in all cases.
*/
- ixgbevf_rlpml_set_vf(hw,
- (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ if (ixgbevf_rlpml_set_vf(hw,
+ (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len)) {
+		PMD_INIT_LOG(ERR, "Failed to set max packet length to %d.",
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ return -EINVAL;
+ }
/*
* Assume no header split and no VLAN strip support