/* Init the RX tail register. */
ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
- err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+ err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
if (err) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
rx_queue_id);
if (rx_queue_id < dev->data->nb_rx_queues) {
rxq = dev->data->rx_queues[rx_queue_id];
- err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+ err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
if (err) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
rx_queue_id);
{
struct ice_vsi *vsi = rxq->vsi;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
- uint32_t rxdid = ICE_RXDID_COMMS_GENERIC;
+ uint32_t rxdid = ICE_RXDID_LEGACY_1;
struct ice_rlan_ctx rx_ctx;
enum ice_status err;
uint32_t regval;
rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
rx_ctx.dtype = 0; /* No Header Split mode */
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
rx_ctx.dsize = 1; /* 32B descriptors */
-#endif
rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
/* TPH: Transaction Layer Packet (TLP) processing hints */
rx_ctx.tphrdesc_ena = 1;
/* Init the RX tail register. */
ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
- err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+ err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
if (err) {
PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
rx_queue_id);
rxq = pf->fdir.rxq;
- err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+ err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
if (err) {
PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
rx_queue_id);
}
ice_reset_rx_queue(rxq);
- rxq->q_set = TRUE;
+ rxq->q_set = true;
dev->data->rx_queues[queue_idx] = rxq;
rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
}
ice_reset_tx_queue(txq);
- txq->q_set = TRUE;
+ txq->q_set = true;
dev->data->tx_queues[queue_idx] = txq;
txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
ice_set_tx_function_flag(dev, txq);
 * don't need to allocate software ring and reset for the fdir
 * program queue, just mark the queue as configured.
*/
- txq->q_set = TRUE;
+ txq->q_set = true;
pf->fdir.txq = txq;
txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
}
/* Allocate RX hardware ring descriptors. */
- ring_size = sizeof(union ice_rx_flex_desc) * ICE_FDIR_NUM_RX_DESC;
+ ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC;
ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
rxq->rx_ring_dma = rz->iova;
memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
- sizeof(union ice_rx_flex_desc));
+ sizeof(union ice_32byte_rx_desc));
rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
/*
 * Don't need to allocate software ring and reset for the fdir
 * rx queue, just mark the queue as configured.
*/
- rxq->q_set = TRUE;
+ rxq->q_set = true;
pf->fdir.rxq = rxq;
rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
return ctx_desc;
}
+/* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
+#define ICE_MAX_DATA_PER_TXD \
+ (ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
+/* Calculate the number of TX descriptors needed for each pkt */
+static inline uint16_t
+ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
+{
+ struct rte_mbuf *txd = tx_pkt;
+ uint16_t count = 0;
+
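+	/* A segment larger than ICE_MAX_DATA_PER_TXD must be split
+	 * across multiple data descriptors, hence the per-segment
+	 * round-up.
+	 */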
+ while (txd != NULL) {
+ count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
+ txd = txd->next;
+ }
+
+ return count;
+}
+
uint16_t
ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
uint32_t td_offset = 0;
uint32_t td_tag = 0;
uint16_t tx_last;
+ uint16_t slen;
uint64_t buf_dma_addr;
uint64_t ol_flags;
union ice_tx_offload tx_offload = {0};
/* Check if the descriptor ring needs to be cleaned. */
if (txq->nb_tx_free < txq->tx_free_thresh)
- ice_xmit_cleanup(txq);
+ (void)ice_xmit_cleanup(txq);
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
tx_pkt = *tx_pkts++;
/* The number of descriptors that must be allocated for
* a packet equals to the number of the segments of that
* packet plus the number of context descriptor if needed.
+		 * When TSO is enabled, recalculate the number of descriptors
+		 * needed, since an mbuf segment may exceed the maximum data
+		 * size the HW allows per TX descriptor.
*/
- nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
+ if (ol_flags & PKT_TX_TCP_SEG)
+ nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
+ nb_ctx);
+ else
+ nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
tx_last = (uint16_t)(tx_id + nb_used - 1);
/* Circular ring */
txe->mbuf = m_seg;
/* Setup TX Descriptor */
+ slen = m_seg->data_len;
buf_dma_addr = rte_mbuf_data_iova(m_seg);
+
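+			/* For TSO, a single mbuf segment may exceed the
+			 * maximum data size per descriptor; emit full-size
+			 * data descriptors until the remainder fits in one.
+			 */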
+ while ((ol_flags & PKT_TX_TCP_SEG) &&
+ unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
+ txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
+ txd->cmd_type_offset_bsz =
+ rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
+ ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
+ ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
+ ((uint64_t)ICE_MAX_DATA_PER_TXD <<
+ ICE_TXD_QW1_TX_BUF_SZ_S) |
+ ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
+
+ buf_dma_addr += ICE_MAX_DATA_PER_TXD;
+ slen -= ICE_MAX_DATA_PER_TXD;
+
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ txd = &tx_ring[tx_id];
+ txn = &sw_ring[txe->next_id];
+ }
+
txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
txd->cmd_type_offset_bsz =
rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
- ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
+ ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
- ((uint64_t)m_seg->data_len <<
- ICE_TXD_QW1_TX_BUF_SZ_S) |
- ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
+ ((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
+ ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
txe->last_id = tx_last;
tx_id = txe->next_id;
return txq->tx_rs_thresh;
}
+static int
+ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
+ uint32_t free_cnt)
+{
+ struct ice_tx_entry *swr_ring = txq->sw_ring;
+ uint16_t i, tx_last, tx_id;
+ uint16_t nb_tx_free_last;
+ uint16_t nb_tx_to_clean;
+ uint32_t pkt_cnt;
+
+ /* Start free mbuf from the next of tx_tail */
+ tx_last = txq->tx_tail;
+ tx_id = swr_ring[tx_last].next_id;
+
+ if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq))
+ return 0;
+
+ nb_tx_to_clean = txq->nb_tx_free;
+ nb_tx_free_last = txq->nb_tx_free;
+ if (!free_cnt)
+ free_cnt = txq->nb_tx_desc;
+
+	/* Loop through swr_ring to count the number of
+	 * freeable mbufs and packets.
+	 */
+ for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+ for (i = 0; i < nb_tx_to_clean &&
+ pkt_cnt < free_cnt &&
+ tx_id != tx_last; i++) {
+ if (swr_ring[tx_id].mbuf != NULL) {
+ rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
+ swr_ring[tx_id].mbuf = NULL;
+
+ /*
+ * last segment in the packet,
+ * increment packet count
+ */
+ pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
+ }
+
+ tx_id = swr_ring[tx_id].next_id;
+ }
+
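+		/* Stop when fewer than tx_rs_thresh descriptors remain in
+		 * use (no further cleanup burst is possible) or the ring
+		 * has been walked back to the starting position.
+		 */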
+ if (txq->tx_rs_thresh > txq->nb_tx_desc -
+ txq->nb_tx_free || tx_id == tx_last)
+ break;
+
+ if (pkt_cnt < free_cnt) {
+ if (ice_xmit_cleanup(txq))
+ break;
+
+ nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
+ nb_tx_free_last = txq->nb_tx_free;
+ }
+ }
+
+ return (int)pkt_cnt;
+}
+
+#ifdef RTE_ARCH_X86
+static int
+ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
+ uint32_t free_cnt __rte_unused)
+{
+ return -ENOTSUP;
+}
+#endif
+
+static int
+ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
+ uint32_t free_cnt)
+{
+ int i, n, cnt;
+
+ if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
+ free_cnt = txq->nb_tx_desc;
+
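+	/* ice_tx_free_bufs() releases descriptors in bursts of tx_rs_thresh,
+	 * so round the requested count down to a multiple of that burst.
+	 */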
+ cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
+
+ for (i = 0; i < cnt; i += n) {
+ if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
+ break;
+
+ n = ice_tx_free_bufs(txq);
+
+ if (n == 0)
+ break;
+ }
+
+ return i;
+}
+
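+/* Select the TX done cleanup routine matching the TX path in use
+ * (vector, simple or full-featured scalar).
+ */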
+int
+ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+ struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
+ struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+#ifdef RTE_ARCH_X86
+ if (ad->tx_vec_allowed)
+ return ice_tx_done_cleanup_vec(q, free_cnt);
+#endif
+ if (ad->tx_simple_allowed)
+ return ice_tx_done_cleanup_simple(q, free_cnt);
+ else
+ return ice_tx_done_cleanup_full(q, free_cnt);
+}
+
/* Populate 4 descriptors with data from 4 mbufs */
static inline void
tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
RTE_PTYPE_L4_TCP,
[93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_SCTP,
- [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_ICMP,
/* IPv6 --> IPv4 */
ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
}
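+/* Bit fields in QW1 of the FDIR programming status descriptor write-back. */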
+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S 1
+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M \
+ (0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S)
+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0
+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1
+
+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S 4
+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M \
+ (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S)
+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S 5
+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M \
+ (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S)
+
+/*
+ * Check the programming status descriptor in the RX queue.
+ * This is done after a Flow Director programming request has been
+ * submitted on the TX queue.
+ */
+static inline int
+ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
+{
+ volatile union ice_32byte_rx_desc *rxdp;
+ uint64_t qword1;
+ uint32_t rx_status;
+ uint32_t error;
+ uint32_t id;
+ int ret = -EAGAIN;
+
+ rxdp = (volatile union ice_32byte_rx_desc *)
+ (&rxq->rx_ring[rxq->rx_tail]);
+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+ rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
+ >> ICE_RXD_QW1_STATUS_S;
+
+ if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
+ ret = 0;
+ error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
+ ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
+ id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
+ ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
+ if (error) {
+ if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
+ PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
+ else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
+ PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
+ ret = -EINVAL;
+ goto err;
+ }
+ error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
+ ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
+ if (error) {
+ PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
+ ret = -EINVAL;
+ }
+err:
+ rxdp->wb.qword1.status_error_len = 0;
+ rxq->rx_tail++;
+ if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
+ rxq->rx_tail = 0;
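+		/* The tail register points one entry behind the next
+		 * descriptor to be checked by software.
+		 */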
+ if (rxq->rx_tail == 0)
+ ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ else
+ ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
+ }
+
+ return ret;
+}
+
#define ICE_FDIR_MAX_WAIT_US 10000
int
ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
{
struct ice_tx_queue *txq = pf->fdir.txq;
+ struct ice_rx_queue *rxq = pf->fdir.rxq;
volatile struct ice_fltr_desc *fdirdp;
volatile struct ice_tx_desc *txdp;
uint32_t td_cmd;
return -ETIMEDOUT;
}
- return 0;
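+	/* The programming descriptor has completed on the TX queue; now
+	 * poll the FDIR RX queue for the programming status write-back.
+	 */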
+ for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
+ int ret;
+
+ ret = ice_check_fdir_programming_status(rxq);
+ if (ret == -EAGAIN)
+ rte_delay_us(1);
+ else
+ return ret;
+ }
+
+	PMD_DRV_LOG(ERR,
+		    "Failed to program FDIR filter: programming status not reported.");
+ return -ETIMEDOUT;
}