{
struct ice_vsi *vsi = rxq->vsi;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
- uint32_t rxdid = ICE_RXDID_COMMS_GENERIC;
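+ /* FDIR programming status is written back in the legacy 32-byte
+ * descriptor format, so this queue uses a legacy RXDID rather
+ * than a flexible one.
+ */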
+ uint32_t rxdid = ICE_RXDID_LEGACY_1;
struct ice_rlan_ctx rx_ctx;
enum ice_status err;
uint32_t regval;
rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
rx_ctx.dtype = 0; /* No Header Split mode */
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
rx_ctx.dsize = 1; /* 32B descriptors */
-#endif
rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
/* TPH: Transaction Layer Packet (TLP) processing hints */
rx_ctx.tphrdesc_ena = 1;
}
/* Allocate RX hardware ring descriptors. */
- ring_size = sizeof(union ice_rx_flex_desc) * ICE_FDIR_NUM_RX_DESC;
+ ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC;
ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
rxq->rx_ring_dma = rz->iova;
memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
- sizeof(union ice_rx_flex_desc));
+ sizeof(union ice_32byte_rx_desc));
rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
}
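+/*
+ * Bit fields of the status/error quad-word (QW1) in the FDIR
+ * programming status write-back descriptor: the programming ID
+ * (add or delete) and the failure flags.
+ */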
+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S 1
+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M \
+ (0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S)
+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0
+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1
+
+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S 4
+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M \
+ (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S)
+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S 5
+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M \
+ (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S)
+
+/*
+ * Check the programming status descriptor in the Rx queue.
+ * This is done after a Flow Director filter programming
+ * descriptor has been submitted on the Tx queue.
+ */
+static inline int
+ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
+{
+ volatile union ice_32byte_rx_desc *rxdp;
+ uint64_t qword1;
+ uint32_t rx_status;
+ uint32_t error;
+ uint32_t id;
+ int ret = -EAGAIN;
+
+ rxdp = (volatile union ice_32byte_rx_desc *)
+ (&rxq->rx_ring[rxq->rx_tail]);
+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+ rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
+ >> ICE_RXD_QW1_STATUS_S;
+
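+ /* The DD bit indicates hardware has written the descriptor back. */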
+ if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
+ ret = 0;
+ error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
+ ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
+ id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
+ ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
+ if (error) {
+ if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
+ PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
+ else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
+ PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
+ ret = -EINVAL;
+ goto err;
+ }
+ error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
+ ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
+ if (error) {
+ PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
+ ret = -EINVAL;
+ }
+err:
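+ /* Consume the descriptor whether programming succeeded or failed:
+ * clear the write-back status and advance the software tail.
+ */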
+ rxdp->wb.qword1.status_error_len = 0;
+ rxq->rx_tail++;
+ if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
+ rxq->rx_tail = 0;
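+ /* Keep the hardware tail register one descriptor behind the
+ * software tail.
+ */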
+ if (rxq->rx_tail == 0)
+ ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ else
+ ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
+ }
+
+ return ret;
+}
+
#define ICE_FDIR_MAX_WAIT_US 10000
int
ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
{
struct ice_tx_queue *txq = pf->fdir.txq;
+ struct ice_rx_queue *rxq = pf->fdir.rxq;
volatile struct ice_fltr_desc *fdirdp;
volatile struct ice_tx_desc *txdp;
uint32_t td_cmd;
return -ETIMEDOUT;
}
- return 0;
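+ /* Poll the Rx queue for the programming status write-back; i is
+ * not reset, so the total wait stays bounded by ICE_FDIR_MAX_WAIT_US.
+ */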
+ for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
+ int ret;
+
+ ret = ice_check_fdir_programming_status(rxq);
+ if (ret == -EAGAIN)
+ rte_delay_us(1);
+ else
+ return ret;
+ }
+
+ PMD_DRV_LOG(ERR,
+ "Failed to program FDIR filter: programming status not reported in time.");
+ return -ETIMEDOUT;
}