return 0;
}
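+/* Program the hardware context of the FDIR Rx queue: fill an
+ * ice_rlan_ctx, select a flexible descriptor profile through
+ * QRXFLXP_CNTXT and write the queue context to hardware.
+ */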
+static int
+ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
+{
+ struct ice_vsi *vsi = rxq->vsi;
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ uint32_t rxdid = ICE_RXDID_COMMS_GENERIC;
+ struct ice_rlan_ctx rx_ctx;
+ enum ice_status err;
+ uint32_t regval;
+
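+ /* The FDIR queue carries programming status reports rather than
+ * regular packets, so header split is disabled and a small 1 KB
+ * data buffer is sufficient.
+ */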
+ rxq->rx_hdr_len = 0;
+ rxq->rx_buf_len = 1024;
+
+ memset(&rx_ctx, 0, sizeof(rx_ctx));
+
+ rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
+ rx_ctx.qlen = rxq->nb_rx_desc;
+ rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
+ rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
+ rx_ctx.dtype = 0; /* No Header Split mode */
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ rx_ctx.dsize = 1; /* 32B descriptors */
+#endif
+ rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
+ /* TPH: Transaction Layer Packet (TLP) processing hints */
+ rx_ctx.tphrdesc_ena = 1;
+ rx_ctx.tphwdesc_ena = 1;
+ rx_ctx.tphdata_ena = 1;
+ rx_ctx.tphhead_ena = 1;
+ /* Low Receive Queue Threshold defined in 64 descriptors units.
+ * When the number of free descriptors goes below the lrxqthresh,
+ * an immediate interrupt is triggered.
+ */
+ rx_ctx.lrxqthresh = 2;
+ /* Default to 32-byte descriptors; extract the VLAN tag to L2TAG2 (1st) */
+ rx_ctx.l2tsel = 1;
+ rx_ctx.showiv = 0;
+ rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
+
+ /* Enable Flexible Descriptors in the queue context, which
+ * allows this driver to select a specific receive descriptor format.
+ */
+ regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
+ QRXFLXP_CNTXT_RXDID_IDX_M;
+
+ /* Increase the context priority so the profile ID is picked up;
+ * the default is 0x01. Setting it to 0x03 ensures the profile is
+ * programmed even when the previous context has the same priority.
+ */
+ regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+ QRXFLXP_CNTXT_RXDID_PRIO_M;
+
+ ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
+
+ err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to clear LAN Rx queue (%u) context",
+ rxq->queue_id);
+ return -EINVAL;
+ }
+ err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to write LAN Rx queue (%u) context",
+ rxq->queue_id);
+ return -EINVAL;
+ }
+
+ rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
+
+ /* Init the Rx tail register */
+ ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+
+ return 0;
+}
+
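+/* Start the FDIR Rx queue: program its hardware context, then switch
+ * the queue on.
+ */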
+int
+ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct ice_rx_queue *rxq;
+ int err;
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ rxq = pf->fdir.rxq;
+ if (!rxq || !rxq->q_set) {
+ PMD_DRV_LOG(ERR, "FDIR RX queue %u is not available or not set up",
+ rx_queue_id);
+ return -EINVAL;
+ }
+
+ err = ice_fdir_program_hw_rx_queue(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to program FDIR RX queue %u",
+ rx_queue_id);
+ return -EIO;
+ }
+
+ err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
+ rx_queue_id);
+
+ ice_reset_rx_queue(rxq);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
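+/* Start the FDIR Tx queue: build its Tx LAN queue context and hand it
+ * to firmware through ice_ena_vsi_txq(), which adds the queue to the
+ * Tx scheduler tree.
+ */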
+int
+ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_tx_queue *txq;
+ int err;
+ struct ice_vsi *vsi;
+ struct ice_hw *hw;
+ struct ice_aqc_add_tx_qgrp txq_elem;
+ struct ice_tlan_ctx tx_ctx;
+
+ PMD_INIT_FUNC_TRACE();
+
+ txq = pf->fdir.txq;
+ if (!txq || !txq->q_set) {
+ PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or not set up",
+ tx_queue_id);
+ return -EINVAL;
+ }
+
+ vsi = txq->vsi;
+ hw = ICE_VSI_TO_HW(vsi);
+
+ memset(&txq_elem, 0, sizeof(txq_elem));
+ memset(&tx_ctx, 0, sizeof(tx_ctx));
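+ /* Describe a single Tx queue to be added through the AdminQ. */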
+ txq_elem.num_txqs = 1;
+ txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
+
+ tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
+ tx_ctx.qlen = txq->nb_tx_desc;
+ tx_ctx.pf_num = hw->pf_id;
+ tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
+ tx_ctx.src_vsi = vsi->vsi_id;
+ tx_ctx.port_num = hw->port_info->lport;
+ tx_ctx.tso_ena = 1; /* enable TSO */
+ tx_ctx.tso_qnum = txq->reg_idx; /* index into the TSO state structure */
+ tx_ctx.legacy_int = 1; /* use the legacy host interface */
+
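+ /* Pack the context fields into the layout expected by firmware. */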
+ ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
+ ice_tlan_ctx_info);
+
+ txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
+
+ /* Init the Tx tail register */
+ ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
+
+ /* FIXME: we assume the TC is always 0 here */
+ err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
+ &txq_elem, sizeof(txq_elem), NULL);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
+ return -EIO;
+ }
+ /* store the schedule node id */
+ txq->q_teid = txq_elem.txqs[0].q_teid;
+
+ return 0;
+}
+
/* Free all mbufs for descriptors in tx queue */
static void
_ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
return 0;
}
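+/* Stop the FDIR Rx queue: switch the queue off and release any mbufs
+ * it still holds.
+ */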
+int
+ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct ice_rx_queue *rxq;
+ int err;
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ rxq = pf->fdir.rxq;
+ if (!rxq) {
+ PMD_DRV_LOG(ERR, "FDIR RX queue %u is not available",
+ rx_queue_id);
+ return -EINVAL;
+ }
+
+ err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
+ rx_queue_id);
+ return -EINVAL;
+ }
+ ice_rx_queue_release_mbufs(rxq);
+
+ return 0;
+}
+
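+/* Stop the FDIR Tx queue: remove it from the Tx scheduler through
+ * ice_dis_vsi_txq() and release any mbufs it still holds.
+ */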
+int
+ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct ice_tx_queue *txq;
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_vsi *vsi;
+ enum ice_status status;
+ uint16_t q_ids[1];
+ uint32_t q_teids[1];
+ uint16_t q_handle = tx_queue_id;
+
+ txq = pf->fdir.txq;
+ if (!txq) {
+ PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available",
+ tx_queue_id);
+ return -EINVAL;
+ }
+ vsi = txq->vsi;
+
+ q_ids[0] = txq->reg_idx;
+ q_teids[0] = txq->q_teid;
+
+ /* FIXME: we assume the TC is always 0 here */
+ status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
+ q_ids, q_teids, ICE_NO_RESET, 0, NULL);
+ if (status != ICE_SUCCESS) {
+ PMD_DRV_LOG(DEBUG, "Failed to disable LAN Tx queue");
+ return -EINVAL;
+ }
+
+ ice_tx_queue_release_mbufs(txq);
+
+ return 0;
+}
+
int
ice_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
xtr->type = ice_rxdid_to_proto_xtr_type(desc->rxdid);
xtr->magic = PROTO_XTR_MAGIC_ID;
}
+
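+ /* A flow ID of all ones means no FDIR filter match was reported. */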
+ if (desc->flow_id != 0xFFFFFFFF) {
+ mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+ }
#endif
}
dev->data->nb_tx_queues = 0;
}
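+/* The FDIR program queues only hold a couple of descriptors at a time,
+ * so the minimum ring size is sufficient.
+ */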
+#define ICE_FDIR_NUM_TX_DESC ICE_MIN_RING_DESC
+#define ICE_FDIR_NUM_RX_DESC ICE_MIN_RING_DESC
+
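+/* Allocate the queue structure and DMA descriptor ring for the FDIR
+ * program Tx queue. No software mbuf ring is needed.
+ */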
+int
+ice_fdir_setup_tx_resources(struct ice_pf *pf)
+{
+ struct ice_tx_queue *txq;
+ const struct rte_memzone *tz = NULL;
+ uint32_t ring_size;
+ struct rte_eth_dev *dev;
+
+ if (!pf) {
+ PMD_DRV_LOG(ERR, "PF is not available");
+ return -EINVAL;
+ }
+
+ dev = pf->adapter->eth_dev;
+
+ /* Allocate the TX queue data structure. */
+ txq = rte_zmalloc_socket("ice fdir tx queue",
+ sizeof(struct ice_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!txq) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+ "the TX queue structure.");
+ return -ENOMEM;
+ }
+
+ /* Allocate TX hardware ring descriptors. */
+ ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
+ ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
+
+ tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
+ ICE_FDIR_QUEUE_ID, ring_size,
+ ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
+ if (!tz) {
+ ice_tx_queue_release(txq);
+ PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
+ return -ENOMEM;
+ }
+
+ txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
+ txq->queue_id = ICE_FDIR_QUEUE_ID;
+ txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
+ txq->vsi = pf->fdir.fdir_vsi;
+
+ txq->tx_ring_dma = tz->iova;
+ txq->tx_ring = (struct ice_tx_desc *)tz->addr;
+ /*
+ * There is no need to allocate a software ring or to reset this
+ * queue: the FDIR program queue only needs to be marked as
+ * configured.
+ */
+ txq->q_set = TRUE;
+ pf->fdir.txq = txq;
+
+ txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
+
+ return ICE_SUCCESS;
+}
+
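+/* Allocate the queue structure and DMA descriptor ring for the FDIR
+ * program Rx queue, which receives the programming status reports.
+ */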
+int
+ice_fdir_setup_rx_resources(struct ice_pf *pf)
+{
+ struct ice_rx_queue *rxq;
+ const struct rte_memzone *rz = NULL;
+ uint32_t ring_size;
+ struct rte_eth_dev *dev;
+
+ if (!pf) {
+ PMD_DRV_LOG(ERR, "PF is not available");
+ return -EINVAL;
+ }
+
+ dev = pf->adapter->eth_dev;
+
+ /* Allocate the RX queue data structure. */
+ rxq = rte_zmalloc_socket("ice fdir rx queue",
+ sizeof(struct ice_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!rxq) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+ "the RX queue structure.");
+ return -ENOMEM;
+ }
+
+ /* Allocate RX hardware ring descriptors. */
+ ring_size = sizeof(union ice_rx_flex_desc) * ICE_FDIR_NUM_RX_DESC;
+ ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
+
+ rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
+ ICE_FDIR_QUEUE_ID, ring_size,
+ ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
+ if (!rz) {
+ ice_rx_queue_release(rxq);
+ PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
+ return -ENOMEM;
+ }
+
+ rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
+ rxq->queue_id = ICE_FDIR_QUEUE_ID;
+ rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
+ rxq->vsi = pf->fdir.fdir_vsi;
+
+ rxq->rx_ring_dma = rz->iova;
+ memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
+ sizeof(union ice_rx_flex_desc));
+ rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
+
+ /*
+ * There is no need to allocate a software ring or to reset this
+ * queue: the FDIR Rx queue only needs to be marked as configured.
+ */
+ rxq->q_set = TRUE;
+ pf->fdir.rxq = rxq;
+
+ rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
+
+ return ICE_SUCCESS;
+}
+
uint16_t
ice_recv_pkts(void *rx_queue,
struct rte_mbuf **rx_pkts,
for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
}
+
+#define ICE_FDIR_MAX_WAIT_US 10000
+
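+/* Post a single FDIR programming request: write a filter programming
+ * descriptor plus a dummy data descriptor to the FDIR Tx queue, bump
+ * the tail, then poll for the descriptor done (DD) status.
+ */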
+int
+ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
+{
+ struct ice_tx_queue *txq = pf->fdir.txq;
+ volatile struct ice_fltr_desc *fdirdp;
+ volatile struct ice_tx_desc *txdp;
+ uint32_t td_cmd;
+ uint16_t i;
+
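+ /* The first descriptor carries the filter programming request. */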
+ fdirdp = (volatile struct ice_fltr_desc *)
+ (&txq->tx_ring[txq->tx_tail]);
+ fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
+ fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
+
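+ /* The second descriptor points at the dummy FDIR program packet. */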
+ txdp = &txq->tx_ring[txq->tx_tail + 1];
+ txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
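+ /* DUMMY marks the packet as internal; it is never sent on the wire. */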
+ td_cmd = ICE_TX_DESC_CMD_EOP |
+ ICE_TX_DESC_CMD_RS |
+ ICE_TX_DESC_CMD_DUMMY;
+
+ txdp->cmd_type_offset_bsz =
+ ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
+
+ txq->tx_tail += 2;
+ if (txq->tx_tail >= txq->nb_tx_desc)
+ txq->tx_tail = 0;
+ /* Update the tx tail register */
+ ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+ for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
+ if ((txdp->cmd_type_offset_bsz &
+ rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
+ rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
+ break;
+ rte_delay_us(1);
+ }
+ if (i >= ICE_FDIR_MAX_WAIT_US) {
+ PMD_DRV_LOG(ERR,
+ "Failed to program FDIR filter: timed out waiting for DD on the TX queue.");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}