diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index bb75183..7a2220d 100644
@@ -270,6 +270,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
        struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
        uint32_t rxdid = ICE_RXDID_COMMS_OVS;
        uint32_t regval;
+       struct ice_adapter *ad = rxq->vsi->adapter;
 
        /* Set buffer size as the head split is disabled. */
        buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
@@ -366,7 +367,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
        regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
                QRXFLXP_CNTXT_RXDID_PRIO_M;
 
-       if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+       if (ad->ptp_ena || rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
                regval |= QRXFLXP_CNTXT_TS_M;
 
        ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
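
The register write above now enables flexible-descriptor timestamping (QRXFLXP_CNTXT_TS_M) whenever PTP timesync is active on the adapter, not only when DEV_RX_OFFLOAD_TIMESTAMP is requested. A minimal application-side sketch, assuming the standard ethdev timesync API (port_id is illustrative); in the ice PMD this call is what ultimately sets ad->ptp_ena:

    #include <rte_ethdev.h>

    /* Sketch: turn on IEEE 1588/PTP timesync for a port. The PMD then
     * starts timestamping PTP frames on its RX queues. */
    static int
    enable_ptp(uint16_t port_id)
    {
            return rte_eth_timesync_enable(port_id);
    }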
@@ -704,6 +705,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
        tx_ctx.tso_ena = 1; /* tso enable */
        tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
        tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
+       tx_ctx.tsyn_ena = 1; /* PTP Tx timestamp enable */
 
        ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
                    ice_tlan_ctx_info);
@@ -1150,6 +1152,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
                return -ENOMEM;
        }
 
+       rxq->mz = rz;
        /* Zero all the descriptors in the ring. */
        memset(rz->addr, 0, ring_size);
 
@@ -1205,6 +1208,7 @@ ice_rx_queue_release(void *rxq)
 
        q->rx_rel_mbufs(q);
        rte_free(q->sw_ring);
+       rte_memzone_free(q->mz);
        rte_free(q);
 }
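
Paired with the rxq->mz = rz; assignment in ice_rx_queue_setup() above, this fixes a memzone leak: the descriptor-ring memzone is now owned by the queue and freed when the queue is released, so re-configuring a queue no longer strands the old ring. The same pattern is applied to the TX and FDIR rings below, which in turn lets ice_free_queues() drop its name-based rte_eth_dma_zone_free() calls. A condensed sketch of the ownership pattern, with the struct, zone name, and alignment/socket arguments simplified for illustration:

    #include <errno.h>
    #include <string.h>
    #include <rte_memzone.h>
    #include <ethdev_driver.h> /* rte_eth_dma_zone_reserve() */

    struct ring_ctx {
            const struct rte_memzone *mz; /* descriptor-ring memzone */
    };

    static int
    ring_setup(struct rte_eth_dev *dev, struct ring_ctx *q,
               uint16_t queue_id, size_t ring_size)
    {
            /* rte_eth_dma_zone_reserve() hands back an existing zone of
             * the same name, so the pointer must be remembered. */
            q->mz = rte_eth_dma_zone_reserve(dev, "ring", queue_id,
                            ring_size, RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
            if (q->mz == NULL)
                    return -ENOMEM;
            memset(q->mz->addr, 0, ring_size);
            return 0;
    }

    static void
    ring_release(struct ring_ctx *q)
    {
            rte_memzone_free(q->mz); /* free what setup reserved */
            q->mz = NULL;
    }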
 
@@ -1351,6 +1355,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
                return -ENOMEM;
        }
 
+       txq->mz = tz;
        txq->nb_tx_desc = nb_desc;
        txq->tx_rs_thresh = tx_rs_thresh;
        txq->tx_free_thresh = tx_free_thresh;
@@ -1389,6 +1394,18 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
        return 0;
 }
 
+void
+ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       ice_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+void
+ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       ice_tx_queue_release(dev->data->tx_queues[qid]);
+}
+
 void
 ice_tx_queue_release(void *txq)
 {
@@ -1401,6 +1418,7 @@ ice_tx_queue_release(void *txq)
 
        q->tx_rel_mbufs(q);
        rte_free(q->sw_ring);
+       rte_memzone_free(q->mz);
        rte_free(q);
 }
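
The ice_dev_rx_queue_release()/ice_dev_tx_queue_release() wrappers above adapt the driver to the ethdev queue-release callback signature that passes the device and a queue index instead of a bare queue pointer. A sketch of how they would be wired up; the actual ops assignment lives in ice_ethdev.c and is shown here only as an illustrative fragment:

    #include <ethdev_driver.h>

    /* Fragment: the (dev, queue_id)-style release callbacks. The wrappers
     * simply look up dev->data->{rx,tx}_queues[qid] and forward to the
     * existing void-pointer helpers. */
    static const struct eth_dev_ops ice_eth_dev_ops = {
            /* ... */
            .rx_queue_release = ice_dev_rx_queue_release,
            .tx_queue_release = ice_dev_tx_queue_release,
            /* ... */
    };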
 
@@ -1564,6 +1582,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
        struct ice_vsi *vsi = rxq->vsi;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        uint64_t ts_ns;
+       struct ice_adapter *ad = rxq->vsi->adapter;
 
        rxdp = &rxq->rx_ring[rxq->rx_tail];
        rxep = &rxq->sw_ring[rxq->rx_tail];
@@ -1618,6 +1637,14 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
                                }
                        }
 
+                       if (ad->ptp_ena && ((mb->packet_type &
+                           RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+                               rxq->time_high =
+                                  rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
+                               mb->timesync = rxq->queue_id;
+                               pkt_flags |= PKT_RX_IEEE1588_PTP;
+                       }
+
                        mb->ol_flags |= pkt_flags;
                }
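
For each packet whose L2 type is RTE_PTYPE_L2_ETHER_TIMESYNC, the driver latches the high 32 bits of the flex-descriptor timestamp in rxq->time_high, records the queue id in mbuf->timesync, and raises PKT_RX_IEEE1588_PTP; the same block recurs in the scattered and regular receive paths below. On the application side the flag would be consumed roughly as follows (a sketch built on the standard ethdev timesync calls):

    #include <stdio.h>
    #include <time.h>
    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Sketch: read back the RX timestamp of a PTP packet the PMD flagged. */
    static void
    handle_ptp_rx(uint16_t port_id, struct rte_mbuf *m)
    {
            struct timespec ts;

            if (!(m->ol_flags & PKT_RX_IEEE1588_PTP))
                    return;

            /* m->timesync carries PMD-specific context; for ice it is the
             * queue whose latched timestamp should be read. */
            if (rte_eth_timesync_read_rx_timestamp(port_id, &ts,
                                                   m->timesync) == 0)
                    printf("PTP RX timestamp: %lld.%09ld\n",
                           (long long)ts.tv_sec, ts.tv_nsec);
    }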
 
@@ -1804,6 +1831,7 @@ ice_recv_scattered_pkts(void *rx_queue,
        struct ice_vsi *vsi = rxq->vsi;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        uint64_t ts_ns;
+       struct ice_adapter *ad = rxq->vsi->adapter;
 
        while (nb_rx < nb_pkts) {
                rxdp = &rx_ring[rx_id];
@@ -1926,6 +1954,14 @@ ice_recv_scattered_pkts(void *rx_queue,
                        }
                }
 
+               if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
+                   == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+                       rxq->time_high =
+                          rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+                       first_seg->timesync = rxq->queue_id;
+                       pkt_flags |= PKT_RX_IEEE1588_PTP;
+               }
+
                first_seg->ol_flags |= pkt_flags;
                /* Prefetch data of first segment, if configured to do so. */
                rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
@@ -2124,7 +2160,6 @@ ice_free_queues(struct rte_eth_dev *dev)
                        continue;
                ice_rx_queue_release(dev->data->rx_queues[i]);
                dev->data->rx_queues[i] = NULL;
-               rte_eth_dma_zone_free(dev, "rx_ring", i);
        }
        dev->data->nb_rx_queues = 0;
 
@@ -2133,7 +2168,6 @@ ice_free_queues(struct rte_eth_dev *dev)
                        continue;
                ice_tx_queue_release(dev->data->tx_queues[i]);
                dev->data->tx_queues[i] = NULL;
-               rte_eth_dma_zone_free(dev, "tx_ring", i);
        }
        dev->data->nb_tx_queues = 0;
 }
@@ -2180,6 +2214,7 @@ ice_fdir_setup_tx_resources(struct ice_pf *pf)
                return -ENOMEM;
        }
 
+       txq->mz = tz;
        txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
        txq->queue_id = ICE_FDIR_QUEUE_ID;
        txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
@@ -2238,6 +2273,7 @@ ice_fdir_setup_rx_resources(struct ice_pf *pf)
                return -ENOMEM;
        }
 
+       rxq->mz = rz;
        rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
        rxq->queue_id = ICE_FDIR_QUEUE_ID;
        rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
@@ -2284,6 +2320,7 @@ ice_recv_pkts(void *rx_queue,
        struct ice_vsi *vsi = rxq->vsi;
        struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
        uint64_t ts_ns;
+       struct ice_adapter *ad = rxq->vsi->adapter;
 
        while (nb_rx < nb_pkts) {
                rxdp = &rx_ring[rx_id];
@@ -2347,6 +2384,14 @@ ice_recv_pkts(void *rx_queue,
                        }
                }
 
+               if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
+                   RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+                       rxq->time_high =
+                          rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+                       rxm->timesync = rxq->queue_id;
+                       pkt_flags |= PKT_RX_IEEE1588_PTP;
+               }
+
                rxm->ol_flags |= pkt_flags;
                /* copy old mbuf to rx_pkts */
                rx_pkts[nb_rx++] = rxm;
@@ -2558,7 +2603,8 @@ ice_calc_context_desc(uint64_t flags)
        static uint64_t mask = PKT_TX_TCP_SEG |
                PKT_TX_QINQ |
                PKT_TX_OUTER_IP_CKSUM |
-               PKT_TX_TUNNEL_MASK;
+               PKT_TX_TUNNEL_MASK |
+               PKT_TX_IEEE1588_TMST;
 
        return (flags & mask) ? 1 : 0;
 }
@@ -2726,6 +2772,10 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        if (ol_flags & PKT_TX_TCP_SEG)
                                cd_type_cmd_tso_mss |=
                                        ice_set_tso_ctx(tx_pkt, tx_offload);
+                       else if (ol_flags & PKT_TX_IEEE1588_TMST)
+                               cd_type_cmd_tso_mss |=
+                                       ((uint64_t)ICE_TX_CTX_DESC_TSYN <<
+                                       ICE_TXD_CTX_QW1_CMD_S);
 
                        ctx_txd->tunneling_params =
                                rte_cpu_to_le_32(cd_tunneling_params);
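
ice_calc_context_desc() was extended above so that a context descriptor is emitted for PKT_TX_IEEE1588_TMST packets, and here that descriptor carries the ICE_TX_CTX_DESC_TSYN command so the hardware timestamps the frame at transmission. The application side would look roughly like this (a sketch; the retry count and delay are arbitrary):

    #include <time.h>
    #include <rte_cycles.h>
    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Sketch: transmit one PTP frame with a hardware timestamp request,
     * then poll for the latched TX timestamp. */
    static int
    send_ptp_frame(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *m)
    {
            struct timespec ts;
            int retries = 100;

            m->ol_flags |= PKT_TX_IEEE1588_TMST; /* request HW timestamp */
            if (rte_eth_tx_burst(port_id, queue_id, &m, 1) != 1)
                    return -1;

            /* The timestamp is only available after the frame is on the
             * wire, so poll with a bounded retry. */
            while (rte_eth_timesync_read_tx_timestamp(port_id, &ts) != 0 &&
                   --retries > 0)
                    rte_delay_us(10);

            return retries > 0 ? 0 : -1;
    }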
@@ -3127,6 +3177,8 @@ ice_set_rx_function(struct rte_eth_dev *dev)
                ad->rx_use_avx512 = false;
                ad->rx_use_avx2 = false;
                rx_check_ret = ice_rx_vec_dev_check(dev);
+               if (ad->ptp_ena)
+                       rx_check_ret = -1;
                if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
                    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
                        ad->rx_vec_allowed = true;
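
Finally, with timesync enabled the vector receive paths are disqualified up front (rx_check_ret forced to -1): the SSE/AVX routines do not perform the PTP flagging added to the scalar paths above, so the driver falls back to the scalar receive functions while ptp_ena is set.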