#include <rte_event_eth_rx_adapter.h>
#include <rte_pmd_dpaa2.h>
+#include <rte_fslmc.h>
#include <dpaa2_hw_pvt.h>
#include "dpaa2_tm.h"
#define MAX_RX_QUEUES 128
#define MAX_TX_QUEUES 16
#define MAX_DPNI 8
+#define DPAA2_MAX_CHANNELS 16
#define DPAA2_RX_DEFAULT_NBDESC 512
/* Disable RX tail drop, default is enable */
#define DPAA2_RX_TAILDROP_OFF 0x04
/* Tx confirmation enabled */
-#define DPAA2_TX_CONF_ENABLE 0x08
+#define DPAA2_TX_CONF_ENABLE 0x06
+
+/* HW loopback the egress traffic to self ingress*/
+#define DPAA2_TX_MAC_LOOPBACK_MODE 0x20
+
+#define DPAA2_TX_SERDES_LOOPBACK_MODE 0x40
+
+#define DPAA2_TX_DPNI_LOOPBACK_MODE 0x80
+
+#define DPAA2_TX_LOOPBACK_MODE \
+ (DPAA2_TX_MAC_LOOPBACK_MODE | \
+ DPAA2_TX_SERDES_LOOPBACK_MODE | \
+ DPAA2_TX_DPNI_LOOPBACK_MODE)
#define DPAA2_RSS_OFFLOAD_ALL ( \
RTE_ETH_RSS_L2_PAYLOAD | \
void *rx_vq[MAX_RX_QUEUES];
void *tx_vq[MAX_TX_QUEUES];
struct dpaa2_bp_list *bp_list; /**<Attached buffer pool list */
- void *tx_conf_vq[MAX_TX_QUEUES];
+ void *tx_conf_vq[MAX_TX_QUEUES * DPAA2_MAX_CHANNELS];
void *rx_err_vq;
uint8_t flags; /*dpaa2 config flags */
uint8_t max_mac_filters;
uint8_t max_vlan_filters;
uint8_t num_rx_tc;
+ uint8_t num_tx_tc;
uint16_t qos_entries;
uint16_t fs_entries;
uint8_t dist_queues;
+ uint8_t num_channels;
uint8_t en_ordered;
uint8_t en_loose_ordered;
uint8_t max_cgs;
struct dpaa2_queue *next_tx_conf_queue;
struct rte_eth_dev *eth_dev; /**< Pointer back to holding ethdev */
+ rte_spinlock_t lpbk_qp_lock;
+ uint8_t channel_inuse;
LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
LIST_HEAD(nodes, dpaa2_tm_node) nodes;
LIST_HEAD(shaper_profiles, dpaa2_tm_shaper_profile) shaper_profiles;
int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
uint8_t tc_index);
-int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, void *blist);
+int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
+ struct fsl_mc_io *dpni, void *blist);
__rte_internal
int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
uint16_t dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs,
uint16_t nb_pkts);
-uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
+__rte_internal
+uint16_t dpaa2_dev_tx_multi_txq_ordered(void **queue,
+ struct rte_mbuf **bufs, uint16_t nb_pkts);
+
void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci);
void dpaa2_flow_clean(struct rte_eth_dev *dev);
uint16_t dpaa2_dev_tx_conf(void *queue) __rte_unused;
uint32_t flags __rte_unused);
int dpaa2_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
struct timespec *timestamp);
+
+int dpaa2_dev_recycle_config(struct rte_eth_dev *eth_dev);
+int dpaa2_dev_recycle_deconfig(struct rte_eth_dev *eth_dev);
+int dpaa2_dev_recycle_qp_setup(struct rte_dpaa2_device *dpaa2_dev,
+ uint16_t qidx, uint64_t cntx,
+ eth_rx_burst_t tx_lpbk, eth_tx_burst_t rx_lpbk,
+ struct dpaa2_queue **txq,
+ struct dpaa2_queue **rxq);
+
#endif /* _DPAA2_ETHDEV_H */