/* Private header for the Marvell OCTEON TX2 (otx2) NIX ethdev driver.
 * NOTE(review): this chunk still carries unified-diff '+'/'-' prefixes
 * and several original lines appear to have been dropped in extraction;
 * reconcile against the full header before applying.
 */
#ifndef __OTX2_ETHDEV_H__
#define __OTX2_ETHDEV_H__
+#include <math.h>
#include <stdint.h>
#include <rte_common.h>
/* NOTE(review): dangling macro body — the '#define ... \' line that
 * introduces this expression (presumably the max-MTU derivation from
 * max frame size minus L2 overhead) is missing from this chunk; confirm.
 */
(NIX_MAX_FRS - NIX_L2_OVERHEAD)
/* SQB count limits per send queue; this hunk lowers the minimum from
 * 32 to 8 and introduces a default of 16.
 */
#define NIX_MAX_SQB 512
-#define NIX_MIN_SQB 32
+#define NIX_DEF_SQB 16
+#define NIX_MIN_SQB 8
#define NIX_SQB_LIST_SPACE 2
#define NIX_RSS_RETA_SIZE_MAX 256
/* Group 0 will be used for RSS, 1-7 will be used for rte_flow RSS action */
#define NIX_CQ_ALIGN 512
#define NIX_SQB_LOWER_THRESH 90
#define LMT_SLOT_MASK 0x7f
/* Default Rx ring size (entries) — new in this hunk */
+#define NIX_RX_DEFAULT_RING_SZ 4096
/* If PTP is enabled, an additional SEND MEM DESC is required, which
 * takes 2 words; hence at most 7 iova addresses are possible.
 */
#define NIX_TX_NB_SEG_MAX 9
/* NOTE(review): orphan #endif — its matching #if (presumably a PTP /
 * IEEE1588 conditional selecting NIX_TX_NB_SEG_MAX) was dropped from
 * this chunk; as-is it would wrongly close the include guard. Confirm.
 */
#endif
/* This hunk replaces the 75%-full backpressure level with a 95%-full
 * BP/DROP threshold plus an errata skid, and adds the multi-seg
 * scatter/gather dword count.
 */
-/* Apply BP when CQ is 75% full */
-#define NIX_CQ_BP_LEVEL (25 * 256 / 100)
+#define NIX_TX_MSEG_SG_DWORDS \
+ ((RTE_ALIGN_MUL_CEIL(NIX_TX_NB_SEG_MAX, 3) / 3) \
+ + NIX_TX_NB_SEG_MAX)
+
+/* Apply BP/DROP when CQ is 95% full */
+#define NIX_CQ_THRESH_LEVEL (5 * 256 / 100)
+#define NIX_CQ_FULL_ERRATA_SKID (1024ull * 256)
/* Bit positions of the error flags in the CQ operation status word */
#define CQ_OP_STAT_OP_ERR 63
#define CQ_OP_STAT_CQ_ERR 46
#define OP_ERR BIT_ULL(CQ_OP_STAT_OP_ERR)
#define CQ_ERR BIT_ULL(CQ_OP_STAT_CQ_ERR)
+#define CQ_CQE_THRESH_DEFAULT 0x1ULL /* IRQ triggered when
+ * NIX_LF_CINTX_CNT[QCOUNT]
+ * crosses this value
+ */
+#define CQ_TIMER_THRESH_DEFAULT 0xAULL /* ~1usec i.e (0xA * 100nsec) */
+#define CQ_TIMER_THRESH_MAX 255
+
/* RSS hash-type capabilities advertised by the driver */
#define NIX_RSS_OFFLOAD (ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP |\
 ETH_RSS_TCP | ETH_RSS_SCTP | \
 ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD)
 /* NOTE(review): interior of struct otx2_eth_dev — the struct's opening
  * lines fall outside this chunk, so only the visible field run is
  * documented here.
  */
 uint16_t qints;
 uint8_t configured;
 uint8_t configured_qints;
 /* New in this hunk: CQ interrupt bookkeeping, mirroring the existing
  * queue-interrupt fields (configured_qints / qints_mem).
  */
+ uint8_t configured_cints;
 uint8_t configured_nb_rx_qs;
 uint8_t configured_nb_tx_qs;
 uint16_t nix_msixoff;
 uint64_t rx_offload_capa;
 uint64_t tx_offload_capa;
 struct otx2_qint qints_mem[RTE_MAX_QUEUES_PER_PORT];
+ struct otx2_qint cints_mem[RTE_MAX_QUEUES_PER_PORT];
 uint16_t txschq[NIX_TXSCH_LVL_CNT];
 uint16_t txschq_contig[NIX_TXSCH_LVL_CNT];
 uint16_t txschq_index[NIX_TXSCH_LVL_CNT];
 struct rte_timecounter systime_tc;
 struct rte_timecounter rx_tstamp_tc;
 struct rte_timecounter tx_tstamp_tc;
 /* New in this hunk: presumably the raw-clock/TSC conversion state
  * filled in by otx2_nix_raw_clock_tsc_conv() — confirm against the
  * implementation.
  */
+ double clk_freq_mult;
+ uint64_t clk_delta;
} __rte_cache_aligned;
/* Per-queue context (cache-line aligned). NOTE(review): only part of
 * this struct is visible in the chunk — intermediate fields appear to
 * have been dropped in extraction.
 */
struct otx2_eth_txq {
 enum nix_q_size_e qsize;
 struct rte_eth_dev *eth_dev;
 struct otx2_eth_qconf qconf;
 /* NOTE(review): a CQ drop threshold on a *Tx* queue is unusual — in
  * comparable drivers this field lives on the Rx queue; verify this
  * hunk was applied to the intended struct.
  */
+ uint16_t cq_drop;
} __rte_cache_aligned;
static inline struct otx2_eth_dev *
int otx2_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx);
uint64_t otx2_nix_rxq_mbuf_setup(struct otx2_eth_dev *dev, uint16_t port_id);
+/* MTU */
+int otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu);
+int otx2_nix_recalc_mtu(struct rte_eth_dev *eth_dev);
+
/* Link */
void otx2_nix_toggle_flag_link_cfg(struct otx2_eth_dev *dev, bool set);
int otx2_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete);
void otx2_eth_dev_link_status_update(struct otx2_dev *dev,
 struct cgx_link_user_info *link);
+int otx2_nix_dev_set_link_up(struct rte_eth_dev *eth_dev);
+int otx2_nix_dev_set_link_down(struct rte_eth_dev *eth_dev);
/* IRQ */
int otx2_nix_register_irqs(struct rte_eth_dev *eth_dev);
/* NOTE(review): 'oxt2_' below looks like a transposed-letter typo for
 * 'otx2_', but it matches the existing definitions and callers —
 * renaming here alone would break the build; fix tree-wide if at all.
 */
int oxt2_nix_register_queue_irqs(struct rte_eth_dev *eth_dev);
+int oxt2_nix_register_cq_irqs(struct rte_eth_dev *eth_dev);
void otx2_nix_unregister_irqs(struct rte_eth_dev *eth_dev);
void oxt2_nix_unregister_queue_irqs(struct rte_eth_dev *eth_dev);
+void oxt2_nix_unregister_cq_irqs(struct rte_eth_dev *eth_dev);
+
/* Rx interrupt enable/disable — new eth_dev_ops hooks in this hunk */
+int otx2_nix_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
+ uint16_t rx_queue_id);
+int otx2_nix_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
+ uint16_t rx_queue_id);
/* Debug */
int otx2_nix_reg_dump(struct otx2_eth_dev *dev, uint64_t *data);
/* PTP / timesync */
int otx2_nix_timesync_read_time(struct rte_eth_dev *eth_dev,
 struct timespec *ts);
int otx2_eth_dev_ptp_info_update(struct otx2_dev *dev, bool ptp_en);
+int otx2_nix_read_clock(struct rte_eth_dev *eth_dev, uint64_t *time);
+int otx2_nix_raw_clock_tsc_conv(struct otx2_eth_dev *dev);
#endif /* __OTX2_ETHDEV_H__ */