#ifndef __FSL_FMAN_H
#define __FSL_FMAN_H
+#include <rte_compat.h>
+
#ifdef __cplusplus
extern "C" {
#endif
} __rte_packed;
/* Set MAC address for a particular interface */
+__rte_internal
int fman_if_add_mac_addr(struct fman_if *p, uint8_t *eth, uint8_t addr_num);
/* Remove a MAC address for a particular interface */
+__rte_internal
void fman_if_clear_mac_addr(struct fman_if *p, uint8_t addr_num);
/* Get the FMAN statistics */
+__rte_internal
void fman_if_stats_get(struct fman_if *p, struct rte_eth_stats *stats);
/* Reset the FMAN statistics */
+__rte_internal
void fman_if_stats_reset(struct fman_if *p);
/* Get all of the FMAN statistics */
+__rte_internal
void fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n);
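/*
 * Illustrative sketch (not part of this API): reading the counters of an
 * already-discovered interface. "fif" stands for an assumed struct fman_if
 * pointer obtained during bus probing.
 *
 *	struct rte_eth_stats stats = {0};
 *
 *	fman_if_stats_get(fif, &stats);
 *	// stats.ipackets, stats.opackets etc. now hold the interface counters
 */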
/* Set ignore pause option for a specific interface */
void fman_if_conf_max_frame_len(struct fman_if *p, unsigned int max_frame_len);
/* Enable/disable Rx promiscuous mode on specified interface */
+__rte_internal
void fman_if_promiscuous_enable(struct fman_if *p);
+__rte_internal
void fman_if_promiscuous_disable(struct fman_if *p);
/* Enable/disable Rx on specific interfaces */
+__rte_internal
void fman_if_enable_rx(struct fman_if *p);
+__rte_internal
void fman_if_disable_rx(struct fman_if *p);
/* Enable/disable loopback on specific interfaces */
+__rte_internal
void fman_if_loopback_enable(struct fman_if *p);
+__rte_internal
void fman_if_loopback_disable(struct fman_if *p);
/* Set buffer pool on specific interface */
+__rte_internal
void fman_if_set_bp(struct fman_if *fm_if, unsigned int num, int bpid,
size_t bufsize);
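/*
 * Illustrative sketch: attaching a single buffer pool to an interface; the
 * pool id and buffer size below are example values only.
 *
 *	fman_if_set_bp(fif, 1, 7, 2048);  // one pool, bpid 7, 2048-byte buffers
 */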
/* Get Flow Control threshold parameters on specific interface */
+__rte_internal
int fman_if_get_fc_threshold(struct fman_if *fm_if);
/* Enable and Set Flow Control threshold parameters on specific interface */
+__rte_internal
int fman_if_set_fc_threshold(struct fman_if *fm_if,
u32 high_water, u32 low_water, u32 bpid);
/* Get Flow Control pause quanta on specific interface */
+__rte_internal
int fman_if_get_fc_quanta(struct fman_if *fm_if);
/* Set Flow Control pause quanta on specific interface */
+__rte_internal
int fman_if_set_fc_quanta(struct fman_if *fm_if, u16 pause_quanta);
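/*
 * Illustrative sketch of the flow-control setters above; the watermarks,
 * backpressure pool id and pause quanta are example values only.
 *
 *	fman_if_set_fc_threshold(fif, 0x3000, 0x1000, 7);
 *	fman_if_set_fc_quanta(fif, 0xffff);
 */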
/* Set default error fqid on specific interface */
int fman_if_get_ic_params(struct fman_if *fm_if, struct fman_if_ic_params *icp);
/* Set IC transfer params */
+__rte_internal
int fman_if_set_ic_params(struct fman_if *fm_if,
const struct fman_if_ic_params *icp);
/* Get interface fd->offset value */
+__rte_internal
int fman_if_get_fdoff(struct fman_if *fm_if);
/* Set interface fd->offset value */
+__rte_internal
void fman_if_set_fdoff(struct fman_if *fm_if, uint32_t fd_offset);
/* Get interface SG enable status value */
+__rte_internal
int fman_if_get_sg_enable(struct fman_if *fm_if);
/* Set interface SG support mode */
+__rte_internal
void fman_if_set_sg(struct fman_if *fm_if, int enable);
/* Get interface Max Frame length (MTU) */
uint16_t fman_if_get_maxfrm(struct fman_if *fm_if);
/* Set interface Max Frame length (MTU) */
+__rte_internal
void fman_if_set_maxfrm(struct fman_if *fm_if, uint16_t max_frm);
/* Set interface next invoked action for dequeue operation */
void fman_if_set_dnia(struct fman_if *fm_if, uint32_t nia);
/* discard error packets on rx */
+__rte_internal
void fman_if_discard_rx_errors(struct fman_if *fm_if);
+__rte_internal
void fman_if_set_mcast_filter_table(struct fman_if *p);
+__rte_internal
void fman_if_reset_mcast_filter_table(struct fman_if *p);
int fman_if_add_hash_mac_addr(struct fman_if *p, uint8_t *eth);
#define QMAN_CGR_MODE_FRAME 0x00000001
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+__rte_internal
void qman_set_fq_lookup_table(void **table);
#endif
*/
int qman_get_portal_index(void);
+__rte_internal
u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
void **bufs);
* processed via qman_poll_***() functions). Returns zero for success, or
* -EINVAL if the current CPU is sharing a portal hosted on another CPU.
*/
+__rte_internal
int qman_irqsource_add(u32 bits);
/**
* takes portal (fq specific) as input rather than using the thread affined
* portal.
*/
+__rte_internal
int qman_fq_portal_irqsource_add(struct qman_portal *p, u32 bits);
/**
* instead be processed via qman_poll_***() functions. Returns zero for success,
* or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
*/
+__rte_internal
int qman_irqsource_remove(u32 bits);
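/*
 * Illustrative sketch: switching DQRR servicing of the calling thread's
 * portal between interrupt and poll mode, assuming the QM_PIRQ_DQRI source
 * bit defined in this header.
 *
 *	qman_irqsource_add(QM_PIRQ_DQRI);     // DQRR serviced via interrupts
 *	...
 *	qman_irqsource_remove(QM_PIRQ_DQRI);  // back to qman_poll_*() servicing
 */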
/**
* takes portal (fq specific) as input rather than using the thread affined
* portal.
*/
+__rte_internal
int qman_fq_portal_irqsource_remove(struct qman_portal *p, u32 bits);
/**
*/
u16 qman_affine_channel(int cpu);
+__rte_internal
unsigned int qman_portal_poll_rx(unsigned int poll_limit,
void **bufs, struct qman_portal *q);
*
* This function will issue a volatile dequeue command to the QMAN.
*/
+__rte_internal
int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags);
/**
* is issued. It returns NULL once there is no packet available on the DQRR.
*/
+__rte_internal
struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq);
/**
* This will consume the DQRR entry and make it available for the next volatile
* dequeue.
*/
+__rte_internal
void qman_dqrr_consume(struct qman_fq *fq,
struct qm_dqrr_entry *dq);
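/*
 * Illustrative sketch of the volatile dequeue flow formed by qman_set_vdq(),
 * qman_dequeue() and qman_dqrr_consume(); the frame count, the zero vdqcr
 * flags and the handle_fd() consumer are assumptions for the example.
 *
 *	struct qm_dqrr_entry *dq;
 *
 *	if (qman_set_vdq(fq, 8, 0) == 0) {         // request up to 8 frames
 *		while ((dq = qman_dequeue(fq)) != NULL) {
 *			handle_fd(&dq->fd);        // hypothetical consumer
 *			qman_dqrr_consume(fq, dq); // release the DQRR entry
 *		}
 *	}
 */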
* this function will return -EINVAL, otherwise the return value is >=0 and
* represents the number of DQRR entries processed.
*/
+__rte_internal
int qman_poll_dqrr(unsigned int limit);
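/*
 * Illustrative sketch: servicing the thread-affine portal; a negative return
 * means this CPU shares a portal hosted on another CPU.
 *
 *	int n = qman_poll_dqrr(16);       // handle at most 16 DQRR entries
 *
 *	if (n < 0)
 *		handle_shared_portal();   // hypothetical fallback
 */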
/**
* (SDQCR). The requested pools are limited to those the portal has dequeue
* access to.
*/
+__rte_internal
void qman_static_dequeue_add(u32 pools, struct qman_portal *qm);
/**
* function must be called from the same CPU as that which processed the DQRR
* entry in the first place.
*/
+__rte_internal
void qman_dca_index(u8 index, int park_request);
/**
* a frame queue object based on that, rather than assuming/requiring that it be
* Out of Service.
*/
+__rte_internal
int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
/**
* qman_fq_fqid - Queries the frame queue ID of a FQ object
* @fq: the frame queue object to query
*/
+__rte_internal
u32 qman_fq_fqid(struct qman_fq *fq);
/**
* This captures the state, as seen by the driver, at the time the function
* executes.
*/
+__rte_internal
void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
/**
* context_a.address fields and will leave the stashing fields provided by the
* user alone, otherwise it will zero out the context_a.stashing fields.
*/
+__rte_internal
int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
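/*
 * Illustrative sketch: pairing qman_create_fq() with qman_init_fq(); the fqid
 * and the zero flag/option values are placeholders for the example.
 *
 *	struct qman_fq fq;
 *	struct qm_mcc_initfq opts = {0};
 *
 *	if (qman_create_fq(fqid, 0, &fq) == 0)
 *		qman_init_fq(&fq, 0, &opts);
 */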
/**
* caller should be prepared to accept the callback as the function is called,
* not only once it has returned.
*/
+__rte_internal
int qman_retire_fq(struct qman_fq *fq, u32 *flags);
/**
* The frame queue must be retired and empty, and if any order restoration list
* was released as ERNs at the time of retirement, they must all be consumed.
*/
+__rte_internal
int qman_oos_fq(struct qman_fq *fq);
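/*
 * Illustrative sketch of the teardown order implied above (ignoring the
 * asynchronous-retirement case): retire first, then place the now empty
 * queue out of service.
 *
 *	u32 rflags;
 *
 *	if (qman_retire_fq(fq, &rflags) == 0)
 *		qman_oos_fq(fq);
 */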
/**
* @fq: the frame queue object to be queried
* @np: storage for the queried FQD fields
*/
+__rte_internal
int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
/**
* @fq: the frame queue object to be queried
* @frm_cnt: number of frames in the queue
*/
+__rte_internal
int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt);
/**
* callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
* "flags" retrieved from qman_fq_state().
*/
+__rte_internal
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
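/*
 * Illustrative sketch of the completion check described above; "vdqcr" is an
 * assumed, already-encoded VDQCR value.
 *
 *	if (qman_volatile_dequeue(fq, 0, vdqcr) == 0) {
 *		enum qman_fq_state st;
 *		u32 flags;
 *
 *		do {
 *			qman_fq_state(fq, &st, &flags);
 *		} while (flags & QMAN_FQ_STATE_VDQCR);
 *	}
 */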
/**
* of an already busy hardware resource by throttling many of the to-be-dropped
* enqueues "at the source".
*/
+__rte_internal
int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
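/*
 * Illustrative sketch: a single enqueue with no flags; drop_frame() is a
 * hypothetical fallback for when the enqueue ring is full.
 *
 *	if (qman_enqueue(fq, &fd, 0) != 0)
 *		drop_frame(&fd);
 */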
+__rte_internal
int qman_enqueue_multi(struct qman_fq *fq, const struct qm_fd *fd, u32 *flags,
int frames_to_send);
* This API is similar to qman_enqueue_multi(), but it takes FDs that need to
* be processed by different frame queues.
*/
+__rte_internal
int
qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
u32 *flags, int frames_to_send);
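/*
 * Illustrative sketch: a burst in which every frame descriptor targets its
 * own frame queue; "fqs" and "fds" are assumed parallel arrays of length "n",
 * and passing NULL per-frame flags is an assumption of this example.
 *
 *	int sent = qman_enqueue_multi_fq(fqs, fds, NULL, n);
 *
 *	// sent may be smaller than n when the enqueue ring is full
 */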
* @fqid: the base FQID of the range to deallocate
* @count: the number of FQIDs in the range
*/
+__rte_internal
int qman_reserve_fqid_range(u32 fqid, unsigned int count);
static inline int qman_reserve_fqid(u32 fqid)
{
* than requested (though alignment will be as requested). If @partial is zero,
* the return value will either be 'count' or negative.
*/
+__rte_internal
int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
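/*
 * Illustrative sketch of the partial-allocation semantics above: request a
 * range of four pool-channel ids but accept a shorter run.
 *
 *	u32 pool_base;
 *	int got = qman_alloc_pool_range(&pool_base, 4, 0, 1);
 *
 *	// with partial != 0, got may be any count up to 4 starting at
 *	// pool_base; a negative value means nothing was allocated
 */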
static inline int qman_alloc_pool(u32 *result)
{
* any unspecified parameters) will be used rather than a modify hw command
* (which only modifies the specified parameters).
*/
+__rte_internal
int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
struct qm_mcc_initcgr *opts);
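/*
 * Illustrative sketch: registering a congestion group with an init command so
 * that unspecified fields are reset; "cgr" is assumed to already have its
 * cgrid and callback filled in, and the zeroed opts are for illustration.
 *
 *	struct qm_mcc_initcgr opts = {0};
 *
 *	if (qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &opts) != 0)
 *		handle_error();   // hypothetical
 */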
* is executed. This must be executed on the same affine portal on which it was
* created.
*/
+__rte_internal
int qman_delete_cgr(struct qman_cgr *cgr);
/**
* unspecified parameters) will be used rather than a modify hw command (which
* only modifies the specified parameters).
*/
+__rte_internal
int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
struct qm_mcc_initcgr *opts);
* than requested (though alignment will be as requested). If @partial is zero,
* the return value will either be 'count' or negative.
*/
+__rte_internal
int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
static inline int qman_alloc_cgrid(u32 *result)
{
* @id: the base CGR ID of the range to deallocate
* @count: the number of CGR IDs in the range
*/
+__rte_internal
void qman_release_cgrid_range(u32 id, unsigned int count);
static inline void qman_release_cgrid(u32 id)
{