diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h
index b41dd7fefe..5a7db7600a 100644
--- a/drivers/net/qede/base/ecore_l2_api.h
+++ b/drivers/net/qede/base/ecore_l2_api.h
@@ -14,19 +14,42 @@
 
 #ifndef __EXTRACT__LINUX__
 enum ecore_rss_caps {
-	ECORE_RSS_IPV4 = 0x1,
-	ECORE_RSS_IPV6 = 0x2,
-	ECORE_RSS_IPV4_TCP = 0x4,
-	ECORE_RSS_IPV6_TCP = 0x8,
-	ECORE_RSS_IPV4_UDP = 0x10,
-	ECORE_RSS_IPV6_UDP = 0x20,
+	ECORE_RSS_IPV4		= 0x1,
+	ECORE_RSS_IPV6		= 0x2,
+	ECORE_RSS_IPV4_TCP	= 0x4,
+	ECORE_RSS_IPV6_TCP	= 0x8,
+	ECORE_RSS_IPV4_UDP	= 0x10,
+	ECORE_RSS_IPV6_UDP	= 0x20,
 };
 
 /* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
 #define ECORE_RSS_IND_TABLE_SIZE 128
-#define ECORE_RSS_KEY_SIZE 10 /* size in 32b chunks */
+#define ECORE_RSS_KEY_SIZE 10	/* size in 32b chunks */
 #endif
 
+struct ecore_queue_start_common_params {
+	/* Should always be relative to entity sending this. */
+	u8 vport_id;
+	u16 queue_id;
+
+	/* Relative, but relevant only for PFs */
+	u8 stats_id;
+
+	/* These are always absolute */
+	u16 sb;
+	u8 sb_idx;
+};
+
+struct ecore_rxq_start_ret_params {
+	void OSAL_IOMEM *p_prod;
+	void *p_handle;
+};
+
+struct ecore_txq_start_ret_params {
+	void OSAL_IOMEM *p_doorbell;
+	void *p_handle;
+};
+
 struct ecore_rss_params {
 	u8 update_rss_config;
 	u8 rss_enable;
@@ -35,8 +58,10 @@ struct ecore_rss_params {
 	u8 update_rss_ind_table;
 	u8 update_rss_key;
 	u8 rss_caps;
-	u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
-	u16 rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
+	u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
+
+	/* Indirection table consists of Rx queue handles */
+	void *rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
 	u32 rss_key[ECORE_RSS_KEY_SIZE];
 };
 
@@ -63,8 +88,8 @@ enum ecore_filter_opcode {
 	ECORE_FILTER_ADD,
 	ECORE_FILTER_REMOVE,
 	ECORE_FILTER_MOVE,
-	ECORE_FILTER_REPLACE, /* Delete all MACs and add new one instead */
-	ECORE_FILTER_FLUSH, /* Removes all filters */
+	ECORE_FILTER_REPLACE,	/* Delete all MACs and add new one instead */
+	ECORE_FILTER_FLUSH,	/* Removes all filters */
 };
 
 enum ecore_filter_ucast_type {
@@ -77,6 +102,7 @@ enum ecore_filter_ucast_type {
 	ECORE_FILTER_INNER_MAC_VNI_PAIR,
 	ECORE_FILTER_MAC_VNI_PAIR,
 	ECORE_FILTER_VNI,
+	ECORE_FILTER_UNUSED, /* @DPDK */
 };
 
 struct ecore_filter_ucast {
@@ -97,7 +123,7 @@ struct ecore_filter_mcast {
 	enum ecore_filter_opcode opcode;
 	u8 vport_to_add_to;
 	u8 vport_to_remove_from;
-	u8 num_mc_addrs;
+	u8 num_mc_addrs;
 #define ECORE_MAX_MC_ADDRS 64
 	unsigned char mac[ECORE_MAX_MC_ADDRS][ETH_ALEN];
 };
@@ -137,61 +163,47 @@ ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
 
 /* Set "accept" filters */
 enum _ecore_status_t
-ecore_filter_accept_cmd(struct ecore_dev *p_dev,
-			u8 vport,
-			struct ecore_filter_accept_flags accept_flags,
-			u8 update_accept_any_vlan,
-			u8 accept_any_vlan,
-			enum spq_mode comp_mode,
-			struct ecore_spq_comp_cb *p_comp_data);
+ecore_filter_accept_cmd(
+	struct ecore_dev *p_dev,
+	u8 vport,
+	struct ecore_filter_accept_flags accept_flags,
+	u8 update_accept_any_vlan,
+	u8 accept_any_vlan,
+	enum spq_mode comp_mode,
+	struct ecore_spq_comp_cb *p_comp_data);
 
 /**
- * @brief ecore_sp_eth_rx_queue_start - RX Queue Start Ramrod
+ * @brief ecore_eth_rx_queue_start - RX Queue Start Ramrod
 *
 * This ramrod initializes an RX Queue for a VPort. An Assert is generated if
 * the VPort ID is not currently initialized.
 *
 * @param p_hwfn
 * @param opaque_fid
- * @param rx_queue_id		RX Queue ID: Zero based, per VPort, allocated
- *				by assignment (=rssId)
- * @param vport_id		VPort ID
- * @param u8 stats_id		VPort ID which the queue stats
- *				will be added to
- * @param sb			Status Block of the Function Event Ring
- * @param sb_index		Index into the status block of the
- *				Function Event Ring
+ * @param p_params		Inputs; relative for PF [SB being an exception]
 * @param bd_max_bytes		Maximum bytes that can be placed on a BD
 * @param bd_chain_phys_addr	Physical address of BDs for receive.
 * @param cqe_pbl_addr		Physical address of the CQE PBL Table.
 * @param cqe_pbl_size		Size of the CQE PBL Table
- * @param pp_prod		Pointer to place producer's
- *				address for the Rx Q (May be
- *				NULL).
+ * @param p_ret_params		Struct to be filled with the outputs.
 *
 * @return enum _ecore_status_t
 */
-enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
-						 u16 opaque_fid,
-						 u8 rx_queue_id,
-						 u8 vport_id,
-						 u8 stats_id,
-						 u16 sb,
-						 u8 sb_index,
-						 u16 bd_max_bytes,
-						 dma_addr_t bd_chain_phys_addr,
-						 dma_addr_t cqe_pbl_addr,
-						 u16 cqe_pbl_size,
-						 void OSAL_IOMEM * *pp_prod);
+enum _ecore_status_t
+ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+			 u16 opaque_fid,
+			 struct ecore_queue_start_common_params *p_params,
+			 u16 bd_max_bytes,
+			 dma_addr_t bd_chain_phys_addr,
+			 dma_addr_t cqe_pbl_addr,
+			 u16 cqe_pbl_size,
+			 struct ecore_rxq_start_ret_params *p_ret_params);
 
 /**
- * @brief ecore_sp_eth_rx_queue_stop -
- *
- * This ramrod closes an RX queue. It sends RX queue stop ramrod
- * + CFC delete ramrod
+ * @brief ecore_eth_rx_queue_stop - This ramrod closes an Rx queue
 *
 * @param p_hwfn
- * @param rx_queue_id		RX Queue ID
+ * @param p_rxq			Handle of the queue to close
 * @param eq_completion_only	If True completion will be on
 *				EQe, if False completion will be
 *				on EQe if p_hwfn opaque
@@ -202,60 +214,48 @@ enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
 * @return enum _ecore_status_t
 */
 enum _ecore_status_t
-ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
-			   u16 rx_queue_id,
-			   bool eq_completion_only, bool cqe_completion);
+ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+			void *p_rxq,
+			bool eq_completion_only,
+			bool cqe_completion);
 
 /**
- * @brief ecore_sp_eth_tx_queue_start - TX Queue Start Ramrod
+ * @brief ecore_eth_tx_queue_start - TX Queue Start Ramrod
 *
 * This ramrod initializes a TX Queue for a VPort. An Assert is generated if
 * the VPort is not currently initialized.
 *
 * @param p_hwfn
 * @param opaque_fid
- * @param tx_queue_id		TX Queue ID
- * @param vport_id		VPort ID
- * @param stats_id		VPort ID which the queue stats
- *				will be added to
- * @param sb			Status Block of the Function Event Ring
- * @param sb_index		Index into the status block of the Function
- *				Event Ring
+ * @param p_params
+ * @param tc			traffic class to use with this L2 txq
 * @param pbl_addr		address of the pbl array
 * @param pbl_size		number of entries in pbl
- * @param pp_doorbell		Pointer to place doorbell pointer (May be NULL).
- *				This address should be used with the
- *				DIRECT_REG_WR macro.
+ * @param p_ret_params		Struct to be filled with the return parameters.
 *
 * @return enum _ecore_status_t
 */
-enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
-						 u16 opaque_fid,
-						 u16 tx_queue_id,
-						 u8 vport_id,
-						 u8 stats_id,
-						 u16 sb,
-						 u8 sb_index,
-						 dma_addr_t pbl_addr,
-						 u16 pbl_size,
-						 void OSAL_IOMEM * *
-						 pp_doorbell);
+enum _ecore_status_t
+ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
+			 u16 opaque_fid,
+			 struct ecore_queue_start_common_params *p_params,
+			 u8 tc,
+			 dma_addr_t pbl_addr,
+			 u16 pbl_size,
+			 struct ecore_txq_start_ret_params *p_ret_params);
 
 /**
- * @brief ecore_sp_eth_tx_queue_stop -
- *
- * This ramrod closes a TX queue. It sends TX queue stop ramrod
- * + CFC delete ramrod
+ * @brief ecore_eth_tx_queue_stop - closes a Tx queue
 *
 * @param p_hwfn
- * @param tx_queue_id		TX Queue ID
+ * @param p_txq			Handle of the Tx queue to close
 *
 * @return enum _ecore_status_t
 */
-enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
-						u16 tx_queue_id);
+enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+					     void *p_txq);
 
-enum ecore_tpa_mode {
+enum ecore_tpa_mode {
 	ECORE_TPA_MODE_NONE,
 	ECORE_TPA_MODE_RSC,
 	ECORE_TPA_MODE_GRO,
@@ -275,6 +275,17 @@ struct ecore_sp_vport_start_params {
 	u8 vport_id;		/* VPORT ID */
 	u16 mtu;		/* VPORT MTU */
 	bool zero_placement_offset;
+	bool check_mac;
+	bool check_ethtype;
+
+	/* Strict behavior on transmission errors */
+	bool b_err_illegal_vlan_mode;
+	bool b_err_illegal_inband_mode;
+	bool b_err_vlan_insert_with_inband;
+	bool b_err_small_pkt;
+	bool b_err_big_pkt;
+	bool b_err_anti_spoof;
+	bool b_err_ctrl_frame;
 };
 
 /**
@@ -293,30 +304,34 @@ ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
 		     struct ecore_sp_vport_start_params *p_params);
 
 struct ecore_sp_vport_update_params {
-	u16 opaque_fid;
-	u8 vport_id;
-	u8 update_vport_active_rx_flg;
-	u8 vport_active_rx_flg;
-	u8 update_vport_active_tx_flg;
-	u8 vport_active_tx_flg;
-	u8 update_inner_vlan_removal_flg;
-	u8 inner_vlan_removal_flg;
-	u8 silent_vlan_removal_flg;
-	u8 update_default_vlan_enable_flg;
-	u8 default_vlan_enable_flg;
-	u8 update_default_vlan_flg;
-	u16 default_vlan;
-	u8 update_tx_switching_flg;
-	u8 tx_switching_flg;
-	u8 update_approx_mcast_flg;
-	u8 update_anti_spoofing_en_flg;
-	u8 anti_spoofing_en;
-	u8 update_accept_any_vlan_flg;
-	u8 accept_any_vlan;
-	unsigned long bins[8];
-	struct ecore_rss_params *rss_params;
+	u16			opaque_fid;
+	u8			vport_id;
+	u8			update_vport_active_rx_flg;
+	u8			vport_active_rx_flg;
+	u8			update_vport_active_tx_flg;
+	u8			vport_active_tx_flg;
+	u8			update_inner_vlan_removal_flg;
+	u8			inner_vlan_removal_flg;
+	u8			silent_vlan_removal_flg;
+	u8			update_default_vlan_enable_flg;
+	u8			default_vlan_enable_flg;
+	u8			update_default_vlan_flg;
+	u16			default_vlan;
+	u8			update_tx_switching_flg;
+	u8			tx_switching_flg;
+	u8			update_approx_mcast_flg;
+	u8			update_anti_spoofing_en_flg;
+	u8			anti_spoofing_en;
+	u8			update_accept_any_vlan_flg;
+	u8			accept_any_vlan;
+	unsigned long		bins[8];
+	struct ecore_rss_params	*rss_params;
 	struct ecore_filter_accept_flags accept_flags;
 	struct ecore_sge_tpa_params *sge_tpa_params;
+	/* MTU change - notice this requires the vport to be disabled.
+	 * If non-zero, the value will be used.
+	 */
+	u16 mtu;
 };
 
 /**
@@ -351,7 +366,8 @@ ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
 * @return enum _ecore_status_t
 */
 enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
-					 u16 opaque_fid, u8 vport_id);
+					 u16 opaque_fid,
+					 u8 vport_id);
 
 enum _ecore_status_t
 ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
@@ -369,19 +385,19 @@ ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
 * @note Final phase API.
 *
 * @param p_hwfn
- * @param rx_queue_id		RX Queue ID
- * @param num_rxqs		Allow to update multiple rx
- *				queues, from rx_queue_id to
- *				(rx_queue_id + num_rxqs)
+ * @param pp_rxq_handlers	An array of queue handles to be updated.
+ * @param num_rxqs		Number of queues to update.
 * @param complete_cqe_flg	Post completion to the CQE Ring if set
 * @param complete_event_flg	Post completion to the Event Ring if set
+ * @param comp_mode
+ * @param p_comp_data
 *
 * @return enum _ecore_status_t
 */
 enum _ecore_status_t
 ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
-			      u16 rx_queue_id,
+			      void **pp_rxq_handlers,
 			      u8 num_rxqs,
 			      u8 complete_cqe_flg,
 			      u8 complete_event_flg,
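
For context, here is a minimal sketch of how a caller might drive the reworked, handle-based Rx API declared above. It is not part of the patch: the function name, the zeroed queue/SB ids and the bd_max_bytes value are made-up placeholders; only the ecore_eth_* signatures and the OSAL_MEMSET/ECORE_SUCCESS conventions come from this driver.

/* Illustrative only - start one Rx queue, keep the returned handle,
 * then close the queue through that handle instead of a queue id.
 */
static enum _ecore_status_t
example_rxq_start_stop(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
		       dma_addr_t bd_phys, dma_addr_t pbl_phys, u16 pbl_size)
{
	struct ecore_queue_start_common_params params;
	struct ecore_rxq_start_ret_params ret_params;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&params, 0, sizeof(params));
	OSAL_MEMSET(&ret_params, 0, sizeof(ret_params));
	params.vport_id = 0;	/* relative to the sending entity */
	params.queue_id = 0;	/* relative queue id */
	params.stats_id = 0;	/* relevant only for PFs */
	params.sb = 0;		/* absolute status block id */
	params.sb_idx = 0;

	rc = ecore_eth_rx_queue_start(p_hwfn, opaque_fid, &params,
				      0x600 /* bd_max_bytes, made up */,
				      bd_phys, pbl_phys, pbl_size,
				      &ret_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* ret_params.p_prod is the Rx producer address; ret_params.p_handle
	 * is the opaque handle the rest of the API now expects -
	 * rx_queue_stop and rx_queues_update both take it instead of an id.
	 */
	return ecore_eth_rx_queue_stop(p_hwfn, ret_params.p_handle,
				       false /* eq_completion_only */,
				       true /* cqe_completion */);
}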
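
Likewise, since rss_ind_table[] now holds Rx queue handles rather than u16 queue ids, an RSS indirection table would be built from the p_handle values collected at queue start. Again a hypothetical sketch: rxq_handles[] and the helper name are illustrative, not part of the patch.

/* Spread num_rxqs queue handles (assumed > 0) across the 128-entry
 * indirection table and mark it for update.
 */
static void
example_fill_rss_ind_table(struct ecore_rss_params *p_rss,
			   void *rxq_handles[], u16 num_rxqs)
{
	unsigned int i;

	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
		p_rss->rss_ind_table[i] = rxq_handles[i % num_rxqs];
	p_rss->update_rss_ind_table = 1;
}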