int
qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
- const struct rte_eth_rxconf *rx_conf,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
struct qede_dev *qdev = dev->data->dev_private;
static void qede_init_fp(struct qede_dev *qdev)
{
struct qede_fastpath *fp;
- uint8_t i, rss_id, tc;
- int fp_rx = qdev->fp_num_rx, rxq = 0, txq = 0;
+ uint8_t i;
+ int fp_rx = qdev->fp_num_rx;
memset((void *)qdev->fp_array, 0, (QEDE_QUEUE_CNT(qdev) *
sizeof(*qdev->fp_array)));
}
}
-int qede_alloc_fp_array(struct qede_dev *qdev)
+static int qede_alloc_fp_array(struct qede_dev *qdev)
{
- struct qede_fastpath *fp;
struct ecore_dev *edev = &qdev->edev;
- int i;
qdev->fp_array = rte_calloc("fp", QEDE_QUEUE_CNT(qdev),
sizeof(*qdev->fp_array),
}
static inline void
-qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
+qede_update_rx_prod(__rte_unused struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
{
uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
/* Enable LRO in split mode */
sge_tpa_params->tpa_ipv4_en_flg = enable;
sge_tpa_params->tpa_ipv6_en_flg = enable;
- sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
- sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
+ sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
+ sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
/* set if tpa enable changes */
sge_tpa_params->update_tpa_en_flg = 1;
/* set if tpa parameters should be handled */
sge_tpa_params->update_tpa_param_flg = enable;
sge_tpa_params->max_buffers_per_cqe = 20;
+ /* Enable TPA in split mode. In this mode each TPA segment
+ * starts on a new BD, so there is one BD per segment.
+ */
sge_tpa_params->tpa_pkt_split_flg = 1;
sge_tpa_params->tpa_hdr_data_split_flg = 0;
sge_tpa_params->tpa_gro_consistent_flg = 0;
sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
}
-static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
+static int qede_start_queues(struct rte_eth_dev *eth_dev,
+ __rte_unused bool clear_stats)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
dma_addr_t p_phys_table;
int txq_index;
uint16_t page_cnt;
- int vlan_removal_en = 1;
int rc, tc, i;
for_each_queue(i) {
return 0;
}
+static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
+{
+ uint16_t val;
+
+ /* Lookup table: map the L3/L4 parse-flag bits to an mbuf packet type */
+ static const uint32_t
+ ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
+ [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ };
+
+ /* Bits (0..3) provide the L3/L4 protocol type */
+ val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
+ PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
+ (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
+ PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT)) & flags;
+
+ if (val < QEDE_PKT_TYPE_MAX)
+ return ptype_lkup_tbl[val] | RTE_PTYPE_L2_ETHER;
+ else
+ return RTE_PTYPE_UNKNOWN;
+}
+
static inline uint8_t
qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
{
}
static inline void
-qede_reuse_page(struct qede_dev *qdev,
+qede_reuse_page(__rte_unused struct qede_dev *qdev,
struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
{
struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
}
}
-static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
-{
- uint16_t val;
-
- /* Lookup table */
- static const uint32_t
- ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
- [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4,
- [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6,
- [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
- [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
- [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
- [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
- };
-
- /* Bits (0..3) provides L3/L4 protocol type */
- val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
- PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
- (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
- PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT)) & flags;
-
- if (val < QEDE_PKT_TYPE_MAX)
- return ptype_lkup_tbl[val] | RTE_PTYPE_L2_ETHER;
- else
- return RTE_PTYPE_UNKNOWN;
-}
-
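+/* Common handler for TPA continuation and end CQEs: when len is non-zero,
+ * chain the consumed BD's mbuf onto the aggregation and refill the RX ring.
+ */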
static inline void
-qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev,
- struct qede_rx_queue *rxq,
- struct eth_fast_path_rx_tpa_cont_cqe *cqe)
+qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev,
+ struct qede_rx_queue *rxq,
+ uint8_t agg_index, uint16_t len)
{
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct qede_agg_info *tpa_info;
- struct rte_mbuf *temp_frag; /* Pointer to mbuf chain head */
- struct rte_mbuf *curr_frag;
- uint8_t list_count = 0;
+ struct rte_mbuf *curr_frag; /* Pointer to currently filled TPA seg */
uint16_t cons_idx;
- uint8_t i;
-
- PMD_RX_LOG(INFO, rxq, "TPA cont[%02x] - len_list [%04x %04x]\n",
- cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]),
- rte_le_to_cpu_16(cqe->len_list[1]));
-
- tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
- temp_frag = tpa_info->mbuf;
- assert(temp_frag);
- for (i = 0; cqe->len_list[i]; i++) {
+ /* Under certain conditions the firmware may not consume an additional
+ * or new BD, so the decision to consume the BD must be made based on
+ * len_list[0].
+ */
+ if (rte_le_to_cpu_16(len)) {
+ tpa_info = &rxq->tpa_info[agg_index];
cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
curr_frag = rxq->sw_rx_ring[cons_idx].mbuf;
+ assert(curr_frag);
+ curr_frag->nb_segs = 1;
+ curr_frag->pkt_len = rte_le_to_cpu_16(len);
+ curr_frag->data_len = curr_frag->pkt_len;
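+ /* Append this segment to the aggregation chain and advance the tail */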
+ tpa_info->tpa_tail->next = curr_frag;
+ tpa_info->tpa_tail = curr_frag;
qede_rx_bd_ring_consume(rxq);
- curr_frag->data_len = rte_le_to_cpu_16(cqe->len_list[i]);
- temp_frag->next = curr_frag;
- temp_frag = curr_frag;
- list_count++;
- }
-
- /* Allocate RX mbuf on the RX BD ring for those many consumed */
- for (i = 0 ; i < list_count ; i++) {
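+ /* A BD was consumed above; post a fresh receive buffer in its place */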
if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
- DP_ERR(edev, "Failed to allocate mbuf for LRO cont\n");
- tpa_info->state = QEDE_AGG_STATE_ERROR;
+ PMD_RX_LOG(ERR, rxq, "mbuf allocation fails\n");
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+ rxq->rx_alloc_errors++;
}
}
}
+static inline void
+qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev,
+ struct qede_rx_queue *rxq,
+ struct eth_fast_path_rx_tpa_cont_cqe *cqe)
+{
+ PMD_RX_LOG(INFO, rxq, "TPA cont[%d] - len [%d]\n",
+ cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]));
+ /* Only len_list[0] will carry a value */
+ qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
+ cqe->len_list[0]);
+}
+
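+/* Close out the aggregation: consume the final BD (if any) and set the
+ * head mbuf's segment count and total packet length from the end CQE.
+ */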
static inline void
qede_rx_process_tpa_end_cqe(struct qede_dev *qdev,
struct qede_rx_queue *rxq,
struct eth_fast_path_rx_tpa_end_cqe *cqe)
{
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct qede_agg_info *tpa_info;
- struct rte_mbuf *temp_frag; /* Pointer to mbuf chain head */
- struct rte_mbuf *curr_frag;
- struct rte_mbuf *rx_mb;
- uint8_t list_count = 0;
- uint16_t cons_idx;
- uint8_t i;
-
- PMD_RX_LOG(INFO, rxq, "TPA End[%02x] - len_list [%04x %04x]\n",
- cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]),
- rte_le_to_cpu_16(cqe->len_list[1]));
-
- tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
- temp_frag = tpa_info->mbuf;
- assert(temp_frag);
-
- for (i = 0; cqe->len_list[i]; i++) {
- cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
- curr_frag = rxq->sw_rx_ring[cons_idx].mbuf;
- qede_rx_bd_ring_consume(rxq);
- curr_frag->data_len = rte_le_to_cpu_16(cqe->len_list[i]);
- temp_frag->next = curr_frag;
- temp_frag = curr_frag;
- list_count++;
- }
-
- /* Allocate RX mbuf on the RX BD ring for those many consumed */
- for (i = 0 ; i < list_count ; i++) {
- if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
- DP_ERR(edev, "Failed to allocate mbuf for lro end\n");
- tpa_info->state = QEDE_AGG_STATE_ERROR;
- }
- }
+ struct rte_mbuf *rx_mb; /* Pointer to the head of the aggregation chain */
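+ /* Consume the final BD (len_list[0]) just as for a continuation CQE */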
+ qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
+ cqe->len_list[0]);
/* Update total length and frags based on end TPA */
- rx_mb = rxq->tpa_info[cqe->tpa_agg_index].mbuf;
- /* TBD: Add sanity checks here */
+ rx_mb = rxq->tpa_info[cqe->tpa_agg_index].tpa_head;
+ /* TODO: Add Sanity Checks */
rx_mb->nb_segs = cqe->num_of_bds;
rx_mb->pkt_len = cqe->total_packet_len;
- tpa_info->state = QEDE_AGG_STATE_NONE;
+
+ PMD_RX_LOG(INFO, rxq, "TPA End[%d] reason %d cqe_len %d nb_segs %d"
+ " pkt_len %d\n", cqe->tpa_agg_index, cqe->end_reason,
+ rte_le_to_cpu_16(cqe->len_list[0]), rx_mb->nb_segs,
+ rx_mb->pkt_len);
}
static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
{
struct qede_rx_queue *rxq = p_rxq;
struct qede_dev *qdev = rxq->qdev;
- struct ecore_dev *edev = &qdev->edev;
register struct rte_mbuf *seg1 = NULL;
register struct rte_mbuf *seg2 = NULL;
uint16_t sw_rx_index;
uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
uint16_t rx_pkt = 0;
union eth_rx_cqe *cqe;
- struct eth_fast_path_rx_reg_cqe *fp_cqe;
+ struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
register struct rte_mbuf *rx_mb = NULL;
register struct rte_mbuf *seg1 = NULL;
enum eth_rx_cqe_type cqe_type;
- uint16_t pkt_len; /* Sum of all BD segments */
+ uint16_t pkt_len = 0; /* Sum of all BD segments */
uint16_t len; /* Length of first BD */
uint8_t num_segs = 1;
uint16_t preload_idx;
- uint8_t csum_flag;
uint16_t parse_flag;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+ uint8_t bitfield_val;
enum rss_hash_type htype;
+#endif
uint8_t tunn_parse_flag;
uint8_t j;
struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa;
uint32_t packet_type;
uint16_t vlan_tci;
bool tpa_start_flg;
- uint8_t bitfield_val;
uint8_t offset, tpa_agg_idx, flags;
- struct qede_agg_info *tpa_info;
+ struct qede_agg_info *tpa_info = NULL;
+ uint32_t rss_hash;
hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
packet_type = RTE_PTYPE_UNKNOWN;
vlan_tci = 0;
tpa_start_flg = false;
+ rss_hash = 0;
/* Get the CQE from the completion ring */
cqe =
cqe_start_tpa = &cqe->fast_path_tpa_start;
tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
tpa_start_flg = true;
+ /* Mark it as an LRO packet */
+ ol_flags |= PKT_RX_LRO;
+ /* In split mode, seg_len is the same as len_on_first_bd
+ * and ext_bd_len_list will be empty since there are
+ * no additional buffers
+ */
PMD_RX_LOG(INFO, rxq,
- "TPA start[%u] - len %04x [header %02x]"
- " [bd_list[0] %04x], [seg_len %04x]\n",
+ "TPA start[%d] - len_on_first_bd %d header %d"
+ " [bd_list[0] %d], [seg_len %d]\n",
cqe_start_tpa->tpa_agg_index,
rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
cqe_start_tpa->header_len,
case ETH_RX_CQE_TYPE_TPA_CONT:
qede_rx_process_tpa_cont_cqe(qdev, rxq,
&cqe->fast_path_tpa_cont);
- continue;
+ goto next_cqe;
case ETH_RX_CQE_TYPE_TPA_END:
qede_rx_process_tpa_end_cqe(qdev, rxq,
&cqe->fast_path_tpa_end);
tpa_agg_idx = cqe->fast_path_tpa_end.tpa_agg_index;
- rx_mb = rxq->tpa_info[tpa_agg_idx].mbuf;
- PMD_RX_LOG(INFO, rxq, "TPA end reason %d\n",
- cqe->fast_path_tpa_end.end_reason);
+ tpa_info = &rxq->tpa_info[tpa_agg_idx];
+ rx_mb = rxq->tpa_info[tpa_agg_idx].tpa_head;
goto tpa_end;
case ETH_RX_CQE_TYPE_SLOW_PATH:
PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
/* Handle regular CQE or TPA start CQE */
if (!tpa_start_flg) {
parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
- bitfield_val = fp_cqe->bitfields;
offset = fp_cqe->placement_offset;
len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
+ vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
+ rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+ bitfield_val = fp_cqe->bitfields;
+ htype = (uint8_t)GET_FIELD(bitfield_val,
+ ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
+#endif
} else {
parse_flag =
rte_le_to_cpu_16(cqe_start_tpa->pars_flags.flags);
- bitfield_val = cqe_start_tpa->bitfields;
offset = cqe_start_tpa->placement_offset;
/* seg_len = len_on_first_bd */
len = rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd);
- tpa_info->start_cqe_bd_len = len +
- cqe_start_tpa->header_len;
- tpa_info->mbuf = rx_mb;
+ vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+ bitfield_val = cqe_start_tpa->bitfields;
+ htype = (uint8_t)GET_FIELD(bitfield_val,
+ ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE);
+#endif
+ rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash);
}
if (qede_tunn_exist(parse_flag)) {
PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
}
if (CQE_HAS_VLAN(parse_flag)) {
- vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
ol_flags |= PKT_RX_VLAN_PKT;
+ if (qdev->vlan_strip_flg) {
+ ol_flags |= PKT_RX_VLAN_STRIPPED;
+ rx_mb->vlan_tci = vlan_tci;
+ }
}
-
if (CQE_HAS_OUTER_VLAN(parse_flag)) {
- vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
ol_flags |= PKT_RX_QINQ_PKT;
+ if (qdev->vlan_strip_flg) {
+ rx_mb->vlan_tci = vlan_tci;
+ ol_flags |= PKT_RX_QINQ_STRIPPED;
+ }
rx_mb->vlan_tci_outer = 0;
}
-
/* RSS Hash */
- htype = (uint8_t)GET_FIELD(bitfield_val,
- ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
- if (qdev->rss_enable && htype) {
+ if (qdev->rss_enable) {
ol_flags |= PKT_RX_RSS_HASH;
- rx_mb->hash.rss = rte_le_to_cpu_32(fp_cqe->rss_hash);
- PMD_RX_LOG(INFO, rxq, "Hash result 0x%x\n",
- rx_mb->hash.rss);
+ rx_mb->hash.rss = rss_hash;
}
if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
rx_mb->port = rxq->port_id;
rx_mb->ol_flags = ol_flags;
rx_mb->data_len = len;
- rx_mb->vlan_tci = vlan_tci;
rx_mb->packet_type = packet_type;
- PMD_RX_LOG(INFO, rxq, "pkt_type %04x len %04x flags %04lx\n",
- packet_type, len, (unsigned long)ol_flags);
+ PMD_RX_LOG(INFO, rxq,
+ "pkt_type 0x%04x len %u hash_type %d hash_val 0x%x"
+ " ol_flags 0x%04lx\n",
+ packet_type, len, htype, rx_mb->hash.rss,
+ (unsigned long)ol_flags);
if (!tpa_start_flg) {
rx_mb->nb_segs = fp_cqe->bd_num;
rx_mb->pkt_len = pkt_len;
+ } else {
+ /* TPA start: this mbuf becomes both head and tail of the aggregation */
+ tpa_info->tpa_head = rx_mb;
+ tpa_info->tpa_tail = tpa_info->tpa_head;
}
rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
tpa_end:
struct rte_mbuf *mbuf;
uint16_t nb_segs;
uint16_t idx;
- uint8_t nbds;
idx = TX_CONS(txq);
mbuf = txq->sw_tx_ring[idx].mbuf;
}
static inline void
-qede_process_tx_compl(struct ecore_dev *edev, struct qede_tx_queue *txq)
+qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
+ struct qede_tx_queue *txq)
{
uint16_t hw_bd_cons;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
uint16_t sw_tx_cons;
+#endif
rte_compiler_barrier();
hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
abs(hw_bd_cons - sw_tx_cons));
+#endif
while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl))
qede_free_tx_pkt(txq);
}
/* TX prepare to check packets meets TX conditions */
uint16_t
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
struct qede_tx_queue *txq = p_txq;
+#else
+qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+#endif
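+ /* txq is needed only for the debug log below, hence the __rte_unused
+ * prototype variant when TX debug is disabled.
+ */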
uint64_t ol_flags;
struct rte_mbuf *m;
uint16_t i;
}
}
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
if (unlikely(i != nb_pkts))
PMD_TX_LOG(ERR, txq, "TX prepare failed for %u\n",
nb_pkts - i);
+#endif
return i;
}
bd3 = NULL;
hdr_size = 0;
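+ /* Fetch the current packet and advance the input array pointer */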
- mbuf = *tx_pkts;
+ mbuf = *tx_pkts++;
assert(mbuf);
/* Check minimum TX BDS availability against available BDs */
/* Fill the entry in the SW ring and the BDs in the FW ring */
idx = TX_PROD(txq);
- *tx_pkts++;
txq->sw_tx_ring[idx].mbuf = mbuf;
/* BD1 */
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct qede_fastpath *fp;
- uint8_t i, rss_id, txq_index, tc;
+ uint8_t i, txq_index, tc;
int rxq = 0, txq = 0;
for_each_queue(i) {
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
- struct qed_link_output link_output;
- struct qede_fastpath *fp;
int rc;
DP_INFO(edev, "Device state is %d\n", qdev->state);
/* Bring-up the link */
qede_dev_set_link_state(eth_dev, true);
- /* Reset ring */
- if (qede_reset_fp_rings(qdev))
- return -ENOMEM;
-
/* Start/resume traffic */
qdev->ops->fastpath_start(edev);
}
}
}
+ qede_reset_fp_rings(qdev);
return 0;
}