static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
unsigned int i = 0, j = 0, qid;
unsigned int rxq_stat_cntrs, txq_stat_cntrs;
DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");
- rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(dev),
RTE_ETHDEV_QUEUE_STAT_CNTRS);
- txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+ txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(dev),
RTE_ETHDEV_QUEUE_STAT_CNTRS);
- for_each_rss(qid) {
+ for (qid = 0; qid < qdev->num_rx_queues; qid++) {
OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
offsetof(struct qede_rx_queue, rcv_pkts), 0,
sizeof(uint64_t));
i = 0;
- for_each_tss(qid) {
+ for (qid = 0; qid < qdev->num_tx_queues; qid++) {
txq = qdev->fp_array[qid].txq;
OSAL_MEMSET((uint64_t *)(uintptr_t)
for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
id = i / RTE_RETA_GROUP_SIZE;
pos = i % RTE_RETA_GROUP_SIZE;
- q = i % QEDE_RSS_COUNT(qdev);
+ q = i % QEDE_RSS_COUNT(eth_dev);
reta_conf[id].reta[pos] = q;
}
if (qede_rss_reta_update(eth_dev, &reta_conf[0],
PMD_INIT_FUNC_TRACE(edev);
- /* Check requirements for 100G mode */
- if (ECORE_IS_CMT(edev)) {
- if (eth_dev->data->nb_rx_queues < 2 ||
- eth_dev->data->nb_tx_queues < 2) {
- DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
- return -EINVAL;
- }
-
- if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
- (eth_dev->data->nb_tx_queues % 2 != 0)) {
- DP_ERR(edev,
- "100G mode needs even no. of RX/TX queues\n");
- return -EINVAL;
- }
- }
-
/* We need to have min 1 RX queue. There is no min check in
* rte_eth_dev_configure(), so we are checking it here.
*/
return -ENOTSUP;
qede_dealloc_fp_resc(eth_dev);
- qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
- qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
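+ /* Each ethdev queue needs one HW queue per engine, so scale by num_hwfns (2 in CMT mode) */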
+ qdev->num_tx_queues = eth_dev->data->nb_tx_queues * edev->num_hwfns;
+ qdev->num_rx_queues = eth_dev->data->nb_rx_queues * edev->num_hwfns;
+
if (qede_alloc_fp_resc(qdev))
return -ENOMEM;
return ret;
DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
- QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
+ QEDE_RSS_COUNT(eth_dev), QEDE_TSS_COUNT(eth_dev));
+
+ if (ECORE_IS_CMT(edev))
+ DP_INFO(edev, "Actual HW queues for CMT mode - RX = %d TX = %d\n",
+ qdev->num_rx_queues, qdev->num_tx_queues);
+
return 0;
}
else
dev_info->max_rx_queues = (uint16_t)RTE_MIN(
QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
+ /* CMT mode internally doubles the number of queues, so report only half to the application */
+ if (ECORE_IS_CMT(edev))
+ dev_info->max_rx_queues = dev_info->max_rx_queues / 2;
+
dev_info->max_tx_queues = dev_info->max_rx_queues;
dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
eth_stats->oerrors = stats.common.tx_err_drop_pkts;
/* Queue stats */
- rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(eth_dev),
RTE_ETHDEV_QUEUE_STAT_CNTRS);
- txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+ txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(eth_dev),
RTE_ETHDEV_QUEUE_STAT_CNTRS);
- if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
- (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
+ if (rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(eth_dev) ||
+ txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(eth_dev))
DP_VERBOSE(edev, ECORE_MSG_DEBUG,
"Not all the queue stats will be displayed. Set"
" RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
" appropriately and retry.\n");
- for_each_rss(qid) {
+ for (qid = 0; qid < eth_dev->data->nb_rx_queues; qid++) {
eth_stats->q_ipackets[i] =
*(uint64_t *)(
((char *)(qdev->fp_array[qid].rxq)) +
break;
}
- for_each_tss(qid) {
+ for (qid = 0; qid < eth_dev->data->nb_tx_queues; qid++) {
txq = qdev->fp_array[qid].txq;
eth_stats->q_opackets[j] =
*((uint64_t *)(uintptr_t)
static unsigned
qede_get_xstats_count(struct qede_dev *qdev) {
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
+
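+ /* On BB devices (which may run in CMT mode) the per-queue xstats scale with num_hwfns */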
if (ECORE_IS_BB(&qdev->edev))
return RTE_DIM(qede_xstats_strings) +
RTE_DIM(qede_bb_xstats_strings) +
(RTE_DIM(qede_rxq_xstats_strings) *
- RTE_MIN(QEDE_RSS_COUNT(qdev),
- RTE_ETHDEV_QUEUE_STAT_CNTRS));
+ QEDE_RSS_COUNT(dev) * qdev->edev.num_hwfns);
else
return RTE_DIM(qede_xstats_strings) +
RTE_DIM(qede_ah_xstats_strings) +
(RTE_DIM(qede_rxq_xstats_strings) *
- RTE_MIN(QEDE_RSS_COUNT(qdev),
- RTE_ETHDEV_QUEUE_STAT_CNTRS));
+ QEDE_RSS_COUNT(dev));
}
static int
}
}
- rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(dev),
RTE_ETHDEV_QUEUE_STAT_CNTRS);
for (qid = 0; qid < rxq_stat_cntrs; qid++) {
for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
}
}
- rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(dev),
RTE_ETHDEV_QUEUE_STAT_CNTRS);
for (qid = 0; qid < rxq_stat_cntrs; qid++) {
- for_each_rss(qid) {
- for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
- xstats[stat_idx].value = *(uint64_t *)(
- ((char *)(qdev->fp_array[qid].rxq)) +
- qede_rxq_xstats_strings[i].offset);
- xstats[stat_idx].id = stat_idx;
- stat_idx++;
- }
+ for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
+ xstats[stat_idx].value = *(uint64_t *)
+ (((char *)(qdev->fp_array[qid].rxq)) +
+ qede_rxq_xstats_strings[i].offset);
+ xstats[stat_idx].id = stat_idx;
+ stat_idx++;
}
}
RTE_PTYPE_UNKNOWN
};
- if (eth_dev->rx_pkt_burst == qede_recv_pkts)
+ if (eth_dev->rx_pkt_burst == qede_recv_pkts ||
+ eth_dev->rx_pkt_burst == qede_recv_pkts_cmt)
return ptypes;
return NULL;
vport_update_params.vport_id = 0;
/* pass the L2 handles instead of qids */
for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
- idx = i % QEDE_RSS_COUNT(qdev);
+ idx = i % QEDE_RSS_COUNT(eth_dev);
rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
}
vport_update_params.rss_params = &rss_params;
qdev->mtu = mtu;
/* Fix up RX buf size for all queues of the port */
- for_each_rss(i) {
+ for (i = 0; i < qdev->num_rx_queues; i++) {
fp = &qdev->fp_array[i];
if (fp->rxq != NULL) {
bufsz = (uint16_t)rte_pktmbuf_data_room_size(
/* update max frame size */
dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
/* Reassign back */
- dev->rx_pkt_burst = qede_recv_pkts;
- dev->tx_pkt_burst = qede_xmit_pkts;
-
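+ /* In CMT mode use the _cmt burst handlers, which fan traffic out to both engines */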
+ if (ECORE_IS_CMT(edev)) {
+ dev->rx_pkt_burst = qede_recv_pkts_cmt;
+ dev->tx_pkt_burst = qede_xmit_pkts_cmt;
+ } else {
+ dev->rx_pkt_burst = qede_recv_pkts;
+ dev->tx_pkt_burst = qede_xmit_pkts;
+ }
return 0;
}
pci_addr.bus, pci_addr.devid, pci_addr.function,
eth_dev->data->port_id);
- eth_dev->rx_pkt_burst = qede_recv_pkts;
- eth_dev->tx_pkt_burst = qede_xmit_pkts;
- eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
-
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
DP_ERR(edev, "Skipping device init from secondary process\n");
return 0;
strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
QEDE_PMD_DRV_VER_STR_SIZE);
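+ /* Select the per-engine (CMT) or regular Rx/Tx burst handlers */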
+ if (ECORE_IS_CMT(edev)) {
+ eth_dev->rx_pkt_burst = qede_recv_pkts_cmt;
+ eth_dev->tx_pkt_burst = qede_xmit_pkts_cmt;
+ } else {
+ eth_dev->rx_pkt_burst = qede_recv_pkts;
+ eth_dev->tx_pkt_burst = qede_xmit_pkts;
+ }
+
+ eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
+
/* For CMT mode devices, do periodic polling for slowpath events.
* This is required since uio device uses only one MSI-x
* interrupt vector but we need one for each engine.
(edev)->dev_info.num_tc)
#define QEDE_QUEUE_CNT(qdev) ((qdev)->num_queues)
-#define QEDE_RSS_COUNT(qdev) ((qdev)->num_rx_queues)
-#define QEDE_TSS_COUNT(qdev) ((qdev)->num_tx_queues)
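+/* RSS/TSS counts are now the ethdev (application-visible) queue counts;
+ * the HW queue counts live in qdev->num_rx_queues/num_tx_queues.
+ */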
+#define QEDE_RSS_COUNT(dev) ((dev)->data->nb_rx_queues)
+#define QEDE_TSS_COUNT(dev) ((dev)->data->nb_tx_queues)
#define QEDE_DUPLEX_FULL 1
#define QEDE_DUPLEX_HALF 2
struct qed_dev_eth_info dev_info;
struct ecore_sb_info *sb_array;
struct qede_fastpath *fp_array;
+ struct qede_fastpath_cmt *fp_array_cmt;
uint16_t mtu;
bool enable_tx_switching;
bool rss_enable;
return -EINVAL;
}
- if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
+ if (fdir->action.rx_queue >= QEDE_RSS_COUNT(eth_dev)) {
DP_ERR(edev, "invalid queue number %u\n",
fdir->action.rx_queue);
return -EINVAL;
struct rte_flow_error *error,
struct rte_flow *flow)
{
- struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
const struct rte_flow_action_queue *queue;
if (actions == NULL) {
case RTE_FLOW_ACTION_TYPE_QUEUE:
queue = actions->conf;
- if (queue->index >= QEDE_RSS_COUNT(qdev)) {
+ if (queue->index >= QEDE_RSS_COUNT(dev)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
bufsz = rc;
- rxq = qede_alloc_rx_queue_mem(dev, qid, nb_desc,
- socket_id, mp, bufsz);
- if (!rxq)
- return -ENOMEM;
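+ /* In CMT mode create a Rx queue on each engine for every ethdev Rx queue index */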
+ if (ECORE_IS_CMT(edev)) {
+ rxq = qede_alloc_rx_queue_mem(dev, qid * 2, nb_desc,
+ socket_id, mp, bufsz);
+ if (!rxq)
+ return -ENOMEM;
+
+ qdev->fp_array[qid * 2].rxq = rxq;
+ rxq = qede_alloc_rx_queue_mem(dev, qid * 2 + 1, nb_desc,
+ socket_id, mp, bufsz);
+ if (!rxq)
+ return -ENOMEM;
+
+ qdev->fp_array[qid * 2 + 1].rxq = rxq;
+ /* Provide the per-engine fp pair as the Rx queue */
+ dev->data->rx_queues[qid] = &qdev->fp_array_cmt[qid];
+ } else {
+ rxq = qede_alloc_rx_queue_mem(dev, qid, nb_desc,
+ socket_id, mp, bufsz);
+ if (!rxq)
+ return -ENOMEM;
- dev->data->rx_queues[qid] = rxq;
- qdev->fp_array[qid].rxq = rxq;
+ dev->data->rx_queues[qid] = rxq;
+ qdev->fp_array[qid].rxq = rxq;
+ }
DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
qid, nb_desc, rxq->rx_buf_size, socket_id);
void qede_rx_queue_release(void *rx_queue)
{
struct qede_rx_queue *rxq = rx_queue;
+ struct qede_fastpath_cmt *fp_cmt;
struct qede_dev *qdev;
struct ecore_dev *edev;
qdev = rxq->qdev;
edev = QEDE_INIT_EDEV(qdev);
PMD_INIT_FUNC_TRACE(edev);
- _qede_rx_queue_release(qdev, edev, rxq);
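+ /* In CMT mode rx_queue points at the per-engine fp pair; release both HW Rx queues */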
+ if (ECORE_IS_CMT(edev)) {
+ fp_cmt = rx_queue;
+ _qede_rx_queue_release(qdev, edev, fp_cmt->fp0->rxq);
+ _qede_rx_queue_release(qdev, edev, fp_cmt->fp1->rxq);
+ } else {
+ _qede_rx_queue_release(qdev, edev, rxq);
+ }
}
}
dev->data->tx_queues[queue_idx] = NULL;
}
- txq = qede_alloc_tx_queue_mem(dev, queue_idx, nb_desc,
- socket_id, tx_conf);
- if (!txq)
- return -ENOMEM;
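+ /* In CMT mode create a Tx queue on each engine for every ethdev Tx queue index */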
+ if (ECORE_IS_CMT(edev)) {
+ txq = qede_alloc_tx_queue_mem(dev, queue_idx * 2, nb_desc,
+ socket_id, tx_conf);
+ if (!txq)
+ return -ENOMEM;
+
+ qdev->fp_array[queue_idx * 2].txq = txq;
+ txq = qede_alloc_tx_queue_mem(dev, (queue_idx * 2) + 1, nb_desc,
+ socket_id, tx_conf);
+ if (!txq)
+ return -ENOMEM;
+
+ qdev->fp_array[(queue_idx * 2) + 1].txq = txq;
+ dev->data->tx_queues[queue_idx] =
+ &qdev->fp_array_cmt[queue_idx];
+ } else {
+ txq = qede_alloc_tx_queue_mem(dev, queue_idx, nb_desc,
+ socket_id, tx_conf);
+ if (!txq)
+ return -ENOMEM;
- dev->data->tx_queues[queue_idx] = txq;
- qdev->fp_array[queue_idx].txq = txq;
+ dev->data->tx_queues[queue_idx] = txq;
+ qdev->fp_array[queue_idx].txq = txq;
+ }
return 0;
}
void qede_tx_queue_release(void *tx_queue)
{
struct qede_tx_queue *txq = tx_queue;
+ struct qede_fastpath_cmt *fp_cmt;
struct qede_dev *qdev;
struct ecore_dev *edev;
qdev = txq->qdev;
edev = QEDE_INIT_EDEV(qdev);
PMD_INIT_FUNC_TRACE(edev);
- _qede_tx_queue_release(qdev, edev, txq);
+
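+ /* In CMT mode tx_queue points at the per-engine fp pair; release both HW Tx queues */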
+ if (ECORE_IS_CMT(edev)) {
+ fp_cmt = tx_queue;
+ _qede_tx_queue_release(qdev, edev, fp_cmt->fp0->txq);
+ _qede_tx_queue_release(qdev, edev, fp_cmt->fp1->txq);
+ } else {
+ _qede_tx_queue_release(qdev, edev, txq);
+ }
}
}
struct qede_fastpath *fp;
uint32_t num_sbs;
uint16_t sb_idx;
+ int i;
if (IS_VF(edev))
ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
memset((void *)qdev->fp_array, 0, QEDE_RXTX_MAX(qdev) *
sizeof(*qdev->fp_array));
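+ /* In CMT mode build a companion array that pairs the two per-engine fastpath entries behind each ethdev queue */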
+ if (ECORE_IS_CMT(edev)) {
+ qdev->fp_array_cmt = rte_calloc("fp_cmt",
+ QEDE_RXTX_MAX(qdev) / 2,
+ sizeof(*qdev->fp_array_cmt),
+ RTE_CACHE_LINE_SIZE);
+
+ if (!qdev->fp_array_cmt) {
+ DP_ERR(edev, "fp array for CMT allocation failed\n");
+ return -ENOMEM;
+ }
+
+ memset((void *)qdev->fp_array_cmt, 0,
+ (QEDE_RXTX_MAX(qdev) / 2) * sizeof(*qdev->fp_array_cmt));
+
+ /* Establish the mapping between fp_array and fp_array_cmt */
+ for (i = 0; i < QEDE_RXTX_MAX(qdev) / 2; i++) {
+ qdev->fp_array_cmt[i].qdev = qdev;
+ qdev->fp_array_cmt[i].fp0 = &qdev->fp_array[i * 2];
+ qdev->fp_array_cmt[i].fp1 = &qdev->fp_array[i * 2 + 1];
+ }
+ }
+
for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
fp = &qdev->fp_array[sb_idx];
if (!fp)
if (qdev->fp_array)
rte_free(qdev->fp_array);
qdev->fp_array = NULL;
+
+ if (qdev->fp_array_cmt)
+ rte_free(qdev->fp_array_cmt);
+ qdev->fp_array_cmt = NULL;
}
static inline void
int hwfn_index;
int rc;
- if (rx_queue_id < eth_dev->data->nb_rx_queues) {
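+ /* rx_queue_id indexes HW queues; in CMT mode dev->data->rx_queues[] holds fp_cmt pairs, so take rxq from fp_array */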
+ if (rx_queue_id < qdev->num_rx_queues) {
fp = &qdev->fp_array[rx_queue_id];
- rxq = eth_dev->data->rx_queues[rx_queue_id];
+ rxq = fp->rxq;
/* Allocate buffers for the Rx ring */
for (j = 0; j < rxq->nb_rx_desc; j++) {
rc = qede_alloc_rx_buffer(rxq);
int hwfn_index;
int rc;
- if (tx_queue_id < eth_dev->data->nb_tx_queues) {
- txq = eth_dev->data->tx_queues[tx_queue_id];
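+ /* Likewise tx_queue_id indexes HW queues, so fetch txq from fp_array */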
+ if (tx_queue_id < qdev->num_tx_queues) {
fp = &qdev->fp_array[tx_queue_id];
+ txq = fp->txq;
memset(&params, 0, sizeof(params));
params.queue_id = tx_queue_id / edev->num_hwfns;
params.vport_id = 0;
int hwfn_index;
int rc;
- if (tx_queue_id < eth_dev->data->nb_tx_queues) {
- txq = eth_dev->data->tx_queues[tx_queue_id];
+ if (tx_queue_id < qdev->num_tx_queues) {
+ txq = qdev->fp_array[tx_queue_id].txq;
/* Drain txq */
if (qede_drain_txq(qdev, txq, true))
return -1; /* For the lack of retcodes */
uint8_t id;
int rc = -1;
- for_each_rss(id) {
+ for (id = 0; id < qdev->num_rx_queues; id++) {
rc = qede_rx_queue_start(eth_dev, id);
if (rc != ECORE_SUCCESS)
return -1;
}
- for_each_tss(id) {
+ for (id = 0; id < qdev->num_tx_queues; id++) {
rc = qede_tx_queue_start(eth_dev, id);
if (rc != ECORE_SUCCESS)
return -1;
uint8_t id;
/* Stopping RX/TX queues */
- for_each_tss(id) {
+ for (id = 0; id < qdev->num_tx_queues; id++)
qede_tx_queue_stop(eth_dev, id);
- }
- for_each_rss(id) {
+ for (id = 0; id < qdev->num_rx_queues; id++)
qede_rx_queue_stop(eth_dev, id);
- }
}
static inline bool qede_tunn_exist(uint16_t flag)
return rx_pkt;
}
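+/* CMT Rx burst handler: poll the Rx queue of each engine for roughly half of
+ * the requested burst and return the combined packet count.
+ */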
+uint16_t
+qede_recv_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct qede_fastpath_cmt *fp_cmt = p_fp_cmt;
+ uint16_t eng0_pkts, eng1_pkts;
+
+ eng0_pkts = nb_pkts / 2;
+
+ eng0_pkts = qede_recv_pkts(fp_cmt->fp0->rxq, rx_pkts, eng0_pkts);
+
+ eng1_pkts = nb_pkts - eng0_pkts;
+
+ eng1_pkts = qede_recv_pkts(fp_cmt->fp1->rxq, rx_pkts + eng0_pkts,
+ eng1_pkts);
+
+ return eng0_pkts + eng1_pkts;
+}
/* Populate scatter gather buffer descriptor fields */
static inline uint16_t
return nb_pkt_sent;
}
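+/* CMT Tx burst handler: split the burst roughly in half across the Tx queues
+ * of the two engines and return the combined packet count.
+ */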
+uint16_t
+qede_xmit_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct qede_fastpath_cmt *fp_cmt = p_fp_cmt;
+ uint16_t eng0_pkts, eng1_pkts;
+
+ eng0_pkts = nb_pkts / 2;
+
+ eng0_pkts = qede_xmit_pkts(fp_cmt->fp0->txq, tx_pkts, eng0_pkts);
+
+ eng1_pkts = nb_pkts - eng0_pkts;
+
+ eng1_pkts = qede_xmit_pkts(fp_cmt->fp1->txq, tx_pkts + eng0_pkts,
+ eng1_pkts);
+
+ return eng0_pkts + eng1_pkts;
+}
+
uint16_t
qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
__rte_unused struct rte_mbuf **pkts,
ETH_RSS_VXLAN |\
ETH_RSS_GENEVE)
-#define for_each_rss(i) for (i = 0; i < qdev->num_rx_queues; i++)
-#define for_each_tss(i) for (i = 0; i < qdev->num_tx_queues; i++)
#define QEDE_RXTX_MAX(qdev) \
- (RTE_MAX(QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev)))
+ (RTE_MAX(qdev->num_rx_queues, qdev->num_tx_queues))
/* Macros for non-tunnel packet types lkup table */
#define QEDE_PKT_TYPE_UNKNOWN 0x0
* Structure associated with each RX queue.
*/
struct qede_rx_queue {
+ /* Always keep qdev as first member */
+ struct qede_dev *qdev;
struct rte_mempool *mb_pool;
struct ecore_chain rx_bd_ring;
struct ecore_chain rx_comp_ring;
uint64_t rx_hw_errors;
uint64_t rx_alloc_errors;
struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
- struct qede_dev *qdev;
void *handle;
};
};
struct qede_tx_queue {
+ /* Always keep qdev as first member */
+ struct qede_dev *qdev;
struct ecore_chain tx_pbl;
struct qede_tx_entry *sw_tx_ring;
uint16_t nb_tx_desc;
uint16_t port_id;
uint64_t xmit_pkts;
bool is_legacy;
- struct qede_dev *qdev;
void *handle;
};
struct qede_tx_queue *txq;
};
+/* This structure holds the information of fast path queues
+ * belonging to individual engines in CMT mode.
+ */
+struct qede_fastpath_cmt {
+ /* Always keep this as the first member */
+ struct qede_dev *qdev;
+ /* fastpath info of engine 0 */
+ struct qede_fastpath *fp0;
+ /* fastpath info of engine 1 */
+ struct qede_fastpath *fp1;
+};
+
/*
* RX/TX function prototypes
*/
uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t qede_xmit_pkts_cmt(void *p_txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
uint16_t qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t qede_recv_pkts_cmt(void *p_rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
uint16_t qede_rxtx_pkts_dummy(void *p_rxq,
struct rte_mbuf **pkts,