offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
{"rx_mac_filter_discards",
offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
+ {"rx_gft_filter_drop",
+ offsetof(struct ecore_eth_stats_common, gft_filter_drop)},
{"rx_hw_buffer_truncates",
offsetof(struct ecore_eth_stats_common, brb_truncates)},
{"rx_hw_buffer_discards",
if (status & 0x1) {
qede_interrupt_action(ECORE_LEADING_HWFN(edev));
- if (rte_intr_enable(eth_dev->intr_handle))
- DP_ERR(edev, "rte_intr_enable failed\n");
+ if (rte_intr_ack(eth_dev->intr_handle))
+ DP_ERR(edev, "rte_intr_ack failed\n");
}
}
struct ecore_dev *edev = &qdev->edev;
qede_interrupt_action(ECORE_LEADING_HWFN(edev));
- if (rte_intr_enable(eth_dev->intr_handle))
- DP_ERR(edev, "rte_intr_enable failed\n");
+ if (rte_intr_ack(eth_dev->intr_handle))
+ DP_ERR(edev, "rte_intr_ack failed\n");
}
static void
static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
unsigned int i = 0, j = 0, qid;
unsigned int rxq_stat_cntrs, txq_stat_cntrs;
DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");
- rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(dev),
RTE_ETHDEV_QUEUE_STAT_CNTRS);
- txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+ txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(dev),
RTE_ETHDEV_QUEUE_STAT_CNTRS);
- for_each_rss(qid) {
+ for (qid = 0; qid < qdev->num_rx_queues; qid++) {
OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
offsetof(struct qede_rx_queue, rcv_pkts), 0,
sizeof(uint64_t));
i = 0;
- for_each_tss(qid) {
+ for (qid = 0; qid < qdev->num_tx_queues; qid++) {
txq = qdev->fp_array[qid].txq;
OSAL_MEMSET((uint64_t *)(uintptr_t)
if (add) {
SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
if ((memcmp(mac_addr, &tmp->mac,
- ETHER_ADDR_LEN) == 0) &&
+ RTE_ETHER_ADDR_LEN) == 0) &&
ucast->vni == tmp->vni &&
ucast->vlan == tmp->vlan) {
DP_INFO(edev, "Unicast MAC is already added"
DP_ERR(edev, "Did not allocate memory for ucast\n");
return -ENOMEM;
}
- ether_addr_copy(mac_addr, &u->mac);
+ rte_ether_addr_copy(mac_addr, &u->mac);
u->vlan = ucast->vlan;
u->vni = ucast->vni;
SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
} else {
SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
if ((memcmp(mac_addr, &tmp->mac,
- ETHER_ADDR_LEN) == 0) &&
+ RTE_ETHER_ADDR_LEN) == 0) &&
ucast->vlan == tmp->vlan &&
ucast->vni == tmp->vni)
break;
DP_ERR(edev, "Did not allocate memory for mcast\n");
return -ENOMEM;
}
- ether_addr_copy(&mc_addrs[i], &m->mac);
+ rte_ether_addr_copy(&mc_addrs[i], &m->mac);
SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
}
memset(&mcast, 0, sizeof(mcast));
mcast.num_mc_addrs = mc_addrs_num;
mcast.opcode = ECORE_FILTER_ADD;
for (i = 0; i < mc_addrs_num; i++)
- ether_addr_copy(&mc_addrs[i], (struct rte_ether_addr *)
+ rte_ether_addr_copy(&mc_addrs[i], (struct rte_ether_addr *)
&mcast.mac[i]);
rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
if (rc != ECORE_SUCCESS) {
mcast.opcode = ECORE_FILTER_REMOVE;
j = 0;
SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
- ether_addr_copy(&tmp->mac,
+ rte_ether_addr_copy(&tmp->mac,
(struct rte_ether_addr *)&mcast.mac[j]);
j++;
}
struct ecore_filter_ucast ucast;
int re;
- if (!is_valid_assigned_ether_addr(mac_addr))
+ if (!rte_is_valid_assigned_ether_addr(mac_addr))
return -EINVAL;
qede_set_ucast_cmn_params(&ucast);
ucast.opcode = ECORE_FILTER_ADD;
ucast.type = ECORE_FILTER_MAC;
- ether_addr_copy(mac_addr, (struct rte_ether_addr *)&ucast.mac);
+ rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)&ucast.mac);
re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
return re;
}
return;
}
- if (!is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
+ if (!rte_is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
return;
qede_set_ucast_cmn_params(&ucast);
ucast.type = ECORE_FILTER_MAC;
/* Use the index maintained by rte */
- ether_addr_copy(&eth_dev->data->mac_addrs[index],
+ rte_ether_addr_copy(&eth_dev->data->mac_addrs[index],
(struct rte_ether_addr *)&ucast.mac);
qede_mac_int_ops(eth_dev, &ucast, false);
for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
id = i / RTE_RETA_GROUP_SIZE;
pos = i % RTE_RETA_GROUP_SIZE;
- q = i % QEDE_RSS_COUNT(qdev);
+ q = i % QEDE_RSS_COUNT(eth_dev);
reta_conf[id].reta[pos] = q;
}
if (qede_rss_reta_update(eth_dev, &reta_conf[0],
PMD_INIT_FUNC_TRACE(edev);
- /* Check requirements for 100G mode */
- if (ECORE_IS_CMT(edev)) {
- if (eth_dev->data->nb_rx_queues < 2 ||
- eth_dev->data->nb_tx_queues < 2) {
- DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
- return -EINVAL;
- }
-
- if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
- (eth_dev->data->nb_tx_queues % 2 != 0)) {
- DP_ERR(edev,
- "100G mode needs even no. of RX/TX queues\n");
- return -EINVAL;
- }
- }
-
/* We need to have min 1 RX queue. There is no min check in
* rte_eth_dev_configure(), so we are checking it here.
*/
return -ENOTSUP;
qede_dealloc_fp_resc(eth_dev);
- qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
- qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
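+ /* In CMT mode each ethdev-visible queue is backed by one HW queue
+ * per hwfn, so scale the internal queue counts by num_hwfns.
+ */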
+ qdev->num_tx_queues = eth_dev->data->nb_tx_queues * edev->num_hwfns;
+ qdev->num_rx_queues = eth_dev->data->nb_rx_queues * edev->num_hwfns;
+
if (qede_alloc_fp_resc(qdev))
return -ENOMEM;
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
eth_dev->data->mtu =
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
- ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
+ RTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
eth_dev->data->scattered_rx = 1;
return ret;
DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
- QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
+ QEDE_RSS_COUNT(eth_dev), QEDE_TSS_COUNT(eth_dev));
+
+ if (ECORE_IS_CMT(edev))
+ DP_INFO(edev, "Actual HW queues for CMT mode - RX = %d TX = %d\n",
+ qdev->num_rx_queues, qdev->num_tx_queues);
+
return 0;
}
.nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
};
-static void
+static int
qede_dev_info_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *dev_info)
{
else
dev_info->max_rx_queues = (uint16_t)RTE_MIN(
QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
+ /* Since CMT mode internally doubles the number of queues */
+ if (ECORE_IS_CMT(edev))
+ dev_info->max_rx_queues = dev_info->max_rx_queues / 2;
+
dev_info->max_tx_queues = dev_info->max_rx_queues;
dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
speed_cap |= ETH_LINK_SPEED_100G;
dev_info->speed_capa = speed_cap;
+
+ return 0;
}
/* return 0 means link status changed, -1 means not changed */
return rte_eth_linkstatus_set(eth_dev, &link);
}
-static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
+static int qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
+ enum _ecore_status_t ecore_status;
PMD_INIT_FUNC_TRACE(edev);
if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
- qed_configure_filter_rx_mode(eth_dev, type);
+ ecore_status = qed_configure_filter_rx_mode(eth_dev, type);
+
+ return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
}
-static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
+static int qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
+ enum _ecore_status_t ecore_status;
PMD_INIT_FUNC_TRACE(edev);
if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
- qed_configure_filter_rx_mode(eth_dev,
+ ecore_status = qed_configure_filter_rx_mode(eth_dev,
QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
else
- qed_configure_filter_rx_mode(eth_dev,
+ ecore_status = qed_configure_filter_rx_mode(eth_dev,
QED_FILTER_RX_MODE_TYPE_REGULAR);
+
+ return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
}
static void qede_poll_sp_sb_cb(void *param)
if (rc != 0) {
DP_ERR(edev, "Unable to start periodic"
" timer rc %d\n", rc);
- assert(false && "Unable to start periodic timer");
}
}
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
struct ecore_eth_stats stats;
- unsigned int i = 0, j = 0, qid;
+ unsigned int i = 0, j = 0, qid, idx, hw_fn;
unsigned int rxq_stat_cntrs, txq_stat_cntrs;
struct qede_tx_queue *txq;
eth_stats->oerrors = stats.common.tx_err_drop_pkts;
/* Queue stats */
- rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(eth_dev),
RTE_ETHDEV_QUEUE_STAT_CNTRS);
- txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+ txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(eth_dev),
RTE_ETHDEV_QUEUE_STAT_CNTRS);
- if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
- (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
+ if (rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(eth_dev) ||
+ txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(eth_dev))
DP_VERBOSE(edev, ECORE_MSG_DEBUG,
"Not all the queue stats will be displayed. Set"
" RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
" appropriately and retry.\n");
- for_each_rss(qid) {
- eth_stats->q_ipackets[i] =
- *(uint64_t *)(
- ((char *)(qdev->fp_array[qid].rxq)) +
- offsetof(struct qede_rx_queue,
- rcv_pkts));
- eth_stats->q_errors[i] =
- *(uint64_t *)(
- ((char *)(qdev->fp_array[qid].rxq)) +
- offsetof(struct qede_rx_queue,
- rx_hw_errors)) +
- *(uint64_t *)(
- ((char *)(qdev->fp_array[qid].rxq)) +
- offsetof(struct qede_rx_queue,
- rx_alloc_errors));
+ for (qid = 0; qid < eth_dev->data->nb_rx_queues; qid++) {
+ eth_stats->q_ipackets[i] = 0;
+ eth_stats->q_errors[i] = 0;
+
+ for_each_hwfn(edev, hw_fn) {
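+ /* fp_array interleaves the per-hwfn queues: entries at
+ * qid * num_hwfns + hw_fn all serve ethdev queue 'qid', so their
+ * counters are summed into a single per-queue stat.
+ */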
+ idx = qid * edev->num_hwfns + hw_fn;
+
+ eth_stats->q_ipackets[i] +=
+ *(uint64_t *)
+ (((char *)(qdev->fp_array[idx].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rcv_pkts));
+ eth_stats->q_errors[i] +=
+ *(uint64_t *)
+ (((char *)(qdev->fp_array[idx].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rx_hw_errors)) +
+ *(uint64_t *)
+ (((char *)(qdev->fp_array[idx].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rx_alloc_errors));
+ }
+
i++;
if (i == rxq_stat_cntrs)
break;
}
- for_each_tss(qid) {
- txq = qdev->fp_array[qid].txq;
- eth_stats->q_opackets[j] =
- *((uint64_t *)(uintptr_t)
- (((uint64_t)(uintptr_t)(txq)) +
- offsetof(struct qede_tx_queue,
- xmit_pkts)));
+ for (qid = 0; qid < eth_dev->data->nb_tx_queues; qid++) {
+ eth_stats->q_opackets[j] = 0;
+
+ for_each_hwfn(edev, hw_fn) {
+ idx = qid * edev->num_hwfns + hw_fn;
+
+ txq = qdev->fp_array[idx].txq;
+ eth_stats->q_opackets[j] +=
+ *((uint64_t *)(uintptr_t)
+ (((uint64_t)(uintptr_t)(txq)) +
+ offsetof(struct qede_tx_queue,
+ xmit_pkts)));
+ }
+
j++;
if (j == txq_stat_cntrs)
break;
static unsigned
qede_get_xstats_count(struct qede_dev *qdev) {
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
+
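+ /* On BB devices (which may run in CMT mode) each ethdev Rx queue is
+ * backed by one queue per hwfn, so per-queue xstats are counted once
+ * per hwfn.
+ */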
if (ECORE_IS_BB(&qdev->edev))
return RTE_DIM(qede_xstats_strings) +
RTE_DIM(qede_bb_xstats_strings) +
(RTE_DIM(qede_rxq_xstats_strings) *
- RTE_MIN(QEDE_RSS_COUNT(qdev),
- RTE_ETHDEV_QUEUE_STAT_CNTRS));
+ QEDE_RSS_COUNT(dev) * qdev->edev.num_hwfns);
else
return RTE_DIM(qede_xstats_strings) +
RTE_DIM(qede_ah_xstats_strings) +
(RTE_DIM(qede_rxq_xstats_strings) *
- RTE_MIN(QEDE_RSS_COUNT(qdev),
- RTE_ETHDEV_QUEUE_STAT_CNTRS));
+ QEDE_RSS_COUNT(dev));
}
static int
struct qede_dev *qdev = dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
const unsigned int stat_cnt = qede_get_xstats_count(qdev);
- unsigned int i, qid, stat_idx = 0;
- unsigned int rxq_stat_cntrs;
+ unsigned int i, qid, hw_fn, stat_idx = 0;
+
+ if (xstats_names == NULL)
+ return stat_cnt;
- if (xstats_names != NULL) {
- for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
+ for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
+ strlcpy(xstats_names[stat_idx].name,
+ qede_xstats_strings[i].name,
+ sizeof(xstats_names[stat_idx].name));
+ stat_idx++;
+ }
+
+ if (ECORE_IS_BB(edev)) {
+ for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
strlcpy(xstats_names[stat_idx].name,
- qede_xstats_strings[i].name,
+ qede_bb_xstats_strings[i].name,
sizeof(xstats_names[stat_idx].name));
stat_idx++;
}
-
- if (ECORE_IS_BB(edev)) {
- for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
- strlcpy(xstats_names[stat_idx].name,
- qede_bb_xstats_strings[i].name,
- sizeof(xstats_names[stat_idx].name));
- stat_idx++;
- }
- } else {
- for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
- strlcpy(xstats_names[stat_idx].name,
- qede_ah_xstats_strings[i].name,
- sizeof(xstats_names[stat_idx].name));
- stat_idx++;
- }
+ } else {
+ for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
+ strlcpy(xstats_names[stat_idx].name,
+ qede_ah_xstats_strings[i].name,
+ sizeof(xstats_names[stat_idx].name));
+ stat_idx++;
}
+ }
- rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
- RTE_ETHDEV_QUEUE_STAT_CNTRS);
- for (qid = 0; qid < rxq_stat_cntrs; qid++) {
+ for (qid = 0; qid < QEDE_RSS_COUNT(dev); qid++) {
+ for_each_hwfn(edev, hw_fn) {
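+ /* Queue xstat names now embed both the engine and the queue: the
+ * 4-char prefix is followed by "<hwfn>.<qid>" and then the rest of
+ * the base name.
+ */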
for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
snprintf(xstats_names[stat_idx].name,
- sizeof(xstats_names[stat_idx].name),
- "%.4s%d%s",
- qede_rxq_xstats_strings[i].name, qid,
- qede_rxq_xstats_strings[i].name + 4);
+ RTE_ETH_XSTATS_NAME_SIZE,
+ "%.4s%d.%d%s",
+ qede_rxq_xstats_strings[i].name,
+ hw_fn, qid,
+ qede_rxq_xstats_strings[i].name + 4);
stat_idx++;
}
}
struct ecore_dev *edev = &qdev->edev;
struct ecore_eth_stats stats;
const unsigned int num = qede_get_xstats_count(qdev);
- unsigned int i, qid, stat_idx = 0;
- unsigned int rxq_stat_cntrs;
+ unsigned int i, qid, hw_fn, fpidx, stat_idx = 0;
if (n < num)
return num;
}
}
- rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
- RTE_ETHDEV_QUEUE_STAT_CNTRS);
- for (qid = 0; qid < rxq_stat_cntrs; qid++) {
- for_each_rss(qid) {
+ for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
+ for_each_hwfn(edev, hw_fn) {
for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
- xstats[stat_idx].value = *(uint64_t *)(
- ((char *)(qdev->fp_array[qid].rxq)) +
+ fpidx = qid * edev->num_hwfns + hw_fn;
+ xstats[stat_idx].value = *(uint64_t *)
+ (((char *)(qdev->fp_array[fpidx].rxq)) +
qede_rxq_xstats_strings[i].offset);
xstats[stat_idx].id = stat_idx;
stat_idx++;
}
+
}
}
}
for (i = 0; i < mc_addrs_num; i++) {
- if (!is_multicast_ether_addr(&mc_addrs[i])) {
+ if (!rte_is_multicast_ether_addr(&mc_addrs[i])) {
DP_ERR(edev, "Not a valid multicast MAC\n");
return -EINVAL;
}
RTE_PTYPE_UNKNOWN
};
- if (eth_dev->rx_pkt_burst == qede_recv_pkts)
+ if (eth_dev->rx_pkt_burst == qede_recv_pkts ||
+ eth_dev->rx_pkt_burst == qede_recv_pkts_cmt)
return ptypes;
return NULL;
uint32_t *key = (uint32_t *)rss_conf->rss_key;
uint64_t hf = rss_conf->rss_hf;
uint8_t len = rss_conf->rss_key_len;
- uint8_t idx;
- uint8_t i;
+ uint8_t idx, i, j, fpidx;
int rc;
memset(&vport_update_params, 0, sizeof(vport_update_params));
/* tbl_size has to be set with capabilities */
rss_params.rss_table_size_log = 7;
vport_update_params.vport_id = 0;
- /* pass the L2 handles instead of qids */
- for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
- idx = i % QEDE_RSS_COUNT(qdev);
- rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
- }
- vport_update_params.rss_params = &rss_params;
for_each_hwfn(edev, i) {
+ /* pass the L2 handles instead of qids */
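+ /* Build a separate table per engine: entry j points at this
+ * engine's fastpath slot (idx * num_hwfns + i) for Rx queue 'idx'.
+ */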
+ for (j = 0 ; j < ECORE_RSS_IND_TABLE_SIZE ; j++) {
+ idx = j % QEDE_RSS_COUNT(eth_dev);
+ fpidx = idx * edev->num_hwfns + i;
+ rss_params.rss_ind_table[j] =
+ qdev->fp_array[fpidx].rxq->handle;
+ }
+
+ vport_update_params.rss_params = &rss_params;
+
p_hwfn = &edev->hwfns[i];
vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
return 0;
}
-static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
- struct ecore_rss_params *rss)
-{
- int i, fn;
- bool rss_mode = 1; /* enable */
- struct ecore_queue_cid *cid;
- struct ecore_rss_params *t_rss;
-
- /* In regular scenario, we'd simply need to take input handlers.
- * But in CMT, we'd have to split the handlers according to the
- * engine they were configured on. We'd then have to understand
- * whether RSS is really required, since 2-queues on CMT doesn't
- * require RSS.
- */
-
- /* CMT should be round-robin */
- for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
- cid = rss->rss_ind_table[i];
-
- if (cid->p_owner == ECORE_LEADING_HWFN(edev))
- t_rss = &rss[0];
- else
- t_rss = &rss[1];
-
- t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
- }
-
- t_rss = &rss[1];
- t_rss->update_rss_ind_table = 1;
- t_rss->rss_table_size_log = 7;
- t_rss->update_rss_config = 1;
-
- /* Make sure RSS is actually required */
- for_each_hwfn(edev, fn) {
- for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
- i++) {
- if (rss[fn].rss_ind_table[i] !=
- rss[fn].rss_ind_table[0])
- break;
- }
-
- if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
- DP_INFO(edev,
- "CMT - 1 queue per-hwfn; Disabling RSS\n");
- rss_mode = 0;
- goto out;
- }
- }
-
-out:
- t_rss->rss_enable = rss_mode;
-
- return rss_mode;
-}
-
int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct ecore_sp_vport_update_params vport_update_params;
struct ecore_rss_params *params;
+ uint16_t i, j, idx, fid, shift;
struct ecore_hwfn *p_hwfn;
- uint16_t i, idx, shift;
uint8_t entry;
int rc = 0;
}
memset(&vport_update_params, 0, sizeof(vport_update_params));
- params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
- RTE_CACHE_LINE_SIZE);
+ params = rte_zmalloc("qede_rss", sizeof(*params), RTE_CACHE_LINE_SIZE);
if (params == NULL) {
DP_ERR(edev, "failed to allocate memory\n");
return -ENOMEM;
}
- for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
- if (reta_conf[idx].mask & (1ULL << shift)) {
- entry = reta_conf[idx].reta[shift];
- /* Pass rxq handles to ecore */
- params->rss_ind_table[i] =
- qdev->fp_array[entry].rxq->handle;
- /* Update the local copy for RETA query command */
- qdev->rss_ind_table[i] = entry;
- }
- }
-
params->update_rss_ind_table = 1;
params->rss_table_size_log = 7;
params->update_rss_config = 1;
- /* Fix up RETA for CMT mode device */
- if (ECORE_IS_CMT(edev))
- qdev->rss_enable = qede_update_rss_parm_cmt(edev,
- params);
vport_update_params.vport_id = 0;
/* Use the current value of rss_enable */
params->rss_enable = qdev->rss_enable;
vport_update_params.rss_params = params;
for_each_hwfn(edev, i) {
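+ /* Each RETA entry names an ethdev queue; map it to this engine's
+ * fastpath slot (entry * num_hwfns + i) before passing the rxq
+ * handle to ecore.
+ */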
+ for (j = 0; j < reta_size; j++) {
+ idx = j / RTE_RETA_GROUP_SIZE;
+ shift = j % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift)) {
+ entry = reta_conf[idx].reta[shift];
+ fid = entry * edev->num_hwfns + i;
+ /* Pass rxq handles to ecore */
+ params->rss_ind_table[j] =
+ qdev->fp_array[fid].rxq->handle;
+ /* Update the local copy for RETA query cmd */
+ qdev->rss_ind_table[j] = entry;
+ }
+ }
+
p_hwfn = &edev->hwfns[i];
vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
int i, rc;
PMD_INIT_FUNC_TRACE(edev);
- qede_dev_info_get(dev, &dev_info);
+ rc = qede_dev_info_get(dev, &dev_info);
+ if (rc != 0) {
+ DP_ERR(edev, "Error during getting ethernet device info\n");
+ return rc;
+ }
max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
frame_size = max_rx_pkt_len;
- if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) {
DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
- mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
+ mtu, dev_info.max_rx_pktlen - RTE_ETHER_HDR_LEN -
QEDE_ETH_OVERHEAD);
return -EINVAL;
}
qdev->mtu = mtu;
/* Fix up RX buf size for all queues of the port */
- for_each_rss(i) {
+ for (i = 0; i < qdev->num_rx_queues; i++) {
fp = &qdev->fp_array[i];
if (fp->rxq != NULL) {
bufsz = (uint16_t)rte_pktmbuf_data_room_size(
fp->rxq->rx_buf_size = rc;
}
}
- if (max_rx_pkt_len > ETHER_MAX_LEN)
+ if (max_rx_pkt_len > RTE_ETHER_MAX_LEN)
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
/* update max frame size */
dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
/* Reassign back */
- dev->rx_pkt_burst = qede_recv_pkts;
- dev->tx_pkt_burst = qede_xmit_pkts;
-
+ if (ECORE_IS_CMT(edev)) {
+ dev->rx_pkt_burst = qede_recv_pkts_cmt;
+ dev->tx_pkt_burst = qede_xmit_pkts_cmt;
+ } else {
+ dev->rx_pkt_burst = qede_recv_pkts;
+ dev->tx_pkt_burst = qede_xmit_pkts;
+ }
return 0;
}
struct qed_slowpath_params params;
static bool do_once = true;
uint8_t bulletin_change;
- uint8_t vf_mac[ETHER_ADDR_LEN];
+ uint8_t vf_mac[RTE_ETHER_ADDR_LEN];
uint8_t is_mac_forced;
bool is_mac_exist;
/* Fix up ecore debug level */
pci_addr.bus, pci_addr.devid, pci_addr.function,
eth_dev->data->port_id);
- eth_dev->rx_pkt_burst = qede_recv_pkts;
- eth_dev->tx_pkt_burst = qede_xmit_pkts;
- eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
-
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
DP_ERR(edev, "Skipping device init from secondary process\n");
return 0;
strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
QEDE_PMD_DRV_VER_STR_SIZE);
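+ /* CMT devices get CMT-specific burst handlers, since each ethdev
+ * queue maps to one queue on each hwfn.
+ */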
+ if (ECORE_IS_CMT(edev)) {
+ eth_dev->rx_pkt_burst = qede_recv_pkts_cmt;
+ eth_dev->tx_pkt_burst = qede_xmit_pkts_cmt;
+ } else {
+ eth_dev->rx_pkt_burst = qede_recv_pkts;
+ eth_dev->tx_pkt_burst = qede_xmit_pkts;
+ }
+
+ eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
+
/* For CMT mode device do periodic polling for slowpath events.
* This is required since uio device uses only one MSI-x
* interrupt vector but we need one for each engine.
/* Allocate memory for storing MAC addr */
eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
- (ETHER_ADDR_LEN *
+ (RTE_ETHER_ADDR_LEN *
adapter->dev_info.num_mac_filters),
RTE_CACHE_LINE_SIZE);
}
if (!is_vf) {
- ether_addr_copy((struct rte_ether_addr *)edev->hwfns[0].
+ rte_ether_addr_copy((struct rte_ether_addr *)edev->hwfns[0].
hw_info.hw_mac_addr,
&eth_dev->data->mac_addrs[0]);
- ether_addr_copy(&eth_dev->data->mac_addrs[0],
+ rte_ether_addr_copy(&eth_dev->data->mac_addrs[0],
&adapter->primary_mac);
} else {
ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
&is_mac_forced);
if (is_mac_exist) {
DP_INFO(edev, "VF macaddr received from PF\n");
- ether_addr_copy(
+ rte_ether_addr_copy(
(struct rte_ether_addr *)&vf_mac,
&eth_dev->data->mac_addrs[0]);
- ether_addr_copy(&eth_dev->data->mac_addrs[0],
- &adapter->primary_mac);
+ rte_ether_addr_copy(
+ &eth_dev->data->mac_addrs[0],
+ &adapter->primary_mac);
} else {
DP_ERR(edev, "No VF macaddr assigned\n");
}
SLIST_INIT(&adapter->vlan_list_head);
SLIST_INIT(&adapter->uc_list_head);
SLIST_INIT(&adapter->mc_list_head);
- adapter->mtu = ETHER_MTU;
+ adapter->mtu = RTE_ETHER_MTU;
adapter->vport_started = false;
/* VF tunnel offloads is enabled by default in PF driver */
static struct rte_pci_driver rte_qedevf_pmd = {
.id_table = pci_id_qedevf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
- RTE_PCI_DRV_IOVA_AS_VA,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
.probe = qedevf_eth_dev_pci_probe,
.remove = qedevf_eth_dev_pci_remove,
};
static struct rte_pci_driver rte_qede_pmd = {
.id_table = pci_id_qede_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
- RTE_PCI_DRV_IOVA_AS_VA,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
.probe = qede_eth_dev_pci_probe,
.remove = qede_eth_dev_pci_remove,
};