/* ucast->assert_on_error = true; - For debug */
}
-static void qede_set_cmn_tunn_param(struct ecore_tunn_update_params *params,
- uint8_t clss, uint64_t mode, uint64_t mask)
+static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn,
+ uint8_t clss, bool mode, bool mask)
{
- memset(params, 0, sizeof(struct ecore_tunn_update_params));
- params->tunn_mode = mode;
- params->tunn_mode_update_mask = mask;
- params->update_tx_pf_clss = 1;
- params->update_rx_pf_clss = 1;
- params->tunn_clss_vxlan = clss;
+ memset(p_tunn, 0, sizeof(struct ecore_tunnel_info));
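+ /* b_update_mode/b_mode_enabled supersede tunn_mode/tunn_mode_update_mask */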
+ p_tunn->vxlan.b_update_mode = mode;
+ p_tunn->vxlan.b_mode_enabled = mask;
+ p_tunn->b_update_rx_cls = true;
+ p_tunn->b_update_tx_cls = true;
+ p_tunn->vxlan.tun_cls = clss;
}
static int
}
} else { /* Unicast */
if (add) {
- if (qdev->num_uc_addr >= qdev->dev_info.num_mac_addrs) {
+ if (qdev->num_uc_addr >=
+ qdev->dev_info.num_mac_filters) {
DP_ERR(edev,
"Ucast filter table limit exceeded,"
" Please enable promisc mode\n");
PMD_INIT_FUNC_TRACE(edev);
- if (index >= qdev->dev_info.num_mac_addrs) {
+ if (index >= qdev->dev_info.num_mac_filters) {
DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
- index, qdev->dev_info.num_mac_addrs);
+ index, qdev->dev_info.num_mac_filters);
return;
}
qede_vlan_filter_set(eth_dev, 0, 1);
} else {
if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
- DP_NOTICE(edev, false,
+ DP_ERR(edev,
" Please remove existing VLAN filters"
" before disabling VLAN filtering\n");
/* Signal app that VLAN filtering is still
if (on) {
if (qdev->configured_vlans == dev_info->num_vlan_filters) {
- DP_INFO(edev, "Reached max VLAN filter limit"
+ DP_ERR(edev, "Reached max VLAN filter limit"
" enabling accept_any_vlan\n");
qede_config_accept_any_vlan(qdev, true);
return 0;
int rc;
start.remove_inner_vlan = 1;
- start.gro_enable = 0;
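+ /* enable_lro mirrors rxmode->enable_lro saved in dev_configure */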
+ start.enable_lro = qdev->enable_lro;
start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
start.vport_id = 0;
start.drop_ttl0 = false;
if (edev->num_hwfns > 1) {
if (eth_dev->data->nb_rx_queues < 2 ||
eth_dev->data->nb_tx_queues < 2) {
- DP_NOTICE(edev, false,
- "100G mode needs min. 2 RX/TX queues\n");
+ DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
return -EINVAL;
}
if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
(eth_dev->data->nb_tx_queues % 2 != 0)) {
- DP_NOTICE(edev, false,
+ DP_ERR(edev,
"100G mode needs even no. of RX/TX queues\n");
return -EINVAL;
}
if (rxmode->enable_scatter == 1)
eth_dev->data->scattered_rx = 1;
- if (rxmode->enable_lro == 1) {
- DP_INFO(edev, "LRO is not supported\n");
- return -EINVAL;
- }
-
if (!rxmode->hw_strip_crc)
DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
"in hw\n");
+ if (rxmode->enable_lro) {
+ qdev->enable_lro = true;
+ /* Enable scatter mode for LRO */
+ if (!rxmode->enable_scatter)
+ eth_dev->data->scattered_rx = 1;
+ }
+
/* Check for the port restart case */
if (qdev->state != QEDE_DEV_INIT) {
rc = qdev->ops->vport_stop(edev, 0);
return -EINVAL;
}
+ /* Flow director mode check */
+ rc = qede_check_fdir_support(eth_dev);
+ if (rc) {
+ qdev->ops->vport_stop(edev, 0);
+ qede_dealloc_fp_resc(eth_dev);
+ return -EINVAL;
+ }
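+ /* fdir filters configured via RTE_ETH_FILTER_FDIR are tracked on this list */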
+ SLIST_INIT(&qdev->fdir_info.fdir_list_head);
+
SLIST_INIT(&qdev->vlan_list_head);
/* Add primary mac for PF */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
.nb_max = NUM_RX_BDS_MAX,
.nb_min = 128,
.nb_align = 128 /* lowest common multiple */
};
static const struct rte_eth_desc_lim qede_tx_desc_lim = {
.nb_max = NUM_TX_BDS_MAX,
.nb_min = 256,
- .nb_align = 256
+ .nb_align = 256,
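+ /* TSO: limit BDs per LSO and per non-LSO packet */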
+ .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
+ .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
};
static void
PMD_INIT_FUNC_TRACE(edev);
dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
- dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
- QEDE_ETH_OVERHEAD);
+ dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
dev_info->rx_desc_lim = qede_rx_desc_lim;
dev_info->tx_desc_lim = qede_tx_desc_lim;
QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
dev_info->max_tx_queues = dev_info->max_rx_queues;
- dev_info->max_mac_addrs = qdev->dev_info.num_mac_addrs;
+ dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
dev_info->max_vfs = 0;
dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM);
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_TCP_LRO);
+
dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO);
memset(&link, 0, sizeof(struct qed_link_output));
qdev->ops->common->get_link(edev, &link);
PMD_INIT_FUNC_TRACE(edev);
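+ /* Release flow director resources before tearing down fastpath */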
+ qede_fdir_dealloc_resc(eth_dev);
+
/* dev_stop() shall cleanup fp resources in hw but without releasing
* dma memories and sw structures so that dev_start() can be called
* by the app without reconfiguration. However, in dev_close() we
struct ecore_dev *edev = &qdev->edev;
struct ecore_eth_stats stats;
unsigned int i = 0, j = 0, qid;
+ unsigned int rxq_stat_cntrs, txq_stat_cntrs;
struct qede_tx_queue *txq;
qdev->ops->get_vport_stats(edev, &stats);
eth_stats->oerrors = stats.tx_err_drop_pkts;
/* Queue stats */
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ if ((rxq_stat_cntrs != QEDE_RSS_COUNT(qdev)) ||
+ (txq_stat_cntrs != QEDE_TSS_COUNT(qdev)))
+ DP_VERBOSE(edev, ECORE_MSG_DEBUG,
+ "Not all the queue stats will be displayed. Set"
+ " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
+ " appropriately and retry.\n");
+
for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
eth_stats->q_ipackets[i] =
rx_alloc_errors));
i++;
}
+ if (i == rxq_stat_cntrs)
+ break;
+ }
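+ /* Second pass for Tx queue stats, clamped to txq_stat_cntrs */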
+ for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
txq = qdev->fp_array[(qid)].txqs[0];
eth_stats->q_opackets[j] =
xmit_pkts)));
j++;
}
+ if (j == txq_stat_cntrs)
+ break;
}
}
static unsigned
qede_get_xstats_count(struct qede_dev *qdev) {
return RTE_DIM(qede_xstats_strings) +
- (RTE_DIM(qede_rxq_xstats_strings) * QEDE_RSS_COUNT(qdev));
+ (RTE_DIM(qede_rxq_xstats_strings) *
+ RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS));
}
static int
struct qede_dev *qdev = dev->data->dev_private;
const unsigned int stat_cnt = qede_get_xstats_count(qdev);
unsigned int i, qid, stat_idx = 0;
+ unsigned int rxq_stat_cntrs;
if (xstats_names != NULL) {
for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
stat_idx++;
}
- for (qid = 0; qid < QEDE_RSS_COUNT(qdev); qid++) {
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ for (qid = 0; qid < rxq_stat_cntrs; qid++) {
for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
snprintf(xstats_names[stat_idx].name,
sizeof(xstats_names[stat_idx].name),
struct ecore_eth_stats stats;
const unsigned int num = qede_get_xstats_count(qdev);
unsigned int i, qid, stat_idx = 0;
+ unsigned int rxq_stat_cntrs;
if (n < num)
return num;
stat_idx++;
}
- for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ for (qid = 0; qid < rxq_stat_cntrs; qid++) {
if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
xstats[stat_idx].value = *(uint64_t *)(
*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
+ *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
+ *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
}
static int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct ecore_sp_vport_update_params vport_update_params;
struct ecore_rss_params rss_params;
- struct ecore_rss_params params;
struct ecore_hwfn *p_hwfn;
uint32_t *key = (uint32_t *)rss_conf->rss_key;
uint64_t hf = rss_conf->rss_hf;
uint8_t len = rss_conf->rss_key_len;
+ uint8_t idx;
uint8_t i;
int rc;
/* tbl_size has to be set with capabilities */
rss_params.rss_table_size_log = 7;
vport_update_params.vport_id = 0;
+ /* pass the L2 handles instead of qids */
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
+ idx = qdev->rss_ind_table[i];
+ rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
+ }
vport_update_params.rss_params = &rss_params;
for_each_hwfn(edev, i) {
shift = i % RTE_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift)) {
entry = reta_conf[idx].reta[shift];
- params.rss_ind_table[i] = entry;
+ /* Pass rxq handles to ecore */
+ params.rss_ind_table[i] =
+ qdev->fp_array[entry].rxq->handle;
+ /* Update the local copy for RETA query command */
+ qdev->rss_ind_table[i] = entry;
}
}
/* Fix up RETA for CMT mode device */
if (edev->num_hwfns > 1)
qdev->rss_enable = qed_update_rss_parm_cmt(edev,
- &params.rss_ind_table[0]);
+ params.rss_ind_table[0]);
params.update_rss_ind_table = 1;
params.rss_table_size_log = 7;
params.update_rss_config = 1;
}
}
- /* Update the local copy for RETA query command */
- memcpy(qdev->rss_ind_table, params.rss_ind_table,
- sizeof(params.rss_ind_table));
-
return 0;
}
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_tunn_update_params params;
+ struct ecore_tunnel_info tunn; /* @DPDK */
struct ecore_hwfn *p_hwfn;
int rc, i;
PMD_INIT_FUNC_TRACE(edev);
- memset(&params, 0, sizeof(params));
+ memset(&tunn, 0, sizeof(tunn));
if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
- params.update_vxlan_udp_port = 1;
- params.vxlan_udp_port = (add) ? tunnel_udp->udp_port :
- QEDE_VXLAN_DEF_PORT;
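+ /* On delete, revert to the default VXLAN UDP port */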
+ tunn.vxlan_port.b_update_port = true;
+ tunn.vxlan_port.port = (add) ? tunnel_udp->udp_port :
+ QEDE_VXLAN_DEF_PORT;
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
- rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &params,
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
ECORE_SPQ_MODE_CB, NULL);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev, "Unable to config UDP port %u\n",
- params.vxlan_udp_port);
+ tunn.vxlan_port.port);
return rc;
}
}
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_tunn_update_params params;
+ struct ecore_tunnel_info tunn;
struct ecore_hwfn *p_hwfn;
enum ecore_filter_ucast_type type;
enum ecore_tunn_clss clss;
qdev->vxlan_filter_type = filter_type;
DP_INFO(edev, "Enabling VXLAN tunneling\n");
- qede_set_cmn_tunn_param(&params, clss,
- (1 << ECORE_MODE_VXLAN_TUNN),
- (1 << ECORE_MODE_VXLAN_TUNN));
+ qede_set_cmn_tunn_param(&tunn, clss, true, true);
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
rc = ecore_sp_pf_update_tunn_cfg(p_hwfn,
- &params, ECORE_SPQ_MODE_CB, NULL);
+ &tunn, ECORE_SPQ_MODE_CB, NULL);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev, "Failed to update tunn_clss %u\n",
- params.tunn_clss_vxlan);
+ tunn.vxlan.tun_cls);
}
}
qdev->num_tunn_filters++; /* Filter added successfully */
DP_INFO(edev, "Disabling VXLAN tunneling\n");
/* Use 0 as tunnel mode */
- qede_set_cmn_tunn_param(&params, clss, 0,
- (1 << ECORE_MODE_VXLAN_TUNN));
+ qede_set_cmn_tunn_param(&tunn, clss, false, true);
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
- rc = ecore_sp_pf_update_tunn_cfg(p_hwfn,
- &params, ECORE_SPQ_MODE_CB, NULL);
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ ECORE_SPQ_MODE_CB, NULL);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev,
"Failed to update tunn_clss %u\n",
- params.tunn_clss_vxlan);
+ tunn.vxlan.tun_cls);
break;
}
}
}
break;
case RTE_ETH_FILTER_FDIR:
+ return qede_fdir_filter_conf(eth_dev, filter_op, arg);
+ case RTE_ETH_FILTER_NTUPLE:
+ return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
case RTE_ETH_FILTER_MACVLAN:
case RTE_ETH_FILTER_ETHERTYPE:
case RTE_ETH_FILTER_FLEXIBLE:
case RTE_ETH_FILTER_SYN:
- case RTE_ETH_FILTER_NTUPLE:
case RTE_ETH_FILTER_HASH:
case RTE_ETH_FILTER_L2_TUNNEL:
case RTE_ETH_FILTER_MAX:
memset(&pf_params, 0, sizeof(struct ecore_pf_params));
pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
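+ /* Reserve aRFS filter resources (used by flow director) */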
+ pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
qed_ops->common->update_pf_params(edev, &pf_params);
}
eth_dev->rx_pkt_burst = qede_recv_pkts;
eth_dev->tx_pkt_burst = qede_xmit_pkts;
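+ /* Tx prepare handler enforces offload constraints (e.g. TSO) */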
+ eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
DP_NOTICE(edev, false,
qede_alloc_etherdev(adapter, &dev_info);
- adapter->ops->common->set_id(edev, edev->name, QEDE_PMD_VERSION);
+ adapter->ops->common->set_name(edev, edev->name);
if (!is_vf)
- adapter->dev_info.num_mac_addrs =
+ adapter->dev_info.num_mac_filters =
(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
ECORE_MAC);
else
ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
- &adapter->dev_info.num_mac_addrs);
+ (uint32_t *)&adapter->dev_info.num_mac_filters);
/* Allocate memory for storing MAC addr */
eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
(ETHER_ADDR_LEN *
- adapter->dev_info.num_mac_addrs),
+ adapter->dev_info.num_mac_filters),
RTE_CACHE_LINE_SIZE);
if (eth_dev->data->mac_addrs == NULL) {
return qede_dev_common_uninit(eth_dev);
}
-static struct rte_pci_id pci_id_qedevf_map[] = {
+static const struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
{
QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
{.vendor_id = 0,}
};
-static struct rte_pci_id pci_id_qede_map[] = {
+static const struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
{
QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
{
QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
},
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
+ },
{
QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
},