DP_INFO(edev, "*********************************\n");
}
-static int
-qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
+static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
{
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_sp_vport_start_params params;
+ unsigned int i = 0, j = 0, qid;
+ unsigned int rxq_stat_cntrs, txq_stat_cntrs;
+ struct qede_tx_queue *txq;
+
+ DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");
+
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
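+ /* Clear each counter in place via its struct offset so the rest of
+ * the queue state (rings, indices) is left untouched.
+ */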
+ for_each_rss(qid) {
+ OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue, rcv_pkts), 0,
+ sizeof(uint64_t));
+ OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue, rx_hw_errors), 0,
+ sizeof(uint64_t));
+ OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
+ sizeof(uint64_t));
+
+ if (xstats)
+ for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++)
+ OSAL_MEMSET((((char *)
+ (qdev->fp_array[qid].rxq)) +
+ qede_rxq_xstats_strings[j].offset),
+ 0,
+ sizeof(uint64_t));
+
+ i++;
+ if (i == rxq_stat_cntrs)
+ break;
+ }
+
+ i = 0;
+
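+ /* Likewise, clear only the xmit_pkts counter of each Tx queue */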
+ for_each_tss(qid) {
+ txq = qdev->fp_array[qid].txq;
+
+ OSAL_MEMSET((uint64_t *)(uintptr_t)
+ (((uint64_t)(uintptr_t)(txq)) +
+ offsetof(struct qede_tx_queue, xmit_pkts)), 0,
+ sizeof(uint64_t));
+
+ i++;
+ if (i == txq_stat_cntrs)
+ break;
+ }
+}
+
+static int
+qede_stop_vport(struct ecore_dev *edev)
+{
struct ecore_hwfn *p_hwfn;
+ uint8_t vport_id;
int rc;
int i;
- memset(&params, 0, sizeof(params));
- params.vport_id = 0;
- params.mtu = mtu;
- /* @DPDK - Disable FW placement */
- params.zero_placement_offset = 1;
+ vport_id = 0;
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
- params.concrete_fid = p_hwfn->hw_info.concrete_fid;
- params.opaque_fid = p_hwfn->hw_info.opaque_fid;
- rc = ecore_sp_vport_start(p_hwfn, &params);
+ rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
+ vport_id);
if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Start V-PORT failed %d\n", rc);
+ DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
return rc;
}
}
- ecore_reset_vport_stats(edev);
- DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
+
+ DP_INFO(edev, "vport stopped\n");
return 0;
}
static int
-qede_stop_vport(struct ecore_dev *edev)
+qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
{
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_start_params params;
struct ecore_hwfn *p_hwfn;
- uint8_t vport_id;
int rc;
int i;
- vport_id = 0;
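+ /* If the vport is already up, stop it first so it can be restarted
+ * with the new parameters.
+ */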
+ if (qdev->vport_started)
+ qede_stop_vport(edev);
+
+ memset(&params, 0, sizeof(params));
+ params.vport_id = 0;
+ params.mtu = mtu;
+ /* @DPDK - Disable FW placement */
+ params.zero_placement_offset = 1;
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
- rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
- vport_id);
+ params.concrete_fid = p_hwfn->hw_info.concrete_fid;
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_start(p_hwfn, &params);
if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
+ DP_ERR(edev, "Start V-PORT failed %d\n", rc);
return rc;
}
}
+ ecore_reset_vport_stats(edev);
+ qdev->vport_started = true;
+ DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
return 0;
}
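+/* Devargs keys understood by this PMD for controlling tx-switching */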
+#define QEDE_NPAR_TX_SWITCHING "npar_tx_switching"
+#define QEDE_VF_TX_SWITCHING "vf_tx_switching"
+
/* Activate or deactivate vport via vport-update */
int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
{
params.vport_active_rx_flg = flg;
params.vport_active_tx_flg = flg;
if (!qdev->enable_tx_switching) {
- if (IS_VF(edev)) {
+ if ((QEDE_NPAR_TX_SWITCHING != NULL) ||
+ ((QEDE_VF_TX_SWITCHING != NULL) && IS_VF(edev))) {
params.update_tx_switching_flg = 1;
params.tx_switching_flg = !flg;
- DP_INFO(edev, "VF tx-switching is disabled\n");
+ DP_INFO(edev, "%s tx-switching is disabled\n",
+ QEDE_NPAR_TX_SWITCHING ? "NPAR" : "VF");
}
}
for_each_hwfn(edev, i) {
}
}
qdev->enable_lro = flg;
+ eth_dev->data->lro = flg;
+
DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
return 0;
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
- p_ptt = IS_PF(edev) ? ecore_ptt_acquire(p_hwfn) : NULL;
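+ /* Only PFs own PTT windows; for VFs a NULL PTT is passed down */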
+ if (IS_PF(edev)) {
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Can't acquire PTT\n");
+ return -EAGAIN;
+ }
+ } else {
+ p_ptt = NULL;
+ }
+
rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
tunn_info, ECORE_SPQ_MODE_CB, NULL);
if (IS_PF(edev))
return rc;
}
+static int
+qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ bool enable)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_tunnel_info tunn;
+
+ memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+ tunn.ip_gre.b_update_mode = true;
+ tunn.ip_gre.b_mode_enabled = enable;
+ tunn.ip_gre.tun_cls = clss;
+ tunn.b_update_rx_cls = true;
+ tunn.b_update_tx_cls = true;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc == ECORE_SUCCESS) {
+ qdev->ipgre.enable = enable;
+ DP_INFO(edev, "IPGRE is %s\n",
+ enable ? "enabled" : "disabled");
+ } else {
+ DP_ERR(edev, "Failed to update tunn_clss %u\n",
+ clss);
+ }
+
+ return rc;
+}
+
static int
qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
enum rte_eth_tunnel_type tunn_type, bool enable)
case RTE_TUNNEL_TYPE_GENEVE:
rc = qede_geneve_enable(eth_dev, clss, enable);
break;
+ case RTE_TUNNEL_TYPE_IP_IN_GRE:
+ rc = qede_ipgre_enable(eth_dev, clss, enable);
+ break;
default:
rc = -EINVAL;
break;
ether_addr_copy(&eth_dev->data->mac_addrs[index],
(struct ether_addr *)&ucast.mac);
- ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
+ qede_mac_int_ops(eth_dev, &ucast, false);
}
-static void
+static int
qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
mac_addr->addr_bytes)) {
DP_ERR(edev, "Setting MAC address is not allowed\n");
- ether_addr_copy(&qdev->primary_mac,
- &eth_dev->data->mac_addrs[0]);
- return;
+ return -EPERM;
}
qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
+ return 0;
}
static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+ uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
if (mask & ETH_VLAN_STRIP_MASK) {
- if (rxmode->hw_vlan_strip)
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
(void)qede_vlan_stripping(eth_dev, 1);
else
(void)qede_vlan_stripping(eth_dev, 0);
if (mask & ETH_VLAN_FILTER_MASK) {
/* VLAN filtering kicks in when a VLAN is added */
- if (rxmode->hw_vlan_filter) {
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
qede_vlan_filter_set(eth_dev, 0, 1);
} else {
if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
/* Signal app that VLAN filtering is still
* enabled
*/
- rxmode->hw_vlan_filter = true;
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_VLAN_FILTER;
} else {
qede_vlan_filter_set(eth_dev, 0, 0);
}
}
if (mask & ETH_VLAN_EXTEND_MASK)
- DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
- " and classification is based on outer tag only\n");
+ DP_ERR(edev, "Extend VLAN not supported\n");
+
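+ /* Save the mask so the VLAN config can be re-applied after a vport
+ * restart.
+ */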
+ qdev->vlan_offload_mask = mask;
- DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
- mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
+ DP_INFO(edev, "VLAN offload mask %d\n", mask);
return 0;
}
static int qede_dev_start(struct rte_eth_dev *eth_dev)
{
- struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
PMD_INIT_FUNC_TRACE(edev);
- /* Update MTU only if it has changed */
- if (qdev->mtu != qdev->new_mtu) {
- if (qede_update_mtu(eth_dev, qdev->new_mtu))
- goto err;
- qdev->mtu = qdev->new_mtu;
- }
-
/* Configure TPA parameters */
- if (rxmode->enable_lro) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
if (qede_enable_tpa(eth_dev, true))
return -EINVAL;
/* Enable scatter mode for LRO */
- if (!rxmode->enable_scatter)
- eth_dev->data->scattered_rx = 1;
+ if (!eth_dev->data->scattered_rx)
+ rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
}
/* Start queues */
if (qede_start_queues(eth_dev))
goto err;
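+ /* Start from clean per-queue software counters on the PF */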
+ if (IS_PF(edev))
+ qede_reset_queue_stats(qdev, true);
+
/* Newer SR-IOV PF driver expects RX/TX queues to be started before
* enabling RSS. Hence RSS configuration is deferred up to this point.
* Also, we would like to retain similar behavior in the PF case, so we
* don't do PF/VF specific check here.
*/
- if (rxmode->mq_mode == ETH_MQ_RX_RSS)
+ if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
if (qede_config_rss(eth_dev))
goto err;
if (qede_activate_vport(eth_dev, true))
goto err;
- /* Bring-up the link */
- qede_dev_set_link_state(eth_dev, true);
-
/* Update link status */
qede_link_update(eth_dev, 0);
/* Disable traffic */
ecore_hw_stop_fastpath(edev); /* TBD - loop */
- /* Bring the link down */
- qede_dev_set_link_state(eth_dev, false);
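+ /* PF: remove the primary MAC entry while the port is stopped */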
+ if (IS_PF(edev))
+ qede_mac_addr_remove(eth_dev, 0);
DP_INFO(edev, "Device is stopped\n");
}
-#define QEDE_TX_SWITCHING "vf_txswitch"
-
const char *valid_args[] = {
- QEDE_TX_SWITCHING,
+ QEDE_NPAR_TX_SWITCHING,
+ QEDE_VF_TX_SWITCHING,
NULL,
};
return errno;
}
- if (strcmp(QEDE_TX_SWITCHING, key) == 0)
+ if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) ||
+ (strcmp(QEDE_VF_TX_SWITCHING, key) == 0))
qdev->enable_tx_switching = !!tmp;
return ret;
/* Check requirements for 100G mode */
if (ECORE_IS_CMT(edev)) {
if (eth_dev->data->nb_rx_queues < 2 ||
- eth_dev->data->nb_tx_queues < 2) {
+ eth_dev->data->nb_tx_queues < 2) {
DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
return -EINVAL;
}
if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
- (eth_dev->data->nb_tx_queues % 2 != 0)) {
+ (eth_dev->data->nb_tx_queues % 2 != 0)) {
DP_ERR(edev,
- "100G mode needs even no. of RX/TX queues\n");
+ "100G mode needs even no. of RX/TX queues\n");
return -EINVAL;
}
}
if (qede_args(eth_dev))
return -ENOTSUP;
- /* Sanity checks and throw warnings */
- if (rxmode->enable_scatter)
- eth_dev->data->scattered_rx = 1;
-
- if (!rxmode->hw_strip_crc)
- DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
-
- if (!rxmode->hw_ip_checksum)
- DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
- "in hw\n");
- if (rxmode->header_split)
- DP_INFO(edev, "Header split enable is not supported\n");
- if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode ==
- ETH_MQ_RX_RSS)) {
+ if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
+ rxmode->mq_mode == ETH_MQ_RX_RSS)) {
DP_ERR(edev, "Unsupported multi-queue mode\n");
return -ENOTSUP;
}
if (qede_check_fdir_support(eth_dev))
return -ENOTSUP;
- /* Deallocate resources if held previously. It is needed only if the
- * queue count has been changed from previous configuration. If its
- * going to change then it means RX/TX queue setup will be called
- * again and the fastpath pointers will be reinitialized there.
- */
- if (qdev->num_tx_queues != eth_dev->data->nb_tx_queues ||
- qdev->num_rx_queues != eth_dev->data->nb_rx_queues) {
- qede_dealloc_fp_resc(eth_dev);
- /* Proceed with updated queue count */
- qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
- qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
- if (qede_alloc_fp_resc(qdev))
- return -ENOMEM;
- }
+ qede_dealloc_fp_resc(eth_dev);
+ qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
+ qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
+ if (qede_alloc_fp_resc(qdev))
+ return -ENOMEM;
- /* VF's MTU has to be set using vport-start where as
- * PF's MTU can be updated via vport-update.
- */
- if (IS_VF(edev)) {
- if (qede_start_vport(qdev, rxmode->max_rx_pkt_len))
- return -1;
- } else {
- if (qede_update_mtu(eth_dev, rxmode->max_rx_pkt_len))
- return -1;
- }
+ /* If jumbo enabled adjust MTU */
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ eth_dev->data->mtu =
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+ ETHER_HDR_LEN - ETHER_CRC_LEN;
- qdev->mtu = rxmode->max_rx_pkt_len;
- qdev->new_mtu = qdev->mtu;
+ if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
+ eth_dev->data->scattered_rx = 1;
+
+ if (qede_start_vport(qdev, eth_dev->data->mtu))
+ return -1;
+
+ qdev->mtu = eth_dev->data->mtu;
/* Enable VLAN offloads by default */
ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
- ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK);
+ ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK);
if (ret)
return ret;
PMD_INIT_FUNC_TRACE(edev);
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
dev_info->rx_desc_lim = qede_rx_desc_lim;
dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
-
- dev_info->default_txconf = (struct rte_eth_txconf) {
- .txq_flags = QEDE_TXQ_FLAGS,
- };
-
- dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
+ dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_TCP_LRO);
-
+ DEV_RX_OFFLOAD_TCP_LRO |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_VLAN_STRIP);
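+ /* None of the RX offloads can be toggled on a per-queue basis */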
+ dev_info->rx_queue_offload_capa = 0;
+
+ /* TX offloads are set on a per-packet basis, so they are applicable
+ * at both the port and queue levels.
+ */
dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_QINQ_INSERT |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_TCP_TSO |
DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+ dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .txq_flags = DEV_TX_OFFLOAD_MULTI_SEGS,
+ };
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ /* Packets are always dropped if no descriptors are available */
+ .rx_drop_en = 1,
+ /* The below RX offloads are always enabled */
+ .offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM),
+ };
memset(&link, 0, sizeof(struct qed_link_output));
qdev->ops->common->get_link(edev, &link);
qede_dev_stop(eth_dev);
qede_stop_vport(edev);
+ qdev->vport_started = false;
qede_fdir_dealloc_resc(eth_dev);
qede_dealloc_fp_resc(eth_dev);
eth_dev->data->nb_rx_queues = 0;
eth_dev->data->nb_tx_queues = 0;
+ /* Bring the link down */
+ qede_dev_set_link_state(eth_dev, false);
qdev->ops->common->slowpath_stop(edev);
qdev->ops->common->remove(edev);
rte_intr_disable(&pci_dev->intr_handle);
struct ecore_dev *edev = &qdev->edev;
ecore_reset_vport_stats(edev);
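+ /* xstats == true: clear the per-queue extended stats as well */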
+ qede_reset_queue_stats(qdev, true);
}
int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
struct ecore_dev *edev = &qdev->edev;
ecore_reset_vport_stats(edev);
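+ /* xstats == false: leave the per-queue extended stats intact */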
+ qede_reset_queue_stats(qdev, false);
}
static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
RTE_PTYPE_TUNNEL_VXLAN,
RTE_PTYPE_L4_FRAG,
RTE_PTYPE_TUNNEL_GENEVE,
+ RTE_PTYPE_TUNNEL_GRE,
/* Inner */
RTE_PTYPE_INNER_L2_ETHER,
RTE_PTYPE_INNER_L2_ETHER_VLAN,
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct rte_eth_dev_info dev_info = {0};
struct qede_fastpath *fp;
+ uint32_t max_rx_pkt_len;
uint32_t frame_size;
uint16_t rx_buf_size;
uint16_t bufsz;
+ bool restart = false;
int i;
PMD_INIT_FUNC_TRACE(edev);
qede_dev_info_get(dev, &dev_info);
- frame_size = mtu + QEDE_ETH_OVERHEAD;
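+ /* max_rx_pkt_len is the full L2 frame (MTU + header + CRC) while
+ * frame_size adds the PMD's own per-buffer overhead on top.
+ */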
+ max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
- DP_ERR(edev, "MTU %u out of range\n", mtu);
+ DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
+ mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
+ ETHER_CRC_LEN - QEDE_ETH_OVERHEAD);
return -EINVAL;
}
if (!dev->data->scattered_rx &&
*/
dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
- qede_dev_stop(dev);
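+ /* Stop the port only if it is running, and remember to restart it
+ * once the new MTU has been applied.
+ */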
+ if (dev->data->dev_started) {
+ dev->data->dev_started = 0;
+ qede_dev_stop(dev);
+ restart = true;
+ } else {
+ if (IS_PF(edev))
+ qede_mac_addr_remove(dev, 0);
+ }
rte_delay_ms(1000);
+ qede_start_vport(qdev, mtu); /* Recreate vport */
qdev->mtu = mtu;
+
/* Fix up RX buf size for all queues of the port */
for_each_rss(i) {
fp = &qdev->fp_array[i];
- bufsz = (uint16_t)rte_pktmbuf_data_room_size(
- fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
- if (dev->data->scattered_rx)
- rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
- else
- rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
- rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
- fp->rxq->rx_buf_size = rx_buf_size;
- DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
- }
- qede_dev_start(dev);
- if (frame_size > ETHER_MAX_LEN)
+ if (fp->rxq != NULL) {
+ bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+ fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+ if (dev->data->scattered_rx)
+ rx_buf_size = bufsz + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
+ else
+ rx_buf_size = frame_size;
+ rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+ fp->rxq->rx_buf_size = rx_buf_size;
+ DP_INFO(edev, "RX buffer size %u\n", rx_buf_size);
+ }
+ }
+ if (max_rx_pkt_len > ETHER_MAX_LEN)
dev->data->dev_conf.rxmode.jumbo_frame = 1;
else
dev->data->dev_conf.rxmode.jumbo_frame = 0;
+
+ /* Restore config lost due to vport stop */
+ if (IS_PF(edev))
+ qede_mac_addr_set(dev, &qdev->primary_mac);
+
+ if (dev->data->promiscuous)
+ qede_promiscuous_enable(dev);
+ else
+ qede_promiscuous_disable(dev);
+
+ if (dev->data->all_multicast)
+ qede_allmulticast_enable(dev);
+ else
+ qede_allmulticast_disable(dev);
+
+ qede_vlan_offload_set(dev, qdev->vlan_offload_mask);
+
+ if (!dev->data->dev_started && restart) {
+ qede_dev_start(dev);
+ dev->data->dev_started = 1;
+ }
+
/* update max frame size */
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
/* Reassign back */
dev->rx_pkt_burst = qede_recv_pkts;
dev->tx_pkt_burst = qede_xmit_pkts;
ECORE_TUNN_CLSS_MAC_VLAN, false);
break;
-
case RTE_TUNNEL_TYPE_GENEVE:
if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
DP_ERR(edev, "UDP port %u doesn't exist\n",
qdev->vxlan.udp_port = udp_port;
break;
-
case RTE_TUNNEL_TYPE_GENEVE:
if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
DP_INFO(edev,
qdev->geneve.udp_port = udp_port;
break;
-
default:
return ECORE_INVAL;
}
qdev->geneve.filter_type = conf->filter_type;
}
- if (!qdev->vxlan.enable || !qdev->geneve.enable)
+ if (!qdev->vxlan.enable || !qdev->geneve.enable ||
+ !qdev->ipgre.enable)
return qede_tunn_enable(eth_dev, clss,
conf->tunnel_type,
true);
switch (filter_conf->tunnel_type) {
case RTE_TUNNEL_TYPE_VXLAN:
case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_TUNNEL_TYPE_IP_IN_GRE:
DP_INFO(edev,
"Packet steering to the specified Rx queue"
" is not supported with UDP tunneling");
return(qede_tunn_filter_config(eth_dev, filter_op,
filter_conf));
- /* Place holders for future tunneling support */
case RTE_TUNNEL_TYPE_TEREDO:
case RTE_TUNNEL_TYPE_NVGRE:
- case RTE_TUNNEL_TYPE_IP_IN_GRE:
case RTE_L2_TUNNEL_TYPE_E_TAG:
DP_ERR(edev, "Unsupported tunnel type %d\n",
filter_conf->tunnel_type);
do_once = false;
}
+ /* Bring-up the link */
+ qede_dev_set_link_state(eth_dev, true);
+
adapter->num_tx_queues = 0;
adapter->num_rx_queues = 0;
SLIST_INIT(&adapter->fdir_info.fdir_list_head);
SLIST_INIT(&adapter->vlan_list_head);
SLIST_INIT(&adapter->uc_list_head);
adapter->mtu = ETHER_MTU;
- adapter->new_mtu = ETHER_MTU;
- if (!is_vf) {
- if (qede_start_vport(adapter, adapter->mtu))
- return -1;
- } else {
- /* VF tunnel offloads is enabled by default in PF driver */
+ adapter->vport_started = false;
+
+ /* VF tunnel offloads is enabled by default in PF driver */
+ adapter->vxlan.num_filters = 0;
+ adapter->geneve.num_filters = 0;
+ adapter->ipgre.num_filters = 0;
+ if (is_vf) {
adapter->vxlan.enable = true;
- adapter->vxlan.num_filters = 0;
adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
ETH_TUNNEL_FILTER_IVLAN;
adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
adapter->geneve.enable = true;
- adapter->vxlan.num_filters = 0;
- adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
+ adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
+ ETH_TUNNEL_FILTER_IVLAN;
+ adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
+ adapter->ipgre.enable = true;
+ adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
ETH_TUNNEL_FILTER_IVLAN;
- adapter->vxlan.udp_port = QEDE_GENEVE_DEF_PORT;
+ } else {
+ adapter->vxlan.enable = false;
+ adapter->geneve.enable = false;
+ adapter->ipgre.enable = false;
}
DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
static void
qede_init_log(void)
{
- qede_logtype_init = rte_log_register("pmd.qede.init");
+ qede_logtype_init = rte_log_register("pmd.net.qede.init");
if (qede_logtype_init >= 0)
rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE);
- qede_logtype_driver = rte_log_register("pmd.qede.driver");
+ qede_logtype_driver = rte_log_register("pmd.net.qede.driver");
if (qede_logtype_driver >= 0)
rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE);
}