}
*phys = mz->phys_addr;
ecore_mz_mapping[ecore_mz_count++] = mz;
- DP_VERBOSE(p_dev, ECORE_MSG_PROBE,
- "size=%zu phys=0x%" PRIx64 " virt=%p on socket=%u\n",
- mz->len, mz->phys_addr, mz->addr, socket_id);
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "Allocated dma memory size=%zu phys=0x%lx"
+ " virt=%p core=%d\n",
+ mz->len, (unsigned long)mz->phys_addr, mz->addr, core_id);
return mz->addr;
}
}
*phys = mz->phys_addr;
ecore_mz_mapping[ecore_mz_count++] = mz;
- DP_VERBOSE(p_dev, ECORE_MSG_PROBE,
- "aligned memory size=%zu phys=0x%" PRIx64 " virt=%p core=%d\n",
- mz->len, mz->phys_addr, mz->addr, core_id);
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "Allocated aligned dma memory size=%zu phys=0x%lx"
+ " virt=%p core=%d\n",
+ mz->len, (unsigned long)mz->phys_addr, mz->addr, core_id);
return mz->addr;
}
/* Sanity checks and throw warnings */
if (rxmode->enable_scatter)
eth_dev->data->scattered_rx = 1;
+
if (!rxmode->hw_strip_crc)
DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
+
if (!rxmode->hw_ip_checksum)
DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
"in hw\n");
uint16_t filter_type;
int rc, i;
+ PMD_INIT_FUNC_TRACE(edev);
+
filter_type = conf->filter_type | qdev->vxlan_filter_type;
/* First determine if the given filter classification is supported */
qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
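+ /* qdev/edev are only used by PMD_INIT_FUNC_TRACE() below; declare them
+  * under the same #ifdef to avoid unused-variable warnings when init
+  * debugging is disabled.
+  */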
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+
+ PMD_INIT_FUNC_TRACE(edev);
+#endif
+
/* only uninitialize in the primary process */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
rxq->nb_rx_desc = nb_desc;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
+
max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
qdev->mtu = max_rx_pkt_len;
dev->data->scattered_rx = 1;
}
}
+
if (dev->data->scattered_rx)
rxq->rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
else
qede_free_tx_pkt(txq);
}
-
static int qede_drain_txq(struct qede_dev *qdev,
struct qede_tx_queue *txq, bool allow_drain)
{
return 0;
}
-
/* Stops a given TX queue in the HW */
static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
pkt_len;
if (unlikely(!cur_size)) {
PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
- " left for mapping jumbo", num_segs);
+ " left for mapping jumbo\n", num_segs);
qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
return -EINVAL;
}