rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq->sw_rx_ring) {
- DP_NOTICE(edev, false,
- "Unable to alloc memory for sw_rx_ring on socket %u\n",
- socket_id);
+ DP_ERR(edev, "Memory allocation fails for sw_rx_ring on"
+ " socket %u\n", socket_id);
rte_free(rxq);
return -ENOMEM;
}
NULL);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(edev, false,
- "Unable to alloc memory for rxbd ring on socket %u\n",
- socket_id);
+ DP_ERR(edev, "Memory allocation fails for RX BD ring"
+ " on socket %u\n", socket_id);
rte_free(rxq->sw_rx_ring);
rte_free(rxq);
return -ENOMEM;
NULL);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(edev, false,
- "Unable to alloc memory for cqe ring on socket %u\n",
- socket_id);
+ DP_ERR(edev, "Memory allocation fails for RX CQE ring"
+ " on socket %u\n", socket_id);
qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
rte_free(rxq->sw_rx_ring);
rte_free(rxq);
uint16_t sb_id)
{
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct status_block *sb_virt;
+ struct status_block_e4 *sb_virt;
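+ /* status_block_e4 is the E4-chip layout of the status block in the
+  * newer FW headers (an assumption based on the _e4 suffix); every
+  * sizeof() and pointer use of the old generic struct switches with it.
+  */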
dma_addr_t sb_phys;
int rc;
sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys,
- sizeof(struct status_block));
+ sizeof(struct status_block_e4));
if (!sb_virt) {
DP_ERR(edev, "Status block allocation failed\n");
return -ENOMEM;
if (rc) {
DP_ERR(edev, "Status block initialization failed\n");
OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys,
- sizeof(struct status_block));
+ sizeof(struct status_block_e4));
return rc;
}
if (fp->sb_info) {
OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt,
fp->sb_info->sb_phys,
- sizeof(struct status_block));
+ sizeof(struct status_block_e4));
rte_free(fp->sb_info);
fp->sb_info = NULL;
}
ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
/* Prepare ramrod */
memset(&params, 0, sizeof(params));
- params.queue_id = rx_queue_id;
+ params.queue_id = rx_queue_id / edev->num_hwfns;
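+ /* The division above yields the per-hwfn relative queue index: in
+  * CMT (100G, two-hwfn) mode queues are interleaved across both
+  * engines, so the ecore layer expects the per-engine id.
+  */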
params.vport_id = 0;
- params.sb = fp->sb_info->igu_sb_id;
+ params.stats_id = params.vport_id;
+ params.p_sb = fp->sb_info;
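+ /* The queue-start params now carry a pointer to the whole sb_info
+  * rather than the raw IGU SB id; the id is still available for the
+  * log below via fp->sb_info->igu_sb_id.
+  */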
DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
fp->rxq->queue_id, fp->sb_info->igu_sb_id);
params.sb_idx = RX_PI;
txq = eth_dev->data->tx_queues[tx_queue_id];
fp = &qdev->fp_array[tx_queue_id];
memset(&params, 0, sizeof(params));
- params.queue_id = tx_queue_id;
+ params.queue_id = tx_queue_id / edev->num_hwfns;
params.vport_id = 0;
- params.sb = fp->sb_info->igu_sb_id;
+ params.stats_id = params.vport_id;
+ params.p_sb = fp->sb_info;
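+ /* Same per-hwfn queue index and SB-pointer handling as the RX path. */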
DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
fp->txq->queue_id, fp->sb_info->igu_sb_id);
params.sb_idx = TX_PI(0); /* tc = 0 */
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
uint8_t id;
- int rc;
+ int rc = -1;
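+ /* Pre-set rc to an error so the function fails cleanly if
+  * for_each_rss() makes no iterations and rc is never assigned.
+  */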
for_each_rss(id) {
rc = qede_rx_queue_start(eth_dev, id);
inner_l4_hdr_offset = (mbuf->l2_len -
MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2;
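+ /* Header sizes and offsets programmed into the BDs are in units of
+  * 16-bit words (note the _W suffix on the field masks below), hence
+  * the division by two.
+  */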
- /* TODO: There's no DPDK flag to request outer
- * L4 checksum offload, so we don't do it.
- * bd1_bd_flags_bf |=
- * ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
- * ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
- */
/* Inner L2 size and address type */
bd2_bf1 |= (inner_l2_hdr_size &
ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK) <<
}
/* Offload the IP checksum in the hardware */
- if (tx_ol_flags & PKT_TX_IP_CKSUM)
+ if (tx_ol_flags & PKT_TX_IP_CKSUM) {
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ /* There's no DPDK flag to request outer-L4 csum
+ * offload. But in the case of tunnel if inner L3 or L4
+ * csum offload is requested then we need to force
+ * recalculation of L4 tunnel header csum also.
+ */
+ if (tunn_flg) {
+ bd1_bd_flags_bf |=
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+ }
+ }
/* L4 checksum offload (tcp or udp) */
if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
(tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM))) {
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+ /* There's no DPDK flag to request outer-L4 csum
+ * offload. But in the case of tunnel if inner L3 or L4
+ * csum offload is requested then we need to force
+ * recalculation of L4 tunnel header csum also.
+ */
+ if (tunn_flg) {
+ bd1_bd_flags_bf |=
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+ }
}
/* Fill the entry in the SW ring and the BDs in the FW ring */