(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- ((uint64_t)((mb)->buf_physaddr + \
- (uint64_t)((char *)((mb)->data) - \
- (char *)(mb)->buf_addr)))
+ ((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
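The new form works because the mbuf rework stores the payload offset directly in data_off instead of caching a data pointer. A minimal sketch of the equivalence, using a simplified stand-in struct rather than the real rte_mbuf layout:

    #include <stdint.h>

    /* Simplified stand-in for struct rte_mbuf; only the fields the macro
     * touches, with an illustrative (not the real) layout. */
    struct mbuf_sketch {
        void *buf_addr;         /* virtual address of the buffer */
        uint64_t buf_physaddr;  /* physical address of the buffer */
        uint16_t data_off;      /* payload offset from buf_addr */
    };

    /* Old form: derive the offset from two virtual addresses. */
    static uint64_t
    data_dma_addr_old(const struct mbuf_sketch *mb, const char *data)
    {
        return mb->buf_physaddr +
               (uint64_t)(data - (const char *)mb->buf_addr);
    }

    /* New form: the offset is already stored, no pointer arithmetic.
     * Equal to the old form whenever data == buf_addr + data_off. */
    static uint64_t
    data_dma_addr_new(const struct mbuf_sketch *mb)
    {
        return mb->buf_physaddr + mb->data_off;
    }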
static const struct rte_memzone *
i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
uint16_t nb_pkts);
/* Translate the rx descriptor status to pkt flags */
-static inline uint16_t
+static inline uint64_t
i40e_rxd_status_to_pkt_flags(uint64_t qword)
{
- uint16_t flags;
+ uint64_t flags;
/* Check if VLAN packet */
- flags = (uint16_t)(qword & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
- PKT_RX_VLAN_PKT : 0);
+ flags = qword & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
+ PKT_RX_VLAN_PKT : 0;
/* Check if RSS_HASH */
- flags |= (uint16_t)((((qword >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
+ flags |= (((qword >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
I40E_RX_DESC_FLTSTAT_RSS_HASH) ==
- I40E_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0);
+ I40E_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
return flags;
}
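The return type widens because ol_flags grew from 16 to 64 bits in the mbuf rework, so flag values may live above bit 15 and the old uint16_t casts would silently truncate them. A toy demonstration of the truncation hazard, with a hypothetical flag value (FLAG_HIGH is not a real PKT_RX_* constant):

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
        /* Hypothetical flag above bit 15; a 64-bit ol_flags carries it,
         * a uint16_t intermediate does not. */
        const uint64_t FLAG_HIGH = 1ULL << 40;

        uint64_t wide = FLAG_HIGH;             /* value preserved */
        uint16_t narrow = (uint16_t)FLAG_HIGH; /* silently truncated to 0 */

        assert(wide == FLAG_HIGH);
        assert(narrow == 0);
        return 0;
    }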
-static inline uint16_t
+static inline uint64_t
i40e_rxd_error_to_pkt_flags(uint64_t qword)
{
- uint16_t flags = 0;
+ uint64_t flags = 0;
uint64_t error_bits = (qword >> I40E_RXD_QW1_ERROR_SHIFT);
#define I40E_RX_ERR_BITS 0x3f
}
/* Translate pkt types to pkt flags */
-static inline uint16_t
+static inline uint64_t
i40e_rxd_ptype_to_pkt_flags(uint64_t qword)
{
uint8_t ptype = (uint8_t)((qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT);
- static const uint16_t ip_ptype_map[I40E_MAX_PKT_TYPE] = {
+ static const uint64_t ip_ptype_map[I40E_MAX_PKT_TYPE] = {
0, /* PTYPE 0 */
0, /* PTYPE 1 */
0, /* PTYPE 2 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 56 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 57 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 58 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 59 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 60 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 61 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 59 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 60 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 61 */
0, /* PTYPE 62 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 63 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 64 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 65 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 66 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 67 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 68 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 63 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 64 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 65 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 66 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 67 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 68 */
0, /* PTYPE 69 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 70 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 71 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 72 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 73 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 74 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 75 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 76 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 70 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 71 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 72 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 73 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 74 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 75 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 76 */
0, /* PTYPE 77 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 78 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 79 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 80 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 81 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 82 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 83 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 78 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 79 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 80 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 81 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 82 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 83 */
0, /* PTYPE 84 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 85 */
- PKT_RX_IPV4_HDR_EXT, /* PTYPE 86 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 85 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 86 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 87 */
PKT_RX_IPV6_HDR, /* PTYPE 88 */
PKT_RX_IPV6_HDR, /* PTYPE 89 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 122 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 123 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 124 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 125 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 126 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 127 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 125 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 126 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 127 */
0, /* PTYPE 128 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 129 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 130 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 131 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 132 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 133 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 134 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 129 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 130 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 131 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 132 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 133 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 134 */
0, /* PTYPE 135 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 136 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 137 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 138 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 139 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 140 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 141 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 142 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 136 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 137 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 138 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 139 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 140 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 141 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 142 */
0, /* PTYPE 143 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 144 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 145 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 146 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 147 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 148 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 149 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 144 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 145 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 146 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 147 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 148 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 149 */
0, /* PTYPE 150 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 151 */
- PKT_RX_IPV6_HDR_EXT, /* PTYPE 152 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 151 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 152 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 153 */
0, /* PTYPE 154 */
0, /* PTYPE 155 */
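For context: the function body (not shown in this hunk) resolves the flags with a single array index on the 8-bit hardware ptype, so the whole translation is a branch-free table lookup. A minimal sketch of that pattern with toy flag values (FLAG_IPV4 and FLAG_TUNNEL_IPV4 are illustrative, not the real PKT_RX_* constants):

    #include <stdint.h>

    #define MAX_PKT_TYPE 256        /* the hardware ptype field is 8 bits */

    /* Toy flag values; the real code uses the PKT_RX_* constants. */
    #define FLAG_IPV4        (1ULL << 0)
    #define FLAG_TUNNEL_IPV4 (1ULL << 1)

    static const uint64_t toy_ptype_map[MAX_PKT_TYPE] = {
        [1]  = FLAG_IPV4,           /* hypothetical entries */
        [59] = FLAG_TUNNEL_IPV4,
        /* unmapped ptypes stay 0: no flags reported */
    };

    static uint64_t
    ptype_to_flags(uint8_t ptype)
    {
        return toy_ptype_map[ptype];  /* O(1), branch-free lookup */
    }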
uint8_t l3_len)
{
if (!l2_len) {
- PMD_DRV_LOG(DEBUG, "L2 length set to 0\n");
+ PMD_DRV_LOG(DEBUG, "L2 length set to 0");
return;
}
*td_offset |= (l2_len >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
if (!l3_len) {
- PMD_DRV_LOG(DEBUG, "L3 length set to 0\n");
+ PMD_DRV_LOG(DEBUG, "L3 length set to 0");
return;
}
int ret = 0;
#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
- if (!(rxq->rx_free_thresh >= RTE_PMD_I40E_RX_MAX_BURST))
+ if (!(rxq->rx_free_thresh >= RTE_PMD_I40E_RX_MAX_BURST)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->rx_free_thresh=%d, "
+ "RTE_PMD_I40E_RX_MAX_BURST=%d",
+ rxq->rx_free_thresh, RTE_PMD_I40E_RX_MAX_BURST);
ret = -EINVAL;
- else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc))
+ } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->rx_free_thresh=%d, "
+ "rxq->nb_rx_desc=%d",
+ rxq->rx_free_thresh, rxq->nb_rx_desc);
ret = -EINVAL;
- else if (!(rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)
+ } else if ((rxq->nb_rx_desc % rxq->rx_free_thresh) != 0) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->nb_rx_desc=%d, "
+ "rxq->rx_free_thresh=%d",
+ rxq->nb_rx_desc, rxq->rx_free_thresh);
ret = -EINVAL;
- else if (!(rxq->nb_rx_desc < (I40E_MAX_RING_DESC -
- RTE_PMD_I40E_RX_MAX_BURST)))
+ } else if (!(rxq->nb_rx_desc < (I40E_MAX_RING_DESC -
+ RTE_PMD_I40E_RX_MAX_BURST))) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->nb_rx_desc=%d, "
+ "I40E_MAX_RING_DESC=%d, "
+ "RTE_PMD_I40E_RX_MAX_BURST=%d",
+ rxq->nb_rx_desc, I40E_MAX_RING_DESC,
+ RTE_PMD_I40E_RX_MAX_BURST);
ret = -EINVAL;
+ }
#else
ret = -EINVAL;
#endif
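The four preconditions amount to one predicate over the queue configuration. A sketch, with RX_MAX_BURST and MAX_RING_DESC standing in for the real RTE_PMD_I40E_RX_MAX_BURST and I40E_MAX_RING_DESC (the values here are assumptions for illustration):

    #include <stdbool.h>
    #include <stdint.h>

    #define RX_MAX_BURST  32
    #define MAX_RING_DESC 4096

    static bool
    rx_bulk_alloc_allowed(uint16_t rx_free_thresh, uint16_t nb_rx_desc)
    {
        return rx_free_thresh >= RX_MAX_BURST &&   /* enough per refill */
               rx_free_thresh < nb_rx_desc &&      /* fits in the ring */
               nb_rx_desc % rx_free_thresh == 0 && /* whole refill steps */
               nb_rx_desc < MAX_RING_DESC - RX_MAX_BURST; /* look-ahead room */
    }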
uint32_t rx_status;
int32_t s[I40E_LOOK_AHEAD], nb_dd;
int32_t i, j, nb_rx = 0;
- uint16_t pkt_flags;
+ uint64_t pkt_flags;
rxdp = &rxq->rx_ring[rxq->rx_tail];
rxep = &rxq->sw_ring[rxq->rx_tail];
I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
mb->data_len = pkt_len;
mb->pkt_len = pkt_len;
- mb->vlan_macip.f.vlan_tci = rx_status &
+ mb->vlan_tci = rx_status &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
rte_le_to_cpu_16(\
rxdp[j].wb.qword0.lo_dword.l2tag1) : 0;
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
mb->ol_flags = pkt_flags;
+
+ mb->packet_type = (uint16_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT);
if (pkt_flags & PKT_RX_RSS_HASH)
mb->hash.rss = rte_le_to_cpu_32(\
rxdp->wb.qword0.hi_dword.rss);
diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
rxq->rx_free_thresh);
if (unlikely(diag != 0)) {
- PMD_DRV_LOG(ERR, "Failed to get mbufs in bulk\n");
+ PMD_DRV_LOG(ERR, "Failed to get mbufs in bulk");
return -ENOMEM;
}
mb = rxep[i].mbuf;
rte_mbuf_refcnt_set(mb, 1);
mb->next = NULL;
- mb->data = (char *)mb->buf_addr + RTE_PKTMBUF_HEADROOM;
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
mb->nb_segs = 1;
mb->port = rxq->port_id;
dma_addr = rte_cpu_to_le_64(\
uint16_t i, j;
PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
- "port_id=%u, queue_id=%u\n",
- rxq->port_id, rxq->queue_id);
+ "port_id=%u, queue_id=%u",
+ rxq->port_id, rxq->queue_id);
rxq->rx_nb_avail = 0;
rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
uint16_t rx_packet_len;
uint16_t rx_id, nb_hold;
uint64_t dma_addr;
- uint16_t pkt_flags;
+ uint64_t pkt_flags;
nb_rx = 0;
nb_hold = 0;
rx_packet_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
- rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
- rte_prefetch0(rxm->data);
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
rxm->nb_segs = 1;
rxm->next = NULL;
rxm->pkt_len = rx_packet_len;
rxm->data_len = rx_packet_len;
rxm->port = rxq->port_id;
- rxm->vlan_macip.f.vlan_tci = rx_status &
+ rxm->vlan_tci = rx_status &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
+ rxm->packet_type = (uint16_t)((qword1 & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT);
rxm->ol_flags = pkt_flags;
if (pkt_flags & PKT_RX_RSS_HASH)
rxm->hash.rss =
struct rte_mbuf *last_seg = rxq->pkt_last_seg;
struct rte_mbuf *nmb, *rxm;
uint16_t rx_id = rxq->rx_tail;
- uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len, pkt_flags;
+ uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
uint32_t rx_status;
uint64_t qword1;
uint64_t dma_addr;
+ uint64_t pkt_flags;
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
rx_packet_len = (qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
rxm->data_len = rx_packet_len;
- rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
/**
* If this is the first buffer of the received packet, set the
}
first_seg->port = rxq->port_id;
- first_seg->vlan_macip.f.vlan_tci = (rx_status &
+ first_seg->vlan_tci = (rx_status &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
+ first_seg->packet_type = (uint16_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT);
first_seg->ol_flags = pkt_flags;
if (pkt_flags & PKT_RX_RSS_HASH)
rxm->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
/* Prefetch data of first segment, if configured to do so. */
- rte_prefetch0(first_seg->data);
+ rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
+ first_seg->data_off));
rx_pkts[nb_rx++] = first_seg;
first_seg = NULL;
}
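The prefetch now has to compute the payload address from buf_addr and data_off, since the cached data pointer is gone. RTE_PTR_ADD is plain byte-wise pointer arithmetic; a minimal equivalent sketch:

    #include <stdint.h>

    /* Minimal equivalent of RTE_PTR_ADD(ptr, x): advance a pointer by
     * x bytes regardless of its pointee type. */
    static inline void *
    ptr_add(void *ptr, uintptr_t x)
    {
        return (void *)((uintptr_t)ptr + x);
    }

With that, ptr_add(first_seg->buf_addr, first_seg->data_off) reaches the same byte the removed first_seg->data pointer used to reference.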
/* Check if the context descriptor is needed for TX offloading */
static inline uint16_t
-i40e_calc_context_desc(uint16_t flags)
+i40e_calc_context_desc(uint64_t flags)
{
uint16_t mask = 0;
uint32_t td_offset;
uint32_t tx_flags;
uint32_t td_tag;
- uint16_t ol_flags;
+ uint64_t ol_flags;
uint8_t l2_len;
uint8_t l3_len;
uint16_t nb_used;
RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
ol_flags = tx_pkt->ol_flags;
- l2_len = tx_pkt->vlan_macip.f.l2_len;
- l3_len = tx_pkt->vlan_macip.f.l3_len;
+ l2_len = tx_pkt->l2_len;
+ l3_len = tx_pkt->l3_len;
/* Calculate the number of context descriptors needed. */
nb_ctx = i40e_calc_context_desc(ol_flags);
/* Descriptor based VLAN insertion */
if (ol_flags & PKT_TX_VLAN_PKT) {
- tx_flags |= tx_pkt->vlan_macip.f.vlan_tci <<
- I40E_TX_FLAG_L2TAG1_SHIFT;
+ tx_flags |= tx_pkt->vlan_tci <<
+ I40E_TX_FLAG_L2TAG1_SHIFT;
tx_flags |= I40E_TX_FLAG_INSERT_VLAN;
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
td_tag = (tx_flags & I40E_TX_FLAG_L2TAG1_MASK) >>
err = i40e_alloc_rx_queue_mbufs(rxq);
if (err) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf\n");
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
return err;
}
err = i40e_switch_rx_queue(hw, rx_queue_id + q_base, TRUE);
if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on\n",
- rx_queue_id);
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ rx_queue_id);
i40e_rx_queue_release_mbufs(rxq);
i40e_reset_rx_queue(rxq);
err = i40e_switch_rx_queue(hw, rx_queue_id + q_base, FALSE);
if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off\n",
- rx_queue_id);
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
return err;
}
i40e_rx_queue_release_mbufs(rxq);
if (tx_queue_id < dev->data->nb_tx_queues) {
err = i40e_switch_tx_queue(hw, tx_queue_id + q_base, TRUE);
if (err)
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on\n",
- tx_queue_id);
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+ tx_queue_id);
}
return err;
err = i40e_switch_tx_queue(hw, tx_queue_id + q_base, FALSE);
if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of\n",
- tx_queue_id);
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of",
+ tx_queue_id);
return err;
}
if (!vsi || queue_idx >= vsi->nb_qps) {
PMD_DRV_LOG(ERR, "VSI not available or queue "
- "index exceeds the maximum\n");
+ "index exceeds the maximum");
return I40E_ERR_PARAM;
}
if (((nb_desc * sizeof(union i40e_rx_desc)) % I40E_ALIGN) != 0 ||
(nb_desc > I40E_MAX_RING_DESC) ||
(nb_desc < I40E_MIN_RING_DESC)) {
PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is "
- "invalid\n", nb_desc);
+ "invalid", nb_desc);
return I40E_ERR_PARAM;
}
socket_id);
if (!rxq) {
PMD_DRV_LOG(ERR, "Failed to allocate memory for "
- "rx queue data structure\n");
+ "rx queue data structure");
return (-ENOMEM);
}
rxq->mp = mp;
0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
rxq->vsi = vsi;
- rxq->start_rx_per_q = rx_conf->start_rx_per_q;
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
/* Allocate the maximum number of RX ring hardware descriptors. */
ring_size = sizeof(union i40e_rx_desc) * I40E_MAX_RING_DESC;
socket_id);
if (!rz) {
i40e_dev_rx_queue_release(rxq);
- PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX\n");
+ PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
return (-ENOMEM);
}
socket_id);
if (!rxq->sw_ring) {
i40e_dev_rx_queue_release(rxq);
- PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring\n");
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring");
return (-ENOMEM);
}
if (!use_def_burst_func && !dev->data->scattered_rx) {
#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
- "satisfied. Rx Burst Bulk Alloc function will be "
- "used on port=%d, queue=%d.\n",
- rxq->port_id, rxq->queue_id);
+ "satisfied. Rx Burst Bulk Alloc function will be "
+ "used on port=%d, queue=%d.",
+ rxq->port_id, rxq->queue_id);
dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
} else {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
- "not satisfied, Scattered Rx is requested, "
- "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
- "not enabled on port=%d, queue=%d.\n",
- rxq->port_id, rxq->queue_id);
+ "not satisfied, Scattered Rx is requested, "
+ "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
+ "not enabled on port=%d, queue=%d.",
+ rxq->port_id, rxq->queue_id);
}
return 0;
struct i40e_rx_queue *q = (struct i40e_rx_queue *)rxq;
if (!q) {
- PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL\n");
+ PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
return;
}
uint16_t desc = 0;
if (unlikely(rx_queue_id >= dev->data->nb_rx_queues)) {
- PMD_DRV_LOG(ERR, "Invalid RX queue id %u\n", rx_queue_id);
+ PMD_DRV_LOG(ERR, "Invalid RX queue id %u", rx_queue_id);
return 0;
}
int ret;
if (unlikely(offset >= rxq->nb_rx_desc)) {
- PMD_DRV_LOG(ERR, "Invalid RX queue id %u\n", offset);
+ PMD_DRV_LOG(ERR, "Invalid RX queue id %u", offset);
return 0;
}
if (!vsi || queue_idx >= vsi->nb_qps) {
PMD_DRV_LOG(ERR, "VSI is NULL, or queue index (%u) "
- "exceeds the maximum\n", queue_idx);
+ "exceeds the maximum", queue_idx);
return I40E_ERR_PARAM;
}
(nb_desc > I40E_MAX_RING_DESC) ||
(nb_desc < I40E_MIN_RING_DESC)) {
PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is "
- "invalid\n", nb_desc);
+ "invalid", nb_desc);
return I40E_ERR_PARAM;
}
tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
if (tx_rs_thresh >= (nb_desc - 2)) {
- RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than the "
- "number of TX descriptors minus 2. "
- "(tx_rs_thresh=%u port=%d queue=%d)\n",
- (unsigned int)tx_rs_thresh,
- (int)dev->data->port_id,
- (int)queue_idx);
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
+ "number of TX descriptors minus 2. "
+ "(tx_rs_thresh=%u port=%d queue=%d)",
+ (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
return I40E_ERR_PARAM;
}
if (tx_free_thresh >= (nb_desc - 3)) {
- RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than the "
- "tx_free_thresh must be less than the "
- "number of TX descriptors minus 3. "
- "(tx_free_thresh=%u port=%d queue=%d)\n",
- (unsigned int)tx_free_thresh,
- (int)dev->data->port_id,
- (int)queue_idx);
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
+ "tx_free_thresh must be less than the "
+ "number of TX descriptors minus 3. "
+ "(tx_free_thresh=%u port=%d queue=%d)",
+ (unsigned int)tx_free_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
return I40E_ERR_PARAM;
}
if (tx_rs_thresh > tx_free_thresh) {
- RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than or "
- "equal to tx_free_thresh. (tx_free_thresh=%u"
- " tx_rs_thresh=%u port=%d queue=%d)\n",
- (unsigned int)tx_free_thresh,
- (unsigned int)tx_rs_thresh,
- (int)dev->data->port_id,
- (int)queue_idx);
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
+ "equal to tx_free_thresh. (tx_free_thresh=%u"
+ " tx_rs_thresh=%u port=%d queue=%d)",
+ (unsigned int)tx_free_thresh,
+ (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
return I40E_ERR_PARAM;
}
if ((nb_desc % tx_rs_thresh) != 0) {
- RTE_LOG(ERR, PMD, "tx_rs_thresh must be a divisor of the "
- "number of TX descriptors. (tx_rs_thresh=%u"
- " port=%d queue=%d)\n",
- (unsigned int)tx_rs_thresh,
- (int)dev->data->port_id,
- (int)queue_idx);
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
+ "number of TX descriptors. (tx_rs_thresh=%u"
+ " port=%d queue=%d)",
+ (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
return I40E_ERR_PARAM;
}
if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
- RTE_LOG(ERR, PMD, "TX WTHRESH must be set to 0 if "
- "tx_rs_thresh is greater than 1. "
- "(tx_rs_thresh=%u port=%d queue=%d)\n",
- (unsigned int)tx_rs_thresh,
- (int)dev->data->port_id,
- (int)queue_idx);
+ PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
+ "tx_rs_thresh is greater than 1. "
+ "(tx_rs_thresh=%u port=%d queue=%d)",
+ (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
return I40E_ERR_PARAM;
}
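Taken together, the checks above encode a small set of ordering constraints on the thresholds. A sketch of the combined predicate (assuming tx_rs_thresh > 0, which the divisor check requires anyway):

    #include <stdbool.h>
    #include <stdint.h>

    static bool
    tx_thresh_valid(uint16_t nb_desc, uint16_t tx_rs_thresh,
                    uint16_t tx_free_thresh, uint8_t wthresh)
    {
        return tx_rs_thresh < nb_desc - 2 &&      /* room for RS marking */
               tx_free_thresh < nb_desc - 3 &&
               tx_rs_thresh <= tx_free_thresh &&
               nb_desc % tx_rs_thresh == 0 &&     /* whole RS intervals */
               (tx_rs_thresh <= 1 || wthresh == 0);
    }

    /* Example: tx_thresh_valid(512, 32, 32, 0) passes every check. */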
socket_id);
if (!txq) {
PMD_DRV_LOG(ERR, "Failed to allocate memory for "
- "tx queue structure\n");
+ "tx queue structure");
return (-ENOMEM);
}
socket_id);
if (!tz) {
i40e_dev_tx_queue_release(txq);
- PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX\n");
+ PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
return (-ENOMEM);
}
txq->port_id = dev->data->port_id;
txq->txq_flags = tx_conf->txq_flags;
txq->vsi = vsi;
- txq->start_tx_per_q = tx_conf->start_tx_per_q;
+ txq->tx_deferred_start = tx_conf->tx_deferred_start;
#ifdef RTE_LIBRTE_XEN_DOM0
txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
socket_id);
if (!txq->sw_ring) {
i40e_dev_tx_queue_release(txq);
- PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring\n");
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
return (-ENOMEM);
}
/* Use a simple TX queue without offloads or multi segs if possible */
if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS) &&
(txq->tx_rs_thresh >= I40E_TX_MAX_BURST)) {
- PMD_INIT_LOG(INFO, "Using simple tx path\n");
+ PMD_INIT_LOG(INFO, "Using simple tx path");
dev->tx_pkt_burst = i40e_xmit_pkts_simple;
} else {
- PMD_INIT_LOG(INFO, "Using full-featured tx path\n");
+ PMD_INIT_LOG(INFO, "Using full-featured tx path");
dev->tx_pkt_burst = i40e_xmit_pkts;
}
struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq;
if (!q) {
- PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL\n");
+ PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
return;
}
uint16_t i;
if (!rxq || !rxq->sw_ring) {
- PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL\n");
+ PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
return;
}
uint16_t i;
if (!txq || !txq->sw_ring) {
- PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL\n");
+ PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
return;
}
uint16_t i, prev, size;
if (!txq) {
- PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL\n");
+ PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
return;
}
for (i = 0; i < txq->nb_tx_desc; i++) {
volatile struct i40e_tx_desc *txd = &txq->tx_ring[i];
- txd[i].cmd_type_offset_bsz =
+ txd->cmd_type_offset_bsz =
rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE);
txe[i].mbuf = NULL;
txe[i].last_id = i;
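The txd change in this hunk fixes a real pointer-arithmetic bug, not just style: txd already points at element i, so txd[i] addressed tx_ring[2 * i] and walked off the ring for the upper half of the loop. A small self-contained demonstration:

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
        uint64_t ring[8];
        unsigned i = 3;
        uint64_t *txd = &ring[i];

        /* txd[i] is ring[2 * i]: one element off for any i > 0,
         * and out of bounds once 2 * i >= 8. */
        assert(&txd[i] == &ring[2 * i]);
        assert(txd == &ring[i]);  /* *txd is the intended slot */
        return 0;
    }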
err = i40e_clear_lan_tx_queue_context(hw, pf_q);
if (err != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failure of clean lan tx queue context\n");
+ PMD_DRV_LOG(ERR, "Failure of clean lan tx queue context");
return err;
}
err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
if (err != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failure of set lan tx queue context\n");
+ PMD_DRV_LOG(ERR, "Failure of set lan tx queue context");
return err;
}
struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mp);
if (unlikely(!mbuf)) {
- PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX\n");
+ PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
return -ENOMEM;
}
rte_mbuf_refcnt_set(mbuf, 1);
mbuf->next = NULL;
- mbuf->data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
mbuf->nb_segs = 1;
mbuf->port = rxq->port_id;
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
- "be larger than %u and smaller than %u,"
- "as jumbo frame is enabled\n",
- (uint32_t)ETHER_MAX_LEN,
- (uint32_t)I40E_FRAME_SIZE_MAX);
+ "be larger than %u and smaller than %u,"
+ "as jumbo frame is enabled",
+ (uint32_t)ETHER_MAX_LEN,
+ (uint32_t)I40E_FRAME_SIZE_MAX);
return I40E_ERR_CONFIG;
}
} else {
if (rxq->max_pkt_len < ETHER_MIN_LEN ||
rxq->max_pkt_len > ETHER_MAX_LEN) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
- "larger than %u and smaller than %u, "
- "as jumbo frame is disabled\n",
- (uint32_t)ETHER_MIN_LEN,
- (uint32_t)ETHER_MAX_LEN);
+ "larger than %u and smaller than %u, "
+ "as jumbo frame is disabled",
+ (uint32_t)ETHER_MIN_LEN,
+ (uint32_t)ETHER_MAX_LEN);
return I40E_ERR_CONFIG;
}
}
err = i40e_rx_queue_config(rxq);
if (err < 0) {
- PMD_DRV_LOG(ERR, "Failed to config RX queue\n");
+ PMD_DRV_LOG(ERR, "Failed to config RX queue");
return err;
}
err = i40e_clear_lan_rx_queue_context(hw, pf_q);
if (err != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to clear LAN RX queue context\n");
+ PMD_DRV_LOG(ERR, "Failed to clear LAN RX queue context");
return err;
}
err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
if (err != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to set LAN RX queue context\n");
+ PMD_DRV_LOG(ERR, "Failed to set LAN RX queue context");
return err;
}