/*
* Flush mbuf with pkt template.
* Data to be rearmed is 6 bytes long.
- * Though, RX will overwrite ol_flags that are coming next
- * anyway. So overwrite whole 8 bytes with one load:
- * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
*/
vst1_u8((uint8_t *)&mb0->rearm_data, p);
paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;
IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
}
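/*
 * For reference, a sketch of how the rearm template loaded into 'p'
 * above can be built once at queue setup. It assumes the classic
 * rte_mbuf layout where rearm_data covers data_off, refcnt, nb_segs
 * and port (the 6 bytes mentioned above), and mirrors the default
 * ixgbe_rxq_vec_setup() logic rather than reproducing it verbatim.
 */
static inline void
example_build_mbuf_initializer(struct ixgbe_rx_queue *rxq)
{
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
	uintptr_t p;

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers the fields above */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
}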
-/* Handling the offload flags (olflags) field takes computation
- * time when receiving packets. Therefore we provide a flag to disable
- * the processing of the olflags field when they are not needed. This
- * gives improved performance, at the cost of losing the offload info
- * in the received packet
- */
-#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE
-
#define VTAG_SHIFT (3)
static inline void
rx_pkts[2]->ol_flags = vol.e[2];
rx_pkts[3]->ol_flags = vol.e[3];
}
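/*
 * Scalar illustration of the per-packet ol_flags computation done by
 * the vector code above; a sketch, assuming the VLAN Present status
 * bit sits at bit 3 of the low status byte (which is what VTAG_SHIFT
 * encodes) and that the VLAN Rx flag occupies bit 0 of ol_flags.
 */
static inline uint8_t
example_vlan_flag_from_status(uint8_t status)
{
	/* move the VP bit down to bit 0 and mask everything else off */
	return (status >> VTAG_SHIFT) & 0x1;
}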
-#else
-#define desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr, rx_pkts)
-#endif
/*
 * vPMD raw receive routine; only accepts nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP
struct ixgbe_rx_entry *sw_ring;
uint16_t nb_pkts_recd;
int pos;
- uint64_t var;
uint8x16_t shuf_msk = {
0xFF, 0xFF,
0xFF, 0xFF, /* skip 32 bits pkt_type */
uint64x2_t mbp1, mbp2;
uint8x16_t staterr;
uint16x8_t tmp;
+ uint32_t var = 0;
uint32_t stat;
/* B.1 load 2 mbuf pointers */
mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);
- /* Read desc statuses backwards to avoid race condition */
- /* A.1 load 4 pkts desc */
- descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));
- rte_rmb();
-
/* B.2 copy 2 mbuf pointers into rx_pkts */
vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);
/* B.1 load 2 mbuf pointers */
mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);
- descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
- /* B.1 load 2 mbuf point */
- descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
+ /* A. load 4 packet descriptors in ascending order */
descs[0] = vld1q_u64((uint64_t *)(rxdp));
+ descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
+ descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
+ descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));
+ rte_smp_rmb();
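+ /* Even issued in ascending order, these four loads may complete
+  * out of order on a weakly ordered CPU, so the DD bits gathered
+  * from them can be non-contiguous; the barrier only orders the
+  * descriptor reads against later use of their contents. C.4 below
+  * therefore trusts only DD bits contiguous from descriptor 0. */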
/* B.2 copy 2 mbuf point into rx_pkts */
vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2);
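/* a set bit below marks a descriptor whose EOP flag is clear,
 * i.e. its packet continues in the next descriptor */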
*(int *)split_packet = ~stat & IXGBE_VPMD_DESC_EOP_MASK;
split_packet += RTE_IXGBE_DESCS_PER_LOOP;
-
- /* zero-out next pointers */
- rx_pkts[pos]->next = NULL;
- rx_pkts[pos + 1]->next = NULL;
- rx_pkts[pos + 2]->next = NULL;
- rx_pkts[pos + 3]->next = NULL;
}
rte_prefetch_non_temporal(rxdp + RTE_IXGBE_DESCS_PER_LOOP);
vst1q_u8((uint8_t *)&rx_pkts[pos]->rx_descriptor_fields1,
pkt_mb1);
+ stat &= IXGBE_VPMD_DESC_DD_MASK;
+
/* C.4 calc available number of desc */
- var = __builtin_popcount(stat & IXGBE_VPMD_DESC_DD_MASK);
- nb_pkts_recd += var;
- if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))
+ if (likely(stat != IXGBE_VPMD_DESC_DD_MASK)) {
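+ /* count only the DD bits contiguous from the first descriptor */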
+ while (stat & 0x01) {
+ ++var;
+ stat = stat >> 8;
+ }
+ nb_pkts_recd += var;
break;
+ } else {
+ nb_pkts_recd += RTE_IXGBE_DESCS_PER_LOOP;
+ }
}
/* Update our internal tail pointer */
}
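/*
 * Standalone illustration (not driver code) of why C.4 above counts
 * contiguous DD bits instead of using popcount: with descriptors read
 * in ascending order, a weakly ordered CPU can make descriptor 2's DD
 * bit visible while descriptor 1's is not yet set.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* one DD bit per byte lane: desc 0 and desc 2 done, desc 1 not */
	uint32_t stat = 0x00010001;
	uint32_t var = 0;

	while (stat & 0x01) {	/* same loop shape as the Rx path */
		++var;
		stat >>= 8;
	}
	/* popcount reports 2 descriptors; only 1 can safely be consumed */
	printf("contiguous DD count: %u, popcount: %d\n",
	       var, __builtin_popcount(0x00010001));
	return 0;
}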
uint16_t
-ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
{
struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
volatile union ixgbe_adv_tx_desc *txdp;
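/*
 * The rename suggests ixgbe_xmit_pkts_vec now survives as a wrapper
 * that feeds the fixed-burst routine in tx_rs_thresh-sized chunks;
 * that wrapper is elided from this hunk, so the following is only a
 * sketch of its likely shape, not the committed implementation.
 */
uint16_t
example_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
		      uint16_t nb_pkts)
{
	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
	uint16_t nb_tx = 0;

	while (nb_pkts) {
		uint16_t ret, num;

		/* never hand the fixed-burst routine more than tx_rs_thresh */
		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
		ret = ixgbe_xmit_fixed_burst_vec(tx_queue,
						 &tx_pkts[nb_tx], num);
		nb_tx += ret;
		nb_pkts -= ret;
		if (ret < num)	/* descriptor ring is full; stop early */
			break;
	}
	return nb_tx;
}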
int __attribute__((cold))
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+
+ /* no checksum error report support in the vector Rx path */
+ if (rxmode->hw_ip_checksum == 1)
+ return -1;
+
return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
}
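/*
 * Usage note, assuming the bit-field style rte_eth_rxmode read by the
 * check above: a configuration like this one now makes the condition
 * check return -1, so the device falls back to the scalar Rx path
 * instead of silently dropping checksum error information.
 */
static const struct rte_eth_conf example_conf = {
	.rxmode = {
		.hw_ip_checksum = 1, /* triggers the fallback above */
	},
};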