* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright 2014 6WIND S.A.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include "ixgbe/ixgbe_common.h"
#include "ixgbe_rxtx.h"
-#define IXGBE_RSS_OFFLOAD_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_IPV4_TCP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_IPV6_EX | \
- ETH_RSS_IPV6_TCP | \
- ETH_RSS_IPV6_TCP_EX | \
- ETH_RSS_IPV4_UDP | \
- ETH_RSS_IPV6_UDP | \
- ETH_RSS_IPV6_UDP_EX)
+/* Bit mask to indicate what bits are required for building TX context */
+#define IXGBE_TX_OFFLOAD_MASK ( \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG)
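(For orientation only, not part of the patch: a minimal sketch of how this mask
is meant to be applied, mirroring the check in ixgbe_xmit_pkts() later in this
diff; tx_pkt stands for the struct rte_mbuf * being transmitted.)

	uint64_t ol_flags = tx_pkt->ol_flags;
	uint64_t tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;

	if (tx_ol_req) {
		/* VLAN insertion, IP/L4 checksum or TSO was requested,
		 * so a TX context descriptor may have to be built */
	}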
static inline struct rte_mbuf *
rte_rxmbuf_alloc(struct rte_mempool *mp)
* Return the total number of buffers freed.
*/
static inline int __attribute__((always_inline))
-ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
+ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
{
- struct igb_tx_entry *txep;
+ struct ixgbe_tx_entry *txep;
uint32_t status;
int i;
/* free buffers one at a time */
if ((txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) != 0) {
for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
+ txep->mbuf->next = NULL;
rte_mempool_put(txep->mbuf->pool, txep->mbuf);
txep->mbuf = NULL;
}
* Copy mbuf pointers to the S/W ring.
*/
static inline void
-ixgbe_tx_fill_hw_ring(struct igb_tx_queue *txq, struct rte_mbuf **pkts,
+ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
uint16_t nb_pkts)
{
volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
- struct igb_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
+ struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
const int N_PER_LOOP = 4;
const int N_PER_LOOP_MASK = N_PER_LOOP-1;
int mainpart, leftover;
tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- struct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
uint16_t n = 0;
}
static inline void
-ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
+ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
- uint64_t ol_flags, uint32_t vlan_macip_lens)
+ uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
{
uint32_t type_tucmd_mlhl;
- uint32_t mss_l4len_idx;
+ uint32_t mss_l4len_idx = 0;
uint32_t ctx_idx;
- uint32_t cmp_mask;
+ uint32_t vlan_macip_lens;
+ union ixgbe_tx_offload tx_offload_mask;
ctx_idx = txq->ctx_curr;
- cmp_mask = 0;
+ tx_offload_mask.data = 0;
type_tucmd_mlhl = 0;
+ /* Specify which HW CTX to upload. */
+ mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
+
if (ol_flags & PKT_TX_VLAN_PKT) {
- cmp_mask |= TX_VLAN_CMP_MASK;
+ tx_offload_mask.vlan_tci |= ~0;
}
- if (ol_flags & PKT_TX_IP_CKSUM) {
- type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
- cmp_mask |= TX_MAC_LEN_CMP_MASK;
- }
+ /* check if TCP segmentation is required for this packet */
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ /* implies IP cksum and TCP cksum */
+ type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
+ IXGBE_ADVTXD_TUCMD_L4T_TCP |
+ IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ tx_offload_mask.l4_len |= ~0;
+ tx_offload_mask.tso_segsz |= ~0;
+ mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
+ mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
+ } else { /* no TSO, check if hardware checksum is needed */
+ if (ol_flags & PKT_TX_IP_CKSUM) {
+ type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ }
- /* Specify which HW CTX to upload. */
- mss_l4len_idx = (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
- switch (ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_UDP_CKSUM:
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
+ switch (ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_UDP_CKSUM:
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
- mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
- cmp_mask |= TX_MACIP_LEN_CMP_MASK;
- break;
- case PKT_TX_TCP_CKSUM:
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
+ mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ break;
+ case PKT_TX_TCP_CKSUM:
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
- mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
- cmp_mask |= TX_MACIP_LEN_CMP_MASK;
- break;
- case PKT_TX_SCTP_CKSUM:
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
+ mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ tx_offload_mask.l4_len |= ~0;
+ break;
+ case PKT_TX_SCTP_CKSUM:
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
- mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
- cmp_mask |= TX_MACIP_LEN_CMP_MASK;
- break;
- default:
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
+ mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ break;
+ default:
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
- break;
+ break;
+ }
}
txq->ctx_cache[ctx_idx].flags = ol_flags;
- txq->ctx_cache[ctx_idx].cmp_mask = cmp_mask;
- txq->ctx_cache[ctx_idx].vlan_macip_lens.data =
- vlan_macip_lens & cmp_mask;
+ txq->ctx_cache[ctx_idx].tx_offload.data =
+ tx_offload_mask.data & tx_offload.data;
+ txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
+ vlan_macip_lens = tx_offload.l3_len;
+ vlan_macip_lens |= (tx_offload.l2_len << IXGBE_ADVTXD_MACLEN_SHIFT);
+ vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
ctx_txd->seqnum_seed = 0;
* or create a new context descriptor.
*/
static inline uint32_t
-what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
- uint32_t vlan_macip_lens)
+what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
+ union ixgbe_tx_offload tx_offload)
{
/* If match with the current used context */
if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
- (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
- (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
return txq->ctx_curr;
}
/* What if match with the next context */
txq->ctx_curr ^= 1;
if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
- (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
- (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
return txq->ctx_curr;
}
static inline uint32_t
tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
{
- static const uint32_t l4_olinfo[2] = {0, IXGBE_ADVTXD_POPTS_TXSM};
- static const uint32_t l3_olinfo[2] = {0, IXGBE_ADVTXD_POPTS_IXSM};
- uint32_t tmp;
-
- tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
- tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
+ uint32_t tmp = 0;
+ if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
+ tmp |= IXGBE_ADVTXD_POPTS_TXSM;
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ tmp |= IXGBE_ADVTXD_POPTS_IXSM;
+ if (ol_flags & PKT_TX_TCP_SEG)
+ tmp |= IXGBE_ADVTXD_POPTS_TXSM;
return tmp;
}
static inline uint32_t
-tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
+tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
{
- static const uint32_t vlan_cmd[2] = {0, IXGBE_ADVTXD_DCMD_VLE};
- return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
+ uint32_t cmdtype = 0;
+ if (ol_flags & PKT_TX_VLAN_PKT)
+ cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
+ if (ol_flags & PKT_TX_TCP_SEG)
+ cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
+ return cmdtype;
}
/* Default RS bit threshold values */
/* Reset transmit descriptors after they have been used */
static inline int
-ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
+ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
{
- struct igb_tx_entry *sw_ring = txq->sw_ring;
+ struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
uint16_t last_desc_cleaned = txq->last_desc_cleaned;
uint16_t nb_tx_desc = txq->nb_tx_desc;
ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- struct igb_tx_queue *txq;
- struct igb_tx_entry *sw_ring;
- struct igb_tx_entry *txe, *txn;
+ struct ixgbe_tx_queue *txq;
+ struct ixgbe_tx_entry *sw_ring;
+ struct ixgbe_tx_entry *txe, *txn;
volatile union ixgbe_adv_tx_desc *txr;
volatile union ixgbe_adv_tx_desc *txd;
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
- union ixgbe_vlan_macip vlan_macip_lens;
uint64_t buf_dma_addr;
uint32_t olinfo_status;
uint32_t cmd_type_len;
uint64_t tx_ol_req;
uint32_t ctx = 0;
uint32_t new_ctx;
+ union ixgbe_tx_offload tx_offload = { .data = 0 };
txq = tx_queue;
sw_ring = txq->sw_ring;
ixgbe_xmit_cleanup(txq);
}
+ rte_prefetch0(&txe->mbuf->pool);
+
/* TX loop */
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
new_ctx = 0;
tx_pkt = *tx_pkts++;
pkt_len = tx_pkt->pkt_len;
- RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
-
/*
* Determine how many (if any) context descriptors
* are needed for offload functionality.
*/
ol_flags = tx_pkt->ol_flags;
- vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
- vlan_macip_lens.f.l2_l3_len = tx_pkt->l2_l3_len;
/* If hardware offload required */
- tx_ol_req = ol_flags & PKT_TX_OFFLOAD_MASK;
+ tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
if (tx_ol_req) {
+ tx_offload.l2_len = tx_pkt->l2_len;
+ tx_offload.l3_len = tx_pkt->l3_len;
+ tx_offload.l4_len = tx_pkt->l4_len;
+ tx_offload.vlan_tci = tx_pkt->vlan_tci;
+ tx_offload.tso_segsz = tx_pkt->tso_segsz;
+
/* If new context need be built or reuse the exist ctx. */
ctx = what_advctx_update(txq, tx_ol_req,
- vlan_macip_lens.data);
+ tx_offload);
 /* Only allocate context descriptor if required */
new_ctx = (ctx == IXGBE_CTX_NUM);
ctx = txq->ctx_curr;
*/
cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
- olinfo_status = (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
#ifdef RTE_LIBRTE_IEEE1588
if (ol_flags & PKT_TX_IEEE1588_TMST)
cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
#endif
+ olinfo_status = 0;
if (tx_ol_req) {
+
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ /* when TSO is on, the paylen in the descriptor is
+ * not the packet len but the TCP payload len */
+ pkt_len -= (tx_offload.l2_len +
+ tx_offload.l3_len + tx_offload.l4_len);
+ }
+
/*
* Setup the TX Advanced Context Descriptor if required
*/
&txr[tx_id];
txn = &sw_ring[txe->next_id];
- RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+ rte_prefetch0(&txn->mbuf->pool);
if (txe->mbuf != NULL) {
rte_pktmbuf_free_seg(txe->mbuf);
}
ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
- vlan_macip_lens.data);
+ tx_offload);
txe->last_id = tx_last;
tx_id = txe->next_id;
* This path will go through
* whatever new/reuse the context descriptor
*/
- cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
+ cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
}
+ olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
m_seg = tx_pkt;
do {
txd = &txr[tx_id];
txn = &sw_ring[txe->next_id];
+ rte_prefetch0(&txn->mbuf->pool);
if (txe->mbuf != NULL)
rte_pktmbuf_free_seg(txe->mbuf);
static inline uint64_t
rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
{
- uint16_t pkt_flags;
+ uint64_t pkt_flags;
- static uint64_t ip_pkt_types_map[16] = {
+ static const uint64_t ip_pkt_types_map[16] = {
0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
PKT_RX_IPV6_HDR, 0, 0, 0,
PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
};
- static uint64_t ip_rss_types_map[16] = {
+ static const uint64_t ip_rss_types_map[16] = {
0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
PKT_RX_RSS_HASH, 0, 0, 0,
};
#ifdef RTE_LIBRTE_IEEE1588
- static uint32_t ip_pkt_etqf_map[8] = {
+ static uint64_t ip_pkt_etqf_map[8] = {
0, 0, 0, PKT_RX_IEEE1588_PTP,
0, 0, 0, 0,
};
#error "PMD IXGBE: LOOK_AHEAD must be 8\n"
#endif
static inline int
-ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
+ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
{
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *rxep;
+ struct ixgbe_rx_entry *rxep;
struct rte_mbuf *mb;
uint16_t pkt_len;
- uint16_t pkt_flags;
+ uint64_t pkt_flags;
int s[LOOK_AHEAD], nb_dd;
int i, j, nb_rx = 0;
}
static inline int
-ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
+ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq)
{
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *rxep;
+ struct ixgbe_rx_entry *rxep;
struct rte_mbuf *mb;
uint16_t alloc_idx;
- uint64_t dma_addr;
+ __le64 dma_addr;
int diag, i;
/* allocate buffers in bulk directly into the S/W ring */
mb->port = rxq->port_id;
/* populate the descriptors */
- dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
rxdp[i].read.hdr_addr = dma_addr;
rxdp[i].read.pkt_addr = dma_addr;
}
}
static inline uint16_t
-ixgbe_rx_fill_from_stage(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct igb_rx_queue *rxq = (struct igb_rx_queue *)rx_queue;
+ struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
uint16_t nb_rx = 0;
/* Any previously recv'd pkts will be returned from the Rx stage */
}
/* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
-uint16_t
+static uint16_t
ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
volatile union ixgbe_adv_rx_desc *rx_ring;
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *sw_ring;
- struct igb_rx_entry *rxe;
+ struct ixgbe_rx_entry *sw_ring;
+ struct ixgbe_rx_entry *rxe;
struct rte_mbuf *rxm;
struct rte_mbuf *nmb;
union ixgbe_adv_rx_desc rxd;
ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
volatile union ixgbe_adv_rx_desc *rx_ring;
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *sw_ring;
- struct igb_rx_entry *rxe;
+ struct ixgbe_rx_entry *sw_ring;
+ struct ixgbe_rx_entry *rxe;
struct rte_mbuf *first_seg;
struct rte_mbuf *last_seg;
struct rte_mbuf *rxm;
uint16_t nb_rx;
uint16_t nb_hold;
uint16_t data_len;
- uint16_t pkt_flags;
+ uint64_t pkt_flags;
nb_rx = 0;
nb_hold = 0;
first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
- pkt_flags = (uint16_t)(pkt_flags |
+ pkt_flags = (pkt_flags |
rx_desc_status_to_pkt_flags(staterr));
- pkt_flags = (uint16_t)(pkt_flags |
+ pkt_flags = (pkt_flags |
rx_desc_error_to_pkt_flags(staterr));
first_seg->ol_flags = pkt_flags;
if (likely(pkt_flags & PKT_RX_RSS_HASH))
- first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
+ first_seg->hash.rss =
+ rte_le_to_cpu_32(rxd.wb.lower.hi_dword.rss);
else if (pkt_flags & PKT_RX_FDIR) {
first_seg->hash.fdir.hash =
- (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
- & IXGBE_ATR_HASH_MASK);
+ rte_le_to_cpu_16(rxd.wb.lower.hi_dword.csum_ip.csum)
+ & IXGBE_ATR_HASH_MASK;
first_seg->hash.fdir.id =
- rxd.wb.lower.hi_dword.csum_ip.ip_id;
+ rte_le_to_cpu_16(rxd.wb.lower.hi_dword.csum_ip.ip_id);
}
/* Prefetch data of first segment, if configured to do so. */
}
static void
-ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
+ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
{
unsigned i;
}
static void
-ixgbe_tx_free_swring(struct igb_tx_queue *txq)
+ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
if (txq != NULL &&
txq->sw_ring != NULL)
}
static void
-ixgbe_tx_queue_release(struct igb_tx_queue *txq)
+ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
{
if (txq != NULL && txq->ops != NULL) {
txq->ops->release_mbufs(txq);
ixgbe_tx_queue_release(txq);
}
-/* (Re)set dynamic igb_tx_queue fields to defaults */
+/* (Re)set dynamic ixgbe_tx_queue fields to defaults */
static void
-ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
+ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
static const union ixgbe_adv_tx_desc zeroed_desc = { .read = {
.buffer_addr = 0}};
- struct igb_tx_entry *txe = txq->sw_ring;
+ struct ixgbe_tx_entry *txe = txq->sw_ring;
uint16_t prev, i;
/* Zero out HW ring memory */
IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
}
-static struct ixgbe_txq_ops def_txq_ops = {
+static const struct ixgbe_txq_ops def_txq_ops = {
.release_mbufs = ixgbe_tx_queue_release_mbufs,
.free_swring = ixgbe_tx_free_swring,
.reset = ixgbe_reset_tx_queue,
};
+/* Takes an ethdev and a queue and sets up the tx function to be used based on
+ * the queue parameters. Used in tx_queue_setup by the primary process and then
+ * in dev_init by a secondary process when attaching to an existing ethdev.
+ */
+void
+ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
+{
+ /* Use a simple Tx queue (no offloads, no multi segs) if possible */
+ if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
+ && (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
+ PMD_INIT_LOG(INFO, "Using simple tx code path");
+#ifdef RTE_IXGBE_INC_VECTOR
+ if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
+ (rte_eal_process_type() != RTE_PROC_PRIMARY ||
+ ixgbe_txq_vec_setup(txq) == 0)) {
+ PMD_INIT_LOG(INFO, "Vector tx enabled.");
+ dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
+ } else
+#endif
+ dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
+ } else {
+ PMD_INIT_LOG(INFO, "Using full-featured tx code path");
+ PMD_INIT_LOG(INFO,
+ " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
+ (unsigned long)txq->txq_flags,
+ (unsigned long)IXGBE_SIMPLE_FLAGS);
+ PMD_INIT_LOG(INFO,
+ " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
+ (unsigned long)txq->tx_rs_thresh,
+ (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
+ dev->tx_pkt_burst = ixgbe_xmit_pkts;
+ }
+}
+
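(For orientation only, not part of the patch: a hedged sketch of the
secondary-process attach path the comment above refers to; eth_dev is assumed
to be the struct rte_eth_dev * handled in dev_init, with the Tx queues already
set up by the primary process.)

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ixgbe_tx_queue *txq;

		/* any initialized queue works: its parameters only decide
		 * between the simple and full-featured burst functions */
		if (eth_dev->data->tx_queues != NULL) {
			txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
			ixgbe_set_tx_function(eth_dev, txq);
		}
	}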
int
ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
const struct rte_eth_txconf *tx_conf)
{
const struct rte_memzone *tz;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
struct ixgbe_hw *hw;
uint16_t tx_rs_thresh, tx_free_thresh;
}
/* First allocate the tx queue data structure */
- txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct igb_tx_queue),
- CACHE_LINE_SIZE, socket_id);
+ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
if (txq == NULL)
return (-ENOMEM);
/*
* Modification to set VFTDT for virtual function if vf is detected
*/
- if (hw->mac.type == ixgbe_mac_82599_vf)
+ if (hw->mac.type == ixgbe_mac_82599_vf ||
+ hw->mac.type == ixgbe_mac_X540_vf ||
+ hw->mac.type == ixgbe_mac_X550_vf ||
+ hw->mac.type == ixgbe_mac_X550EM_x_vf)
txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
else
txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
/* Allocate software ring */
txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
- sizeof(struct igb_tx_entry) * nb_desc,
- CACHE_LINE_SIZE, socket_id);
+ sizeof(struct ixgbe_tx_entry) * nb_desc,
+ RTE_CACHE_LINE_SIZE, socket_id);
if (txq->sw_ring == NULL) {
ixgbe_tx_queue_release(txq);
return (-ENOMEM);
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
- /* Use a simple Tx queue (no offloads, no multi segs) if possible */
- if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
- (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
- PMD_INIT_LOG(INFO, "Using simple tx code path");
-#ifdef RTE_IXGBE_INC_VECTOR
- if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
- ixgbe_txq_vec_setup(txq) == 0) {
- PMD_INIT_LOG(INFO, "Vector tx enabled.");
- dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
- }
- else
-#endif
- dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
- } else {
- PMD_INIT_LOG(INFO, "Using full-featured tx code path");
- PMD_INIT_LOG(INFO, " - txq_flags = %lx "
- "[IXGBE_SIMPLE_FLAGS=%lx]",
- (long unsigned)txq->txq_flags,
- (long unsigned)IXGBE_SIMPLE_FLAGS);
- PMD_INIT_LOG(INFO, " - tx_rs_thresh = %lu "
- "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
- (long unsigned)txq->tx_rs_thresh,
- (long unsigned)RTE_PMD_IXGBE_TX_MAX_BURST);
- dev->tx_pkt_burst = ixgbe_xmit_pkts;
- }
+ /* set up vector or scalar TX function as appropriate */
+ ixgbe_set_tx_function(dev, txq);
txq->ops->reset(txq);
}
static void
-ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
+ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
{
unsigned i;
}
static void
-ixgbe_rx_queue_release(struct igb_rx_queue *rxq)
+ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
{
if (rxq != NULL) {
ixgbe_rx_queue_release_mbufs(rxq);
*/
static inline int
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-check_rx_burst_bulk_alloc_preconditions(struct igb_rx_queue *rxq)
+check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
#else
-check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
+check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
#endif
{
int ret = 0;
return ret;
}
-/* Reset dynamic igb_rx_queue fields back to defaults */
+/* Reset dynamic ixgbe_rx_queue fields back to defaults */
static void
-ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
+ixgbe_reset_rx_queue(struct ixgbe_rx_queue *rxq)
{
static const union ixgbe_adv_rx_desc zeroed_desc = { .read = {
.pkt_addr = 0}};
struct rte_mempool *mp)
{
const struct rte_memzone *rz;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
struct ixgbe_hw *hw;
int use_def_burst_func = 1;
uint16_t len;
}
/* First allocate the rx queue data structure */
- rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct igb_rx_queue),
- CACHE_LINE_SIZE, socket_id);
+ rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
if (rxq == NULL)
return (-ENOMEM);
rxq->mb_pool = mp;
/*
* Modified to setup VFRDT for Virtual Function
*/
- if (hw->mac.type == ixgbe_mac_82599_vf) {
+ if (hw->mac.type == ixgbe_mac_82599_vf ||
+ hw->mac.type == ixgbe_mac_X540_vf ||
+ hw->mac.type == ixgbe_mac_X550_vf ||
+ hw->mac.type == ixgbe_mac_X550EM_x_vf) {
rxq->rdt_reg_addr =
IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
rxq->rdh_reg_addr =
len = nb_desc;
#endif
rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
- sizeof(struct igb_rx_entry) * len,
- CACHE_LINE_SIZE, socket_id);
+ sizeof(struct ixgbe_rx_entry) * len,
+ RTE_CACHE_LINE_SIZE, socket_id);
if (rxq->sw_ring == NULL) {
ixgbe_rx_queue_release(rxq);
return (-ENOMEM);
*/
use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
+#ifdef RTE_IXGBE_INC_VECTOR
+ ixgbe_rxq_vec_setup(rxq);
+#endif
/* Check if pre-conditions are satisfied, and no Scattered Rx */
if (!use_def_burst_func && !dev->data->scattered_rx) {
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
rxq->port_id, rxq->queue_id);
dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
#ifdef RTE_IXGBE_INC_VECTOR
- if (!ixgbe_rx_vec_condition_check(dev)) {
+ if (!ixgbe_rx_vec_condition_check(dev) &&
+ (rte_is_power_of_2(nb_desc))) {
PMD_INIT_LOG(INFO, "Vector rx enabled, please make "
"sure RX burst size no less than 32.");
- ixgbe_rxq_vec_setup(rxq);
dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
}
#endif
{
#define IXGBE_RXQ_SCAN_INTERVAL 4
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
uint32_t desc = 0;
if (rx_queue_id >= dev->data->nb_rx_queues) {
ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
{
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_queue *rxq = rx_queue;
+ struct ixgbe_rx_queue *rxq = rx_queue;
uint32_t desc;
if (unlikely(offset >= rxq->nb_rx_desc))
PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- struct igb_tx_queue *txq = dev->data->tx_queues[i];
+ struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
if (txq != NULL) {
txq->ops->release_mbufs(txq);
txq->ops->reset(txq);
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- struct igb_rx_queue *rxq = dev->data->rx_queues[i];
+ struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
if (rxq != NULL) {
ixgbe_rx_queue_release_mbufs(rxq);
ixgbe_reset_rx_queue(rxq);
mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
if (rss_hf & ETH_RSS_IPV4)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
- if (rss_hf & ETH_RSS_IPV4_TCP)
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
if (rss_hf & ETH_RSS_IPV6)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
if (rss_hf & ETH_RSS_IPV6_EX)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
- if (rss_hf & ETH_RSS_IPV6_TCP)
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
if (rss_hf & ETH_RSS_IPV6_TCP_EX)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
- if (rss_hf & ETH_RSS_IPV4_UDP)
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
- if (rss_hf & ETH_RSS_IPV6_UDP)
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
if (rss_hf & ETH_RSS_IPV6_UDP_EX)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
rss_hf |= ETH_RSS_IPV4;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
- rss_hf |= ETH_RSS_IPV4_TCP;
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
rss_hf |= ETH_RSS_IPV6;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
rss_hf |= ETH_RSS_IPV6_EX;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
- rss_hf |= ETH_RSS_IPV6_TCP;
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
rss_hf |= ETH_RSS_IPV6_TCP_EX;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
- rss_hf |= ETH_RSS_IPV4_UDP;
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
- rss_hf |= ETH_RSS_IPV6_UDP;
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
rss_hf |= ETH_RSS_IPV6_UDP_EX;
rss_conf->rss_hf = rss_hf;
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
tsa, map);
break;
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,tsa);
ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,tsa, map);
break;
struct ixgbe_hw *hw;
enum rte_eth_nb_pools num_pools;
uint32_t mrqc, vt_ctl, vlanctrl;
+ uint32_t vmolr = 0;
int i;
PMD_INIT_FUNC_TRACE();
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
+ for (i = 0; i < (int)num_pools; i++) {
+ vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
+ }
+
/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
}
static int
-ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
+ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
{
- struct igb_rx_entry *rxe = rxq->sw_ring;
+ struct ixgbe_rx_entry *rxe = rxq->sw_ring;
uint64_t dma_addr;
unsigned i;
return 0;
}
+static int
+ixgbe_config_vf_rss(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+ uint32_t mrqc;
+
+ ixgbe_rss_configure(dev);
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* MRQC: enable VF RSS */
+ mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ mrqc &= ~IXGBE_MRQC_MRQE_MASK;
+ switch (RTE_ETH_DEV_SRIOV(dev).active) {
+ case ETH_64_POOLS:
+ mrqc |= IXGBE_MRQC_VMDQRSS64EN;
+ break;
+
+ case ETH_32_POOLS:
+ mrqc |= IXGBE_MRQC_VMDQRSS32EN;
+ break;
+
+ default:
+ PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
+ return -EINVAL;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+ return 0;
+}
+
+static int
+ixgbe_config_vf_default(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ switch (RTE_ETH_DEV_SRIOV(dev).active) {
+ case ETH_64_POOLS:
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC,
+ IXGBE_MRQC_VMDQEN);
+ break;
+
+ case ETH_32_POOLS:
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC,
+ IXGBE_MRQC_VMDQRT4TCEN);
+ break;
+
+ case ETH_16_POOLS:
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC,
+ IXGBE_MRQC_VMDQRT8TCEN);
+ break;
+ default:
+ PMD_INIT_LOG(ERR,
+ "invalid pool number in IOV mode");
+ break;
+ }
+ return 0;
+}
+
static int
ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
default: ixgbe_rss_disable(dev);
}
} else {
- switch (RTE_ETH_DEV_SRIOV(dev).active) {
/*
* SRIOV active scheme
- * FIXME if support DCB/RSS together with VMDq & SRIOV
+ * Support RSS together with VMDq & SRIOV
*/
- case ETH_64_POOLS:
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQEN);
- break;
-
- case ETH_32_POOLS:
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT4TCEN);
+ switch (dev->data->dev_conf.rxmode.mq_mode) {
+ case ETH_MQ_RX_RSS:
+ case ETH_MQ_RX_VMDQ_RSS:
+ ixgbe_config_vf_rss(dev);
break;
- case ETH_16_POOLS:
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT8TCEN);
- break;
+ /* FIXME if support DCB/RSS together with VMDq & SRIOV */
+ case ETH_MQ_RX_VMDQ_DCB:
+ case ETH_MQ_RX_VMDQ_DCB_RSS:
+ PMD_INIT_LOG(ERR,
+ "Could not support DCB with VMDq & SRIOV");
+ return -1;
default:
- PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
+ ixgbe_config_vf_default(dev);
+ break;
}
}
ixgbe_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
struct rte_pktmbuf_pool_private *mbp_priv;
uint64_t bus_addr;
uint32_t rxctrl;
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
}
srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK);
- srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+ IXGBE_SRRCTL_BSIZEHDR_MASK);
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
} else
#endif
srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
IXGBE_SRRCTL_BSIZEPKT_SHIFT);
- /* It adds dual VLAN length for supporting dual VLAN */
- if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ if (dev->data->dev_conf.rxmode.enable_scatter ||
+ /* It adds dual VLAN length for supporting dual VLAN */
+ (dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * IXGBE_VLAN_TAG_SIZE) > buf_size){
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->data->scattered_rx = 1;
#ifdef RTE_IXGBE_INC_VECTOR
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+ if (rte_is_power_of_2(rxq->nb_rx_desc))
+ dev->rx_pkt_burst =
+ ixgbe_recv_scattered_pkts_vec;
+ else
#endif
+ dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
}
}
- if (dev->data->dev_conf.rxmode.enable_scatter) {
- if (!dev->data->scattered_rx)
- PMD_INIT_LOG(DEBUG, "forcing scatter mode");
-#ifdef RTE_IXGBE_INC_VECTOR
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
-#endif
- dev->data->scattered_rx = 1;
- }
-
/*
* Device configured with multiple RX queues.
*/
IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
- if (hw->mac.type == ixgbe_mac_82599EB) {
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540) {
rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
if (dev->data->dev_conf.rxmode.hw_strip_crc)
rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
ixgbe_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
uint64_t bus_addr;
uint32_t hlreg0;
uint32_t txctrl;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- /* Enable TX CRC (checksum offload requirement) */
+ /* Enable TX CRC (checksum offload requirement) and hw padding
+ * (TSO requirement) */
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- hlreg0 |= IXGBE_HLREG0_TXCRCEN;
+ hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
/* Setup the Base and Length of the Tx Descriptor Rings */
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
default:
txctrl = IXGBE_READ_REG(hw,
IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
/*
* Start Transmit and Receive Units.
*/
-void
+int
ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
- struct igb_rx_queue *rxq;
+ struct ixgbe_tx_queue *txq;
+ struct ixgbe_rx_queue *rxq;
uint32_t txdctl;
uint32_t dmatxctl;
uint32_t rxctrl;
uint16_t i;
+ int ret = 0;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
- if (!txq->tx_deferred_start)
- ixgbe_dev_tx_queue_start(dev, i);
+ if (!txq->tx_deferred_start) {
+ ret = ixgbe_dev_tx_queue_start(dev, i);
+ if (ret < 0)
+ return ret;
+ }
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- if (!rxq->rx_deferred_start)
- ixgbe_dev_rx_queue_start(dev, i);
+ if (!rxq->rx_deferred_start) {
+ ret = ixgbe_dev_rx_queue_start(dev, i);
+ if (ret < 0)
+ return ret;
+ }
}
/* Enable Receive engine */
dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
ixgbe_setup_loopback_link_82599(hw);
+ return 0;
}
/*
ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
uint32_t rxdctl;
int poll_ms;
ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
uint32_t rxdctl;
int poll_ms;
ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
uint32_t txdctl;
int poll_ms;
ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
uint32_t txdctl;
uint32_t txtdh, txtdt;
int poll_ms;
ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
struct rte_pktmbuf_pool_private *mbp_priv;
uint64_t bus_addr;
- uint32_t srrctl;
+ uint32_t srrctl, psrtype = 0;
uint16_t buf_size;
uint16_t i;
int ret;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
+ PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
+ "it should be power of 2");
+ return -1;
+ }
+
+ if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
+ PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
+ "it should be equal to or less than %d",
+ hw->mac.max_rx_queues);
+ return -1;
+ }
+
/*
* When the VF driver issues a IXGBE_VF_RESET request, the PF driver
* disables the VF receipt of packets if the PF MTU is > 1500.
* Configure Header Split
*/
if (dev->data->dev_conf.rxmode.header_split) {
-
- /* Must setup the PSRTYPE register */
- uint32_t psrtype;
- psrtype = IXGBE_PSRTYPE_TCPHDR |
- IXGBE_PSRTYPE_UDPHDR |
- IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_IPV6HDR;
-
- IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE(i), psrtype);
-
srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK);
- srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+ IXGBE_SRRCTL_BSIZEHDR_MASK);
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
} else
#endif
srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
IXGBE_SRRCTL_BSIZEPKT_SHIFT);
- /* It adds dual VLAN length for supporting dual VLAN */
- if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ if (dev->data->dev_conf.rxmode.enable_scatter ||
+ /* It adds dual VLAN length for supporting dual VLAN */
+ (dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->data->scattered_rx = 1;
#ifdef RTE_IXGBE_INC_VECTOR
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+ if (rte_is_power_of_2(rxq->nb_rx_desc))
+ dev->rx_pkt_burst =
+ ixgbe_recv_scattered_pkts_vec;
+ else
#endif
+ dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
}
}
- if (dev->data->dev_conf.rxmode.enable_scatter) {
- if (!dev->data->scattered_rx)
- PMD_INIT_LOG(DEBUG, "forcing scatter mode");
-#ifdef RTE_IXGBE_INC_VECTOR
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+#ifdef RTE_HEADER_SPLIT_ENABLE
+ if (dev->data->dev_conf.rxmode.header_split)
+ /* Must setup the PSRTYPE register */
+ psrtype = IXGBE_PSRTYPE_TCPHDR |
+ IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR |
+ IXGBE_PSRTYPE_IPV6HDR;
#endif
- dev->data->scattered_rx = 1;
- }
+
+ /* Set RQPL for VF RSS according to max Rx queue */
+ psrtype |= (dev->data->nb_rx_queues >> 1) <<
+ IXGBE_PSRTYPE_RQPL_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
return 0;
}
ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
uint64_t bus_addr;
uint32_t txctrl;
uint16_t i;
ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
- struct igb_rx_queue *rxq;
+ struct ixgbe_tx_queue *txq;
+ struct ixgbe_rx_queue *rxq;
uint32_t txdctl;
uint32_t rxdctl;
uint16_t i;