* Return the total number of buffers freed.
*/
static inline int __attribute__((always_inline))
-ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
+ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
{
- struct igb_tx_entry *txep;
+ struct ixgbe_tx_entry *txep;
uint32_t status;
int i;
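For orientation (the diff elides the body): this helper implements the threshold-based reclaim of the simple TX path. A minimal sketch, using the locals declared above and the tx_next_dd/tx_rs_thresh fields from the queue structure shown later in this patch; the bulk mempool put of the real code is condensed to a per-segment free:

        /* Sketch only, not part of this patch. */
        status = txq->tx_ring[txq->tx_next_dd].wb.status;
        if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
                return 0;               /* hardware has not finished the block */

        txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
        for (i = 0; i < txq->tx_rs_thresh; ++i)
                rte_pktmbuf_free_seg(txep[i].mbuf);

        txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
        txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
        return txq->tx_rs_thresh;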
* Copy mbuf pointers to the S/W ring.
*/
static inline void
-ixgbe_tx_fill_hw_ring(struct igb_tx_queue *txq, struct rte_mbuf **pkts,
+ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
uint16_t nb_pkts)
{
volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
- struct igb_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
+ struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
const int N_PER_LOOP = 4;
const int N_PER_LOOP_MASK = N_PER_LOOP-1;
int mainpart, leftover;
tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- struct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
uint16_t n = 0;
}
static inline void
-ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
+ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
{
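What the function produces, roughly: a context descriptor encoding offload state that later data descriptors reference. A condensed sketch; the flag names come from the ixgbe base code, and the packing of tx_offload into vlan_macip_lens is deliberately omitted:

        /* Sketch only. */
        uint32_t type_tucmd_mlhl = IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;

        if (ol_flags & PKT_TX_IP_CKSUM)
                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
        /* ... L4 checksum and TSO variants elided ... */

        txq->ctx_cache[txq->ctx_curr].flags = ol_flags; /* for what_advctx_update() */
        ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
        /* l2_len/l3_len/vlan_tci from tx_offload are packed into
         * ctx_txd->vlan_macip_lens here */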
* or create a new context descriptor.
*/
static inline uint32_t
-what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
+what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
union ixgbe_tx_offload tx_offload)
{
/* If match with the current used context */
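The match this comment refers to compares both the flag word and the masked offload fields against the cached context. A sketch consistent with the ctx_cache fields the comparison implies:

        /* Sketch only. */
        if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags &&
                   txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
                   (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data &
                    tx_offload.data)))
                return txq->ctx_curr;   /* reuse the already-programmed context */
        /* otherwise check the other cached slot, or build a new context */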
/* Reset transmit descriptors after they have been used */
static inline int
-ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
+ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
{
- struct igb_tx_entry *sw_ring = txq->sw_ring;
+ struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
uint16_t last_desc_cleaned = txq->last_desc_cleaned;
uint16_t nb_tx_desc = txq->nb_tx_desc;
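A sketch of the reclaim these locals serve: advance from the last cleaned position to the descriptor that closes the oldest outstanding burst, and only clean if hardware has written its DD bit back:

        /* Sketch only. */
        uint16_t desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
        if (desc_to_clean_to >= nb_tx_desc)
                desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);

        /* bursts may be multi-segment: follow last_id to the true end */
        desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
        if (!(txr[desc_to_clean_to].wb.status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD)))
                return -1;      /* nothing reclaimable yet */
        txq->last_desc_cleaned = desc_to_clean_to;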
ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- struct igb_tx_queue *txq;
- struct igb_tx_entry *sw_ring;
- struct igb_tx_entry *txe, *txn;
+ struct ixgbe_tx_queue *txq;
+ struct ixgbe_tx_entry *sw_ring;
+ struct ixgbe_tx_entry *txe, *txn;
volatile union ixgbe_adv_tx_desc *txr;
volatile union ixgbe_adv_tx_desc *txd;
struct rte_mbuf *tx_pkt;
#error "PMD IXGBE: LOOK_AHEAD must be 8\n"
#endif
static inline int
-ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
+ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
{
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *rxep;
+ struct ixgbe_rx_entry *rxep;
struct rte_mbuf *mb;
uint16_t pkt_len;
uint64_t pkt_flags;
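The #error above pins LOOK_AHEAD to 8 because the scan probes descriptor status in fixed groups of that size. One probe step, sketched (s[] and the counters are assumed locals):

        /* Sketch only. */
        uint32_t s[LOOK_AHEAD];
        int nb_dd, j;

        for (j = 0; j < LOOK_AHEAD; j++)
                s[j] = rxdp[j].wb.upper.status_error;
        for (nb_dd = 0, j = 0; j < LOOK_AHEAD; j++)
                nb_dd += s[j] & IXGBE_RXDADV_STAT_DD;   /* DD is bit 0 */
        /* only the first nb_dd descriptors are complete and safe to harvest */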
}
static inline int
-ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
+ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq)
{
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *rxep;
+ struct ixgbe_rx_entry *rxep;
struct rte_mbuf *mb;
uint16_t alloc_idx;
uint64_t dma_addr;
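A sketch of the bulk refill these declarations support, using the locals above plus an assumed loop counter; rte_mempool_get_bulk is a real API of this era, and RTE_MBUF_DATA_DMA_ADDR_DEFAULT is the default-DMA-address macro this file defines:

        /* Sketch only. */
        alloc_idx = (uint16_t)(rxq->rx_free_trigger - (rxq->rx_free_thresh - 1));
        rxep = &rxq->sw_ring[alloc_idx];
        if (rte_mempool_get_bulk(rxq->mb_pool, (void **)rxep,
                                 rxq->rx_free_thresh) < 0)
                return -ENOMEM;

        rxdp = &rxq->rx_ring[alloc_idx];
        for (i = 0; i < rxq->rx_free_thresh; ++i) {
                mb = rxep[i].mbuf;
                dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
                rxdp[i].read.hdr_addr = dma_addr;
                rxdp[i].read.pkt_addr = dma_addr;
        }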
}
static inline uint16_t
-ixgbe_rx_fill_from_stage(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
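This helper never touches the hardware ring; it only drains what a previous scan staged. A minimal sketch using the locals above (i is an assumed counter):

        /* Sketch only. */
        nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
        for (i = 0; i < nb_pkts; ++i)
                rx_pkts[i] = stage[i];

        rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
        rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
        return nb_pkts;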
rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct igb_rx_queue *rxq = (struct igb_rx_queue *)rx_queue;
+ struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
uint16_t nb_rx = 0;
/* Any previously recv'd pkts will be returned from the Rx stage */
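The two-stage flow that comment describes, sketched (error handling and the rx_tail bookkeeping of the real function are elided):

        /* Sketch only. */
        if (rxq->rx_nb_avail)
                return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);

        nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);   /* stage up to a burst */
        rxq->rx_next_avail = 0;
        rxq->rx_nb_avail = nb_rx;
        /* ... refill descriptors via ixgbe_rx_alloc_bufs() when the free
         * threshold is crossed ... */
        if (rxq->rx_nb_avail)
                return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
        return 0;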
ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
volatile union ixgbe_adv_rx_desc *rx_ring;
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *sw_ring;
- struct igb_rx_entry *rxe;
+ struct ixgbe_rx_entry *sw_ring;
+ struct ixgbe_rx_entry *rxe;
struct rte_mbuf *rxm;
struct rte_mbuf *nmb;
union ixgbe_adv_rx_desc rxd;
ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
volatile union ixgbe_adv_rx_desc *rx_ring;
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *sw_ring;
- struct igb_rx_entry *rxe;
+ struct ixgbe_rx_entry *sw_ring;
+ struct ixgbe_rx_entry *rxe;
struct rte_mbuf *first_seg;
struct rte_mbuf *last_seg;
struct rte_mbuf *rxm;
}
static void
-ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
+ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
{
unsigned i;
}
static void
-ixgbe_tx_free_swring(struct igb_tx_queue *txq)
+ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
if (txq != NULL &&
txq->sw_ring != NULL)
}
static void
-ixgbe_tx_queue_release(struct igb_tx_queue *txq)
+ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
{
if (txq != NULL && txq->ops != NULL) {
txq->ops->release_mbufs(txq);
ixgbe_tx_queue_release(txq);
}
-/* (Re)set dynamic igb_tx_queue fields to defaults */
+/* (Re)set dynamic ixgbe_tx_queue fields to defaults */
static void
-ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
+ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
static const union ixgbe_adv_tx_desc zeroed_desc = { .read = {
.buffer_addr = 0}};
- struct igb_tx_entry *txe = txq->sw_ring;
+ struct ixgbe_tx_entry *txe = txq->sw_ring;
uint16_t prev, i;
/* Zero out HW ring memory */
* in dev_init by secondary process when attaching to an existing ethdev.
*/
void
-ixgbe_set_tx_function(struct rte_eth_dev *dev, struct igb_tx_queue *txq)
+ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
{
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
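The hunk cuts the condition short; the selection it begins looks roughly like this (ixgbe_xmit_pkts_simple and the vector qualification test are assumed names, following the patterns visible elsewhere in this patch):

        /* Sketch only. */
        if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
                        && (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
        #ifdef RTE_IXGBE_INC_VECTOR
                if (ixgbe_txq_vec_setup(txq) == 0)      /* queue fits the vector path */
                        dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
                else
        #endif
                        dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
        } else {
                /* offloads or multi-segment needed: full-featured path */
                dev->tx_pkt_burst = ixgbe_xmit_pkts;
        }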
const struct rte_eth_txconf *tx_conf)
{
const struct rte_memzone *tz;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
struct ixgbe_hw *hw;
uint16_t tx_rs_thresh, tx_free_thresh;
}
/* First allocate the tx queue data structure */
- txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct igb_tx_queue),
+ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (txq == NULL)
return (-ENOMEM);
/* Allocate software ring */
txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
- sizeof(struct igb_tx_entry) * nb_desc,
+ sizeof(struct ixgbe_tx_entry) * nb_desc,
RTE_CACHE_LINE_SIZE, socket_id);
if (txq->sw_ring == NULL) {
ixgbe_tx_queue_release(txq);
}
static void
-ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
+ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
{
unsigned i;
}
static void
-ixgbe_rx_queue_release(struct igb_rx_queue *rxq)
+ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
{
if (rxq != NULL) {
ixgbe_rx_queue_release_mbufs(rxq);
*/
static inline int
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-check_rx_burst_bulk_alloc_preconditions(struct igb_rx_queue *rxq)
+check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
#else
-check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
+check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
#endif
{
int ret = 0;
return ret;
}
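For reference, the preconditions this function enforces before the bulk-alloc receive path may be used; a sketch consistent with the constants used elsewhere in the driver:

        /* Sketch only: any failed check forces the default receive function. */
        if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST) ||
            !(rxq->rx_free_thresh < rxq->nb_rx_desc) ||
            !((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0) ||
            !(rxq->nb_rx_desc <
              (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST)))
                ret = -EINVAL;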
-/* Reset dynamic igb_rx_queue fields back to defaults */
+/* Reset dynamic ixgbe_rx_queue fields back to defaults */
static void
-ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
+ixgbe_reset_rx_queue(struct ixgbe_rx_queue *rxq)
{
static const union ixgbe_adv_rx_desc zeroed_desc = { .read = {
.pkt_addr = 0}};
struct rte_mempool *mp)
{
const struct rte_memzone *rz;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
struct ixgbe_hw *hw;
int use_def_burst_func = 1;
uint16_t len;
}
/* First allocate the rx queue data structure */
- rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct igb_rx_queue),
+ rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (rxq == NULL)
return (-ENOMEM);
len = nb_desc;
#endif
rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
- sizeof(struct igb_rx_entry) * len,
+ sizeof(struct ixgbe_rx_entry) * len,
RTE_CACHE_LINE_SIZE, socket_id);
if (rxq->sw_ring == NULL) {
ixgbe_rx_queue_release(rxq);
{
#define IXGBE_RXQ_SCAN_INTERVAL 4
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
uint32_t desc = 0;
if (rx_queue_id >= dev->data->nb_rx_queues) {
ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
{
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_queue *rxq = rx_queue;
+ struct ixgbe_rx_queue *rxq = rx_queue;
uint32_t desc;
if (unlikely(offset >= rxq->nb_rx_desc))
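Past this guard the body reduces to an index wrap plus a DD-bit test; sketched:

        /* Sketch only. */
        desc = rxq->rx_tail + offset;
        if (desc >= rxq->nb_rx_desc)
                desc -= rxq->nb_rx_desc;        /* wrap around the ring */

        rxdp = &rxq->rx_ring[desc];
        return !!(rxdp->wb.upper.status_error &
                  rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));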
PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- struct igb_tx_queue *txq = dev->data->tx_queues[i];
+ struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
if (txq != NULL) {
txq->ops->release_mbufs(txq);
txq->ops->reset(txq);
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- struct igb_rx_queue *rxq = dev->data->rx_queues[i];
+ struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
if (rxq != NULL) {
ixgbe_rx_queue_release_mbufs(rxq);
ixgbe_reset_rx_queue(rxq);
}
static int
-ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
+ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
{
- struct igb_rx_entry *rxe = rxq->sw_ring;
+ struct ixgbe_rx_entry *rxe = rxq->sw_ring;
uint64_t dma_addr;
unsigned i;
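A sketch of the population loop that follows, assuming the generic mbuf allocator (the driver of this era uses a thin local helper to the same effect):

        /* Sketch only. */
        for (i = 0; i < rxq->nb_rx_desc; i++) {
                struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mb_pool);

                if (mbuf == NULL)
                        return -ENOMEM; /* queue start fails if the pool is dry */

                dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
                rxq->rx_ring[i].read.hdr_addr = dma_addr;
                rxq->rx_ring[i].read.pkt_addr = dma_addr;
                rxe[i].mbuf = mbuf;
        }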
ixgbe_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
struct rte_pktmbuf_pool_private *mbp_priv;
uint64_t bus_addr;
uint32_t rxctrl;
ixgbe_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
uint64_t bus_addr;
uint32_t hlreg0;
uint32_t txctrl;
ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
- struct igb_rx_queue *rxq;
+ struct ixgbe_tx_queue *txq;
+ struct ixgbe_rx_queue *rxq;
uint32_t txdctl;
uint32_t dmatxctl;
uint32_t rxctrl;
ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
uint32_t rxdctl;
int poll_ms;
ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
uint32_t rxdctl;
int poll_ms;
ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
uint32_t txdctl;
int poll_ms;
ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
uint32_t txdctl;
uint32_t txtdh, txtdt;
int poll_ms;
ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
struct rte_pktmbuf_pool_private *mbp_priv;
uint64_t bus_addr;
uint32_t srrctl, psrtype = 0;
ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
uint64_t bus_addr;
uint32_t txctrl;
uint16_t i;
ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
- struct igb_rx_queue *rxq;
+ struct ixgbe_tx_queue *txq;
+ struct ixgbe_rx_queue *rxq;
uint32_t txdctl;
uint32_t rxdctl;
uint16_t i;
/**
* Structure associated with each descriptor of the RX ring of an RX queue.
*/
-struct igb_rx_entry {
+struct ixgbe_rx_entry {
struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
};
/**
* Structure associated with each descriptor of the TX ring of a TX queue.
*/
-struct igb_tx_entry {
+struct ixgbe_tx_entry {
struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
uint16_t next_id; /**< Index of next descriptor in ring. */
uint16_t last_id; /**< Index of last scattered descriptor. */
/**
* Per-descriptor entry of the TX ring of a TX queue when the vector PMD is
* in use; unlike ixgbe_tx_entry it tracks only the mbuf pointer.
*/
-struct igb_tx_entry_v {
+struct ixgbe_tx_entry_v {
struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
};
/**
* Structure associated with each RX queue.
*/
-struct igb_rx_queue {
+struct ixgbe_rx_queue {
struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
volatile union ixgbe_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
- struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
+ struct ixgbe_rx_entry *sw_ring; /**< address of RX software ring. */
struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
uint64_t mbuf_initializer; /**< value to init mbufs */
/**
* Structure associated with each TX queue.
*/
-struct igb_tx_queue {
+struct ixgbe_tx_queue {
/** TX ring virtual address. */
volatile union ixgbe_adv_tx_desc *tx_ring;
uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
- struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
+ struct ixgbe_tx_entry *sw_ring; /**< virtual address of SW ring. */
volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
uint16_t nb_tx_desc; /**< number of TX descriptors. */
uint16_t tx_tail; /**< current value of TDT reg. */
};
struct ixgbe_txq_ops {
- void (*release_mbufs)(struct igb_tx_queue *txq);
- void (*free_swring)(struct igb_tx_queue *txq);
- void (*reset)(struct igb_tx_queue *txq);
+ void (*release_mbufs)(struct ixgbe_tx_queue *txq);
+ void (*free_swring)(struct ixgbe_tx_queue *txq);
+ void (*reset)(struct ixgbe_tx_queue *txq);
};
/*
* the queue parameters. Used in tx_queue_setup by primary process and then
* in dev_init by secondary process when attaching to an existing ethdev.
*/
-void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct igb_tx_queue *txq);
+void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq);
#ifdef RTE_IXGBE_INC_VECTOR
uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue,
struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
-int ixgbe_txq_vec_setup(struct igb_tx_queue *txq);
-int ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq);
+int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
+int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq);
int ixgbe_rx_vec_condition_check(struct rte_eth_dev *dev);
#endif
#endif
static inline void
-ixgbe_rxq_rearm(struct igb_rx_queue *rxq)
+ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
{
int i;
uint16_t rx_id;
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+ struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
struct rte_mbuf *mb0, *mb1;
__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
RTE_PKTMBUF_HEADROOM);
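hdr_room seeds the SSE rearm: in this mbuf generation buf_addr and buf_physaddr are adjacent 64-bit fields, so one 128-bit load grabs both; the physical address is broadcast, advanced past the headroom, and stored over the descriptor's hdr_addr/pkt_addr pair. Condensed for one mbuf:

        /* Sketch only. */
        __m128i vaddr = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
        __m128i dma   = _mm_unpackhi_epi64(vaddr, vaddr);  /* broadcast buf_physaddr */

        dma = _mm_add_epi64(dma, hdr_room);                /* skip the headroom */
        _mm_store_si128((__m128i *)&rxdp->read, dma);      /* hdr_addr + pkt_addr */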
* - does not support ol_flags for RSS and checksum errors
*/
static inline uint16_t
-_recv_raw_pkts_vec(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts, uint8_t *split_packet)
{
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *sw_ring;
+ struct ixgbe_rx_entry *sw_ring;
uint16_t nb_pkts_recd;
int pos;
uint64_t var;
}
static inline uint16_t
-reassemble_packets(struct igb_rx_queue *rxq, struct rte_mbuf **rx_bufs,
+reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
uint16_t nb_bufs, uint8_t *split_flags)
{
struct rte_mbuf *pkts[RTE_IXGBE_VPMD_RX_BURST]; /*finished pkts*/
ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct igb_rx_queue *rxq = rx_queue;
+ struct ixgbe_rx_queue *rxq = rx_queue;
uint8_t split_flags[RTE_IXGBE_VPMD_RX_BURST] = {0};
/* get some new buffers */
}
static inline int __attribute__((always_inline))
-ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
+ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
{
- struct igb_tx_entry_v *txep;
+ struct ixgbe_tx_entry_v *txep;
uint32_t status;
uint32_t n;
uint32_t i;
* first buffer to free from S/W ring is at index
* tx_next_dd - (tx_rs_thresh-1)
*/
- txep = &((struct igb_tx_entry_v *)txq->sw_ring)[txq->tx_next_dd -
+ txep = &((struct ixgbe_tx_entry_v *)txq->sw_ring)[txq->tx_next_dd -
(n - 1)];
m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
if (likely(m != NULL)) {
}
static inline void __attribute__((always_inline))
-tx_backlog_entry(struct igb_tx_entry_v *txep,
+tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
int i;
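The body is a plain pointer copy; it records the mbufs so ixgbe_tx_free_bufs() above can release them once hardware completes the descriptors:

        /* Sketch only (matches the obvious implementation). */
        for (i = 0; i < (int)nb_pkts; ++i)
                txep[i].mbuf = tx_pkts[i];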
ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- struct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
volatile union ixgbe_adv_tx_desc *txdp;
- struct igb_tx_entry_v *txep;
+ struct ixgbe_tx_entry_v *txep;
uint16_t n, nb_commit, tx_id;
uint64_t flags = DCMD_DTYP_FLAGS;
uint64_t rs = IXGBE_ADVTXD_DCMD_RS|DCMD_DTYP_FLAGS;
tx_id = txq->tx_tail;
txdp = &txq->tx_ring[tx_id];
- txep = &((struct igb_tx_entry_v *)txq->sw_ring)[tx_id];
+ txep = &((struct ixgbe_tx_entry_v *)txq->sw_ring)[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
/* avoid reaching the end of the ring */
txdp = &(txq->tx_ring[tx_id]);
- txep = &(((struct igb_tx_entry_v *)txq->sw_ring)[tx_id]);
+ txep = &(((struct ixgbe_tx_entry_v *)txq->sw_ring)[tx_id]);
}
tx_backlog_entry(txep, tx_pkts, nb_commit);
}
static void
-ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
+ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
{
unsigned i;
- struct igb_tx_entry_v *txe;
+ struct ixgbe_tx_entry_v *txe;
uint16_t nb_free, max_desc;
if (txq->sw_ring != NULL) {
for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
nb_free < max_desc && i != txq->tx_tail;
i = (i + 1) & max_desc) {
- txe = (struct igb_tx_entry_v *)&txq->sw_ring[i];
+ txe = (struct ixgbe_tx_entry_v *)&txq->sw_ring[i];
if (txe->mbuf != NULL)
rte_pktmbuf_free_seg(txe->mbuf);
}
/* reset tx_entry */
for (i = 0; i < txq->nb_tx_desc; i++) {
- txe = (struct igb_tx_entry_v *)&txq->sw_ring[i];
+ txe = (struct ixgbe_tx_entry_v *)&txq->sw_ring[i];
txe->mbuf = NULL;
}
}
}
static void
-ixgbe_tx_free_swring(struct igb_tx_queue *txq)
+ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
if (txq == NULL)
return;
if (txq->sw_ring != NULL) {
- rte_free((struct igb_rx_entry *)txq->sw_ring - 1);
+ rte_free((struct ixgbe_tx_entry_v *)txq->sw_ring - 1);
txq->sw_ring = NULL;
}
}
static void
-ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
+ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
static const union ixgbe_adv_tx_desc zeroed_desc = { .read = {
.buffer_addr = 0} };
- struct igb_tx_entry_v *txe = (struct igb_tx_entry_v *)txq->sw_ring;
+ struct ixgbe_tx_entry_v *txe = (struct ixgbe_tx_entry_v *)txq->sw_ring;
uint16_t i;
/* Zero out HW ring memory */
};
int
-ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq)
+ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
{
uintptr_t p;
struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
return 0;
}
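mb_def exists to prebuild rxq->mbuf_initializer, the 64-bit pattern the vector receive path stores to reset a whole mbuf header at once. A sketch assuming the rearm_data layout of this mbuf generation:

        /* Sketch only. */
        mb_def.nb_segs = 1;
        mb_def.data_off = RTE_PKTMBUF_HEADROOM;
        mb_def.port = rxq->port_id;
        rte_mbuf_refcnt_set(&mb_def, 1);

        p = (uintptr_t)&mb_def.rearm_data;
        rxq->mbuf_initializer = *(uint64_t *)p; /* one store rearms refcnt/nb_segs/... */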
-int ixgbe_txq_vec_setup(struct igb_tx_queue *txq)
+int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
{
if (txq->sw_ring == NULL)
return -1;
/* leave the first one for overflow */
- txq->sw_ring = (struct igb_tx_entry *)
- ((struct igb_tx_entry_v *)txq->sw_ring + 1);
+ txq->sw_ring = (struct ixgbe_tx_entry *)
+ ((struct ixgbe_tx_entry_v *)txq->sw_ring + 1);
txq->ops = &vec_txq_ops;
return 0;
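A design note on the setup just above: ixgbe_txq_vec_setup() advances sw_ring by one ixgbe_tx_entry_v so that entry -1 stays reserved as an overflow slot, which is why ixgbe_tx_free_swring() steps the pointer back by one entry before calling rte_free(). The cast there must name the TX vector entry type; the RX and vector-TX entry structs merely happen to share the same single-pointer layout.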