/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include <rte_string_fns.h>
#include "e1000_logs.h"
-#include "igb/e1000_api.h"
+#include "e1000/e1000_api.h"
#include "e1000_ethdev.h"
static inline struct rte_mbuf *
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
(uint64_t) ((mb)->buf_physaddr + \
(uint64_t) ((char *)((mb)->pkt.data) - \
- (char *)(mb)->buf_addr))
+ (char *)(mb)->buf_addr))
#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
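/*
 * Illustrative note (not part of this patch): for a freshly allocated mbuf
 * whose pkt.data still sits RTE_PKTMBUF_HEADROOM bytes past buf_addr, both
 * macros resolve to the same bus address:
 *
 *     dma = (mb)->buf_physaddr +
 *           (uint64_t)((char *)(mb)->pkt.data - (char *)(mb)->buf_addr);
 *
 * which then equals (mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM.
 */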
enum igb_advctx_num {
IGB_CTX_0 = 0, /**< CTX0 */
IGB_CTX_1 = 1, /**< CTX1 */
- IGB_CTX_NUM = 2, /**< CTX NUM */
+ IGB_CTX_NUM = 2, /**< CTX_NUM */
};
/**
struct igb_advctx_info {
uint16_t flags; /**< ol_flags related to context build. */
uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */
- uint32_t vlan_macip_lens; /**< vlan, mac.ip length. */
+ union rte_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
};
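/*
 * Background sketch (an assumption about rte_mbuf.h in this tree, shown only
 * to explain the vlan_macip_lens change): union rte_vlan_macip packs the same
 * 32 bits the removed shift expression built by hand, roughly:
 *
 *     union rte_vlan_macip {
 *         uint32_t data;
 *         struct {
 *             uint16_t l3_len:9;     L3 (IP) header length
 *             uint16_t l2_len:7;     L2 (MAC) header length
 *             uint16_t vlan_tci;     VLAN Tag Control Identifier
 *         } f;
 *     };
 *
 * so .data reads back as (vlan_tci << 16) | (l2_len << 9) | l3_len, matching
 * the E1000_ADVTXD_MACLEN_SHIFT layout expected by the TX context descriptor.
 */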
/**
volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
uint32_t txd_type; /**< Device-specific TXD type */
uint16_t nb_tx_desc; /**< number of TX descriptors. */
- uint16_t tx_tail; /**< Current value of TDT register. */
- uint16_t tx_head; /**< Index of first used TX descriptor. */
+ uint16_t tx_tail; /**< Current value of TDT register. */
+ uint16_t tx_head;
+ /**< Index of first used TX descriptor. */
uint16_t queue_id; /**< TX queue index. */
uint8_t port_id; /**< Device port identifier. */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold register. */
- uint32_t ctx_curr; /**< Current used hardware descriptor. */
- uint32_t ctx_start;/**< Start context position for transmit queue. */
- struct igb_advctx_info ctx_cache[IGB_CTX_NUM]; /**< Hardware context history.*/
+ uint32_t ctx_curr;
+ /**< Current used hardware descriptor. */
+ uint32_t ctx_start;
+ /**< Start context position for transmit queue. */
+ struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
+ /**< Hardware context history. */
};
#if 1
txq->ctx_cache[ctx_curr].flags = ol_flags;
txq->ctx_cache[ctx_curr].cmp_mask = cmp_mask;
- txq->ctx_cache[ctx_curr].vlan_macip_lens = vlan_macip_lens & cmp_mask;
+ txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
+ vlan_macip_lens & cmp_mask;
ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
{
/* If match with the current context */
if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
- (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens ==
+ (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
(txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
return txq->ctx_curr;
}
/* If match with the second context */
txq->ctx_curr ^= 1;
if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
- (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens ==
+ (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
(txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
return txq->ctx_curr;
}
}
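/*
 * Behavioural note (summary, not new code): the queue keeps IGB_CTX_NUM
 * (i.e. two) cached hardware contexts and toggles ctx_curr between them.
 * what_advctx_update() returns IGB_CTX_NUM when neither cached entry matches,
 * which the transmit path below treats as "build a new context descriptor":
 *
 *     ctx = what_advctx_update(txq, tx_ol_req, vlan_macip_lens);
 *     new_ctx = (ctx == IGB_CTX_NUM);   cache miss -> allocate a descriptor
 *     ctx = txq->ctx_curr;              slot used by the matched/new context
 */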
uint16_t
-eth_igb_xmit_pkts(struct igb_tx_queue *txq, struct rte_mbuf **tx_pkts,
+eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
+ struct igb_tx_queue *txq;
struct igb_tx_entry *sw_ring;
struct igb_tx_entry *txe, *txn;
volatile union e1000_adv_tx_desc *txr;
uint16_t tx_last;
uint16_t nb_tx;
uint16_t tx_ol_req;
- uint32_t new_ctx;
- uint32_t ctx;
+ uint32_t new_ctx = 0;
+ uint32_t ctx = 0;
uint32_t vlan_macip_lens;
+ txq = tx_queue;
sw_ring = txq->sw_ring;
txr = txq->tx_ring;
tx_id = txq->tx_tail;
tx_last = (uint16_t) (tx_id + tx_pkt->pkt.nb_segs - 1);
ol_flags = tx_pkt->ol_flags;
- vlan_macip_lens = (tx_pkt->pkt.vlan_tci << 16) | (tx_pkt->pkt.l2_len << E1000_ADVTXD_MACLEN_SHIFT) | tx_pkt->pkt.l3_len;
+ vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
tx_ol_req = (ol_flags & PKT_TX_OFFLOAD_MASK);
/* If a Context Descriptor needs to be built. */
if (tx_ol_req) {
- ctx = what_advctx_update(txq, tx_ol_req,vlan_macip_lens);
+ ctx = what_advctx_update(txq, tx_ol_req,
+ vlan_macip_lens);
/* Only allocate a context descriptor if required. */
new_ctx = (ctx == IGB_CTX_NUM);
ctx = txq->ctx_curr;
}
uint16_t
-eth_igb_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
+ struct igb_rx_queue *rxq;
volatile union e1000_adv_rx_desc *rx_ring;
volatile union e1000_adv_rx_desc *rxdp;
struct igb_rx_entry *sw_ring;
nb_rx = 0;
nb_hold = 0;
+ rxq = rx_queue;
rx_id = rxq->rx_tail;
rx_ring = rxq->rx_ring;
sw_ring = rxq->sw_ring;
rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->pkt.vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ rxm->pkt.vlan_macip.f.vlan_tci =
+ rte_le_to_cpu_16(rxd.wb.upper.vlan);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
pkt_flags = (pkt_flags |
}
uint16_t
-eth_igb_recv_scattered_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
+ struct igb_rx_queue *rxq;
volatile union e1000_adv_rx_desc *rx_ring;
volatile union e1000_adv_rx_desc *rxdp;
struct igb_rx_entry *sw_ring;
nb_rx = 0;
nb_hold = 0;
+ rxq = rx_queue;
rx_id = rxq->rx_tail;
rx_ring = rxq->rx_ring;
sw_ring = rxq->sw_ring;
* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
* set in the pkt_flags field.
*/
- first_seg->pkt.vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ first_seg->pkt.vlan_macip.f.vlan_tci =
+ rte_le_to_cpu_16(rxd.wb.upper.vlan);
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
pkt_flags = (pkt_flags | rx_desc_status_to_pkt_flags(staterr));
if (mz)
return mz;
- return rte_memzone_reserve_aligned(z_name, (uint64_t)ring_size,
+ return rte_memzone_reserve_aligned(z_name, ring_size,
socket_id, 0, IGB_ALIGN);
}
static void
igb_tx_queue_release(struct igb_tx_queue *txq)
{
- igb_tx_queue_release_mbufs(txq);
- rte_free(txq->sw_ring);
- rte_free(txq);
+ if (txq != NULL) {
+ igb_tx_queue_release_mbufs(txq);
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+ }
}
-int
-igb_dev_tx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues)
+void
+eth_igb_tx_queue_release(void *txq)
{
- uint16_t i, old_nb_queues = dev->data->nb_tx_queues;
- struct igb_tx_queue **txq;
-
- if (dev->data->tx_queues == NULL) {
- dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
- sizeof(struct igb_tx_queue *) * nb_queues,
- CACHE_LINE_SIZE);
- if (dev->data->tx_queues == NULL) {
- dev->data->nb_tx_queues = 0;
- return -ENOMEM;
- }
- } else {
- if (nb_queues < old_nb_queues)
- for (i = nb_queues; i < old_nb_queues; i++)
- igb_tx_queue_release(dev->data->tx_queues[i]);
-
- if (nb_queues != old_nb_queues) {
- txq = rte_realloc(dev->data->tx_queues,
- sizeof(struct igb_tx_queue *) * nb_queues,
- CACHE_LINE_SIZE);
- if (txq == NULL)
- return -ENOMEM;
- else
- dev->data->tx_queues = txq;
- if (nb_queues > old_nb_queues)
- memset(&(txq[old_nb_queues]), 0,
- sizeof(struct igb_tx_queue *) *
- (nb_queues - old_nb_queues));
- }
- }
- dev->data->nb_tx_queues = nb_queues;
-
- return 0;
+ igb_tx_queue_release(txq);
}
static void
if (tx_conf->tx_free_thresh != 0)
RTE_LOG(WARNING, PMD,
"The tx_free_thresh parameter is not "
- "used for the 1G driver.");
+ "used for the 1G driver.\n");
if (tx_conf->tx_rs_thresh != 0)
RTE_LOG(WARNING, PMD,
"The tx_rs_thresh parameter is not "
- "used for the 1G driver.");
+ "used for the 1G driver.\n");
if (tx_conf->tx_thresh.wthresh == 0)
RTE_LOG(WARNING, PMD,
"To improve 1G driver performance, consider setting "
- "the TX WTHRESH value to 4, 8, or 16.");
+ "the TX WTHRESH value to 4, 8, or 16.\n");
/* Free memory prior to re-allocation if needed */
if (dev->data->tx_queues[queue_idx] != NULL)
txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
- size = sizeof(union e1000_adv_tx_desc) * nb_desc;
-
/* Allocate software ring */
txq->sw_ring = rte_zmalloc("txq->sw_ring",
sizeof(struct igb_tx_entry) * nb_desc,
static void
igb_rx_queue_release(struct igb_rx_queue *rxq)
{
- igb_rx_queue_release_mbufs(rxq);
- rte_free(rxq->sw_ring);
- rte_free(rxq);
+ if (rxq != NULL) {
+ igb_rx_queue_release_mbufs(rxq);
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+ }
}
-int
-igb_dev_rx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues)
+void
+eth_igb_rx_queue_release(void *rxq)
{
- uint16_t i, old_nb_queues = dev->data->nb_rx_queues;
- struct igb_rx_queue **rxq;
-
- if (dev->data->rx_queues == NULL) {
- dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
- sizeof(struct igb_rx_queue *) * nb_queues,
- CACHE_LINE_SIZE);
- if (dev->data->rx_queues == NULL) {
- dev->data->nb_rx_queues = 0;
- return -ENOMEM;
- }
- } else {
- for (i = nb_queues; i < old_nb_queues; i++) {
- igb_rx_queue_release(dev->data->rx_queues[i]);
- dev->data->rx_queues[i] = NULL;
- }
- if (nb_queues != old_nb_queues) {
- rxq = rte_realloc(dev->data->rx_queues,
- sizeof(struct igb_rx_queue *) * nb_queues,
- CACHE_LINE_SIZE);
- if (rxq == NULL)
- return -ENOMEM;
- else
- dev->data->rx_queues = rxq;
- if (nb_queues > old_nb_queues)
- memset(&(rxq[old_nb_queues]), 0,
- sizeof(struct igb_rx_queue *) *
- (nb_queues - old_nb_queues));
- }
- }
- dev->data->nb_rx_queues = nb_queues;
-
- return 0;
+ igb_rx_queue_release(rxq);
}
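/*
 * Usage sketch (an assumption about the surrounding driver code, not shown in
 * this patch excerpt): the void-pointer burst and release prototypes
 * introduced above are meant to be hooked into the generic ethdev tables,
 * roughly:
 *
 *     static struct eth_dev_ops eth_igb_ops = {
 *         ...
 *         .rx_queue_release = eth_igb_rx_queue_release,
 *         .tx_queue_release = eth_igb_tx_queue_release,
 *         ...
 *     };
 *
 *     dev->rx_pkt_burst = eth_igb_recv_pkts;  (or eth_igb_recv_scattered_pkts)
 *     dev->tx_pkt_burst = eth_igb_xmit_pkts;
 */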
static void
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
- igb_tx_queue_release_mbufs(txq);
- igb_reset_tx_queue(txq, dev);
+ if (txq != NULL) {
+ igb_tx_queue_release_mbufs(txq);
+ igb_reset_tx_queue(txq, dev);
+ }
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- igb_rx_queue_release_mbufs(rxq);
- igb_reset_rx_queue(rxq);
+ if (rxq != NULL) {
+ igb_rx_queue_release_mbufs(rxq);
+ igb_reset_rx_queue(rxq);
+ }
}
}
/* Allocate buffers for descriptor rings and set up queue */
ret = igb_alloc_rx_queue_mbufs(rxq);
- if (ret) {
- igb_dev_clear_queues(dev);
+ if (ret)
return ret;
- }
/*
* Reset crc_len in case it was changed after queue setup by a
E1000_SRRCTL_BSIZEPKT_MASK) <<
E1000_SRRCTL_BSIZEPKT_SHIFT);
- if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size){
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len + VLAN_TAG_SIZE
+ > buf_size) {
dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
dev->data->scattered_rx = 1;
}
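/*
 * Note on the check above: VLAN_TAG_SIZE (4 bytes) is added because a
 * received frame may carry a VLAN tag on top of max_rx_pkt_len; when that
 * total no longer fits in a single RX buffer the driver has to fall back to
 * the scattered receive path.
 */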
if (dev->data->dev_conf.rxmode.hw_strip_crc) {
rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
- /* set STRCRC bit in all queues for Powerville */
- if (hw->mac.type == e1000_i350) {
+ /* set STRCRC bit in all queues for Powerville/Springville */
+ if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i210) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- uint32_t dvmolr = E1000_READ_REG(hw, E1000_DVMOLR(i));
+ uint32_t dvmolr = E1000_READ_REG(hw,
+ E1000_DVMOLR(i));
dvmolr |= E1000_DVMOLR_STRCRC;
E1000_WRITE_REG(hw, E1000_DVMOLR(i), dvmolr);
}
}
-
} else {
rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
- /* clear STRCRC bit in all queues for Powerville */
- if (hw->mac.type == e1000_i350) {
+ /* clear STRCRC bit in all queues for Powerville/Springville */
+ if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i210) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- uint32_t dvmolr = E1000_READ_REG(hw, E1000_DVMOLR(i));
+ uint32_t dvmolr = E1000_READ_REG(hw,
+ E1000_DVMOLR(i));
dvmolr &= ~E1000_DVMOLR_STRCRC;
E1000_WRITE_REG(hw, E1000_DVMOLR(i), dvmolr);
}