static int ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int ixgbevf_dev_start(struct rte_eth_dev *dev);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
+static void ixgbevf_dev_close(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
-static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
/*
- * * Define VF Stats MACRO for Non "cleared on read" register
- * */
+ * Define VF stats macro for registers that are not "cleared on read"
+ */
#define UPDATE_VF_STAT(reg, last, cur) \
{ \
u32 latest = IXGBE_READ_REG(hw, reg); \
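Because these VF counters are not cleared on read, the macro keeps a last snapshot and only accumulates the delta into the running counter. The rest of the macro body is truncated in this hunk; a minimal sketch of how such an accumulator typically completes (the two lines after the register read are an assumption, not the verbatim source):

#define UPDATE_VF_STAT(reg, last, cur)                          \
{                                                               \
	u32 latest = IXGBE_READ_REG(hw, reg);                   \
	cur += (latest - last);  /* unsigned delta, safe across 32-bit wrap */ \
	last = latest;           /* keep snapshot for the next poll */         \
}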
*/
static struct rte_pci_id pci_id_ixgbe_map[] = {
-#undef RTE_LIBRTE_IGB_PMD
-#define RTE_PCI_DEV_ID_DECL(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
{ .vendor_id = 0, /* sentinel */ },
* The set of PCI devices this driver supports (for 82599 VF)
*/
static struct rte_pci_id pci_id_ixgbevf_map[] = {
-{
- .vendor_id = PCI_VENDOR_ID_INTEL,
- .device_id = IXGBE_DEV_ID_82599_VF,
- .subsystem_vendor_id = PCI_ANY_ID,
- .subsystem_device_id = PCI_ANY_ID,
-},
+
+#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#include "rte_pci_dev_ids.h"
{ .vendor_id = 0, /* sentinel */ },
+
};
static struct eth_dev_ops ixgbevf_eth_dev_ops = {
.link_update = ixgbe_dev_link_update,
.stats_get = ixgbevf_dev_stats_get,
.stats_reset = ixgbevf_dev_stats_reset,
- .dev_close = ixgbevf_dev_stop,
-
+ .dev_close = ixgbevf_dev_close,
.dev_infos_get = ixgbe_dev_info_get,
.vlan_filter_set = ixgbevf_vlan_filter_set,
.vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
default:
PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
- return (diag);
+ return (-EIO);
}
PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x mac.type=%s\n",
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int err, link_up = 0, negotiate = 0;
uint32_t speed = 0;
+ int mask = 0;
PMD_INIT_FUNC_TRACE();
err = ixgbe_dev_rx_init(dev);
if (err) {
PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n");
- return err;
+ goto error;
}
ixgbe_dev_rxtx_start(dev);
default:
PMD_INIT_LOG(ERR, "Invalid link_speed (%u) for port %u\n",
dev->data->dev_conf.link_speed, dev->data->port_id);
- return -EINVAL;
+ goto error;
}
err = ixgbe_setup_link(hw, speed, negotiate, link_up);
{
struct rte_eth_conf* conf = &dev->data->dev_conf;
+ PMD_INIT_LOG(DEBUG, "\nConfigured Virtual Function port id: %d\n",
+ dev->data->port_id);
/*
* VF has no ability to enable/disable HW CRC
static int
ixgbevf_dev_start(struct rte_eth_dev *dev)
{
- int err = 0;
+ int err, mask = 0;
+
PMD_INIT_LOG(DEBUG, "ixgbevf_dev_start");
ixgbevf_dev_tx_init(dev);
+
+ /* This can fail when allocating mbufs for descriptor rings */
err = ixgbevf_dev_rx_init(dev);
- if(err){
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)\n", err);
ixgbe_dev_clear_queues(dev);
- PMD_INIT_LOG(ERR,"Unable to initialize RX hardware\n");
return err;
}
PMD_INIT_LOG(DEBUG, "ixgbevf_dev_stop");
- ixgbe_reset_hw(hw);
- hw->adapter_stopped = 0;
+ hw->adapter_stopped = TRUE;
ixgbe_stop_adapter(hw);
+
+ /*
+ * Clear what we set, but we still keep shadow_vfta to
+ * restore after device starts
+ */
+ ixgbevf_set_vfta_all(dev, 0);
+
+ ixgbe_dev_clear_queues(dev);
+}
+
+static void
+ixgbevf_dev_close(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_LOG(DEBUG, "ixgbevf_dev_close");
+
+ ixgbe_reset_hw(hw);
+
+ ixgbevf_dev_stop(dev);
+
/* reprogram the RAR[0] in case user changed it. */
ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}
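With .dev_close now pointing at ixgbevf_dev_close() instead of ixgbevf_dev_stop(), an application tears a VF port down through the usual ethdev calls. A minimal sketch (the helper name and port_id value are examples, not part of the patch):

#include <rte_ethdev.h>

static void
vf_port_teardown(uint8_t port_id)
{
	rte_eth_dev_stop(port_id);   /* dispatches to .dev_stop  -> ixgbevf_dev_stop()  */
	rte_eth_dev_close(port_id);  /* dispatches to .dev_close -> ixgbevf_dev_close() */
}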
/**
* Structure to check if new context need be built
*/
+
struct ixgbe_advctx_info {
uint16_t flags; /**< ol_flags for context build. */
uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */
- uint32_t vlan_macip_lens; /**< vlan, mac ip length. */
+ union rte_vlan_macip vlan_macip_lens; /**< vlan, mac ip length. */
};
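The new field type packs the VLAN tag and the L2/L3 header lengths into a single 32-bit word so the context cache can still mask and compare it as one value via .data. The union lives in rte_mbuf.h roughly along these lines; the exact field order and bit widths below are an assumption, not the authoritative definition:

/* Sketch of the rte_mbuf.h union this change relies on (assumed layout). */
union rte_vlan_macip {
	uint32_t data;               /* raw view, used for masking/compare */
	struct {
		uint16_t l3_len:9;   /* L3 (IP) header length  */
		uint16_t l2_len:7;   /* L2 (MAC) header length */
		uint16_t vlan_tci;   /* VLAN tag control identifier */
	} f;
};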
/**
txq->ctx_cache[ctx_idx].flags = ol_flags;
txq->ctx_cache[ctx_idx].cmp_mask = cmp_mask;
- txq->ctx_cache[ctx_idx].vlan_macip_lens = vlan_macip_lens & cmp_mask;
+ txq->ctx_cache[ctx_idx].vlan_macip_lens.data =
+ vlan_macip_lens & cmp_mask;
ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
{
/* If match with the current used context */
if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
- (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens ==
+ (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
(txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
return txq->ctx_curr;
}
/* What if match with the next context */
txq->ctx_curr ^= 1;
if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
- (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens ==
+ (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
(txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
return txq->ctx_curr;
}
* are needed for offload functionality.
*/
ol_flags = tx_pkt->ol_flags;
- vlan_macip_lens = tx_pkt->pkt.vlan_tci << 16 |
- tx_pkt->pkt.l2_len << IXGBE_ADVTXD_MACLEN_SHIFT |
- tx_pkt->pkt.l3_len;
+ vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
/* If hardware offload required */
tx_ol_req = ol_flags & PKT_TX_OFFLOAD_MASK;
if (tx_ol_req) {
/* If new context need be built or reuse the exist ctx. */
- ctx = what_advctx_update(txq, tx_ol_req, vlan_macip_lens);
+ ctx = what_advctx_update(txq, tx_ol_req,
+ vlan_macip_lens);
/* Only allocate context descriptor if required*/
new_ctx = (ctx == IXGBE_CTX_NUM);
ctx = txq->ctx_curr;
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->pkt.vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ rxm->pkt.vlan_macip.f.vlan_tci =
+ rte_le_to_cpu_16(rxd.wb.upper.vlan);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
pkt_flags = (pkt_flags | rx_desc_status_to_pkt_flags(staterr));
* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
* set in the pkt_flags field.
*/
- first_seg->pkt.vlan_tci =
+ first_seg->pkt.vlan_macip.f.vlan_tci =
rte_le_to_cpu_16(rxd.wb.upper.vlan);
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
RTE_LOG(ERR, PMD,
"tx_rs_thresh must be less than the "
"number of TX descriptors minus 2. "
- "(tx_rs_thresh=%u port=%d queue=%d)",
+ "(tx_rs_thresh=%u port=%d queue=%d)\n",
tx_rs_thresh, dev->data->port_id, queue_idx);
return -(EINVAL);
}
"tx_rs_thresh must be less than the "
"tx_free_thresh must be less than the "
"number of TX descriptors minus 3. "
- "(tx_free_thresh=%u port=%d queue=%d)",
+ "(tx_free_thresh=%u port=%d queue=%d)\n",
tx_free_thresh, dev->data->port_id, queue_idx);
return -(EINVAL);
}
*/
if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
RTE_LOG(ERR, PMD,
- "TX WTHRESH should be set to 0 if "
+ "TX WTHRESH must be set to 0 if "
"tx_rs_thresh is greater than 1. "
- "TX WTHRESH will be set to 0. "
- "(tx_rs_thresh=%u port=%d queue=%d)",
+ "(tx_rs_thresh=%u port=%d queue=%d)\n",
tx_rs_thresh,
dev->data->port_id, queue_idx);
return -(EINVAL);
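Taken together, these checks require tx_rs_thresh < nb_desc - 2, tx_free_thresh < nb_desc - 3, and wthresh == 0 whenever tx_rs_thresh > 1. An illustrative per-queue configuration that passes them (all values are examples, not recommendations):

/* Example values only; any combination satisfying the checks above works. */
static const struct rte_eth_txconf tx_conf = {
	.tx_thresh = {
		.pthresh = 32,
		.hthresh = 0,
		.wthresh = 0,        /* must stay 0 because tx_rs_thresh > 1 */
	},
	.tx_rs_thresh   = 32,        /* < nb_desc - 2, e.g. with nb_desc = 512 */
	.tx_free_thresh = 32,        /* < nb_desc - 3 */
};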
static void
ixgbe_rx_queue_release(struct igb_rx_queue *rxq)
{
+ if (rxq != NULL) {
ixgbe_rx_queue_release_mbufs(rxq);
rte_free(rxq->sw_ring);
rte_free(rxq);
+ }
}
void
return ret;
}
-/* (Re)set dynamic igb_rx_queue fields to defaults */
+/* Reset dynamic igb_rx_queue fields back to defaults */
static void
ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
{
unsigned i;
+ uint16_t len;
/*
* By default, the Rx queue setup function allocates enough memory for
const struct rte_memzone *rz;
struct igb_rx_queue *rxq;
struct ixgbe_hw *hw;
+ int use_def_burst_func = 1;
+ uint16_t len;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
rxq->port_id = dev->data->port_id;
rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
ETHER_CRC_LEN);
+ rxq->drop_en = rx_conf->rx_drop_en;
/*
- * Allocate TX ring hardware descriptors. A memzone large enough to
+ * Allocate RX ring hardware descriptors. A memzone large enough to
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
len = nb_desc;
#endif
rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
- sizeof(struct igb_rx_entry) * nb_desc,
+ sizeof(struct igb_rx_entry) * len,
CACHE_LINE_SIZE);
if (rxq->sw_ring == NULL) {
ixgbe_rx_queue_release(rxq);
{
unsigned i;
+ PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct igb_tx_queue *txq = dev->data->tx_queues[i];
+ if (txq != NULL) {
ixgbe_tx_queue_release_mbufs(txq);
ixgbe_reset_tx_queue(txq);
}
+ }
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct igb_rx_queue *rxq = dev->data->rx_queues[i];
+ if (rxq != NULL) {
ixgbe_rx_queue_release_mbufs(rxq);
ixgbe_reset_rx_queue(rxq);
}
+ }
}
/*********************************************************************
(unsigned) rxq->queue_id);
return (-ENOMEM);
}
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->type = RTE_MBUF_PKT;
+ mbuf->pkt.next = NULL;
+ mbuf->pkt.data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+ mbuf->pkt.nb_segs = 1;
+ mbuf->pkt.in_port = rxq->port_id;
+
dma_addr =
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
rxd = &rxq->rx_ring[i];
#endif
srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ /* Set if packets are dropped when no descriptors available */
+ if (rxq->drop_en)
+ srrctl |= IXGBE_SRRCTL_DROP_EN;
+
/*
* Configure the RX buffer size in the BSIZEPACKET field of
* the SRRCTL register of the queue.
buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
IXGBE_SRRCTL_BSIZEPKT_SHIFT);
- if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size){
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ IXGBE_RX_BUF_THRESHOLD > buf_size) {
dev->data->scattered_rx = 1;
dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
}
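The new rxq->drop_en flag comes from the per-queue RX configuration and, as the hunks above show, is turned into SRRCTL.DROP_EN so the NIC drops packets rather than stalling the queue when no free descriptors remain. An illustrative setup (field values are examples only):

/* Example values only; rx_drop_en = 1 sets IXGBE_SRRCTL_DROP_EN above. */
static const struct rte_eth_rxconf rx_conf = {
	.rx_thresh = {
		.pthresh = 8,
		.hthresh = 8,
		.wthresh = 4,
	},
	.rx_drop_en = 1,
};

The structure is then passed to rte_eth_rx_queue_setup() for the queue in question, as usual.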
case ixgbe_mac_82598EB:
txctrl = IXGBE_READ_REG(hw,
IXGBE_DCA_TXCTRL(i));
- txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i),
txctrl);
break;
default:
txctrl = IXGBE_READ_REG(hw,
IXGBE_DCA_TXCTRL_82599(i));
- txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i),
txctrl);
break;
/* Allocate buffers for descriptor rings */
ret = ixgbe_alloc_rx_queue_mbufs(rxq);
- if (ret){
- return -1;
- }
+ if (ret)
+ return ret;
+
/* Setup the Base and Length of the Rx Descriptor Rings */
bus_addr = rxq->rx_ring_phys_addr;
#endif
srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ /* Set if packets are dropped when no descriptors available */
+ if (rxq->drop_en)
+ srrctl |= IXGBE_SRRCTL_DROP_EN;
+
/*
* Configure the RX buffer size in the BSIZEPACKET field of
* the SRRCTL register of the queue.
dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
}
}
+
return 0;
}
*/
txctrl = IXGBE_READ_REG(hw,
IXGBE_VFDCA_TXCTRL(i));
- txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
txctrl);
}