#include <errno.h>
#include <unistd.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
-static void avp_dev_rx_queue_release(void *rxq);
-static void avp_dev_tx_queue_release(void *txq);
+static void avp_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+static void avp_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
static int avp_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
break;
}
- if ((count < 1) && (retry == 0)) {
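+ /* no response arrived before the retries were exhausted */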
+ if (retry == 0) {
PMD_DRV_LOG(ERR, "Timeout while waiting for a response for %u\n",
request->req_id);
ret = -ETIME;
status);
/* re-enable UIO interrupt handling */
- ret = rte_intr_ack(&pci_dev->intr_handle);
+ ret = rte_intr_ack(pci_dev->intr_handle);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to re-enable UIO interrupts, ret=%d\n",
ret);
return -EINVAL;
/* enable UIO interrupt handling */
- ret = rte_intr_enable(&pci_dev->intr_handle);
+ ret = rte_intr_enable(pci_dev->intr_handle);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to enable UIO interrupts, ret=%d\n",
ret);
RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));
/* disable UIO interrupt handling */
- ret = rte_intr_disable(&pci_dev->intr_handle);
+ ret = rte_intr_disable(pci_dev->intr_handle);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to disable UIO interrupts, ret=%d\n",
ret);
int ret;
/* register a callback handler with UIO for interrupt notifications */
- ret = rte_intr_callback_register(&pci_dev->intr_handle,
+ ret = rte_intr_callback_register(pci_dev->intr_handle,
avp_dev_interrupt_handler,
(void *)eth_dev);
if (ret < 0) {
avp_dev_enable_scattered(struct rte_eth_dev *eth_dev,
struct avp_dev *avp)
{
- unsigned int max_rx_pkt_len;
+ unsigned int max_rx_pktlen;
- max_rx_pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
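+ /* derive the maximum frame length from the MTU plus Ethernet header and CRC */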
+ max_rx_pktlen = eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN;
- if ((max_rx_pkt_len > avp->guest_mbuf_size) ||
- (max_rx_pkt_len > avp->host_mbuf_size)) {
+ if (max_rx_pktlen > avp->guest_mbuf_size ||
+ max_rx_pktlen > avp->host_mbuf_size) {
/*
* If the guest MTU is greater than either the host or guest
* buffers then chained mbufs have to be enabled in the TX
* direction. It is assumed that the application will not need
- * to send packets larger than their max_rx_pkt_len (MRU).
+ * to send packets larger than their MTU.
*/
return 1;
}
PMD_DRV_LOG(DEBUG, "AVP max_rx_pkt_len=(%u,%u) mbuf_size=(%u,%u)\n",
avp->max_rx_pkt_len,
- eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ eth_dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN,
avp->host_mbuf_size,
avp->guest_mbuf_size);
{
struct rte_ether_hdr *eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
- if (likely(_avp_cmp_ether_addr(&avp->ethaddr, &eth->d_addr) == 0)) {
+ if (likely(_avp_cmp_ether_addr(&avp->ethaddr, &eth->dst_addr) == 0)) {
/* allow all packets destined to our address */
return 0;
}
- if (likely(rte_is_broadcast_ether_addr(&eth->d_addr))) {
+ if (likely(rte_is_broadcast_ether_addr(&eth->dst_addr))) {
/* allow all broadcast packets */
return 0;
}
- if (likely(rte_is_multicast_ether_addr(&eth->d_addr))) {
+ if (likely(rte_is_multicast_ether_addr(&eth->dst_addr))) {
/* allow all multicast packets */
return 0;
}
src_offset = 0;
if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
- ol_flags = PKT_RX_VLAN;
+ ol_flags = RTE_MBUF_F_RX_VLAN;
vlan_tci = pkt_buf->vlan_tci;
} else {
ol_flags = 0;
m->port = avp->port_id;
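+ /* translate the host's VLAN strip result into mbuf offload metadata */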
if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
- m->ol_flags = PKT_RX_VLAN;
+ m->ol_flags = RTE_MBUF_F_RX_VLAN;
m->vlan_tci = pkt_buf->vlan_tci;
}
first_buf->nb_segs = count;
first_buf->pkt_len = total_length;
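+ /* request VLAN tag insertion on the host side when the mbuf carries one */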
- if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
first_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
first_buf->vlan_tci = mbuf->vlan_tci;
}
* function; send it truncated to avoid the performance
* hit of having to manage returning the already
* allocated buffer to the free list. This should not
- * happen since the application should have set the
- * max_rx_pkt_len based on its MTU and it should be
+ * happen since the application should not send
+ * packets larger than its MTU and it should be
* policing its own packet sizes.
*/
txq->errors++;
pkt_buf->nb_segs = 1;
pkt_buf->next = NULL;
- if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
pkt_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
pkt_buf->vlan_tci = m->vlan_tci;
}
}
static void
-avp_dev_rx_queue_release(void *rx_queue)
+avp_dev_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
- struct avp_queue *rxq = (struct avp_queue *)rx_queue;
- struct avp_dev *avp = rxq->avp;
- struct rte_eth_dev_data *data = avp->dev_data;
- unsigned int i;
-
- for (i = 0; i < avp->num_rx_queues; i++) {
- if (data->rx_queues[i] == rxq) {
- rte_free(data->rx_queues[i]);
- data->rx_queues[i] = NULL;
- }
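+ /* release the receive queue private data for this queue index */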
+ if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
+ rte_free(eth_dev->data->rx_queues[rx_queue_id]);
+ eth_dev->data->rx_queues[rx_queue_id] = NULL;
}
}
static void
-avp_dev_tx_queue_release(void *tx_queue)
+avp_dev_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
- struct avp_queue *txq = (struct avp_queue *)tx_queue;
- struct avp_dev *avp = txq->avp;
- struct rte_eth_dev_data *data = avp->dev_data;
- unsigned int i;
-
- for (i = 0; i < avp->num_tx_queues; i++) {
- if (data->tx_queues[i] == txq) {
- rte_free(data->tx_queues[i]);
- data->tx_queues[i] = NULL;
- }
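+ /* release the transmit queue private data for this queue index */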
+ if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
+ rte_free(eth_dev->data->tx_queues[tx_queue_id]);
+ eth_dev->data->tx_queues[tx_queue_id] = NULL;
}
}
/* Setup required number of queues */
_avp_set_queue_counts(eth_dev);
- mask = (ETH_VLAN_STRIP_MASK |
- ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK);
+ mask = (RTE_ETH_VLAN_STRIP_MASK |
+ RTE_ETH_VLAN_FILTER_MASK |
+ RTE_ETH_VLAN_EXTEND_MASK);
ret = avp_vlan_offload_set(eth_dev, mask);
if (ret < 0) {
PMD_DRV_LOG(ERR, "VLAN offload set failed by host, ret=%d\n",
struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct rte_eth_link *link = &eth_dev->data->dev_link;
- link->link_speed = ETH_SPEED_NUM_10G;
- link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link->link_speed = RTE_ETH_SPEED_NUM_10G;
+ link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
link->link_status = !!(avp->flags & AVP_F_LINKUP);
return -1;
dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+ dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
}
return 0;
struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
uint64_t offloads = dev_conf->rxmode.offloads;
- if (mask & ETH_VLAN_STRIP_MASK) {
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
- if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
else
avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
}
}
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
}
- if (mask & ETH_VLAN_EXTEND_MASK) {
- if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
}
RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map);
-RTE_LOG_REGISTER(avp_logtype_driver, pmd.net.avp.driver, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(avp_logtype_driver, driver, NOTICE);