#define AVP_MAX_RX_BURST 64
#define AVP_MAX_TX_BURST 64
#define AVP_MAX_MAC_ADDRS 1
-#define AVP_MIN_RX_BUFSIZE ETHER_MIN_LEN
+#define AVP_MIN_RX_BUFSIZE RTE_ETHER_MIN_LEN
/*
struct avp_dev {
uint32_t magic; /**< Memory validation marker */
uint64_t device_id; /**< Unique system identifier */
- struct ether_addr ethaddr; /**< Host specified MAC address */
+ struct rte_ether_addr ethaddr; /**< Host specified MAC address */
struct rte_eth_dev_data *dev_data;
/**< Back pointer to ethernet device data */
volatile uint32_t flags; /**< Device operational flags */
avp->host_features = host_info->features;
rte_spinlock_init(&avp->lock);
memcpy(&avp->ethaddr.addr_bytes[0],
- host_info->ethaddr, ETHER_ADDR_LEN);
+ host_info->ethaddr, RTE_ETHER_ADDR_LEN);
/* adjust max values to not exceed our max */
avp->max_tx_queues =
RTE_MIN(host_info->max_tx_queues, RTE_AVP_MAX_QUEUES);
eth_dev->dev_ops = &avp_eth_dev_ops;
eth_dev->rx_pkt_burst = &avp_recv_pkts;
eth_dev->tx_pkt_burst = &avp_xmit_pkts;
+ /* Let rte_eth_dev_close() release the port resources */
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
/*
}
/* Allocate memory for storing MAC addresses */
- eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev", ETHER_ADDR_LEN, 0);
+ eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev",
+ RTE_ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses\n",
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
return -ENOMEM;
}
/* Get a mac from device config */
- ether_addr_copy(&avp->ethaddr, ð_dev->data->mac_addrs[0]);
+ rte_ether_addr_copy(&avp->ethaddr, ð_dev->data->mac_addrs[0]);
return 0;
}
+/*
+ * Uninit hook for the AVP ethdev.  Only the primary process owns the
+ * device resources, so secondary processes are rejected with -EPERM.
+ * All teardown is delegated to avp_dev_close(); the mac_addrs array
+ * itself is released later by rte_eth_dev_close() because the
+ * RTE_ETH_DEV_CLOSE_REMOVE flag is set at init time.
+ */
static int
eth_avp_dev_uninit(struct rte_eth_dev *eth_dev)
{
- int ret;
-
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return -EPERM;
if (eth_dev->data == NULL)
return 0;
- ret = avp_dev_disable_interrupts(eth_dev);
- if (ret != 0) {
- PMD_DRV_LOG(ERR, "Failed to disable interrupts, ret=%d\n", ret);
- return ret;
- }
-
- if (eth_dev->data->mac_addrs != NULL) {
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
- }
+ /* NOTE(review): assumes avp_dev_close() also performs the interrupt
+  * disable that was previously done inline here — confirm against the
+  * full avp_dev_close() implementation.
+  */
+ avp_dev_close(eth_dev);
return 0;
}
}
static inline int
-_avp_cmp_ether_addr(struct ether_addr *a, struct ether_addr *b)
+_avp_cmp_ether_addr(struct rte_ether_addr *a, struct rte_ether_addr *b)
{
uint16_t *_a = (uint16_t *)&a->addr_bytes[0];
uint16_t *_b = (uint16_t *)&b->addr_bytes[0];
static inline int
_avp_mac_filter(struct avp_dev *avp, struct rte_mbuf *m)
{
- struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ struct rte_ether_hdr *eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
if (likely(_avp_cmp_ether_addr(&avp->ethaddr, ð->d_addr) == 0)) {
/* allow all packets destined to our address */
return 0;
}
- if (likely(is_broadcast_ether_addr(ð->d_addr))) {
+ if (likely(rte_is_broadcast_ether_addr(ð->d_addr))) {
/* allow all broadcast packets */
return 0;
}
- if (likely(is_multicast_ether_addr(ð->d_addr))) {
+ if (likely(rte_is_multicast_ether_addr(ð->d_addr))) {
/* allow all multicast packets */
return 0;
}
unsigned int i;
for (i = 0; i < avp->num_rx_queues; i++) {
- if (data->rx_queues[i] == rxq)
+ if (data->rx_queues[i] == rxq) {
+ rte_free(data->rx_queues[i]);
+ data->rx_queues[i] = NULL;
+ }
+ }
+}
+
+/*
+ * Free every rx queue structure still attached to the device data and
+ * clear the table entries so later teardown cannot see stale pointers.
+ * rte_free(NULL) is a no-op, but the entries are checked anyway so only
+ * live queues are cleared.  Called during device close (see the
+ * release_all calls added before the unlock label).
+ */
+static void
+avp_dev_rx_queue_release_all(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_eth_dev_data *data = avp->dev_data;
+ unsigned int i;
+
+ for (i = 0; i < avp->num_rx_queues; i++) {
+ if (data->rx_queues[i]) {
+ rte_free(data->rx_queues[i]);
data->rx_queues[i] = NULL;
+ }
}
}
unsigned int i;
for (i = 0; i < avp->num_tx_queues; i++) {
- if (data->tx_queues[i] == txq)
+ if (data->tx_queues[i] == txq) {
+ rte_free(data->tx_queues[i]);
data->tx_queues[i] = NULL;
+ }
+ }
+}
+
+/*
+ * Free every tx queue structure still attached to the device data and
+ * clear the table entries, mirroring avp_dev_rx_queue_release_all() for
+ * the transmit side.  Called during device close (see the release_all
+ * calls added before the unlock label).
+ */
+static void
+avp_dev_tx_queue_release_all(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_eth_dev_data *data = avp->dev_data;
+ unsigned int i;
+
+ for (i = 0; i < avp->num_tx_queues; i++) {
+ if (data->tx_queues[i]) {
+ rte_free(data->tx_queues[i]);
+ data->tx_queues[i] = NULL;
+ }
}
}
/* continue */
}
+ /* release dynamic storage for rx/tx queues */
+ avp_dev_rx_queue_release_all(eth_dev);
+ avp_dev_tx_queue_release_all(eth_dev);
+
unlock:
rte_spinlock_unlock(&avp->lock);
}
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
}
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_CRC_STRIP;
}
static int
stats->q_opackets[i] += txq->packets;
stats->q_obytes[i] += txq->bytes;
- stats->q_errors[i] += txq->errors;
}
}