/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <rte_bus_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_ethdev_driver.h>

#include "enic_compat.h"
#include "enic.h"
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_nic.h"

static inline int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

static int is_zero_addr(uint8_t *addr)
{
	return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
}

static int is_mcast_addr(uint8_t *addr)
{
	return addr[0] & 1;
}

static int is_eth_addr_valid(uint8_t *addr)
{
	return !is_mcast_addr(addr) && !is_zero_addr(addr);
}

static void
enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
{
	uint16_t i;

	if (!rq || !rq->mbuf_ring) {
		dev_debug(enic, "Pointer to rq or mbuf_ring is NULL");
		return;
	}

	for (i = 0; i < rq->ring.desc_count; i++) {
		if (rq->mbuf_ring[i]) {
			rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
			rq->mbuf_ring[i] = NULL;
		}
	}
}

static void enic_free_wq_buf(struct vnic_wq_buf *buf)
{
	struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;

	rte_pktmbuf_free_seg(mbuf);
	buf->mb = NULL;
}

static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			dev_err(enic, "WQ[%d] error_status %d\n", i,
				error_status);
	}

	for (i = 0; i < enic_vnic_rq_count(enic); i++) {
		if (!enic->rq[i].in_use)
			continue;
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			dev_err(enic, "RQ[%d] error_status %d\n", i,
				error_status);
	}
}

static void enic_clear_soft_stats(struct enic *enic)
{
	struct enic_soft_stats *soft_stats = &enic->soft_stats;
	rte_atomic64_clear(&soft_stats->rx_nombuf);
	rte_atomic64_clear(&soft_stats->rx_packet_errors);
	rte_atomic64_clear(&soft_stats->tx_oversized);
}

static void enic_init_soft_stats(struct enic *enic)
{
	struct enic_soft_stats *soft_stats = &enic->soft_stats;
	rte_atomic64_init(&soft_stats->rx_nombuf);
	rte_atomic64_init(&soft_stats->rx_packet_errors);
	rte_atomic64_init(&soft_stats->tx_oversized);
	enic_clear_soft_stats(enic);
}

void enic_dev_stats_clear(struct enic *enic)
{
	if (vnic_dev_stats_clear(enic->vdev))
		dev_err(enic, "Error in clearing stats\n");
	enic_clear_soft_stats(enic);
}

int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
{
	struct vnic_stats *stats;
	struct enic_soft_stats *soft_stats = &enic->soft_stats;
	int64_t rx_truncated;
	uint64_t rx_packet_errors;
	int ret = vnic_dev_stats_dump(enic->vdev, &stats);

	if (ret) {
		dev_err(enic, "Error in getting stats\n");
		return ret;
	}

	/* The number of truncated packets can only be calculated by
	 * subtracting a hardware counter from the error packets received by
	 * the driver. Note: this causes transient inaccuracies in the
	 * ipackets count. Also, the lengths of truncated packets are
	 * counted in ibytes even though truncated packets are dropped,
	 * which can make ibytes slightly higher than it should be.
	 */
	rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors);
	rx_truncated = rx_packet_errors - stats->rx.rx_errors;

	r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated;
	r_stats->opackets = stats->tx.tx_frames_ok;

	r_stats->ibytes = stats->rx.rx_bytes_ok;
	r_stats->obytes = stats->tx.tx_bytes_ok;

	r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
	r_stats->oerrors = stats->tx.tx_errors
			   + rte_atomic64_read(&soft_stats->tx_oversized);

	r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;

	r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
	return 0;
}

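/*
 * Worked example of the truncation math above (illustrative numbers only,
 * not from the source): if the rx handler has counted 25 rx_packet_errors
 * and HW reports rx_errors = 10, then rx_truncated = 25 - 10 = 15. Those
 * 15 frames were counted in rx_frames_ok by HW but dropped by the driver,
 * so they are subtracted from ipackets and added to imissed instead.
 */
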
int enic_del_mac_address(struct enic *enic, int mac_index)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes;

	return vnic_dev_del_addr(enic->vdev, mac_addr);
}

int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
{
	int err;

	if (!is_eth_addr_valid(mac_addr)) {
		dev_err(enic, "invalid mac address\n");
		return -EINVAL;
	}

	err = vnic_dev_add_addr(enic->vdev, mac_addr);
	if (err)
		dev_err(enic, "add mac addr failed\n");
	return err;
}

static void
enic_free_rq_buf(struct rte_mbuf **mbuf)
{
	if (*mbuf == NULL)
		return;

	rte_pktmbuf_free(*mbuf);
	*mbuf = NULL;
}

void enic_init_vnic_resources(struct enic *enic)
{
	unsigned int error_interrupt_enable = 1;
	unsigned int error_interrupt_offset = 0;
	unsigned int rxq_interrupt_enable = 0;
	unsigned int rxq_interrupt_offset;
	unsigned int index = 0;
	unsigned int cq_idx;
	struct vnic_rq *data_rq;

	if (enic->rte_dev->data->dev_conf.intr_conf.rxq) {
		rxq_interrupt_enable = 1;
		rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET;
	}
	for (index = 0; index < enic->rq_count; index++) {
		cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));

		vnic_rq_init(&enic->rq[enic_rte_rq_idx_to_sop_idx(index)],
			     cq_idx,
			     error_interrupt_enable,
			     error_interrupt_offset);

		data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index)];
		if (data_rq->in_use)
			vnic_rq_init(data_rq,
				     cq_idx,
				     error_interrupt_enable,
				     error_interrupt_offset);

		vnic_cq_init(&enic->cq[cq_idx],
			     0 /* flow_control_enable */,
			     1 /* color_enable */,
			     0 /* cq_head */,
			     0 /* cq_tail */,
			     1 /* cq_tail_color */,
			     rxq_interrupt_enable,
			     1 /* cq_entry_enable */,
			     0 /* cq_message_enable */,
			     rxq_interrupt_offset,
			     0 /* cq_message_addr */);
		if (rxq_interrupt_enable)
			rxq_interrupt_offset++;
	}

	for (index = 0; index < enic->wq_count; index++) {
		vnic_wq_init(&enic->wq[index],
			     enic_cq_wq(enic, index),
			     error_interrupt_enable,
			     error_interrupt_offset);

		cq_idx = enic_cq_wq(enic, index);
		vnic_cq_init(&enic->cq[cq_idx],
			     0 /* flow_control_enable */,
			     1 /* color_enable */,
			     0 /* cq_head */,
			     0 /* cq_tail */,
			     1 /* cq_tail_color */,
			     0 /* interrupt_enable */,
			     0 /* cq_entry_enable */,
			     1 /* cq_message_enable */,
			     0 /* interrupt offset */,
			     (u64)enic->wq[index].cqmsg_rz->iova);
	}

	for (index = 0; index < enic->intr_count; index++) {
		vnic_intr_init(&enic->intr[index],
			       enic->config.intr_timer_usec,
			       enic->config.intr_timer_type,
			       /*mask_on_assertion*/1);
	}
}

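/*
 * Sketch of the completion queue layout assumed by the loops above
 * (derived from enic_cq_rq()/enic_cq_wq(); indexes are illustrative):
 * CQs 0..rq_count-1 service the RQ pairs, and CQs rq_count..cq_count-1
 * service the WQs. For example, with rq_count = 2 and wq_count = 2:
 *
 *	cq[0] <- sop/data RQ pair 0	cq[2] <- wq[0]
 *	cq[1] <- sop/data RQ pair 1	cq[3] <- wq[1]
 */
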
static int
enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
{
	struct rte_mbuf *mb;
	struct rq_enet_desc *rqd = rq->ring.descs;
	unsigned int i;
	dma_addr_t dma_addr;
	uint32_t max_rx_pkt_len;
	uint16_t rq_buf_len;

	if (!rq->in_use)
		return 0;

	dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
		  rq->ring.desc_count);

	/*
	 * If *not* using scatter and the mbuf size is smaller than the
	 * requested max packet size (max_rx_pkt_len), then reduce the
	 * posted buffer size to max_rx_pkt_len. HW still receives packets
	 * larger than max_rx_pkt_len, but they will be truncated, which we
	 * drop in the rx handler. Not ideal, but better than returning
	 * large packets when the user is not expecting them.
	 */
	max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	rq_buf_len = rte_pktmbuf_data_room_size(rq->mp) - RTE_PKTMBUF_HEADROOM;
	if (max_rx_pkt_len < rq_buf_len && !rq->data_queue_enable)
		rq_buf_len = max_rx_pkt_len;
	for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
		mb = rte_mbuf_raw_alloc(rq->mp);
		if (mb == NULL) {
			dev_err(enic, "RX mbuf alloc failed queue_id=%u\n",
				(unsigned int)rq->index);
			return -ENOMEM;
		}

		mb->data_off = RTE_PKTMBUF_HEADROOM;
		dma_addr = (dma_addr_t)(mb->buf_iova
			   + RTE_PKTMBUF_HEADROOM);
		rq_enet_desc_enc(rqd, dma_addr,
				 (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
				 : RQ_ENET_TYPE_NOT_SOP),
				 rq_buf_len);
		rq->mbuf_ring[i] = mb;
	}

	/* make sure all prior writes are complete before doing the PIO write */
	rte_rmb();

	/* Post all but the last buffer to VIC. */
	rq->posted_index = rq->ring.desc_count - 1;

	rq->rx_nb_hold = 0;

	dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
		  enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
	iowrite32(rq->posted_index, &rq->ctrl->posted_index);
	iowrite32(0, &rq->ctrl->fetch_index);
	rte_rmb();

	return 0;
}

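/*
 * Worked example of the posted-buffer-size reduction above (assumed,
 * typical numbers): with a mempool whose data room is 2048 + 128 bytes,
 * rte_pktmbuf_data_room_size() - RTE_PKTMBUF_HEADROOM = 2048. If the
 * application sets max_rx_pkt_len = 1518 and scatter is off, buffers are
 * posted as 1518 bytes; any larger frame arrives truncated, is flagged
 * by HW, and is dropped in the rx handler.
 */
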
static void *
enic_alloc_consistent(void *priv, size_t size,
		      dma_addr_t *dma_handle, u8 *name)
{
	void *vaddr;
	const struct rte_memzone *rz;
	*dma_handle = 0;
	struct enic *enic = (struct enic *)priv;
	struct enic_memzone_entry *mze;

	rz = rte_memzone_reserve_aligned((const char *)name, size,
			SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
	if (!rz) {
		pr_err("%s : Failed to allocate memory requested for %s\n",
		       __func__, name);
		return NULL;
	}

	vaddr = rz->addr;
	*dma_handle = (dma_addr_t)rz->iova;

	mze = rte_malloc("enic memzone entry",
			 sizeof(struct enic_memzone_entry), 0);

	if (!mze) {
		pr_err("%s : Failed to allocate memory for memzone list\n",
		       __func__);
		rte_memzone_free(rz);
		return NULL;
	}

	mze->rz = rz;

	rte_spinlock_lock(&enic->memzone_list_lock);
	LIST_INSERT_HEAD(&enic->memzone_list, mze, entries);
	rte_spinlock_unlock(&enic->memzone_list_lock);

	return vaddr;
}

static void
enic_free_consistent(void *priv,
		     __rte_unused size_t size,
		     void *vaddr,
		     dma_addr_t dma_handle)
{
	struct enic_memzone_entry *mze;
	struct enic *enic = (struct enic *)priv;

	rte_spinlock_lock(&enic->memzone_list_lock);
	LIST_FOREACH(mze, &enic->memzone_list, entries) {
		if (mze->rz->addr == vaddr &&
		    mze->rz->iova == dma_handle)
			break;
	}
	if (mze == NULL) {
		rte_spinlock_unlock(&enic->memzone_list_lock);
		dev_warning(enic,
			    "Tried to free memory, but couldn't find it in the memzone list\n");
		return;
	}
	LIST_REMOVE(mze, entries);
	rte_spinlock_unlock(&enic->memzone_list_lock);
	rte_memzone_free(mze->rz);
	rte_free(mze);
}

int enic_link_update(struct enic *enic)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	link.link_status = enic_get_link_status(enic);
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = vnic_dev_port_speed(enic->vdev);

	return rte_eth_linkstatus_set(eth_dev, &link);
}

static void
enic_intr_handler(void *arg)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
	struct enic *enic = pmd_priv(dev);

	vnic_intr_return_all_credits(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);

	enic_link_update(enic);
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	enic_log_q_error(enic);
}

static int enic_rxq_intr_init(struct enic *enic)
{
	struct rte_intr_handle *intr_handle;
	uint32_t rxq_intr_count, i;
	int err;

	intr_handle = enic->rte_dev->intr_handle;
	if (!enic->rte_dev->data->dev_conf.intr_conf.rxq)
		return 0;
	/*
	 * Rx queue interrupts only work when we have MSI-X interrupts,
	 * one per queue. Sharing one interrupt is technically
	 * possible with VIC, but it is not worth the complications it brings.
	 */
	if (!rte_intr_cap_multiple(intr_handle)) {
		dev_err(enic, "Rx queue interrupts require MSI-X interrupts"
			" (vfio-pci driver)\n");
		return -ENOTSUP;
	}
	rxq_intr_count = enic->intr_count - ENICPMD_RXQ_INTR_OFFSET;
	err = rte_intr_efd_enable(intr_handle, rxq_intr_count);
	if (err) {
		dev_err(enic, "Failed to enable event fds for Rx queue"
			" interrupts\n");
		return err;
	}
	intr_handle->intr_vec = rte_zmalloc("enic_intr_vec",
					    rxq_intr_count * sizeof(int), 0);
	if (intr_handle->intr_vec == NULL) {
		dev_err(enic, "Failed to allocate intr_vec\n");
		return -ENOMEM;
	}
	for (i = 0; i < rxq_intr_count; i++)
		intr_handle->intr_vec[i] = i + ENICPMD_RXQ_INTR_OFFSET;
	return 0;
}

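/*
 * Vector layout set up here and in enic_init_vnic_resources(): vector 0
 * (ENICPMD_LSC_INTR_OFFSET, also the error interrupt offset) carries
 * link-state and queue-error notifications, and Rx queue i raises vector
 * ENICPMD_RXQ_INTR_OFFSET + i. An application would typically consume
 * these through the standard ethdev Rx-interrupt API; a usage sketch
 * (illustrative, not part of the driver):
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, events, 1, timeout_ms);
 */
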
static void enic_rxq_intr_deinit(struct enic *enic)
{
	struct rte_intr_handle *intr_handle;

	intr_handle = enic->rte_dev->intr_handle;
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}

int enic_enable(struct enic *enic)
{
	unsigned int index;
	int err;
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* vnic notification of link status has already been turned on in
	 * enic_dev_init() which is called during probe time. Here we are
	 * just turning on interrupt vector 0 if needed.
	 */
	if (eth_dev->data->dev_conf.intr_conf.lsc)
		vnic_dev_notify_set(enic->vdev, 0);

	err = enic_rxq_intr_init(enic);
	if (err)
		return err;
	if (enic_clsf_init(enic))
		dev_warning(enic, "Init of hash table for clsf failed. "
			    "Flow director feature will not work\n");

	for (index = 0; index < enic->rq_count; index++) {
		err = enic_alloc_rx_queue_mbufs(enic,
			&enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
		if (err) {
			dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
			return err;
		}
		err = enic_alloc_rx_queue_mbufs(enic,
			&enic->rq[enic_rte_rq_idx_to_data_idx(index)]);
		if (err) {
			/* release the allocated mbufs for the sop rq */
			enic_rxmbuf_queue_release(enic,
				&enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);

			dev_err(enic, "Failed to alloc data RX queue mbufs\n");
			return err;
		}
	}

	for (index = 0; index < enic->wq_count; index++)
		enic_start_wq(enic, index);
	for (index = 0; index < enic->rq_count; index++)
		enic_start_rq(enic, index);

	vnic_dev_add_addr(enic->vdev, enic->mac_addr);

	vnic_dev_enable_wait(enic->vdev);

	/* Register and enable error interrupt */
	rte_intr_callback_register(&(enic->pdev->intr_handle),
				   enic_intr_handler, (void *)enic->rte_dev);

	rte_intr_enable(&(enic->pdev->intr_handle));
	/* Unmask LSC interrupt */
	vnic_intr_unmask(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);

	return 0;
}

int enic_alloc_intr_resources(struct enic *enic)
{
	unsigned int i;
	int err;

	dev_info(enic, "vNIC resources used: "
		 "wq %d rq %d cq %d intr %d\n",
		 enic->wq_count, enic_vnic_rq_count(enic),
		 enic->cq_count, enic->intr_count);

	for (i = 0; i < enic->intr_count; i++) {
		err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
		if (err) {
			enic_free_vnic_resources(enic);
			return err;
		}
	}
	return 0;
}

void enic_free_rq(void *rxq)
{
	struct vnic_rq *rq_sop, *rq_data;
	struct enic *enic;

	if (rxq == NULL)
		return;

	rq_sop = (struct vnic_rq *)rxq;
	enic = vnic_dev_priv(rq_sop->vdev);
	rq_data = &enic->rq[rq_sop->data_queue_idx];

	enic_rxmbuf_queue_release(enic, rq_sop);
	if (rq_data->in_use)
		enic_rxmbuf_queue_release(enic, rq_data);

	rte_free(rq_sop->mbuf_ring);
	if (rq_data->in_use)
		rte_free(rq_data->mbuf_ring);

	rq_sop->mbuf_ring = NULL;
	rq_data->mbuf_ring = NULL;

	vnic_rq_free(rq_sop);
	if (rq_data->in_use)
		vnic_rq_free(rq_data);

	vnic_cq_free(&enic->cq[enic_sop_rq_idx_to_cq_idx(rq_sop->index)]);

	rq_sop->in_use = 0;
	rq_data->in_use = 0;
}

void enic_start_wq(struct enic *enic, uint16_t queue_idx)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	vnic_wq_enable(&enic->wq[queue_idx]);
	eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}

int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	int ret;

	ret = vnic_wq_disable(&enic->wq[queue_idx]);
	if (ret)
		return ret;

	eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

void enic_start_rq(struct enic *enic, uint16_t queue_idx)
{
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;
	rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
	rq_data = &enic->rq[rq_sop->data_queue_idx];
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	if (rq_data->in_use)
		vnic_rq_enable(rq_data);
	rte_mb();
	vnic_rq_enable(rq_sop);
	eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}

int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
{
	int ret1 = 0, ret2 = 0;
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;
	rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
	rq_data = &enic->rq[rq_sop->data_queue_idx];

	ret2 = vnic_rq_disable(rq_sop);
	rte_mb();
	if (rq_data->in_use)
		ret1 = vnic_rq_disable(rq_data);

	if (ret2)
		return ret2;
	else if (ret1)
		return ret1;

	eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
	unsigned int socket_id, struct rte_mempool *mp,
	uint16_t nb_desc, uint16_t free_thresh)
{
	int rc;
	uint16_t sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
	uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx);
	struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
	struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
	unsigned int mbuf_size, mbufs_per_pkt;
	unsigned int nb_sop_desc, nb_data_desc;
	uint16_t min_sop, max_sop, min_data, max_data;
	uint32_t max_rx_pkt_len;

	rq_sop->is_sop = 1;
	rq_sop->data_queue_idx = data_queue_idx;
	rq_data->is_sop = 0;
	rq_data->data_queue_idx = 0;
	rq_sop->socket_id = socket_id;
	rq_sop->mp = mp;
	rq_data->socket_id = socket_id;
	rq_data->mp = mp;
	rq_sop->in_use = 1;
	rq_sop->rx_free_thresh = free_thresh;
	rq_data->rx_free_thresh = free_thresh;
	dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
		  free_thresh);

	mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
			       RTE_PKTMBUF_HEADROOM);
	/* max_rx_pkt_len includes the ethernet header and CRC. */
	max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;

	if (enic->rte_dev->data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_SCATTER) {
		dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
		/* ceil((max pkt len)/mbuf_size) */
		mbufs_per_pkt = (max_rx_pkt_len + mbuf_size - 1) / mbuf_size;
	} else {
		dev_info(enic, "Scatter rx mode disabled\n");
		mbufs_per_pkt = 1;
		if (max_rx_pkt_len > mbuf_size) {
			dev_warning(enic, "The maximum Rx packet size (%u) is"
				    " larger than the mbuf size (%u), and"
				    " scatter is disabled. Larger packets will"
				    " be truncated.\n",
				    max_rx_pkt_len, mbuf_size);
		}
	}

	if (mbufs_per_pkt > 1) {
		dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx);
		rq_sop->data_queue_enable = 1;
		rq_data->in_use = 1;
		/*
		 * HW does not directly support rxmode.max_rx_pkt_len. HW
		 * always receives packet sizes up to the "max" MTU.
		 * If not using scatter, we can achieve the effect of dropping
		 * larger packets by reducing the size of posted buffers.
		 * See enic_alloc_rx_queue_mbufs().
		 */
		if (max_rx_pkt_len <
		    enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu)) {
			dev_warning(enic, "rxmode.max_rx_pkt_len is ignored"
				    " when scatter rx mode is in use.\n");
		}
	} else {
		dev_info(enic, "Rq %u Scatter rx mode not being used\n",
			 queue_idx);
		rq_sop->data_queue_enable = 0;
		rq_data->in_use = 0;
	}

	/* number of descriptors has to be a multiple of 32 */
	nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
	nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;

	rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
	rq_data->max_mbufs_per_pkt = mbufs_per_pkt;

	if (mbufs_per_pkt > 1) {
		min_sop = 64;
		max_sop = ((enic->config.rq_desc_count /
			    (mbufs_per_pkt - 1)) & ~0x1F);
		min_data = min_sop * (mbufs_per_pkt - 1);
		max_data = enic->config.rq_desc_count;
	} else {
		min_sop = 64;
		max_sop = enic->config.rq_desc_count;
		min_data = 0;
		max_data = 0;
	}

	if (nb_desc < (min_sop + min_data)) {
		dev_warning(enic,
			    "Number of rx descs too low, adjusting to minimum\n");
		nb_sop_desc = min_sop;
		nb_data_desc = min_data;
	} else if (nb_desc > (max_sop + max_data)) {
		dev_warning(enic,
			    "Number of rx_descs too high, adjusting to maximum\n");
		nb_sop_desc = max_sop;
		nb_data_desc = max_data;
	}
	if (mbufs_per_pkt > 1) {
		dev_info(enic, "For max packet size %u and mbuf size %u valid"
			 " rx descriptor range is %u to %u\n",
			 max_rx_pkt_len, mbuf_size, min_sop + min_data,
			 max_sop + max_data);
	}
	dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
		 nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc);

	/* Allocate sop queue resources */
	rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx,
			   nb_sop_desc, sizeof(struct rq_enet_desc));
	if (rc) {
		dev_err(enic, "error in allocation of sop rq\n");
		goto err_exit;
	}
	nb_sop_desc = rq_sop->ring.desc_count;

	if (rq_data->in_use) {
		/* Allocate data queue resources */
		rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx,
				   nb_data_desc,
				   sizeof(struct rq_enet_desc));
		if (rc) {
			dev_err(enic, "error in allocation of data rq\n");
			goto err_free_rq_sop;
		}
		nb_data_desc = rq_data->ring.desc_count;
	}
	rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
			   socket_id, nb_sop_desc + nb_data_desc,
			   sizeof(struct cq_enet_rq_desc));
	if (rc) {
		dev_err(enic, "error in allocation of cq for rq\n");
		goto err_free_rq_data;
	}

	/* Allocate the mbuf rings */
	rq_sop->mbuf_ring = (struct rte_mbuf **)
		rte_zmalloc_socket("rq->mbuf_ring",
				   sizeof(struct rte_mbuf *) * nb_sop_desc,
				   RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
	if (rq_sop->mbuf_ring == NULL)
		goto err_free_cq;

	if (rq_data->in_use) {
		rq_data->mbuf_ring = (struct rte_mbuf **)
			rte_zmalloc_socket("rq->mbuf_ring",
				sizeof(struct rte_mbuf *) * nb_data_desc,
				RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
		if (rq_data->mbuf_ring == NULL)
			goto err_free_sop_mbuf;
	}

	rq_sop->tot_nb_desc = nb_desc; /* squirrel away for MTU update function */

	return 0;

err_free_sop_mbuf:
	rte_free(rq_sop->mbuf_ring);
err_free_cq:
	/* cleanup on error */
	vnic_cq_free(&enic->cq[queue_idx]);
err_free_rq_data:
	if (rq_data->in_use)
		vnic_rq_free(rq_data);
err_free_rq_sop:
	vnic_rq_free(rq_sop);
err_exit:
	return -ENOMEM;
}

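/*
 * Worked example of the sop/data descriptor split above (illustrative
 * numbers): with nb_desc = 512 and mbufs_per_pkt = 3 (e.g. a 9000-byte
 * max packet and roughly 3000-byte buffers), nb_sop_desc =
 * (512 / 3) & ~0x1F = 160 and nb_data_desc = (512 - 160) & ~0x1F = 352,
 * keeping both ring sizes multiples of 32 as required by the VIC.
 */
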
void enic_free_wq(void *txq)
{
	struct vnic_wq *wq;
	struct enic *enic;

	if (txq == NULL)
		return;

	wq = (struct vnic_wq *)txq;
	enic = vnic_dev_priv(wq->vdev);
	rte_memzone_free(wq->cqmsg_rz);
	vnic_wq_free(wq);
	vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
}

int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
	unsigned int socket_id, uint16_t nb_desc)
{
	int err;
	struct vnic_wq *wq = &enic->wq[queue_idx];
	unsigned int cq_index = enic_cq_wq(enic, queue_idx);
	char name[NAME_MAX];
	static int instance;

	wq->socket_id = socket_id;
	if (nb_desc > enic->config.wq_desc_count) {
		dev_warning(enic,
			    "WQ %d - number of tx desc in cmd line (%d) "
			    "is greater than that in the UCSM/CIMC adapter "
			    "policy. Applying the value in the adapter "
			    "policy (%d)\n",
			    queue_idx, nb_desc, enic->config.wq_desc_count);
	} else if (nb_desc != enic->config.wq_desc_count) {
		enic->config.wq_desc_count = nb_desc;
		dev_info(enic,
			 "TX Queues - effective number of descs:%d\n",
			 nb_desc);
	}

	/* Allocate queue resources */
	err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
			    enic->config.wq_desc_count,
			    sizeof(struct wq_enet_desc));
	if (err) {
		dev_err(enic, "error in allocation of wq\n");
		return err;
	}

	err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
			    socket_id, enic->config.wq_desc_count,
			    sizeof(struct cq_enet_wq_desc));
	if (err) {
		vnic_wq_free(wq);
		dev_err(enic, "error in allocation of cq for wq\n");
	}

	/* setup up CQ message */
	snprintf((char *)name, sizeof(name),
		 "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx,
		 instance++);

	wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
			sizeof(uint32_t), SOCKET_ID_ANY,
			RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
	if (!wq->cqmsg_rz)
		return -ENOMEM;

	return err;
}

int enic_disable(struct enic *enic)
{
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]); /* flush write */
	}
	enic_rxq_intr_deinit(enic);
	rte_intr_disable(&enic->pdev->intr_handle);
	rte_intr_callback_unregister(&enic->pdev->intr_handle,
				     enic_intr_handler,
				     (void *)enic->rte_dev);

	vnic_dev_disable(enic->vdev);

	enic_clsf_destroy(enic);

	if (!enic_is_sriov_vf(enic))
		vnic_dev_del_addr(enic->vdev, enic->mac_addr);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic_vnic_rq_count(enic); i++) {
		if (enic->rq[i].in_use) {
			err = vnic_rq_disable(&enic->rq[i]);
			if (err)
				return err;
		}
	}

	/* If we were using interrupts, set the interrupt vector to -1
	 * to disable interrupts. We are not disabling link notifications,
	 * though, as we want the polling of link status to continue working.
	 */
	if (enic->rte_dev->data->dev_conf.intr_conf.lsc)
		vnic_dev_notify_set(enic->vdev, -1);

	vnic_dev_set_reset_flag(enic->vdev, 1);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);

	for (i = 0; i < enic_vnic_rq_count(enic); i++)
		if (enic->rq[i].in_use)
			vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}

static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	int done;
	int err;
	int i;

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max */
	for (i = 0; i < 2000; i++) {
		err = finished(vdev, &done);
		if (err)
			return err;
		if (done)
			return 0;
		usleep(1000);
	}
	return -ETIMEDOUT;
}

static int enic_dev_open(struct enic *enic)
{
	int err;
	int flags = CMD_OPENF_IG_DESCCACHE;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
			    vnic_dev_open_done, flags);
	if (err)
		dev_err(enic_get_dev(enic),
			"vNIC device open failed, err %d\n", err);

	return err;
}

static int enic_set_rsskey(struct enic *enic, uint8_t *user_key)
{
	dma_addr_t rss_key_buf_pa;
	union vnic_rss_key *rss_key_buf_va = NULL;
	int err, i;
	u8 name[NAME_MAX];

	RTE_ASSERT(user_key != NULL);
	snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name);
	rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
		&rss_key_buf_pa, name);
	if (!rss_key_buf_va)
		return -ENOMEM;

	for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++)
		rss_key_buf_va->key[i / 10].b[i % 10] = user_key[i];

	err = enic_set_rss_key(enic,
		rss_key_buf_pa,
		sizeof(union vnic_rss_key));

	/* Save for later queries */
	if (!err)
		rte_memcpy(&enic->rss_key, rss_key_buf_va,
			   sizeof(union vnic_rss_key));
	enic_free_consistent(enic, sizeof(union vnic_rss_key),
		rss_key_buf_va, rss_key_buf_pa);

	return err;
}

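/*
 * Layout sketch for the copy loop above: union vnic_rss_key stores the
 * 40-byte key as four 10-byte groups, so byte i of the user key lands in
 * key[i / 10].b[i % 10]. For example, user_key[23] is written to
 * key[2].b[3].
 */
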
int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	int err;
	u8 name[NAME_MAX];

	snprintf((char *)name, NAME_MAX, "rss_cpu-%s", enic->bdf_name);
	rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
		&rss_cpu_buf_pa, name);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

	rte_memcpy(rss_cpu_buf_va, rss_cpu, sizeof(union vnic_rss_cpu));

	err = enic_set_rss_cpu(enic,
		rss_cpu_buf_pa,
		sizeof(union vnic_rss_cpu));

	enic_free_consistent(enic, sizeof(union vnic_rss_cpu),
		rss_cpu_buf_va, rss_cpu_buf_pa);

	/* Save for later queries */
	if (!err)
		rte_memcpy(&enic->rss_cpu, rss_cpu, sizeof(union vnic_rss_cpu));
	return err;
}

static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
	const u8 tso_ipid_split_en = 0;
	int err;

	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		enic->ig_vlan_strip_en);

	return err;
}

/* Initialize RSS with defaults, called from dev_configure */
int enic_init_rss_nic_cfg(struct enic *enic)
{
	static uint8_t default_rss_key[] = {
		85, 67, 83, 97, 119, 101, 115, 111, 109, 101,
		80, 65, 76, 79, 117, 110, 105, 113, 117, 101,
		76, 73, 78, 85, 88, 114, 111, 99, 107, 115,
		69, 78, 73, 67, 105, 115, 99, 111, 111, 108,
	};
	struct rte_eth_rss_conf rss_conf;
	union vnic_rss_cpu rss_cpu;
	int ret, i;

	rss_conf = enic->rte_dev->data->dev_conf.rx_adv_conf.rss_conf;
	/*
	 * If setting key for the first time, and the user gives us none, then
	 * push the default key to NIC.
	 */
	if (rss_conf.rss_key == NULL) {
		rss_conf.rss_key = default_rss_key;
		rss_conf.rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
	}
	ret = enic_set_rss_conf(enic, &rss_conf);
	if (ret) {
		dev_err(enic, "Failed to configure RSS\n");
		return ret;
	}
	if (enic->rss_enable) {
		/* If enabling RSS, use the default reta */
		for (i = 0; i < ENIC_RSS_RETA_SIZE; i++) {
			rss_cpu.cpu[i / 4].b[i % 4] =
				enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);
		}
		ret = enic_set_rss_reta(enic, &rss_cpu);
		if (ret)
			dev_err(enic, "Failed to set RSS indirection table\n");
	}
	return ret;
}

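/*
 * Default RETA layout produced above (illustrative, assuming rq_count = 3):
 * entries cycle through the SOP RQ indexes of the configured queues, with
 * entry i stored at cpu[i / 4].b[i % 4]. So cpu[0].b[0..3] holds
 * sop(0), sop(1), sop(2), sop(0), and the pattern repeats across all
 * ENIC_RSS_RETA_SIZE entries.
 */
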
int enic_setup_finish(struct enic *enic)
{
	enic_init_soft_stats(enic);

	/* Default conf */
	vnic_dev_packet_filter(enic->vdev,
		1 /* directed  */,
		1 /* multicast */,
		1 /* broadcast */,
		0 /* promisc   */,
		1 /* allmulti  */);

	enic->promisc = 0;
	enic->allmulti = 1;

	return 0;
}

static int enic_rss_conf_valid(struct enic *enic,
			       struct rte_eth_rss_conf *rss_conf)
{
	/* RSS is disabled per VIC settings. Ignore rss_conf. */
	if (enic->flow_type_rss_offloads == 0)
		return 0;
	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len != ENIC_RSS_HASH_KEY_SIZE) {
		dev_err(enic, "Given rss_key is %d bytes, it must be %d\n",
			rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
		return -EINVAL;
	}
	if (rss_conf->rss_hf != 0 &&
	    (rss_conf->rss_hf & enic->flow_type_rss_offloads) == 0) {
		dev_err(enic, "Given rss_hf contains none of the supported"
			" types\n");
		return -EINVAL;
	}

	return 0;
}

/* Set hash type and key according to rss_conf */
int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *eth_dev;
	uint64_t rss_hf;
	u8 rss_hash_type;
	u8 rss_enable;
	int ret;

	RTE_ASSERT(rss_conf != NULL);
	ret = enic_rss_conf_valid(enic, rss_conf);
	if (ret) {
		dev_err(enic, "RSS configuration (rss_conf) is invalid\n");
		return ret;
	}

	eth_dev = enic->rte_dev;
	rss_hash_type = 0;
	rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads;
	if (enic->rq_count > 1 &&
	    (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) &&
	    rss_hf != 0) {
		rss_enable = 1;
		if (rss_hf & ETH_RSS_IPV4)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
		if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
			/*
			 * 'TCP' is not a typo. HW does not have a separate
			 * enable bit for UDP RSS. The TCP bit enables both TCP
			 * and UDP RSS.
			 */
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
		}
		if (rss_hf & ETH_RSS_IPV6)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
		if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
		if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
			/* Again, 'TCP' is not a typo. */
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
		}
		if (rss_hf & ETH_RSS_IPV6_EX)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6_EX;
		if (rss_hf & ETH_RSS_IPV6_TCP_EX)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX;
	} else {
		rss_enable = 0;
		rss_hf = 0;
	}

	/* Set the hash key if provided */
	if (rss_enable && rss_conf->rss_key) {
		ret = enic_set_rsskey(enic, rss_conf->rss_key);
		if (ret) {
			dev_err(enic, "Failed to set RSS key\n");
			return ret;
		}
	}

	ret = enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, rss_hash_type,
			      ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
			      rss_enable);
	if (!ret) {
		enic->rss_hf = rss_hf;
		enic->rss_hash_type = rss_hash_type;
		enic->rss_enable = rss_enable;
	}
	return ret;
}

int enic_set_vlan_strip(struct enic *enic)
{
	/*
	 * Unfortunately, VLAN strip on/off and RSS on/off are configured
	 * together. So, re-do niccfg, preserving the current RSS settings.
	 */
	return enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, enic->rss_hash_type,
			       ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
			       enic->rss_enable);
}

void enic_add_packet_filter(struct enic *enic)
{
	/* Args -> directed, multicast, broadcast, promisc, allmulti */
	vnic_dev_packet_filter(enic->vdev, 1, 1, 1,
		enic->promisc, enic->allmulti);
}

int enic_get_link_status(struct enic *enic)
{
	return vnic_dev_link_status(enic->vdev);
}

static void enic_dev_deinit(struct enic *enic)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	/* stop link status checking */
	vnic_dev_notify_unset(enic->vdev);

	rte_free(eth_dev->data->mac_addrs);
	rte_free(enic->cq);
	rte_free(enic->intr);
	rte_free(enic->rq);
	rte_free(enic->wq);
}

int enic_set_vnic_res(struct enic *enic)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	int rc = 0;
	unsigned int required_rq, required_wq, required_cq, required_intr;

	/* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */
	required_rq = eth_dev->data->nb_rx_queues * 2;
	required_wq = eth_dev->data->nb_tx_queues;
	required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues;
	required_intr = 1; /* 1 for LSC even if intr_conf.lsc is 0 */
	if (eth_dev->data->dev_conf.intr_conf.rxq) {
		required_intr += eth_dev->data->nb_rx_queues;
	}

	if (enic->conf_rq_count < required_rq) {
		dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
			eth_dev->data->nb_rx_queues,
			required_rq, enic->conf_rq_count);
		rc = -EINVAL;
	}
	if (enic->conf_wq_count < required_wq) {
		dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
			eth_dev->data->nb_tx_queues, enic->conf_wq_count);
		rc = -EINVAL;
	}

	if (enic->conf_cq_count < required_cq) {
		dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
			required_cq, enic->conf_cq_count);
		rc = -EINVAL;
	}
	if (enic->conf_intr_count < required_intr) {
		dev_err(dev, "Not enough Interrupts to support Rx queue"
			" interrupts. Required:%u, Configured:%u\n",
			required_intr, enic->conf_intr_count);
		rc = -EINVAL;
	}

	if (rc == 0) {
		enic->rq_count = eth_dev->data->nb_rx_queues;
		enic->wq_count = eth_dev->data->nb_tx_queues;
		enic->cq_count = enic->rq_count + enic->wq_count;
		enic->intr_count = required_intr;
	}

	return rc;
}

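/*
 * Worked example of the checks above (illustrative): a port configured
 * with 4 Rx and 4 Tx queues plus Rx interrupts needs 8 vNIC RQs (two per
 * Rx queue), 4 WQs, 4 + 4 = 8 CQs, and 1 + 4 = 5 interrupt resources
 * (vector 0 for LSC, one per Rx queue). If the UCSM/CIMC policy provides
 * fewer of any resource, this function fails with -EINVAL.
 */
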
/* Initialize the completion queue for an RQ */
static int
enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
{
	struct vnic_rq *sop_rq, *data_rq;
	unsigned int cq_idx;
	int rc = 0;

	sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
	data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
	cq_idx = rq_idx;

	vnic_cq_clean(&enic->cq[cq_idx]);
	vnic_cq_init(&enic->cq[cq_idx],
		     0 /* flow_control_enable */,
		     1 /* color_enable */,
		     0 /* cq_head */,
		     0 /* cq_tail */,
		     1 /* cq_tail_color */,
		     0 /* interrupt_enable */,
		     1 /* cq_entry_enable */,
		     0 /* cq_message_enable */,
		     0 /* interrupt offset */,
		     0 /* cq_message_addr */);

	vnic_rq_init_start(sop_rq, enic_cq_rq(enic,
			   enic_rte_rq_idx_to_sop_idx(rq_idx)), 0,
			   sop_rq->ring.desc_count - 1, 1, 0);
	if (data_rq->in_use) {
		vnic_rq_init_start(data_rq,
				   enic_cq_rq(enic,
				   enic_rte_rq_idx_to_data_idx(rq_idx)), 0,
				   data_rq->ring.desc_count - 1, 1, 0);
	}

	rc = enic_alloc_rx_queue_mbufs(enic, sop_rq);
	if (rc)
		return rc;

	if (data_rq->in_use) {
		rc = enic_alloc_rx_queue_mbufs(enic, data_rq);
		if (rc) {
			enic_rxmbuf_queue_release(enic, sop_rq);
			return rc;
		}
	}

	return 0;
}

/* The Cisco NIC can send and receive packets up to a max packet size
 * determined by the NIC type and firmware. There is also an MTU
 * configured into the NIC via the CIMC/UCSM management interface
 * which can be overridden by this function (up to the max packet size).
 * Depending on the network setup, doing so may cause packet drops
 * and unexpected behavior.
 */
int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
{
	unsigned int rq_idx;
	struct vnic_rq *rq;
	int rc = 0;
	uint16_t old_mtu;	/* previous setting */
	uint16_t config_mtu;	/* Value configured into NIC via CIMC/UCSM */
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	old_mtu = eth_dev->data->mtu;
	config_mtu = enic->config.mtu;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	if (new_mtu > enic->max_mtu) {
		dev_err(enic,
			"MTU not updated: requested (%u) greater than max (%u)\n",
			new_mtu, enic->max_mtu);
		return -EINVAL;
	}
	if (new_mtu < ENIC_MIN_MTU) {
		dev_info(enic,
			"MTU not updated: requested (%u) less than min (%u)\n",
			new_mtu, ENIC_MIN_MTU);
		return -EINVAL;
	}
	if (new_mtu > config_mtu)
		dev_warning(enic,
			"MTU (%u) is greater than value configured in NIC (%u)\n",
			new_mtu, config_mtu);

	/* The easy case is when scatter is disabled. However if the MTU
	 * becomes greater than the mbuf data size, packet drops will ensue.
	 */
	if (!(enic->rte_dev->data->dev_conf.rxmode.offloads &
	      DEV_RX_OFFLOAD_SCATTER)) {
		eth_dev->data->mtu = new_mtu;
		goto set_mtu_done;
	}

	/* Rx scatter is enabled so reconfigure RQ's on the fly. The point is to
	 * change Rx scatter mode if necessary for better performance. I.e. if
	 * MTU was greater than the mbuf size and now it's less, scatter Rx
	 * doesn't have to be used and vice versa.
	 */
	rte_spinlock_lock(&enic->mtu_lock);

	/* Stop traffic on all RQs */
	for (rq_idx = 0; rq_idx < enic->rq_count * 2; rq_idx++) {
		rq = &enic->rq[rq_idx];
		if (rq->is_sop && rq->in_use) {
			rc = enic_stop_rq(enic,
					  enic_sop_rq_idx_to_rte_idx(rq_idx));
			if (rc) {
				dev_err(enic, "Failed to stop Rq %u\n", rq_idx);
				goto set_mtu_done;
			}
		}
	}

	/* replace Rx function with a no-op to avoid getting stale pkts */
	eth_dev->rx_pkt_burst = enic_dummy_recv_pkts;
	rte_mb();

	/* Allow time for threads to exit the real Rx function. */
	usleep(100000);

	/* now it is safe to reconfigure the RQs */

	/* update the mtu */
	eth_dev->data->mtu = new_mtu;

	/* free and reallocate RQs with the new MTU */
	for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
		rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];

		enic_free_rq(rq);
		rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
				   rq->tot_nb_desc, rq->rx_free_thresh);
		if (rc) {
			dev_err(enic,
				"Fatal MTU alloc error- No traffic will pass\n");
			goto set_mtu_done;
		}

		rc = enic_reinit_rq(enic, rq_idx);
		if (rc) {
			dev_err(enic,
				"Fatal MTU RQ reinit- No traffic will pass\n");
			goto set_mtu_done;
		}
	}

	/* put back the real receive function */
	rte_mb();
	eth_dev->rx_pkt_burst = enic_recv_pkts;
	rte_mb();

	/* restart Rx traffic */
	for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
		rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
		if (rq->is_sop && rq->in_use)
			enic_start_rq(enic, rq_idx);
	}

set_mtu_done:
	dev_info(enic, "MTU changed from %u to %u\n", old_mtu, new_mtu);
	rte_spinlock_unlock(&enic->mtu_lock);
	return rc;
}

static int enic_dev_init(struct enic *enic)
{
	int err;
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	vnic_dev_intr_coal_timer_info_default(enic->vdev);

	/* Get vNIC configuration */
	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts */
	enic_get_res_counts(enic);
	if (enic->conf_rq_count == 1) {
		dev_err(enic, "Running with only 1 RQ configured in the vNIC is not supported.\n");
		dev_err(enic, "Please configure 2 RQs in the vNIC for each Rx queue used by DPDK.\n");
		dev_err(enic, "See the ENIC PMD guide for more information.\n");
		return -EINVAL;
	}
	/* Queue counts may be zeros. rte_zmalloc returns NULL in that case. */
	enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) *
			       enic->conf_cq_count, 8);
	enic->intr = rte_zmalloc("enic_vnic_intr", sizeof(struct vnic_intr) *
				 enic->conf_intr_count, 8);
	enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) *
			       enic->conf_rq_count, 8);
	enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) *
			       enic->conf_wq_count, 8);
	if (enic->conf_cq_count > 0 && enic->cq == NULL) {
		dev_err(enic, "failed to allocate vnic_cq, aborting.\n");
		return -1;
	}
	if (enic->conf_intr_count > 0 && enic->intr == NULL) {
		dev_err(enic, "failed to allocate vnic_intr, aborting.\n");
		return -1;
	}
	if (enic->conf_rq_count > 0 && enic->rq == NULL) {
		dev_err(enic, "failed to allocate vnic_rq, aborting.\n");
		return -1;
	}
	if (enic->conf_wq_count > 0 && enic->wq == NULL) {
		dev_err(enic, "failed to allocate vnic_wq, aborting.\n");
		return -1;
	}

	/* Get the supported filters */
	enic_fdir_info(enic);

	eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN
					       * ENIC_MAX_MAC_ADDR, 0);
	if (!eth_dev->data->mac_addrs) {
		dev_err(enic, "mac addr storage alloc failed, aborting.\n");
		return -1;
	}
	ether_addr_copy((struct ether_addr *)enic->mac_addr,
			eth_dev->data->mac_addrs);

	vnic_dev_set_reset_flag(enic->vdev, 0);

	LIST_INIT(&enic->flows);
	rte_spinlock_init(&enic->flows_lock);

	/* set up link status checking */
	vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */

	return 0;
}

int enic_probe(struct enic *enic)
{
	struct rte_pci_device *pdev = enic->pdev;
	int err = -1;

	dev_debug(enic, "Initializing ENIC PMD\n");

	/* if this is a secondary process the hardware is already initialized */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
	enic->bar0.len = pdev->mem_resource[0].len;

	/* Register vNIC device */
	enic->vdev = vnic_dev_register(NULL, enic, enic->pdev, &enic->bar0, 1);
	if (!enic->vdev) {
		dev_err(enic, "vNIC registration failed, aborting\n");
		goto err_out;
	}

	LIST_INIT(&enic->memzone_list);
	rte_spinlock_init(&enic->memzone_list_lock);

	vnic_register_cbacks(enic->vdev,
		enic_alloc_consistent,
		enic_free_consistent);

	/*
	 * Allocate the consistent memory for stats upfront so both primary and
	 * secondary processes can dump stats.
	 */
	err = vnic_dev_alloc_stats_mem(enic->vdev);
	if (err) {
		dev_err(enic, "Failed to allocate cmd memory, aborting\n");
		goto err_out_unregister;
	}
	/* Issue device open to get device in known state */
	err = enic_dev_open(enic);
	if (err) {
		dev_err(enic, "vNIC dev open failed, aborting\n");
		goto err_out_unregister;
	}

	/* Set ingress vlan rewrite mode before vnic initialization */
	err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
		IG_VLAN_REWRITE_MODE_PASS_THRU);
	if (err) {
		dev_err(enic,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier. We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */
	err = vnic_dev_init(enic->vdev, 0);
	if (err) {
		dev_err(enic, "vNIC dev init failed, aborting\n");
		goto err_out_dev_close;
	}

	err = enic_dev_init(enic);
	if (err) {
		dev_err(enic, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}

	return 0;

err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_unregister:
	vnic_dev_unregister(enic->vdev);
err_out:
	return err;
}

void enic_remove(struct enic *enic)
{
	enic_dev_deinit(enic);
	vnic_dev_close(enic->vdev);
	vnic_dev_unregister(enic->vdev);
}