/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_ethdev_driver.h>

#include "enic_compat.h"
#include "enic.h"
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_nic.h"
static inline int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}
static int is_zero_addr(uint8_t *addr)
{
	return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
}
static int is_mcast_addr(uint8_t *addr)
{
	/* The multicast bit is the least significant bit of the first octet */
	return addr[0] & 1;
}
static int is_eth_addr_valid(uint8_t *addr)
{
	return !is_mcast_addr(addr) && !is_zero_addr(addr);
}
static void
enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
{
	uint16_t i;

	if (!rq || !rq->mbuf_ring) {
		dev_debug(enic, "Pointer to rq or mbuf_ring is NULL");
		return;
	}

	for (i = 0; i < rq->ring.desc_count; i++) {
		if (rq->mbuf_ring[i]) {
			rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
			rq->mbuf_ring[i] = NULL;
		}
	}
}

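/* Per-buffer callback passed to vnic_wq_clean(); frees one completed Tx
 * mbuf segment and clears the WQ buffer's reference to it.
 */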
static void enic_free_wq_buf(struct vnic_wq_buf *buf)
{
	struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;

	rte_pktmbuf_free_seg(mbuf);
	buf->mb = NULL;
}
static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			dev_err(enic, "WQ[%d] error_status %d\n", i,
				error_status);
	}

	for (i = 0; i < enic_vnic_rq_count(enic); i++) {
		if (!enic->rq[i].in_use)
			continue;
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			dev_err(enic, "RQ[%d] error_status %d\n", i,
				error_status);
	}
}
static void enic_clear_soft_stats(struct enic *enic)
{
	struct enic_soft_stats *soft_stats = &enic->soft_stats;
	rte_atomic64_clear(&soft_stats->rx_nombuf);
	rte_atomic64_clear(&soft_stats->rx_packet_errors);
	rte_atomic64_clear(&soft_stats->tx_oversized);
}
static void enic_init_soft_stats(struct enic *enic)
{
	struct enic_soft_stats *soft_stats = &enic->soft_stats;
	rte_atomic64_init(&soft_stats->rx_nombuf);
	rte_atomic64_init(&soft_stats->rx_packet_errors);
	rte_atomic64_init(&soft_stats->tx_oversized);
	enic_clear_soft_stats(enic);
}
void enic_dev_stats_clear(struct enic *enic)
{
	if (vnic_dev_stats_clear(enic->vdev))
		dev_err(enic, "Error in clearing stats\n");
	enic_clear_soft_stats(enic);
}
int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
{
	struct vnic_stats *stats;
	struct enic_soft_stats *soft_stats = &enic->soft_stats;
	int64_t rx_truncated;
	uint64_t rx_packet_errors;
	int ret = vnic_dev_stats_dump(enic->vdev, &stats);

	if (ret) {
		dev_err(enic, "Error in getting stats\n");
		return ret;
	}

	/* The number of truncated packets can only be calculated by
	 * subtracting a hardware counter from error packets received by
	 * the driver. Note: this causes transient inaccuracies in the
	 * ipackets count. Also, the lengths of truncated packets are
	 * counted in ibytes even though truncated packets are dropped,
	 * which can make ibytes slightly higher than it should be.
	 */
	rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors);
	rx_truncated = rx_packet_errors - stats->rx.rx_errors;

	r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated;
	r_stats->opackets = stats->tx.tx_frames_ok;

	r_stats->ibytes = stats->rx.rx_bytes_ok;
	r_stats->obytes = stats->tx.tx_bytes_ok;

	r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
	r_stats->oerrors = stats->tx.tx_errors
			   + rte_atomic64_read(&soft_stats->tx_oversized);

	r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;

	r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
	return 0;
}
void enic_del_mac_address(struct enic *enic, int mac_index)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes;

	if (vnic_dev_del_addr(enic->vdev, mac_addr))
		dev_err(enic, "del mac addr failed\n");
}
int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
{
	int err;

	if (!is_eth_addr_valid(mac_addr)) {
		dev_err(enic, "invalid mac address\n");
		return -EINVAL;
	}

	err = vnic_dev_add_addr(enic->vdev, mac_addr);
	if (err)
		dev_err(enic, "add mac addr failed\n");
	return err;
}
static void
enic_free_rq_buf(struct rte_mbuf **mbuf)
{
	if (*mbuf == NULL)
		return;

	rte_pktmbuf_free(*mbuf);
	*mbuf = NULL;
}

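/*
 * Program the vNIC resources (RQs, WQs, CQs, interrupts) into the hardware.
 * Each eth_dev Rx queue maps to a start-of-packet (sop) RQ plus an optional
 * data RQ, both completing to the same CQ. Rx CQs run in entry mode
 * (completion descriptors written to the ring); Tx CQs run in message mode,
 * posting the completion index to the per-WQ cqmsg memzone instead.
 */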
void enic_init_vnic_resources(struct enic *enic)
{
	unsigned int error_interrupt_enable = 1;
	unsigned int error_interrupt_offset = 0;
	unsigned int rxq_interrupt_enable = 0;
	unsigned int rxq_interrupt_offset;
	unsigned int index = 0;
	unsigned int cq_idx;
	struct vnic_rq *data_rq;

	if (enic->rte_dev->data->dev_conf.intr_conf.rxq) {
		rxq_interrupt_enable = 1;
		rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET;
	}
	for (index = 0; index < enic->rq_count; index++) {
		cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));

		vnic_rq_init(&enic->rq[enic_rte_rq_idx_to_sop_idx(index)],
			cq_idx,
			error_interrupt_enable,
			error_interrupt_offset);

		data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index)];
		if (data_rq->in_use)
			vnic_rq_init(data_rq,
				     cq_idx,
				     error_interrupt_enable,
				     error_interrupt_offset);

		vnic_cq_init(&enic->cq[cq_idx],
			0 /* flow_control_enable */,
			1 /* color_enable */,
			0 /* cq_head */,
			0 /* cq_tail */,
			1 /* cq_tail_color */,
			rxq_interrupt_enable,
			1 /* cq_entry_enable */,
			0 /* cq_message_enable */,
			rxq_interrupt_offset,
			0 /* cq_message_addr */);
		if (rxq_interrupt_enable)
			rxq_interrupt_offset++;
	}

	for (index = 0; index < enic->wq_count; index++) {
		vnic_wq_init(&enic->wq[index],
			enic_cq_wq(enic, index),
			error_interrupt_enable,
			error_interrupt_offset);

		cq_idx = enic_cq_wq(enic, index);
		vnic_cq_init(&enic->cq[cq_idx],
			0 /* flow_control_enable */,
			1 /* color_enable */,
			0 /* cq_head */,
			0 /* cq_tail */,
			1 /* cq_tail_color */,
			0 /* interrupt_enable */,
			0 /* cq_entry_enable */,
			1 /* cq_message_enable */,
			0 /* interrupt offset */,
			(u64)enic->wq[index].cqmsg_rz->iova);
	}

	for (index = 0; index < enic->intr_count; index++) {
		vnic_intr_init(&enic->intr[index],
			       enic->config.intr_timer_usec,
			       enic->config.intr_timer_type,
			       /*mask_on_assertion*/1);
	}
}
static int
enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
{
	struct rte_mbuf *mb;
	struct rq_enet_desc *rqd = rq->ring.descs;
	unsigned i;
	dma_addr_t dma_addr;
	uint32_t max_rx_pkt_len;
	uint16_t rq_buf_len;

	if (!rq->in_use)
		return 0;

	dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
		  rq->ring.desc_count);

	/*
	 * If *not* using scatter and the mbuf size is smaller than the
	 * requested max packet size (max_rx_pkt_len), then reduce the
	 * posted buffer size to max_rx_pkt_len. HW still receives packets
	 * larger than max_rx_pkt_len, but they will be truncated, which we
	 * drop in the rx handler. Not ideal, but better than returning
	 * large packets when the user is not expecting them.
	 */
	max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	rq_buf_len = rte_pktmbuf_data_room_size(rq->mp) - RTE_PKTMBUF_HEADROOM;
	if (max_rx_pkt_len < rq_buf_len && !rq->data_queue_enable)
		rq_buf_len = max_rx_pkt_len;
	for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
		mb = rte_mbuf_raw_alloc(rq->mp);
		if (mb == NULL) {
			dev_err(enic, "RX mbuf alloc failed queue_id=%u\n",
				(unsigned)rq->index);
			return -ENOMEM;
		}

		mb->data_off = RTE_PKTMBUF_HEADROOM;
		dma_addr = (dma_addr_t)(mb->buf_iova
			   + RTE_PKTMBUF_HEADROOM);
		rq_enet_desc_enc(rqd, dma_addr,
				(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
				: RQ_ENET_TYPE_NOT_SOP),
				rq_buf_len);
		rq->mbuf_ring[i] = mb;
	}

	/* make sure all prior writes are complete before doing the PIO write */
	rte_rmb();

	/* Post all but the last buffer to VIC. */
	rq->posted_index = rq->ring.desc_count - 1;

	rq->rx_nb_hold = 0;

	dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
		enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
	iowrite32(rq->posted_index, &rq->ctrl->posted_index);
	iowrite32(0, &rq->ctrl->fetch_index);
	rte_rmb();

	return 0;
}

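/*
 * DMA-consistent allocation callback registered with the vNIC layer (see
 * vnic_register_cbacks() in enic_probe()). Memory comes from an
 * IOVA-contiguous memzone; each allocation is tracked on enic->memzone_list
 * so enic_free_consistent() can find the memzone by virtual/DMA address.
 */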
static void *
enic_alloc_consistent(void *priv, size_t size,
	dma_addr_t *dma_handle, u8 *name)
{
	void *vaddr;
	const struct rte_memzone *rz;
	*dma_handle = 0;
	struct enic *enic = (struct enic *)priv;
	struct enic_memzone_entry *mze;

	rz = rte_memzone_reserve_aligned((const char *)name, size,
			SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
	if (!rz) {
		pr_err("%s : Failed to allocate memory requested for %s\n",
			__func__, name);
		return NULL;
	}

	vaddr = rz->addr;
	*dma_handle = (dma_addr_t)rz->iova;

	mze = rte_malloc("enic memzone entry",
			 sizeof(struct enic_memzone_entry), 0);

	if (!mze) {
		pr_err("%s : Failed to allocate memory for memzone list\n",
		       __func__);
		rte_memzone_free(rz);
		return NULL;
	}

	mze->rz = rz;

	rte_spinlock_lock(&enic->memzone_list_lock);
	LIST_INSERT_HEAD(&enic->memzone_list, mze, entries);
	rte_spinlock_unlock(&enic->memzone_list_lock);

	return vaddr;
}
static void
enic_free_consistent(void *priv,
		     __rte_unused size_t size,
		     void *vaddr,
		     dma_addr_t dma_handle)
{
	struct enic_memzone_entry *mze;
	struct enic *enic = (struct enic *)priv;

	rte_spinlock_lock(&enic->memzone_list_lock);
	LIST_FOREACH(mze, &enic->memzone_list, entries) {
		if (mze->rz->addr == vaddr &&
		    mze->rz->iova == dma_handle)
			break;
	}
	if (mze == NULL) {
		rte_spinlock_unlock(&enic->memzone_list_lock);
		dev_warning(enic,
			"Tried to free memory, but couldn't find it in the memzone list\n");
		return;
	}
	LIST_REMOVE(mze, entries);
	rte_spinlock_unlock(&enic->memzone_list_lock);
	rte_memzone_free(mze->rz);
	rte_free(mze);
}
int enic_link_update(struct enic *enic)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	link.link_status = enic_get_link_status(enic);
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = vnic_dev_port_speed(enic->vdev);

	return rte_eth_linkstatus_set(eth_dev, &link);
}
static void
enic_intr_handler(void *arg)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
	struct enic *enic = pmd_priv(dev);

	vnic_intr_return_all_credits(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);

	enic_link_update(enic);
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	enic_log_q_error(enic);
}

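/*
 * Interrupt vector layout: vector 0 (ENICPMD_LSC_INTR_OFFSET) is shared by
 * link-state and queue-error events; vectors starting at
 * ENICPMD_RXQ_INTR_OFFSET carry one Rx queue interrupt each.
 */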
static int enic_rxq_intr_init(struct enic *enic)
{
	struct rte_intr_handle *intr_handle;
	uint32_t rxq_intr_count, i;
	int err;

	intr_handle = enic->rte_dev->intr_handle;
	if (!enic->rte_dev->data->dev_conf.intr_conf.rxq)
		return 0;
	/*
	 * Rx queue interrupts only work when we have MSI-X interrupts,
	 * one per queue. Sharing one interrupt is technically
	 * possible with VIC, but it is not worth the complications it brings.
	 */
	if (!rte_intr_cap_multiple(intr_handle)) {
		dev_err(enic, "Rx queue interrupts require MSI-X interrupts"
			" (vfio-pci driver)\n");
		return -ENOTSUP;
	}
	rxq_intr_count = enic->intr_count - ENICPMD_RXQ_INTR_OFFSET;
	err = rte_intr_efd_enable(intr_handle, rxq_intr_count);
	if (err) {
		dev_err(enic, "Failed to enable event fds for Rx queue"
			" interrupts\n");
		return err;
	}
	intr_handle->intr_vec = rte_zmalloc("enic_intr_vec",
					    rxq_intr_count * sizeof(int), 0);
	if (intr_handle->intr_vec == NULL) {
		dev_err(enic, "Failed to allocate intr_vec\n");
		return -ENOMEM;
	}
	for (i = 0; i < rxq_intr_count; i++)
		intr_handle->intr_vec[i] = i + ENICPMD_RXQ_INTR_OFFSET;
	return 0;
}
static void enic_rxq_intr_deinit(struct enic *enic)
{
	struct rte_intr_handle *intr_handle;

	intr_handle = enic->rte_dev->intr_handle;
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}

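/*
 * Device start: post Rx buffers, enable all queues, add the primary MAC
 * address, enable the vNIC, then register and unmask the link-state/error
 * interrupt. Called from the ethdev dev_start path.
 */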
int enic_enable(struct enic *enic)
{
	unsigned int index;
	int err;
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* vnic notification of link status has already been turned on in
	 * enic_dev_init() which is called during probe time. Here we are
	 * just turning on interrupt vector 0 if needed.
	 */
	if (eth_dev->data->dev_conf.intr_conf.lsc)
		vnic_dev_notify_set(enic->vdev, 0);

	err = enic_rxq_intr_init(enic);
	if (err)
		return err;
	if (enic_clsf_init(enic))
		dev_warning(enic, "Init of hash table for clsf failed. "
			"Flow director feature will not work\n");

	for (index = 0; index < enic->rq_count; index++) {
		err = enic_alloc_rx_queue_mbufs(enic,
			&enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
		if (err) {
			dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
			return err;
		}
		err = enic_alloc_rx_queue_mbufs(enic,
			&enic->rq[enic_rte_rq_idx_to_data_idx(index)]);
		if (err) {
			/* release the allocated mbufs for the sop rq */
			enic_rxmbuf_queue_release(enic,
				&enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);

			dev_err(enic, "Failed to alloc data RX queue mbufs\n");
			return err;
		}
	}

	for (index = 0; index < enic->wq_count; index++)
		enic_start_wq(enic, index);
	for (index = 0; index < enic->rq_count; index++)
		enic_start_rq(enic, index);

	vnic_dev_add_addr(enic->vdev, enic->mac_addr);

	vnic_dev_enable_wait(enic->vdev);

	/* Register and enable error interrupt */
	rte_intr_callback_register(&(enic->pdev->intr_handle),
		enic_intr_handler, (void *)enic->rte_dev);

	rte_intr_enable(&(enic->pdev->intr_handle));
	/* Unmask LSC interrupt */
	vnic_intr_unmask(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);

	return 0;
}
int enic_alloc_intr_resources(struct enic *enic)
{
	int err;
	unsigned int i;

	dev_info(enic, "vNIC resources used:  "
		"wq %d rq %d cq %d intr %d\n",
		enic->wq_count, enic_vnic_rq_count(enic),
		enic->cq_count, enic->intr_count);

	for (i = 0; i < enic->intr_count; i++) {
		err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
		if (err) {
			enic_free_vnic_resources(enic);
			return err;
		}
	}
	return 0;
}
void enic_free_rq(void *rxq)
{
	struct vnic_rq *rq_sop, *rq_data;
	struct enic *enic;

	if (rxq == NULL)
		return;

	rq_sop = (struct vnic_rq *)rxq;
	enic = vnic_dev_priv(rq_sop->vdev);
	rq_data = &enic->rq[rq_sop->data_queue_idx];

	enic_rxmbuf_queue_release(enic, rq_sop);
	if (rq_data->in_use)
		enic_rxmbuf_queue_release(enic, rq_data);

	rte_free(rq_sop->mbuf_ring);
	if (rq_data->in_use)
		rte_free(rq_data->mbuf_ring);

	rq_sop->mbuf_ring = NULL;
	rq_data->mbuf_ring = NULL;

	vnic_rq_free(rq_sop);
	if (rq_data->in_use)
		vnic_rq_free(rq_data);

	vnic_cq_free(&enic->cq[enic_sop_rq_idx_to_cq_idx(rq_sop->index)]);

	rq_sop->in_use = 0;
	rq_data->in_use = 0;
}

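/*
 * The queue start/stop helpers below keep the hardware queue state and the
 * rte_eth_dev queue-state arrays in sync. For Rx, the data RQ (if in use)
 * is enabled before its sop RQ and disabled after it.
 */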
void enic_start_wq(struct enic *enic, uint16_t queue_idx)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	vnic_wq_enable(&enic->wq[queue_idx]);
	eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}
int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	int ret;

	ret = vnic_wq_disable(&enic->wq[queue_idx]);
	if (ret)
		return ret;

	eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}
void enic_start_rq(struct enic *enic, uint16_t queue_idx)
{
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;
	rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
	rq_data = &enic->rq[rq_sop->data_queue_idx];
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	if (rq_data->in_use)
		vnic_rq_enable(rq_data);
	rte_mb();
	vnic_rq_enable(rq_sop);
	eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}
int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
{
	int ret1 = 0, ret2 = 0;
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;
	rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
	rq_data = &enic->rq[rq_sop->data_queue_idx];

	ret2 = vnic_rq_disable(rq_sop);
	rte_mb();
	if (rq_data->in_use)
		ret1 = vnic_rq_disable(rq_data);

	if (ret2)
		return ret2;
	else if (ret1)
		return ret1;

	eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

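/*
 * Allocate the vNIC resources backing one eth_dev Rx queue: a sop RQ, an
 * optional data RQ (used only when scatter Rx needs more than one mbuf per
 * packet), a shared CQ sized for both rings, and the software mbuf rings.
 */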
int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
	unsigned int socket_id, struct rte_mempool *mp,
	uint16_t nb_desc, uint16_t free_thresh)
{
	int rc;
	uint16_t sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
	uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx);
	struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
	struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
	unsigned int mbuf_size, mbufs_per_pkt;
	unsigned int nb_sop_desc, nb_data_desc;
	uint16_t min_sop, max_sop, min_data, max_data;
	uint32_t max_rx_pkt_len;

	rq_sop->is_sop = 1;
	rq_sop->data_queue_idx = data_queue_idx;
	rq_data->is_sop = 0;
	rq_data->data_queue_idx = 0;
	rq_sop->socket_id = socket_id;
	rq_sop->mp = mp;
	rq_data->socket_id = socket_id;
	rq_data->mp = mp;
	rq_sop->in_use = 1;
	rq_sop->rx_free_thresh = free_thresh;
	rq_data->rx_free_thresh = free_thresh;
	dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
		  free_thresh);

	mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
			       RTE_PKTMBUF_HEADROOM);
	/* max_rx_pkt_len includes the ethernet header and CRC. */
	max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;

	if (enic->rte_dev->data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_SCATTER) {
		dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
		/* ceil((max pkt len)/mbuf_size) */
		mbufs_per_pkt = (max_rx_pkt_len + mbuf_size - 1) / mbuf_size;
	} else {
		dev_info(enic, "Scatter rx mode disabled\n");
		mbufs_per_pkt = 1;
		if (max_rx_pkt_len > mbuf_size) {
			dev_warning(enic, "The maximum Rx packet size (%u) is"
				    " larger than the mbuf size (%u), and"
				    " scatter is disabled. Larger packets will"
				    " be truncated.\n",
				    max_rx_pkt_len, mbuf_size);
		}
	}

	if (mbufs_per_pkt > 1) {
		dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx);
		rq_sop->data_queue_enable = 1;
		rq_data->in_use = 1;
		/*
		 * HW does not directly support rxmode.max_rx_pkt_len. HW always
		 * receives packet sizes up to the "max" MTU.
		 * If not using scatter, we can achieve the effect of dropping
		 * larger packets by reducing the size of posted buffers.
		 * See enic_alloc_rx_queue_mbufs().
		 */
		if (max_rx_pkt_len <
		    enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu)) {
			dev_warning(enic, "rxmode.max_rx_pkt_len is ignored"
				    " when scatter rx mode is in use.\n");
		}
	} else {
		dev_info(enic, "Rq %u Scatter rx mode not being used\n",
			 queue_idx);
		rq_sop->data_queue_enable = 0;
		rq_data->in_use = 0;
	}

	/* number of descriptors has to be a multiple of 32 */
	nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
	nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;

	rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
	rq_data->max_mbufs_per_pkt = mbufs_per_pkt;

	if (mbufs_per_pkt > 1) {
		min_sop = 64;
		max_sop = ((enic->config.rq_desc_count /
			    (mbufs_per_pkt - 1)) & ~0x1F);
		min_data = min_sop * (mbufs_per_pkt - 1);
		max_data = enic->config.rq_desc_count;
	} else {
		min_sop = 64;
		max_sop = enic->config.rq_desc_count;
		min_data = 0;
		max_data = 0;
	}

	if (nb_desc < (min_sop + min_data)) {
		dev_warning(enic,
			    "Number of rx descs too low, adjusting to minimum\n");
		nb_sop_desc = min_sop;
		nb_data_desc = min_data;
	} else if (nb_desc > (max_sop + max_data)) {
		dev_warning(enic,
			    "Number of rx_descs too high, adjusting to maximum\n");
		nb_sop_desc = max_sop;
		nb_data_desc = max_data;
	}
	if (mbufs_per_pkt > 1) {
		dev_info(enic, "For max packet size %u and mbuf size %u valid"
			 " rx descriptor range is %u to %u\n",
			 max_rx_pkt_len, mbuf_size, min_sop + min_data,
			 max_sop + max_data);
	}
	dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
		 nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc);

	/* Allocate sop queue resources */
	rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx,
		nb_sop_desc, sizeof(struct rq_enet_desc));
	if (rc) {
		dev_err(enic, "error in allocation of sop rq\n");
		goto err_exit;
	}
	nb_sop_desc = rq_sop->ring.desc_count;

	if (rq_data->in_use) {
		/* Allocate data queue resources */
		rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx,
				   nb_data_desc,
				   sizeof(struct rq_enet_desc));
		if (rc) {
			dev_err(enic, "error in allocation of data rq\n");
			goto err_free_rq_sop;
		}
		nb_data_desc = rq_data->ring.desc_count;
	}
	rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
			   socket_id, nb_sop_desc + nb_data_desc,
			   sizeof(struct cq_enet_rq_desc));
	if (rc) {
		dev_err(enic, "error in allocation of cq for rq\n");
		goto err_free_rq_data;
	}

	/* Allocate the mbuf rings */
	rq_sop->mbuf_ring = (struct rte_mbuf **)
		rte_zmalloc_socket("rq->mbuf_ring",
				   sizeof(struct rte_mbuf *) * nb_sop_desc,
				   RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
	if (rq_sop->mbuf_ring == NULL)
		goto err_free_cq;

	if (rq_data->in_use) {
		rq_data->mbuf_ring = (struct rte_mbuf **)
			rte_zmalloc_socket("rq->mbuf_ring",
				sizeof(struct rte_mbuf *) * nb_data_desc,
				RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
		if (rq_data->mbuf_ring == NULL)
			goto err_free_sop_mbuf;
	}

	rq_sop->tot_nb_desc = nb_desc; /* squirrel away for MTU update function */

	return 0;

err_free_sop_mbuf:
	rte_free(rq_sop->mbuf_ring);
err_free_cq:
	/* cleanup on error */
	vnic_cq_free(&enic->cq[queue_idx]);
err_free_rq_data:
	if (rq_data->in_use)
		vnic_rq_free(rq_data);
err_free_rq_sop:
	vnic_rq_free(rq_sop);
err_exit:
	return -ENOMEM;
}

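/*
 * Tx queue teardown/setup. Each WQ owns a small "cqmsg" memzone to which the
 * hardware posts its completion index (message-mode CQ); it is reserved in
 * enic_alloc_wq() and released in enic_free_wq().
 */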
void enic_free_wq(void *txq)
{
	struct vnic_wq *wq;
	struct enic *enic;

	if (txq == NULL)
		return;

	wq = (struct vnic_wq *)txq;
	enic = vnic_dev_priv(wq->vdev);
	rte_memzone_free(wq->cqmsg_rz);
	vnic_wq_free(wq);
	vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
}
int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
	unsigned int socket_id, uint16_t nb_desc)
{
	int err;
	struct vnic_wq *wq = &enic->wq[queue_idx];
	unsigned int cq_index = enic_cq_wq(enic, queue_idx);
	char name[NAME_MAX];
	static int instance;

	wq->socket_id = socket_id;
	if (nb_desc > enic->config.wq_desc_count) {
		dev_warning(enic,
			    "WQ %d - number of tx desc in cmd line (%d) "
			    "is greater than that in the UCSM/CIMC adapter "
			    "policy. Applying the value in the adapter "
			    "policy (%d)\n",
			    queue_idx, nb_desc, enic->config.wq_desc_count);
	} else if (nb_desc != enic->config.wq_desc_count) {
		enic->config.wq_desc_count = nb_desc;
		dev_info(enic,
			 "TX Queues - effective number of descs:%d\n",
			 nb_desc);
	}

	/* Allocate queue resources */
	err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
		enic->config.wq_desc_count,
		sizeof(struct wq_enet_desc));
	if (err) {
		dev_err(enic, "error in allocation of wq\n");
		return err;
	}

	err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
		socket_id, enic->config.wq_desc_count,
		sizeof(struct cq_enet_wq_desc));
	if (err) {
		vnic_wq_free(wq);
		dev_err(enic, "error in allocation of cq for wq\n");
	}

	/* set up CQ message */
	snprintf((char *)name, sizeof(name),
		 "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx,
		 instance++);

	wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
			sizeof(uint32_t), SOCKET_ID_ANY,
			RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
	if (!wq->cqmsg_rz)
		return -ENOMEM;

	return 0;
}
int enic_disable(struct enic *enic)
{
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]); /* flush write */
	}
	enic_rxq_intr_deinit(enic);
	rte_intr_disable(&enic->pdev->intr_handle);
	rte_intr_callback_unregister(&enic->pdev->intr_handle,
				     enic_intr_handler,
				     (void *)enic->rte_dev);

	vnic_dev_disable(enic->vdev);

	enic_clsf_destroy(enic);

	if (!enic_is_sriov_vf(enic))
		vnic_dev_del_addr(enic->vdev, enic->mac_addr);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic_vnic_rq_count(enic); i++) {
		if (enic->rq[i].in_use) {
			err = vnic_rq_disable(&enic->rq[i]);
			if (err)
				return err;
		}
	}

	/* If we were using interrupts, set the interrupt vector to -1
	 * to disable interrupts. We are not disabling link notifications,
	 * though, as we want the polling of link status to continue working.
	 */
	if (enic->rte_dev->data->dev_conf.intr_conf.lsc)
		vnic_dev_notify_set(enic->vdev, -1);

	vnic_dev_set_reset_flag(enic->vdev, 1);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);

	for (i = 0; i < enic_vnic_rq_count(enic); i++)
		if (enic->rq[i].in_use)
			vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}

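/*
 * Kick off a device command via start() and poll finished() once a
 * millisecond, for up to two seconds, until the command completes.
 */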
static int enic_dev_wait(struct vnic_dev *vdev,
			 int (*start)(struct vnic_dev *, int),
			 int (*finished)(struct vnic_dev *, int *),
			 int arg)
{
	int done;
	int err;
	int i;

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max */
	for (i = 0; i < 2000; i++) {
		err = finished(vdev, &done);
		if (err)
			return err;
		if (done)
			return 0;
		usleep(1000);
	}
	return -ETIMEDOUT;
}
static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		dev_err(enic_get_dev(enic),
			"vNIC device open failed, err %d\n", err);

	return err;
}
static int enic_set_rsskey(struct enic *enic, uint8_t *user_key)
{
	dma_addr_t rss_key_buf_pa;
	union vnic_rss_key *rss_key_buf_va = NULL;
	int err, i;
	u8 name[NAME_MAX];

	RTE_ASSERT(user_key != NULL);
	snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name);
	rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
		&rss_key_buf_pa, name);
	if (!rss_key_buf_va)
		return -ENOMEM;

	for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++)
		rss_key_buf_va->key[i / 10].b[i % 10] = user_key[i];

	err = enic_set_rss_key(enic,
		rss_key_buf_pa,
		sizeof(union vnic_rss_key));

	/* Save for later queries */
	if (!err)
		rte_memcpy(&enic->rss_key, rss_key_buf_va,
			   sizeof(union vnic_rss_key));

	enic_free_consistent(enic, sizeof(union vnic_rss_key),
		rss_key_buf_va, rss_key_buf_pa);

	return err;
}
int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	int err;
	u8 name[NAME_MAX];

	snprintf((char *)name, NAME_MAX, "rss_cpu-%s", enic->bdf_name);
	rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
		&rss_cpu_buf_pa, name);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

	rte_memcpy(rss_cpu_buf_va, rss_cpu, sizeof(union vnic_rss_cpu));

	err = enic_set_rss_cpu(enic,
		rss_cpu_buf_pa,
		sizeof(union vnic_rss_cpu));

	enic_free_consistent(enic, sizeof(union vnic_rss_cpu),
		rss_cpu_buf_va, rss_cpu_buf_pa);

	/* Save for later queries */
	if (!err)
		rte_memcpy(&enic->rss_cpu, rss_cpu, sizeof(union vnic_rss_cpu));
	return err;
}
static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
	const u8 tso_ipid_split_en = 0;
	int err;

	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		enic->ig_vlan_strip_en);

	return err;
}
/* Initialize RSS with defaults, called from dev_configure */
int enic_init_rss_nic_cfg(struct enic *enic)
{
	static uint8_t default_rss_key[] = {
		85, 67, 83, 97, 119, 101, 115, 111, 109, 101,
		80, 65, 76, 79, 117, 110, 105, 113, 117, 101,
		76, 73, 78, 85, 88, 114, 111, 99, 107, 115,
		69, 78, 73, 67, 105, 115, 99, 111, 111, 108,
	};
	struct rte_eth_rss_conf rss_conf;
	union vnic_rss_cpu rss_cpu;
	int ret, i;

	rss_conf = enic->rte_dev->data->dev_conf.rx_adv_conf.rss_conf;
	/*
	 * If setting key for the first time, and the user gives us none, then
	 * push the default key to NIC.
	 */
	if (rss_conf.rss_key == NULL) {
		rss_conf.rss_key = default_rss_key;
		rss_conf.rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
	}
	ret = enic_set_rss_conf(enic, &rss_conf);
	if (ret) {
		dev_err(enic, "Failed to configure RSS\n");
		return ret;
	}
	if (enic->rss_enable) {
		/* If enabling RSS, use the default reta */
		for (i = 0; i < ENIC_RSS_RETA_SIZE; i++) {
			rss_cpu.cpu[i / 4].b[i % 4] =
				enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);
		}
		ret = enic_set_rss_reta(enic, &rss_cpu);
		if (ret)
			dev_err(enic, "Failed to set RSS indirection table\n");
	}
	return ret;
}

int enic_setup_finish(struct enic *enic)
{
	enic_init_soft_stats(enic);

	/* Default conf */
	vnic_dev_packet_filter(enic->vdev,
		1 /* directed  */,
		1 /* multicast */,
		1 /* broadcast */,
		0 /* promisc   */,
		1 /* allmulti  */);

	enic->promisc = 0;
	enic->allmulti = 1;

	return 0;
}
static int enic_rss_conf_valid(struct enic *enic,
			       struct rte_eth_rss_conf *rss_conf)
{
	/* RSS is disabled per VIC settings. Ignore rss_conf. */
	if (enic->flow_type_rss_offloads == 0)
		return 0;
	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len != ENIC_RSS_HASH_KEY_SIZE) {
		dev_err(enic, "Given rss_key is %d bytes, it must be %d\n",
			rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
		return -EINVAL;
	}
	if (rss_conf->rss_hf != 0 &&
	    (rss_conf->rss_hf & enic->flow_type_rss_offloads) == 0) {
		dev_err(enic, "Given rss_hf contains none of the supported"
			" types\n");
		return -EINVAL;
	}
	return 0;
}
/* Set hash type and key according to rss_conf */
int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *eth_dev;
	uint64_t rss_hf;
	u8 rss_hash_type;
	u8 rss_enable;
	int ret;

	RTE_ASSERT(rss_conf != NULL);
	ret = enic_rss_conf_valid(enic, rss_conf);
	if (ret) {
		dev_err(enic, "RSS configuration (rss_conf) is invalid\n");
		return ret;
	}

	eth_dev = enic->rte_dev;
	rss_hash_type = 0;
	rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads;
	if (enic->rq_count > 1 &&
	    (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) &&
	    rss_hf != 0) {
		rss_enable = 1;
		if (rss_hf & ETH_RSS_IPV4)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
		if (rss_hf & ETH_RSS_IPV6)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
		if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
		if (rss_hf & ETH_RSS_IPV6_EX)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6_EX;
		if (rss_hf & ETH_RSS_IPV6_TCP_EX)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX;
	} else {
		rss_enable = 0;
		rss_hf = 0;
	}

	/* Set the hash key if provided */
	if (rss_enable && rss_conf->rss_key) {
		ret = enic_set_rsskey(enic, rss_conf->rss_key);
		if (ret) {
			dev_err(enic, "Failed to set RSS key\n");
			return ret;
		}
	}

	ret = enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, rss_hash_type,
			      ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
			      rss_enable);
	if (!ret) {
		enic->rss_hf = rss_hf;
		enic->rss_hash_type = rss_hash_type;
		enic->rss_enable = rss_enable;
	}
	return ret;
}
int enic_set_vlan_strip(struct enic *enic)
{
	/*
	 * Unfortunately, VLAN strip on/off and RSS on/off are configured
	 * together. So, re-do niccfg, preserving the current RSS settings.
	 */
	return enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, enic->rss_hash_type,
			       ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
			       enic->rss_enable);
}
void enic_add_packet_filter(struct enic *enic)
{
	/* Args -> directed, multicast, broadcast, promisc, allmulti */
	vnic_dev_packet_filter(enic->vdev, 1, 1, 1,
		enic->promisc, enic->allmulti);
}
int enic_get_link_status(struct enic *enic)
{
	return vnic_dev_link_status(enic->vdev);
}
static void enic_dev_deinit(struct enic *enic)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	/* stop link status checking */
	vnic_dev_notify_unset(enic->vdev);

	rte_free(eth_dev->data->mac_addrs);
	rte_free(enic->cq);
	rte_free(enic->intr);
	rte_free(enic->rq);
	rte_free(enic->wq);
}

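/*
 * Validate that the vNIC provisioned in UCSM/CIMC has enough RQs, WQs, CQs
 * and interrupts for the queue counts the application configured, and record
 * the counts actually used.
 */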
int enic_set_vnic_res(struct enic *enic)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	int rc = 0;
	unsigned int required_rq, required_wq, required_cq, required_intr;

	/* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */
	required_rq = eth_dev->data->nb_rx_queues * 2;
	required_wq = eth_dev->data->nb_tx_queues;
	required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues;
	required_intr = 1; /* 1 for LSC even if intr_conf.lsc is 0 */
	if (eth_dev->data->dev_conf.intr_conf.rxq) {
		required_intr += eth_dev->data->nb_rx_queues;
	}

	if (enic->conf_rq_count < required_rq) {
		dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
			eth_dev->data->nb_rx_queues,
			required_rq, enic->conf_rq_count);
		rc = -EINVAL;
	}
	if (enic->conf_wq_count < required_wq) {
		dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
			eth_dev->data->nb_tx_queues, enic->conf_wq_count);
		rc = -EINVAL;
	}

	if (enic->conf_cq_count < required_cq) {
		dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
			required_cq, enic->conf_cq_count);
		rc = -EINVAL;
	}
	if (enic->conf_intr_count < required_intr) {
		dev_err(dev, "Not enough Interrupts to support Rx queue"
			" interrupts. Required:%u, Configured:%u\n",
			required_intr, enic->conf_intr_count);
		rc = -EINVAL;
	}

	if (rc == 0) {
		enic->rq_count = eth_dev->data->nb_rx_queues;
		enic->wq_count = eth_dev->data->nb_tx_queues;
		enic->cq_count = enic->rq_count + enic->wq_count;
		enic->intr_count = required_intr;
	}

	return rc;
}
/* Initialize the completion queue for an RQ, then restart the RQ pair */
static int
enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
{
	struct vnic_rq *sop_rq, *data_rq;
	unsigned int cq_idx;
	int rc = 0;

	sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
	data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
	cq_idx = rq_idx;

	vnic_cq_clean(&enic->cq[cq_idx]);
	vnic_cq_init(&enic->cq[cq_idx],
		     0 /* flow_control_enable */,
		     1 /* color_enable */,
		     0 /* cq_head */,
		     0 /* cq_tail */,
		     1 /* cq_tail_color */,
		     0 /* interrupt_enable */,
		     1 /* cq_entry_enable */,
		     0 /* cq_message_enable */,
		     0 /* interrupt offset */,
		     0 /* cq_message_addr */);

	vnic_rq_init_start(sop_rq, enic_cq_rq(enic,
			   enic_rte_rq_idx_to_sop_idx(rq_idx)), 0,
			   sop_rq->ring.desc_count - 1, 1, 0);
	if (data_rq->in_use) {
		vnic_rq_init_start(data_rq,
				   enic_cq_rq(enic,
				   enic_rte_rq_idx_to_data_idx(rq_idx)), 0,
				   data_rq->ring.desc_count - 1, 1, 0);
	}

	rc = enic_alloc_rx_queue_mbufs(enic, sop_rq);
	if (rc)
		return rc;

	if (data_rq->in_use) {
		rc = enic_alloc_rx_queue_mbufs(enic, data_rq);
		if (rc) {
			enic_rxmbuf_queue_release(enic, sop_rq);
			return rc;
		}
	}

	return 0;
}
/* The Cisco NIC can send and receive packets up to a max packet size
 * determined by the NIC type and firmware. There is also an MTU
 * configured into the NIC via the CIMC/UCSM management interface
 * which can be overridden by this function (up to the max packet size).
 * Depending on the network setup, doing so may cause packet drops
 * and unexpected behavior.
 */
int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
{
	unsigned int rq_idx;
	struct vnic_rq *rq;
	int rc = 0;
	uint16_t old_mtu;	/* previous setting */
	uint16_t config_mtu;	/* Value configured into NIC via CIMC/UCSM */
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	old_mtu = eth_dev->data->mtu;
	config_mtu = enic->config.mtu;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	if (new_mtu > enic->max_mtu) {
		dev_err(enic,
			"MTU not updated: requested (%u) greater than max (%u)\n",
			new_mtu, enic->max_mtu);
		return -EINVAL;
	}
	if (new_mtu < ENIC_MIN_MTU) {
		dev_info(enic,
			"MTU not updated: requested (%u) less than min (%u)\n",
			new_mtu, ENIC_MIN_MTU);
		return -EINVAL;
	}
	if (new_mtu > config_mtu)
		dev_warning(enic,
			"MTU (%u) is greater than value configured in NIC (%u)\n",
			new_mtu, config_mtu);

	/* The easy case is when scatter is disabled. However if the MTU
	 * becomes greater than the mbuf data size, packet drops will ensue.
	 */
	if (!(enic->rte_dev->data->dev_conf.rxmode.offloads &
	      DEV_RX_OFFLOAD_SCATTER)) {
		eth_dev->data->mtu = new_mtu;
		goto set_mtu_done;
	}

	/* Rx scatter is enabled so reconfigure RQ's on the fly. The point is to
	 * change Rx scatter mode if necessary for better performance. I.e. if
	 * MTU was greater than the mbuf size and now it's less, scatter Rx
	 * doesn't have to be used and vice versa.
	 */
	rte_spinlock_lock(&enic->mtu_lock);

	/* Stop traffic on all RQs */
	for (rq_idx = 0; rq_idx < enic->rq_count * 2; rq_idx++) {
		rq = &enic->rq[rq_idx];
		if (rq->is_sop && rq->in_use) {
			rc = enic_stop_rq(enic,
					  enic_sop_rq_idx_to_rte_idx(rq_idx));
			if (rc) {
				dev_err(enic, "Failed to stop Rq %u\n", rq_idx);
				goto set_mtu_done;
			}
		}
	}

	/* replace Rx function with a no-op to avoid getting stale pkts */
	eth_dev->rx_pkt_burst = enic_dummy_recv_pkts;
	rte_mb();

	/* Allow time for threads to exit the real Rx function. */
	usleep(100000);

	/* now it is safe to reconfigure the RQs */

	/* update the mtu */
	eth_dev->data->mtu = new_mtu;

	/* free and reallocate RQs with the new MTU */
	for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
		rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];

		enic_free_rq(rq);
		rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
				   rq->tot_nb_desc, rq->rx_free_thresh);
		if (rc) {
			dev_err(enic,
				"Fatal MTU alloc error- No traffic will pass\n");
			goto set_mtu_done;
		}

		rc = enic_reinit_rq(enic, rq_idx);
		if (rc) {
			dev_err(enic,
				"Fatal MTU RQ reinit- No traffic will pass\n");
			goto set_mtu_done;
		}
	}

	/* put back the real receive function */
	rte_mb();
	eth_dev->rx_pkt_burst = enic_recv_pkts;
	rte_mb();

	/* restart Rx traffic */
	for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
		rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
		if (rq->is_sop && rq->in_use)
			enic_start_rq(enic, rq_idx);
	}

set_mtu_done:
	dev_info(enic, "MTU changed from %u to %u\n", old_mtu, new_mtu);
	rte_spinlock_unlock(&enic->mtu_lock);
	return rc;
}

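/*
 * One-time initialization during probe: fetch the vNIC configuration and
 * resource counts, allocate the queue/interrupt bookkeeping arrays and the
 * MAC address table, and enable link-status notification.
 */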
static int enic_dev_init(struct enic *enic)
{
	int err;
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	vnic_dev_intr_coal_timer_info_default(enic->vdev);

	/* Get vNIC configuration */
	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts */
	enic_get_res_counts(enic);
	if (enic->conf_rq_count == 1) {
		dev_err(enic, "Running with only 1 RQ configured in the vNIC is not supported.\n");
		dev_err(enic, "Please configure 2 RQs in the vNIC for each Rx queue used by DPDK.\n");
		dev_err(enic, "See the ENIC PMD guide for more information.\n");
		return -EINVAL;
	}
	/* Queue counts may be zeros. rte_zmalloc returns NULL in that case. */
	enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) *
			       enic->conf_cq_count, 8);
	enic->intr = rte_zmalloc("enic_vnic_intr", sizeof(struct vnic_intr) *
				 enic->conf_intr_count, 8);
	enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) *
			       enic->conf_rq_count, 8);
	enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) *
			       enic->conf_wq_count, 8);
	if (enic->conf_cq_count > 0 && enic->cq == NULL) {
		dev_err(enic, "failed to allocate vnic_cq, aborting.\n");
		return -1;
	}
	if (enic->conf_intr_count > 0 && enic->intr == NULL) {
		dev_err(enic, "failed to allocate vnic_intr, aborting.\n");
		return -1;
	}
	if (enic->conf_rq_count > 0 && enic->rq == NULL) {
		dev_err(enic, "failed to allocate vnic_rq, aborting.\n");
		return -1;
	}
	if (enic->conf_wq_count > 0 && enic->wq == NULL) {
		dev_err(enic, "failed to allocate vnic_wq, aborting.\n");
		return -1;
	}

	/* Get the supported filters */
	enic_fdir_info(enic);

	eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN
					* ENIC_MAX_MAC_ADDR, 0);
	if (!eth_dev->data->mac_addrs) {
		dev_err(enic, "mac addr storage alloc failed, aborting.\n");
		return -1;
	}
	ether_addr_copy((struct ether_addr *) enic->mac_addr,
			eth_dev->data->mac_addrs);

	vnic_dev_set_reset_flag(enic->vdev, 0);

	LIST_INIT(&enic->flows);
	rte_spinlock_init(&enic->flows_lock);

	/* set up link status checking */
	vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */

	return 0;
}

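/*
 * Probe-time entry point: map BAR0, register the vNIC with the shared
 * vnic_dev layer, allocate stats DMA memory, open the device, set the VLAN
 * rewrite mode, and bring up the vnic-to-switch link.
 */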
int enic_probe(struct enic *enic)
{
	struct rte_pci_device *pdev = enic->pdev;
	int err = -1;

	dev_debug(enic, " Initializing ENIC PMD\n");

	/* if this is a secondary process the hardware is already initialized */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
	enic->bar0.len = pdev->mem_resource[0].len;

	/* Register vNIC device */
	enic->vdev = vnic_dev_register(NULL, enic, enic->pdev, &enic->bar0, 1);
	if (!enic->vdev) {
		dev_err(enic, "vNIC registration failed, aborting\n");
		goto err_out;
	}

	LIST_INIT(&enic->memzone_list);
	rte_spinlock_init(&enic->memzone_list_lock);

	vnic_register_cbacks(enic->vdev,
		enic_alloc_consistent,
		enic_free_consistent);

	/*
	 * Allocate the consistent memory for stats upfront so both primary and
	 * secondary processes can dump stats.
	 */
	err = vnic_dev_alloc_stats_mem(enic->vdev);
	if (err) {
		dev_err(enic, "Failed to allocate cmd memory, aborting\n");
		goto err_out_unregister;
	}
	/* Issue device open to get device in known state */
	err = enic_dev_open(enic);
	if (err) {
		dev_err(enic, "vNIC dev open failed, aborting\n");
		goto err_out_unregister;
	}

	/* Set ingress vlan rewrite mode before vnic initialization */
	err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
						IG_VLAN_REWRITE_MODE_PASS_THRU);
	if (err) {
		dev_err(enic,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier. We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	err = vnic_dev_init(enic->vdev, 0);
	if (err) {
		dev_err(enic, "vNIC dev init failed, aborting\n");
		goto err_out_dev_close;
	}

	err = enic_dev_init(enic);
	if (err) {
		dev_err(enic, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}

	return 0;

err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_unregister:
	vnic_dev_unregister(enic->vdev);
err_out:
	return err;
}
void enic_remove(struct enic *enic)
{
	enic_dev_deinit(enic);
	vnic_dev_close(enic->vdev);
	vnic_dev_unregister(enic->vdev);
}