/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#include <stdio.h>

#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <libgen.h>

#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_ethdev_driver.h>

#include "enic_compat.h"
#include "enic.h"
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_nic.h"

static inline int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

static int is_zero_addr(uint8_t *addr)
{
	return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
}

static int is_mcast_addr(uint8_t *addr)
{
	/* A multicast MAC address has the group bit set in the first octet. */
	return addr[0] & 1;
}

static int is_eth_addr_valid(uint8_t *addr)
{
	return !is_mcast_addr(addr) && !is_zero_addr(addr);
}

static void
enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
{
	uint16_t i;

	if (!rq || !rq->mbuf_ring) {
		dev_debug(enic, "Pointer to rq or mbuf_ring is NULL");
		return;
	}

	for (i = 0; i < rq->ring.desc_count; i++) {
		if (rq->mbuf_ring[i]) {
			rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
			rq->mbuf_ring[i] = NULL;
		}
	}
}

static void enic_free_wq_buf(struct vnic_wq_buf *buf)
{
	struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;

	rte_pktmbuf_free_seg(mbuf);
	buf->mb = NULL;
}

static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			dev_err(enic, "WQ[%d] error_status %d\n", i,
				error_status);
	}

	for (i = 0; i < enic_vnic_rq_count(enic); i++) {
		if (!enic->rq[i].in_use)
			continue;
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			dev_err(enic, "RQ[%d] error_status %d\n", i,
				error_status);
	}
}

static void enic_clear_soft_stats(struct enic *enic)
{
	struct enic_soft_stats *soft_stats = &enic->soft_stats;

	rte_atomic64_clear(&soft_stats->rx_nombuf);
	rte_atomic64_clear(&soft_stats->rx_packet_errors);
	rte_atomic64_clear(&soft_stats->tx_oversized);
}

static void enic_init_soft_stats(struct enic *enic)
{
	struct enic_soft_stats *soft_stats = &enic->soft_stats;

	rte_atomic64_init(&soft_stats->rx_nombuf);
	rte_atomic64_init(&soft_stats->rx_packet_errors);
	rte_atomic64_init(&soft_stats->tx_oversized);
	enic_clear_soft_stats(enic);
}

void enic_dev_stats_clear(struct enic *enic)
{
	if (vnic_dev_stats_clear(enic->vdev))
		dev_err(enic, "Error in clearing stats\n");
	enic_clear_soft_stats(enic);
}

int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
{
	struct vnic_stats *stats;
	struct enic_soft_stats *soft_stats = &enic->soft_stats;
	int64_t rx_truncated;
	uint64_t rx_packet_errors;
	int ret = vnic_dev_stats_dump(enic->vdev, &stats);

	if (ret) {
		dev_err(enic, "Error in getting stats\n");
		return ret;
	}

	/* The number of truncated packets can only be calculated by
	 * subtracting a hardware counter from the error packets seen by
	 * the driver. Note: this causes transient inaccuracies in the
	 * ipackets count. Also, the length of each truncated packet is
	 * counted in ibytes even though truncated packets are dropped,
	 * which can make ibytes slightly higher than it should be.
	 */
	rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors);
	rx_truncated = rx_packet_errors - stats->rx.rx_errors;

	r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated;
	r_stats->opackets = stats->tx.tx_frames_ok;

	r_stats->ibytes = stats->rx.rx_bytes_ok;
	r_stats->obytes = stats->tx.tx_bytes_ok;

	r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
	r_stats->oerrors = stats->tx.tx_errors
			   + rte_atomic64_read(&soft_stats->tx_oversized);

	r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;

	r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
	return 0;
}
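
/*
 * Worked example of the truncation accounting above (illustrative numbers,
 * not taken from any real device): if the driver counted 10 Rx error
 * packets (soft_stats->rx_packet_errors) while the hardware error counter
 * (stats->rx.rx_errors) reads 4, then rx_truncated = 6. Those 6 frames
 * were received "ok" by hardware but truncated and dropped in the Rx
 * handler, so they are subtracted from ipackets and added to imissed.
 */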

void enic_del_mac_address(struct enic *enic, int mac_index)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes;

	if (vnic_dev_del_addr(enic->vdev, mac_addr))
		dev_err(enic, "del mac addr failed\n");
}

int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
{
	int err;

	if (!is_eth_addr_valid(mac_addr)) {
		dev_err(enic, "invalid mac address\n");
		return -EINVAL;
	}

	err = vnic_dev_add_addr(enic->vdev, mac_addr);
	if (err)
		dev_err(enic, "add mac addr failed\n");
	return err;
}

static void
enic_free_rq_buf(struct rte_mbuf **mbuf)
{
	if (*mbuf == NULL)
		return;

	rte_pktmbuf_free(*mbuf);
	*mbuf = NULL;
}

void enic_init_vnic_resources(struct enic *enic)
{
	unsigned int error_interrupt_enable = 1;
	unsigned int error_interrupt_offset = 0;
	unsigned int rxq_interrupt_enable = 0;
	unsigned int rxq_interrupt_offset;
	unsigned int index = 0;
	unsigned int cq_idx;
	struct vnic_rq *data_rq;

	if (enic->rte_dev->data->dev_conf.intr_conf.rxq) {
		rxq_interrupt_enable = 1;
		rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET;
	}
	for (index = 0; index < enic->rq_count; index++) {
		cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));

		vnic_rq_init(&enic->rq[enic_rte_rq_idx_to_sop_idx(index)],
			cq_idx,
			error_interrupt_enable,
			error_interrupt_offset);

		data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index)];
		if (data_rq->in_use)
			vnic_rq_init(data_rq,
				     cq_idx,
				     error_interrupt_enable,
				     error_interrupt_offset);

		vnic_cq_init(&enic->cq[cq_idx],
			0 /* flow_control_enable */,
			1 /* color_enable */,
			0 /* cq_head */,
			0 /* cq_tail */,
			1 /* cq_tail_color */,
			rxq_interrupt_enable,
			1 /* cq_entry_enable */,
			0 /* cq_message_enable */,
			rxq_interrupt_offset,
			0 /* cq_message_addr */);
		if (rxq_interrupt_enable)
			rxq_interrupt_offset++;
	}

	for (index = 0; index < enic->wq_count; index++) {
		vnic_wq_init(&enic->wq[index],
			enic_cq_wq(enic, index),
			error_interrupt_enable,
			error_interrupt_offset);

		cq_idx = enic_cq_wq(enic, index);
		vnic_cq_init(&enic->cq[cq_idx],
			0 /* flow_control_enable */,
			1 /* color_enable */,
			0 /* cq_head */,
			0 /* cq_tail */,
			1 /* cq_tail_color */,
			0 /* interrupt_enable */,
			0 /* cq_entry_enable */,
			1 /* cq_message_enable */,
			0 /* interrupt offset */,
			(u64)enic->wq[index].cqmsg_rz->iova);
	}

	for (index = 0; index < enic->intr_count; index++) {
		vnic_intr_init(&enic->intr[index],
			enic->config.intr_timer_usec,
			enic->config.intr_timer_type,
			/*mask_on_assertion*/1);
	}
}
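
/*
 * A note on the CQ indexing above: the first rq_count completion queues
 * are paired one-to-one with the Rx queues, and each Tx queue uses
 * enic_cq_wq(enic, index) after them (enic_free_wq() below frees
 * cq[enic->rq_count + wq->index], the same convention). Rx CQs post
 * completion entries directly, while Tx CQs use the memory-write
 * ("cq_message") mode, pointing at each WQ's cqmsg memzone.
 */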

static int
enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
{
	struct rte_mbuf *mb;
	struct rq_enet_desc *rqd = rq->ring.descs;
	unsigned int i;
	dma_addr_t dma_addr;
	uint32_t max_rx_pkt_len;
	uint16_t rq_buf_len;

	if (!rq->in_use)
		return 0;

	dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
		  rq->ring.desc_count);

	/*
	 * If *not* using scatter and the mbuf size is smaller than the
	 * requested max packet size (max_rx_pkt_len), then reduce the
	 * posted buffer size to max_rx_pkt_len. HW still receives packets
	 * larger than max_rx_pkt_len, but they will be truncated, and we
	 * drop them in the rx handler. Not ideal, but better than returning
	 * large packets when the user is not expecting them.
	 */
	max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	rq_buf_len = rte_pktmbuf_data_room_size(rq->mp) - RTE_PKTMBUF_HEADROOM;
	if (max_rx_pkt_len < rq_buf_len && !rq->data_queue_enable)
		rq_buf_len = max_rx_pkt_len;
	for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
		mb = rte_mbuf_raw_alloc(rq->mp);
		if (mb == NULL) {
			dev_err(enic, "RX mbuf alloc failed queue_id=%u\n",
				(unsigned int)rq->index);
			return -ENOMEM;
		}

		mb->data_off = RTE_PKTMBUF_HEADROOM;
		dma_addr = (dma_addr_t)(mb->buf_iova
			   + RTE_PKTMBUF_HEADROOM);
		rq_enet_desc_enc(rqd, dma_addr,
				(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
				: RQ_ENET_TYPE_NOT_SOP),
				rq_buf_len);
		rq->mbuf_ring[i] = mb;
	}

	/* make sure all prior writes are complete before doing the PIO write */
	rte_rmb();

	/* Post all but the last buffer to VIC. */
	rq->posted_index = rq->ring.desc_count - 1;

	rq->rx_nb_hold = 0;

	dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
		enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
	iowrite32(rq->posted_index, &rq->ctrl->posted_index);
	iowrite32(0, &rq->ctrl->fetch_index);
	rte_rmb();

	return 0;
}
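
/*
 * Why "all but the last buffer": posted_index names the last descriptor
 * the VIC may use, and this appears to follow the usual one-empty-slot
 * ring convention, so a completely full ring stays distinguishable from
 * an empty one relative to fetch_index. (Explanatory aside, inferred
 * from the index arithmetic rather than stated in the original code.)
 */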

static void *
enic_alloc_consistent(void *priv, size_t size,
	dma_addr_t *dma_handle, u8 *name)
{
	void *vaddr;
	const struct rte_memzone *rz;
	struct enic *enic = (struct enic *)priv;
	struct enic_memzone_entry *mze;

	*dma_handle = 0;
	rz = rte_memzone_reserve_aligned((const char *)name, size,
			SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
	if (!rz) {
		pr_err("%s : Failed to allocate memory requested for %s\n",
			__func__, name);
		return NULL;
	}

	vaddr = rz->addr;
	*dma_handle = (dma_addr_t)rz->iova;

	mze = rte_malloc("enic memzone entry",
			 sizeof(struct enic_memzone_entry), 0);
	if (!mze) {
		pr_err("%s : Failed to allocate memory for memzone list\n",
		       __func__);
		rte_memzone_free(rz);
		return NULL;
	}

	mze->rz = rz;

	rte_spinlock_lock(&enic->memzone_list_lock);
	LIST_INSERT_HEAD(&enic->memzone_list, mze, entries);
	rte_spinlock_unlock(&enic->memzone_list_lock);

	return vaddr;
}

static void
enic_free_consistent(void *priv,
		     __rte_unused size_t size,
		     void *vaddr,
		     dma_addr_t dma_handle)
{
	struct enic_memzone_entry *mze;
	struct enic *enic = (struct enic *)priv;

	rte_spinlock_lock(&enic->memzone_list_lock);
	LIST_FOREACH(mze, &enic->memzone_list, entries) {
		if (mze->rz->addr == vaddr &&
		    mze->rz->iova == dma_handle)
			break;
	}
	if (mze == NULL) {
		rte_spinlock_unlock(&enic->memzone_list_lock);
		dev_warning(enic,
			"Tried to free memory, but couldn't find it in the memzone list\n");
		return;
	}
	LIST_REMOVE(mze, entries);
	rte_spinlock_unlock(&enic->memzone_list_lock);
	rte_memzone_free(mze->rz);
	rte_free(mze);
}

int enic_link_update(struct enic *enic)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	link.link_status = enic_get_link_status(enic);
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = vnic_dev_port_speed(enic->vdev);

	return rte_eth_linkstatus_set(eth_dev, &link);
}

static void
enic_intr_handler(void *arg)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
	struct enic *enic = pmd_priv(dev);

	vnic_intr_return_all_credits(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);

	enic_link_update(enic);
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	enic_log_q_error(enic);
}

static int enic_rxq_intr_init(struct enic *enic)
{
	struct rte_intr_handle *intr_handle;
	uint32_t rxq_intr_count, i;
	int err;

	intr_handle = enic->rte_dev->intr_handle;
	if (!enic->rte_dev->data->dev_conf.intr_conf.rxq)
		return 0;
	/*
	 * Rx queue interrupts only work when we have MSI-X interrupts,
	 * one per queue. Sharing one interrupt is technically
	 * possible with VIC, but it is not worth the complications it brings.
	 */
	if (!rte_intr_cap_multiple(intr_handle)) {
		dev_err(enic, "Rx queue interrupts require MSI-X interrupts"
			" (vfio-pci driver)\n");
		return -ENOTSUP;
	}
	rxq_intr_count = enic->intr_count - ENICPMD_RXQ_INTR_OFFSET;
	err = rte_intr_efd_enable(intr_handle, rxq_intr_count);
	if (err) {
		dev_err(enic, "Failed to enable event fds for Rx queue"
			" interrupts\n");
		return err;
	}
	intr_handle->intr_vec = rte_zmalloc("enic_intr_vec",
					    rxq_intr_count * sizeof(int), 0);
	if (intr_handle->intr_vec == NULL) {
		dev_err(enic, "Failed to allocate intr_vec\n");
		return -ENOMEM;
	}
	for (i = 0; i < rxq_intr_count; i++)
		intr_handle->intr_vec[i] = i + ENICPMD_RXQ_INTR_OFFSET;
	return 0;
}
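
/*
 * Resulting MSI-X layout: the LSC/notify interrupt stays at vector
 * ENICPMD_LSC_INTR_OFFSET, and Rx queue i maps to vector
 * ENICPMD_RXQ_INTR_OFFSET + i; intr_vec[] records that mapping for the
 * rte_intr event fd machinery.
 */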

static void enic_rxq_intr_deinit(struct enic *enic)
{
	struct rte_intr_handle *intr_handle;

	intr_handle = enic->rte_dev->intr_handle;
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}

int enic_enable(struct enic *enic)
{
	unsigned int index;
	int err;
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* vnic notification of link status has already been turned on in
	 * enic_dev_init() which is called during probe time. Here we are
	 * just turning on interrupt vector 0 if needed.
	 */
	if (eth_dev->data->dev_conf.intr_conf.lsc)
		vnic_dev_notify_set(enic->vdev, 0);

	err = enic_rxq_intr_init(enic);
	if (err)
		return err;
	if (enic_clsf_init(enic))
		dev_warning(enic, "Init of hash table for clsf failed. "
			    "Flow director feature will not work\n");

	for (index = 0; index < enic->rq_count; index++) {
		err = enic_alloc_rx_queue_mbufs(enic,
			&enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
		if (err) {
			dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
			return err;
		}
		err = enic_alloc_rx_queue_mbufs(enic,
			&enic->rq[enic_rte_rq_idx_to_data_idx(index)]);
		if (err) {
			/* release the allocated mbufs for the sop rq */
			enic_rxmbuf_queue_release(enic,
				&enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);

			dev_err(enic, "Failed to alloc data RX queue mbufs\n");
			return err;
		}
	}

	for (index = 0; index < enic->wq_count; index++)
		enic_start_wq(enic, index);
	for (index = 0; index < enic->rq_count; index++)
		enic_start_rq(enic, index);

	vnic_dev_add_addr(enic->vdev, enic->mac_addr);

	vnic_dev_enable_wait(enic->vdev);

	/* Register and enable error interrupt */
	rte_intr_callback_register(&(enic->pdev->intr_handle),
		enic_intr_handler, (void *)enic->rte_dev);

	rte_intr_enable(&(enic->pdev->intr_handle));
	/* Unmask LSC interrupt */
	vnic_intr_unmask(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);

	return 0;
}

int enic_alloc_intr_resources(struct enic *enic)
{
	int err;
	unsigned int i;

	dev_info(enic, "vNIC resources used: "
		 "wq %d rq %d cq %d intr %d\n",
		 enic->wq_count, enic_vnic_rq_count(enic),
		 enic->cq_count, enic->intr_count);

	for (i = 0; i < enic->intr_count; i++) {
		err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
		if (err) {
			enic_free_vnic_resources(enic);
			return err;
		}
	}
	return 0;
}

void enic_free_rq(void *rxq)
{
	struct vnic_rq *rq_sop, *rq_data;
	struct enic *enic;

	if (rxq == NULL)
		return;

	rq_sop = (struct vnic_rq *)rxq;
	enic = vnic_dev_priv(rq_sop->vdev);
	rq_data = &enic->rq[rq_sop->data_queue_idx];

	enic_rxmbuf_queue_release(enic, rq_sop);
	if (rq_data->in_use)
		enic_rxmbuf_queue_release(enic, rq_data);

	rte_free(rq_sop->mbuf_ring);
	if (rq_data->in_use)
		rte_free(rq_data->mbuf_ring);

	rq_sop->mbuf_ring = NULL;
	rq_data->mbuf_ring = NULL;

	vnic_rq_free(rq_sop);
	if (rq_data->in_use)
		vnic_rq_free(rq_data);

	vnic_cq_free(&enic->cq[enic_sop_rq_idx_to_cq_idx(rq_sop->index)]);

	rq_sop->in_use = 0;
	rq_data->in_use = 0;
}

void enic_start_wq(struct enic *enic, uint16_t queue_idx)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	vnic_wq_enable(&enic->wq[queue_idx]);
	eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}

int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	int ret;

	ret = vnic_wq_disable(&enic->wq[queue_idx]);
	if (ret)
		return ret;

	eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

void enic_start_rq(struct enic *enic, uint16_t queue_idx)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;

	rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
	rq_data = &enic->rq[rq_sop->data_queue_idx];

	/* Enable the data RQ first so the sop RQ never points at an
	 * unstarted data RQ.
	 */
	if (rq_data->in_use)
		vnic_rq_enable(rq_data);
	rte_mb();
	vnic_rq_enable(rq_sop);
	eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}

int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
{
	int ret1 = 0, ret2 = 0;
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;

	rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
	rq_data = &enic->rq[rq_sop->data_queue_idx];

	ret2 = vnic_rq_disable(rq_sop);
	rte_mb();
	if (rq_data->in_use)
		ret1 = vnic_rq_disable(rq_data);

	if (ret2)
		return ret2;
	else if (ret1)
		return ret1;

	eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}
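
/*
 * Each rte_eth Rx queue is backed by two vNIC RQs: a start-of-packet
 * (sop) RQ and an optional data RQ (this is why enic_set_vnic_res()
 * requires two vNIC RQs per configured Rx queue). In scatter mode the
 * first fragment of a packet lands in the sop RQ and the remainder in
 * the data RQ; without scatter only the sop RQ is used and the data RQ
 * is marked unused below. The enic_rte_rq_idx_to_sop_idx()/_data_idx()
 * helpers map the rte queue index to this pair.
 */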
int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
	unsigned int socket_id, struct rte_mempool *mp,
	uint16_t nb_desc, uint16_t free_thresh)
{
	int rc;
	uint16_t sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
	uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx);
	struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
	struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
	unsigned int mbuf_size, mbufs_per_pkt;
	unsigned int nb_sop_desc, nb_data_desc;
	uint16_t min_sop, max_sop, min_data, max_data;
	uint32_t max_rx_pkt_len;

	rq_sop->is_sop = 1;
	rq_sop->data_queue_idx = data_queue_idx;
	rq_data->is_sop = 0;
	rq_data->data_queue_idx = 0;
	rq_sop->socket_id = socket_id;
	rq_sop->mp = mp;
	rq_data->socket_id = socket_id;
	rq_data->mp = mp;
	rq_sop->in_use = 1;
	rq_sop->rx_free_thresh = free_thresh;
	rq_data->rx_free_thresh = free_thresh;
	dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
		  free_thresh);

	mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
			       RTE_PKTMBUF_HEADROOM);
	/* max_rx_pkt_len includes the ethernet header and CRC. */
	max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;

	if (enic->rte_dev->data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_SCATTER) {
		dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
		/* ceil((max pkt len)/mbuf_size) */
		mbufs_per_pkt = (max_rx_pkt_len + mbuf_size - 1) / mbuf_size;
	} else {
		dev_info(enic, "Scatter rx mode disabled\n");
		mbufs_per_pkt = 1;
		if (max_rx_pkt_len > mbuf_size) {
			dev_warning(enic, "The maximum Rx packet size (%u) is"
				    " larger than the mbuf size (%u), and"
				    " scatter is disabled. Larger packets will"
				    " be truncated.\n",
				    max_rx_pkt_len, mbuf_size);
		}
	}

	if (mbufs_per_pkt > 1) {
		dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx);
		rq_sop->data_queue_enable = 1;
		rq_data->in_use = 1;
		/*
		 * HW does not directly support rxmode.max_rx_pkt_len. HW
		 * always receives packet sizes up to the "max" MTU.
		 * If not using scatter, we can achieve the effect of dropping
		 * larger packets by reducing the size of posted buffers.
		 * See enic_alloc_rx_queue_mbufs().
		 */
		if (max_rx_pkt_len <
		    enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu)) {
			dev_warning(enic, "rxmode.max_rx_pkt_len is ignored"
				    " when scatter rx mode is in use.\n");
		}
	} else {
		dev_info(enic, "Rq %u Scatter rx mode not being used\n",
			 queue_idx);
		rq_sop->data_queue_enable = 0;
		rq_data->in_use = 0;
	}

	/* The number of descriptors has to be a multiple of 32. */
	nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
	nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;
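
	/*
	 * Worked example (illustrative): nb_desc = 512 with
	 * mbufs_per_pkt = 3 gives nb_sop_desc = (512 / 3) & ~0x1F = 160
	 * and nb_data_desc = (512 - 160) & ~0x1F = 352, both rounded
	 * down to a multiple of 32 as required.
	 */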

	rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
	rq_data->max_mbufs_per_pkt = mbufs_per_pkt;

	if (mbufs_per_pkt > 1) {
		min_sop = 64;
		max_sop = ((enic->config.rq_desc_count /
			    (mbufs_per_pkt - 1)) & ~0x1F);
		min_data = min_sop * (mbufs_per_pkt - 1);
		max_data = enic->config.rq_desc_count;
	} else {
		min_sop = 64;
		max_sop = enic->config.rq_desc_count;
		min_data = 0;
		max_data = 0;
	}

	if (nb_desc < (min_sop + min_data)) {
		dev_warning(enic,
			    "Number of rx descs too low, adjusting to minimum\n");
		nb_sop_desc = min_sop;
		nb_data_desc = min_data;
	} else if (nb_desc > (max_sop + max_data)) {
		dev_warning(enic,
			    "Number of rx_descs too high, adjusting to maximum\n");
		nb_sop_desc = max_sop;
		nb_data_desc = max_data;
	}
	if (mbufs_per_pkt > 1) {
		dev_info(enic, "For max packet size %u and mbuf size %u valid"
			 " rx descriptor range is %u to %u\n",
			 max_rx_pkt_len, mbuf_size, min_sop + min_data,
			 max_sop + max_data);
	}
	dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
		 nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc);

	/* Allocate sop queue resources */
	rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx,
			   nb_sop_desc, sizeof(struct rq_enet_desc));
	if (rc) {
		dev_err(enic, "error in allocation of sop rq\n");
		goto err_exit;
	}
	nb_sop_desc = rq_sop->ring.desc_count;

	if (rq_data->in_use) {
		/* Allocate data queue resources */
		rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx,
				   nb_data_desc,
				   sizeof(struct rq_enet_desc));
		if (rc) {
			dev_err(enic, "error in allocation of data rq\n");
			goto err_free_rq_sop;
		}
		nb_data_desc = rq_data->ring.desc_count;
	}
	rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
			   socket_id, nb_sop_desc + nb_data_desc,
			   sizeof(struct cq_enet_rq_desc));
	if (rc) {
		dev_err(enic, "error in allocation of cq for rq\n");
		goto err_free_rq_data;
	}

	/* Allocate the mbuf rings */
	rq_sop->mbuf_ring = (struct rte_mbuf **)
		rte_zmalloc_socket("rq->mbuf_ring",
				   sizeof(struct rte_mbuf *) * nb_sop_desc,
				   RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
	if (rq_sop->mbuf_ring == NULL)
		goto err_free_cq;

	if (rq_data->in_use) {
		rq_data->mbuf_ring = (struct rte_mbuf **)
			rte_zmalloc_socket("rq->mbuf_ring",
				sizeof(struct rte_mbuf *) * nb_data_desc,
				RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
		if (rq_data->mbuf_ring == NULL)
			goto err_free_sop_mbuf;
	}

	rq_sop->tot_nb_desc = nb_desc; /* squirrel away for MTU update */

	return 0;

err_free_sop_mbuf:
	rte_free(rq_sop->mbuf_ring);
err_free_cq:
	/* cleanup on error */
	vnic_cq_free(&enic->cq[queue_idx]);
err_free_rq_data:
	if (rq_data->in_use)
		vnic_rq_free(rq_data);
err_free_rq_sop:
	vnic_rq_free(rq_sop);
err_exit:
	return -ENOMEM;
}

void enic_free_wq(void *txq)
{
	struct vnic_wq *wq;
	struct enic *enic;

	if (txq == NULL)
		return;

	wq = (struct vnic_wq *)txq;
	enic = vnic_dev_priv(wq->vdev);
	rte_memzone_free(wq->cqmsg_rz);
	vnic_wq_free(wq);
	vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
}

int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
	unsigned int socket_id, uint16_t nb_desc)
{
	int err;
	struct vnic_wq *wq = &enic->wq[queue_idx];
	unsigned int cq_index = enic_cq_wq(enic, queue_idx);
	char name[NAME_MAX];
	static int instance;

	wq->socket_id = socket_id;
	if (nb_desc > enic->config.wq_desc_count) {
		dev_warning(enic,
			    "WQ %d - number of tx desc in cmd line (%d) "
			    "is greater than that in the UCSM/CIMC adapter "
			    "policy. Applying the value in the adapter "
			    "policy (%d)\n",
			    queue_idx, nb_desc, enic->config.wq_desc_count);
	} else if (nb_desc != enic->config.wq_desc_count) {
		enic->config.wq_desc_count = nb_desc;
		dev_info(enic,
			 "TX Queues - effective number of descs:%d\n",
			 nb_desc);
	}

	/* Allocate queue resources */
	err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
			    enic->config.wq_desc_count,
			    sizeof(struct wq_enet_desc));
	if (err) {
		dev_err(enic, "error in allocation of wq\n");
		return err;
	}

	err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
			    socket_id, enic->config.wq_desc_count,
			    sizeof(struct cq_enet_wq_desc));
	if (err) {
		vnic_wq_free(wq);
		dev_err(enic, "error in allocation of cq for wq\n");
	}

	/* set up the CQ message memzone */
	snprintf((char *)name, sizeof(name),
		 "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx,
		 instance++);

	wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
			sizeof(uint32_t), SOCKET_ID_ANY,
			RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
	if (!wq->cqmsg_rz)
		return -ENOMEM;

	return err;
}

int enic_disable(struct enic *enic)
{
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]); /* flush write */
	}
	enic_rxq_intr_deinit(enic);
	rte_intr_disable(&enic->pdev->intr_handle);
	rte_intr_callback_unregister(&enic->pdev->intr_handle,
				     enic_intr_handler,
				     (void *)enic->rte_dev);

	vnic_dev_disable(enic->vdev);

	enic_clsf_destroy(enic);

	if (!enic_is_sriov_vf(enic))
		vnic_dev_del_addr(enic->vdev, enic->mac_addr);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic_vnic_rq_count(enic); i++) {
		if (enic->rq[i].in_use) {
			err = vnic_rq_disable(&enic->rq[i]);
			if (err)
				return err;
		}
	}

	/* If we were using interrupts, set the interrupt vector to -1
	 * to disable interrupts. We are not disabling link notifications,
	 * though, as we want the polling of link status to continue working.
	 */
	if (enic->rte_dev->data->dev_conf.intr_conf.lsc)
		vnic_dev_notify_set(enic->vdev, -1);

	vnic_dev_set_reset_flag(enic->vdev, 1);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);

	for (i = 0; i < enic_vnic_rq_count(enic); i++)
		if (enic->rq[i].in_use)
			vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}

static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	int done;
	int err;
	int i;

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max */
	for (i = 0; i < 2000; i++) {
		err = finished(vdev, &done);
		if (err)
			return err;
		if (done)
			return 0;
		usleep(1000);
	}
	return -ETIMEDOUT;
}

static int enic_dev_open(struct enic *enic)
{
	int err;
	int flags = CMD_OPENF_IG_DESCCACHE;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, flags);
	if (err)
		dev_err(enic_get_dev(enic),
			"vNIC device open failed, err %d\n", err);

	return err;
}

static int enic_set_rsskey(struct enic *enic, uint8_t *user_key)
{
	dma_addr_t rss_key_buf_pa;
	union vnic_rss_key *rss_key_buf_va = NULL;
	int err, i;
	u8 name[NAME_MAX];

	RTE_ASSERT(user_key != NULL);
	snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name);
	rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
		&rss_key_buf_pa, name);
	if (!rss_key_buf_va)
		return -ENOMEM;

	for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++)
		rss_key_buf_va->key[i / 10].b[i % 10] = user_key[i];

	err = enic_set_rss_key(enic,
		rss_key_buf_pa,
		sizeof(union vnic_rss_key));

	/* Save for later queries */
	if (!err)
		rte_memcpy(&enic->rss_key, rss_key_buf_va,
			   sizeof(union vnic_rss_key));

	enic_free_consistent(enic, sizeof(union vnic_rss_key),
		rss_key_buf_va, rss_key_buf_pa);

	return err;
}
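
/*
 * The key[i / 10].b[i % 10] indexing above follows the vnic_rss_key
 * layout, where the 40-byte key is stored as four 10-byte subkeys; for
 * example, byte 25 of the user key lands in key[2].b[5].
 */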

int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	int err;
	u8 name[NAME_MAX];

	snprintf((char *)name, NAME_MAX, "rss_cpu-%s", enic->bdf_name);
	rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
		&rss_cpu_buf_pa, name);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

	rte_memcpy(rss_cpu_buf_va, rss_cpu, sizeof(union vnic_rss_cpu));

	err = enic_set_rss_cpu(enic,
		rss_cpu_buf_pa,
		sizeof(union vnic_rss_cpu));

	enic_free_consistent(enic, sizeof(union vnic_rss_cpu),
		rss_cpu_buf_va, rss_cpu_buf_pa);

	/* Save for later queries */
	if (!err)
		rte_memcpy(&enic->rss_cpu, rss_cpu, sizeof(union vnic_rss_cpu));
	return err;
}

static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
	const u8 tso_ipid_split_en = 0;
	int err;

	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		enic->ig_vlan_strip_en);

	return err;
}

/* Initialize RSS with defaults, called from dev_configure */
int enic_init_rss_nic_cfg(struct enic *enic)
{
	static uint8_t default_rss_key[] = {
		85, 67, 83, 97, 119, 101, 115, 111, 109, 101,
		80, 65, 76, 79, 117, 110, 105, 113, 117, 101,
		76, 73, 78, 85, 88, 114, 111, 99, 107, 115,
		69, 78, 73, 67, 105, 115, 99, 111, 111, 108,
	};
	struct rte_eth_rss_conf rss_conf;
	union vnic_rss_cpu rss_cpu;
	int ret, i;

	rss_conf = enic->rte_dev->data->dev_conf.rx_adv_conf.rss_conf;
	/*
	 * If setting key for the first time, and the user gives us none, then
	 * push the default key to NIC.
	 */
	if (rss_conf.rss_key == NULL) {
		rss_conf.rss_key = default_rss_key;
		rss_conf.rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
	}
	ret = enic_set_rss_conf(enic, &rss_conf);
	if (ret) {
		dev_err(enic, "Failed to configure RSS\n");
		return ret;
	}
	if (enic->rss_enable) {
		/* If enabling RSS, use the default reta */
		for (i = 0; i < ENIC_RSS_RETA_SIZE; i++) {
			rss_cpu.cpu[i / 4].b[i % 4] =
				enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);
		}
		ret = enic_set_rss_reta(enic, &rss_cpu);
		if (ret)
			dev_err(enic, "Failed to set RSS indirection table\n");
	}
	return ret;
}
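
/*
 * Example of the default RETA built above (illustrative): with
 * rq_count = 2, entries 0, 1, 2, 3, ... point at the sop RQ indexes for
 * rte queues 0, 1, 0, 1, ..., spreading flows round-robin across all
 * configured Rx queues. The cpu[i / 4].b[i % 4] indexing mirrors the
 * 4-entry groups of union vnic_rss_cpu.
 */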

int enic_setup_finish(struct enic *enic)
{
	enic_init_soft_stats(enic);

	/* Default conf */
	vnic_dev_packet_filter(enic->vdev,
		1 /* directed  */,
		1 /* multicast */,
		1 /* broadcast */,
		0 /* promisc   */,
		1 /* allmulti  */);

	enic->promisc = 0;
	enic->allmulti = 1;

	return 0;
}

static int enic_rss_conf_valid(struct enic *enic,
			       struct rte_eth_rss_conf *rss_conf)
{
	/* RSS is disabled per VIC settings. Ignore rss_conf. */
	if (enic->flow_type_rss_offloads == 0)
		return 0;
	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len != ENIC_RSS_HASH_KEY_SIZE) {
		dev_err(enic, "Given rss_key is %d bytes, it must be %d\n",
			rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
		return -EINVAL;
	}
	if (rss_conf->rss_hf != 0 &&
	    (rss_conf->rss_hf & enic->flow_type_rss_offloads) == 0) {
		dev_err(enic, "Given rss_hf contains none of the supported"
			" types\n");
		return -EINVAL;
	}

	return 0;
}

/* Set hash type and key according to rss_conf */
int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *eth_dev;
	uint64_t rss_hf;
	u8 rss_hash_type;
	u8 rss_enable;
	int ret;

	RTE_ASSERT(rss_conf != NULL);
	ret = enic_rss_conf_valid(enic, rss_conf);
	if (ret) {
		dev_err(enic, "RSS configuration (rss_conf) is invalid\n");
		return ret;
	}

	eth_dev = enic->rte_dev;
	rss_hash_type = 0;
	rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads;
	if (enic->rq_count > 1 &&
	    (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) &&
	    rss_hf != 0) {
		rss_enable = 1;
		if (rss_hf & ETH_RSS_IPV4)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
		if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
			/*
			 * 'TCP' is not a typo. HW does not have a separate
			 * enable bit for UDP RSS. The TCP bit enables both TCP
			 * and UDP RSS.
			 */
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
		}
		if (rss_hf & ETH_RSS_IPV6)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
		if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
		if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
			/* Again, 'TCP' is not a typo. */
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
		}
		if (rss_hf & ETH_RSS_IPV6_EX)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6_EX;
		if (rss_hf & ETH_RSS_IPV6_TCP_EX)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX;
	} else {
		rss_enable = 0;
		rss_hf = 0;
	}

	/* Set the hash key if provided */
	if (rss_enable && rss_conf->rss_key) {
		ret = enic_set_rsskey(enic, rss_conf->rss_key);
		if (ret) {
			dev_err(enic, "Failed to set RSS key\n");
			return ret;
		}
	}

	ret = enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, rss_hash_type,
			      ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
			      rss_enable);
	if (!ret) {
		enic->rss_hf = rss_hf;
		enic->rss_hash_type = rss_hash_type;
		enic->rss_enable = rss_enable;
	}
	return ret;
}

int enic_set_vlan_strip(struct enic *enic)
{
	/*
	 * Unfortunately, VLAN strip on/off and RSS on/off are configured
	 * together. So, re-do niccfg, preserving the current RSS settings.
	 */
	return enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, enic->rss_hash_type,
			       ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
			       enic->rss_enable);
}

void enic_add_packet_filter(struct enic *enic)
{
	/* Args -> directed, multicast, broadcast, promisc, allmulti */
	vnic_dev_packet_filter(enic->vdev, 1, 1, 1,
		enic->promisc, enic->allmulti);
}

int enic_get_link_status(struct enic *enic)
{
	return vnic_dev_link_status(enic->vdev);
}

static void enic_dev_deinit(struct enic *enic)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	/* stop link status checking */
	vnic_dev_notify_unset(enic->vdev);

	rte_free(eth_dev->data->mac_addrs);
	rte_free(enic->cq);
	rte_free(enic->intr);
	rte_free(enic->rq);
	rte_free(enic->wq);
}

int enic_set_vnic_res(struct enic *enic)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	int rc = 0;
	unsigned int required_rq, required_wq, required_cq, required_intr;

	/* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */
	required_rq = eth_dev->data->nb_rx_queues * 2;
	required_wq = eth_dev->data->nb_tx_queues;
	required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues;
	required_intr = 1; /* 1 for LSC even if intr_conf.lsc is 0 */
	if (eth_dev->data->dev_conf.intr_conf.rxq)
		required_intr += eth_dev->data->nb_rx_queues;

	if (enic->conf_rq_count < required_rq) {
		dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
			eth_dev->data->nb_rx_queues,
			required_rq, enic->conf_rq_count);
		rc = -EINVAL;
	}
	if (enic->conf_wq_count < required_wq) {
		dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
			eth_dev->data->nb_tx_queues, enic->conf_wq_count);
		rc = -EINVAL;
	}

	if (enic->conf_cq_count < required_cq) {
		dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
			required_cq, enic->conf_cq_count);
		rc = -EINVAL;
	}
	if (enic->conf_intr_count < required_intr) {
		dev_err(dev, "Not enough Interrupts to support Rx queue"
			" interrupts. Required:%u, Configured:%u\n",
			required_intr, enic->conf_intr_count);
		rc = -EINVAL;
	}

	if (rc == 0) {
		enic->rq_count = eth_dev->data->nb_rx_queues;
		enic->wq_count = eth_dev->data->nb_tx_queues;
		enic->cq_count = enic->rq_count + enic->wq_count;
		enic->intr_count = required_intr;
	}

	return rc;
}
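
/*
 * Example (illustrative): a port configured with 2 Rx queues, 2 Tx
 * queues, and intr_conf.rxq set needs required_rq = 4 (two vNIC RQs per
 * Rx queue), required_wq = 2, required_cq = 4, and required_intr = 3
 * (1 LSC + 2 Rx), all of which must be provisioned for the vNIC in
 * CIMC/UCSM.
 */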

/* Initialize the completion queue for an RQ */
static int
enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
{
	struct vnic_rq *sop_rq, *data_rq;
	unsigned int cq_idx;
	int rc = 0;

	sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
	data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
	cq_idx = rq_idx;

	vnic_cq_clean(&enic->cq[cq_idx]);
	vnic_cq_init(&enic->cq[cq_idx],
		     0 /* flow_control_enable */,
		     1 /* color_enable */,
		     0 /* cq_head */,
		     0 /* cq_tail */,
		     1 /* cq_tail_color */,
		     0 /* interrupt_enable */,
		     1 /* cq_entry_enable */,
		     0 /* cq_message_enable */,
		     0 /* interrupt offset */,
		     0 /* cq_message_addr */);

	vnic_rq_init_start(sop_rq, enic_cq_rq(enic,
			   enic_rte_rq_idx_to_sop_idx(rq_idx)), 0,
			   sop_rq->ring.desc_count - 1, 1, 0);
	if (data_rq->in_use) {
		vnic_rq_init_start(data_rq,
				   enic_cq_rq(enic,
				   enic_rte_rq_idx_to_data_idx(rq_idx)), 0,
				   data_rq->ring.desc_count - 1, 1, 0);
	}

	rc = enic_alloc_rx_queue_mbufs(enic, sop_rq);
	if (rc)
		return rc;

	if (data_rq->in_use) {
		rc = enic_alloc_rx_queue_mbufs(enic, data_rq);
		if (rc) {
			enic_rxmbuf_queue_release(enic, sop_rq);
			return rc;
		}
	}

	return 0;
}

/* The Cisco NIC can send and receive packets up to a max packet size
 * determined by the NIC type and firmware. There is also an MTU
 * configured into the NIC via the CIMC/UCSM management interface
 * which can be overridden by this function (up to the max packet size).
 * Depending on the network setup, doing so may cause packet drops
 * and unexpected behavior.
 */
int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
{
	unsigned int rq_idx;
	struct vnic_rq *rq;
	int rc = 0;
	uint16_t old_mtu;	/* previous setting */
	uint16_t config_mtu;	/* Value configured into NIC via CIMC/UCSM */
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	old_mtu = eth_dev->data->mtu;
	config_mtu = enic->config.mtu;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	if (new_mtu > enic->max_mtu) {
		dev_err(enic,
			"MTU not updated: requested (%u) greater than max (%u)\n",
			new_mtu, enic->max_mtu);
		return -EINVAL;
	}
	if (new_mtu < ENIC_MIN_MTU) {
		dev_info(enic,
			"MTU not updated: requested (%u) less than min (%u)\n",
			new_mtu, ENIC_MIN_MTU);
		return -EINVAL;
	}
	if (new_mtu > config_mtu)
		dev_warning(enic,
			"MTU (%u) is greater than value configured in NIC (%u)\n",
			new_mtu, config_mtu);

	/* The easy case is when scatter is disabled. However if the MTU
	 * becomes greater than the mbuf data size, packet drops will ensue.
	 */
	if (!(enic->rte_dev->data->dev_conf.rxmode.offloads &
	      DEV_RX_OFFLOAD_SCATTER)) {
		eth_dev->data->mtu = new_mtu;
		goto set_mtu_done;
	}

	/* Rx scatter is enabled so reconfigure RQ's on the fly. The point is to
	 * change Rx scatter mode if necessary for better performance. I.e. if
	 * MTU was greater than the mbuf size and now it's less, scatter Rx
	 * doesn't have to be used and vice versa.
	 */
	rte_spinlock_lock(&enic->mtu_lock);

	/* Stop traffic on all RQs */
	for (rq_idx = 0; rq_idx < enic->rq_count * 2; rq_idx++) {
		rq = &enic->rq[rq_idx];
		if (rq->is_sop && rq->in_use) {
			rc = enic_stop_rq(enic,
					  enic_sop_rq_idx_to_rte_idx(rq_idx));
			if (rc) {
				dev_err(enic, "Failed to stop Rq %u\n", rq_idx);
				goto set_mtu_done;
			}
		}
	}

	/* replace Rx function with a no-op to avoid getting stale pkts */
	eth_dev->rx_pkt_burst = enic_dummy_recv_pkts;
	rte_mb();

	/* Allow time for threads to exit the real Rx function. */
	usleep(100000);

	/* now it is safe to reconfigure the RQs */

	/* update the mtu */
	eth_dev->data->mtu = new_mtu;

	/* free and reallocate RQs with the new MTU */
	for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
		rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
		if (!rq->in_use)
			continue;

		enic_free_rq(rq);
		rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
				   rq->tot_nb_desc, rq->rx_free_thresh);
		if (rc) {
			dev_err(enic,
				"Fatal MTU alloc error- No traffic will pass\n");
			goto set_mtu_done;
		}

		rc = enic_reinit_rq(enic, rq_idx);
		if (rc) {
			dev_err(enic,
				"Fatal MTU RQ reinit- No traffic will pass\n");
			goto set_mtu_done;
		}
	}

	/* put back the real receive function */
	rte_mb();
	eth_dev->rx_pkt_burst = enic_recv_pkts;
	rte_mb();

	/* restart Rx traffic */
	for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
		rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
		if (rq->is_sop && rq->in_use)
			enic_start_rq(enic, rq_idx);
	}

set_mtu_done:
	dev_info(enic, "MTU changed from %u to %u\n", old_mtu, new_mtu);
	rte_spinlock_unlock(&enic->mtu_lock);
	return rc;
}

static int enic_dev_init(struct enic *enic)
{
	int err;
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	vnic_dev_intr_coal_timer_info_default(enic->vdev);

	/* Get vNIC configuration */
	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts */
	enic_get_res_counts(enic);
	if (enic->conf_rq_count == 1) {
		dev_err(enic, "Running with only 1 RQ configured in the vNIC is not supported.\n");
		dev_err(enic, "Please configure 2 RQs in the vNIC for each Rx queue used by DPDK.\n");
		dev_err(enic, "See the ENIC PMD guide for more information.\n");
		return -EINVAL;
	}
	/* Queue counts may be zeros. rte_zmalloc returns NULL in that case. */
	enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) *
			       enic->conf_cq_count, 8);
	enic->intr = rte_zmalloc("enic_vnic_intr", sizeof(struct vnic_intr) *
				 enic->conf_intr_count, 8);
	enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) *
			       enic->conf_rq_count, 8);
	enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) *
			       enic->conf_wq_count, 8);
	if (enic->conf_cq_count > 0 && enic->cq == NULL) {
		dev_err(enic, "failed to allocate vnic_cq, aborting.\n");
		return -1;
	}
	if (enic->conf_intr_count > 0 && enic->intr == NULL) {
		dev_err(enic, "failed to allocate vnic_intr, aborting.\n");
		return -1;
	}
	if (enic->conf_rq_count > 0 && enic->rq == NULL) {
		dev_err(enic, "failed to allocate vnic_rq, aborting.\n");
		return -1;
	}
	if (enic->conf_wq_count > 0 && enic->wq == NULL) {
		dev_err(enic, "failed to allocate vnic_wq, aborting.\n");
		return -1;
	}

	/* Get the supported filters */
	enic_fdir_info(enic);

	eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN
					       * ENIC_MAX_MAC_ADDR, 0);
	if (!eth_dev->data->mac_addrs) {
		dev_err(enic, "mac addr storage alloc failed, aborting.\n");
		return -1;
	}
	ether_addr_copy((struct ether_addr *)enic->mac_addr,
			eth_dev->data->mac_addrs);

	vnic_dev_set_reset_flag(enic->vdev, 0);

	LIST_INIT(&enic->flows);
	rte_spinlock_init(&enic->flows_lock);

	/* set up link status checking */
	vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */

	return 0;
}

int enic_probe(struct enic *enic)
{
	struct rte_pci_device *pdev = enic->pdev;
	int err = -1;

	dev_debug(enic, "Initializing ENIC PMD\n");

	/* if this is a secondary process the hardware is already initialized */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
	enic->bar0.len = pdev->mem_resource[0].len;

	/* Register vNIC device */
	enic->vdev = vnic_dev_register(NULL, enic, enic->pdev, &enic->bar0, 1);
	if (!enic->vdev) {
		dev_err(enic, "vNIC registration failed, aborting\n");
		goto err_out;
	}

	LIST_INIT(&enic->memzone_list);
	rte_spinlock_init(&enic->memzone_list_lock);

	vnic_register_cbacks(enic->vdev,
		enic_alloc_consistent,
		enic_free_consistent);

	/*
	 * Allocate the consistent memory for stats upfront so both primary and
	 * secondary processes can dump stats.
	 */
	err = vnic_dev_alloc_stats_mem(enic->vdev);
	if (err) {
		dev_err(enic, "Failed to allocate cmd memory, aborting\n");
		goto err_out_unregister;
	}
	/* Issue device open to get device in known state */
	err = enic_dev_open(enic);
	if (err) {
		dev_err(enic, "vNIC dev open failed, aborting\n");
		goto err_out_unregister;
	}

	/* Set ingress vlan rewrite mode before vnic initialization */
	err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
						IG_VLAN_REWRITE_MODE_PASS_THRU);
	if (err) {
		dev_err(enic,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier. We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */
	err = vnic_dev_init(enic->vdev, 0);
	if (err) {
		dev_err(enic, "vNIC dev init failed, aborting\n");
		goto err_out_dev_close;
	}

	err = enic_dev_init(enic);
	if (err) {
		dev_err(enic, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}

	return 0;

err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_unregister:
	vnic_dev_unregister(enic->vdev);
err_out:
	return err;
}

void enic_remove(struct enic *enic)
{
	enic_dev_deinit(enic);
	vnic_dev_close(enic->vdev);
	vnic_dev_unregister(enic->vdev);
}