/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#include <stdio.h>
#include <unistd.h>

#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_ethdev_driver.h>

#include "enic_compat.h"
#include "enic.h"
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_nic.h"

static inline int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

static int is_zero_addr(uint8_t *addr)
{
	return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
}

static int is_mcast_addr(uint8_t *addr)
{
	return addr[0] & 1;
}

static int is_eth_addr_valid(uint8_t *addr)
{
	return !is_mcast_addr(addr) && !is_zero_addr(addr);
}
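
/*
 * Free any mbufs still held in an RQ's mbuf ring, e.g. when a queue is
 * torn down before all posted buffers have been received.
 */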
static void
enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
{
	uint16_t i;

	if (!rq || !rq->mbuf_ring) {
		dev_debug(enic, "Pointer to rq or mbuf_ring is NULL");
		return;
	}

	for (i = 0; i < rq->ring.desc_count; i++) {
		if (rq->mbuf_ring[i]) {
			rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
			rq->mbuf_ring[i] = NULL;
		}
	}
}

static void enic_free_wq_buf(struct vnic_wq_buf *buf)
{
	struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;

	rte_pktmbuf_free_seg(mbuf);
	buf->mb = NULL;
}

static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			dev_err(enic, "WQ[%d] error_status %d\n", i,
				error_status);
	}

	for (i = 0; i < enic_vnic_rq_count(enic); i++) {
		if (!enic->rq[i].in_use)
			continue;
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			dev_err(enic, "RQ[%d] error_status %d\n", i,
				error_status);
	}
}

static void enic_clear_soft_stats(struct enic *enic)
{
	struct enic_soft_stats *soft_stats = &enic->soft_stats;
	rte_atomic64_clear(&soft_stats->rx_nombuf);
	rte_atomic64_clear(&soft_stats->rx_packet_errors);
	rte_atomic64_clear(&soft_stats->tx_oversized);
}

static void enic_init_soft_stats(struct enic *enic)
{
	struct enic_soft_stats *soft_stats = &enic->soft_stats;
	rte_atomic64_init(&soft_stats->rx_nombuf);
	rte_atomic64_init(&soft_stats->rx_packet_errors);
	rte_atomic64_init(&soft_stats->tx_oversized);
	enic_clear_soft_stats(enic);
}
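
/*
 * The soft stats (rx_nombuf, rx_packet_errors, tx_oversized) are counters
 * the PMD maintains itself; enic_dev_stats_get() folds them into the
 * rte_eth_stats reported to the application.
 */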
void enic_dev_stats_clear(struct enic *enic)
{
	if (vnic_dev_stats_clear(enic->vdev))
		dev_err(enic, "Error in clearing stats\n");
	enic_clear_soft_stats(enic);
}

int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
{
	struct vnic_stats *stats;
	struct enic_soft_stats *soft_stats = &enic->soft_stats;
	int64_t rx_truncated;
	uint64_t rx_packet_errors;
	int ret = vnic_dev_stats_dump(enic->vdev, &stats);

	if (ret) {
		dev_err(enic, "Error in getting stats\n");
		return ret;
	}

	/* The number of truncated packets can only be calculated by
	 * subtracting a hardware counter from error packets received by
	 * the driver. Note: this causes transient inaccuracies in the
	 * ipackets count. Also, the length of truncated packets is
	 * counted in ibytes even though truncated packets are dropped,
	 * which can make ibytes slightly higher than it should be.
	 */
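	/* Illustrative example: if the driver has counted 110 Rx error
	 * packets while the HW rx_errors counter reads 100, then 10 packets
	 * were truncated; they are subtracted from ipackets and added to
	 * imissed below.
	 */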
	rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors);
	rx_truncated = rx_packet_errors - stats->rx.rx_errors;

	r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated;
	r_stats->opackets = stats->tx.tx_frames_ok;

	r_stats->ibytes = stats->rx.rx_bytes_ok;
	r_stats->obytes = stats->tx.tx_bytes_ok;

	r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
	r_stats->oerrors = stats->tx.tx_errors
			   + rte_atomic64_read(&soft_stats->tx_oversized);

	r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;

	r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
	return 0;
}

void enic_del_mac_address(struct enic *enic, int mac_index)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes;

	if (vnic_dev_del_addr(enic->vdev, mac_addr))
		dev_err(enic, "del mac addr failed\n");
}

int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
{
	int err;

	if (!is_eth_addr_valid(mac_addr)) {
		dev_err(enic, "invalid mac address\n");
		return -EINVAL;
	}

	err = vnic_dev_add_addr(enic->vdev, mac_addr);
	if (err)
		dev_err(enic, "add mac addr failed\n");
	return err;
}

static void
enic_free_rq_buf(struct rte_mbuf **mbuf)
{
	if (*mbuf == NULL)
		return;

	rte_pktmbuf_free(*mbuf);
	*mbuf = NULL;
}

void enic_init_vnic_resources(struct enic *enic)
{
	unsigned int error_interrupt_enable = 1;
	unsigned int error_interrupt_offset = 0;
	unsigned int rxq_interrupt_enable = 0;
	unsigned int rxq_interrupt_offset;
	unsigned int index = 0;
	unsigned int cq_idx;
	struct vnic_rq *data_rq;

	if (enic->rte_dev->data->dev_conf.intr_conf.rxq) {
		rxq_interrupt_enable = 1;
		rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET;
	}
	for (index = 0; index < enic->rq_count; index++) {
		cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));

		vnic_rq_init(&enic->rq[enic_rte_rq_idx_to_sop_idx(index)],
			cq_idx,
			error_interrupt_enable,
			error_interrupt_offset);

		data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index)];
		if (data_rq->in_use)
			vnic_rq_init(data_rq,
				     cq_idx,
				     error_interrupt_enable,
				     error_interrupt_offset);

		vnic_cq_init(&enic->cq[cq_idx],
			0 /* flow_control_enable */,
			1 /* color_enable */,
			0 /* cq_head */,
			0 /* cq_tail */,
			1 /* cq_tail_color */,
			rxq_interrupt_enable,
			1 /* cq_entry_enable */,
			0 /* cq_message_enable */,
			rxq_interrupt_offset,
			0 /* cq_message_addr */);
		if (rxq_interrupt_enable)
			rxq_interrupt_offset++;
	}

	for (index = 0; index < enic->wq_count; index++) {
		vnic_wq_init(&enic->wq[index],
			enic_cq_wq(enic, index),
			error_interrupt_enable,
			error_interrupt_offset);

		cq_idx = enic_cq_wq(enic, index);
		vnic_cq_init(&enic->cq[cq_idx],
			0 /* flow_control_enable */,
			1 /* color_enable */,
			0 /* cq_head */,
			0 /* cq_tail */,
			1 /* cq_tail_color */,
			0 /* interrupt_enable */,
			0 /* cq_entry_enable */,
			1 /* cq_message_enable */,
			0 /* interrupt offset */,
			(u64)enic->wq[index].cqmsg_rz->iova);
	}

	for (index = 0; index < enic->intr_count; index++) {
		vnic_intr_init(&enic->intr[index],
			       enic->config.intr_timer_usec,
			       enic->config.intr_timer_type,
			       /*mask_on_assertion*/1);
	}
}
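
/*
 * Fill an RQ's mbuf ring and post the buffers to the VIC. The posted
 * buffer size may be clamped below max_rx_pkt_len; see the comment inside.
 */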
static int
enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
{
	struct rte_mbuf *mb;
	struct rq_enet_desc *rqd = rq->ring.descs;
	unsigned int i;
	dma_addr_t dma_addr;
	uint32_t max_rx_pkt_len;
	uint16_t rq_buf_len;

	if (!rq->in_use)
		return 0;

	dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
		  rq->ring.desc_count);

	/*
	 * If *not* using scatter and the mbuf size is smaller than the
	 * requested max packet size (max_rx_pkt_len), then reduce the
	 * posted buffer size to max_rx_pkt_len. HW still receives packets
	 * larger than max_rx_pkt_len, but they will be truncated, which we
	 * drop in the rx handler. Not ideal, but better than returning
	 * large packets when the user is not expecting them.
	 */
	max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	rq_buf_len = rte_pktmbuf_data_room_size(rq->mp) - RTE_PKTMBUF_HEADROOM;
	if (max_rx_pkt_len < rq_buf_len && !rq->data_queue_enable)
		rq_buf_len = max_rx_pkt_len;
	for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
		mb = rte_mbuf_raw_alloc(rq->mp);
		if (mb == NULL) {
			dev_err(enic, "RX mbuf alloc failed queue_id=%u\n",
				(unsigned int)rq->index);
			return -ENOMEM;
		}

		mb->data_off = RTE_PKTMBUF_HEADROOM;
		dma_addr = (dma_addr_t)(mb->buf_iova
			   + RTE_PKTMBUF_HEADROOM);
		rq_enet_desc_enc(rqd, dma_addr,
				(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
				: RQ_ENET_TYPE_NOT_SOP),
				rq_buf_len);
		rq->mbuf_ring[i] = mb;
	}

	/* make sure all prior writes are complete before doing the PIO write */
	rte_rmb();

	/* Post all but the last buffer to VIC. */
	rq->posted_index = rq->ring.desc_count - 1;

	rq->rx_nb_hold = 0;

	dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
		enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
	iowrite32(rq->posted_index, &rq->ctrl->posted_index);
	iowrite32(0, &rq->ctrl->fetch_index);
	rte_rmb();

	return 0;
}
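
/*
 * DMA-coherent allocator registered with the vNIC library: backs each
 * allocation with an rte_memzone and tracks it on a per-enic list so
 * enic_free_consistent() can find and release it later.
 */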
static void *
enic_alloc_consistent(void *priv, size_t size,
	dma_addr_t *dma_handle, u8 *name)
{
	void *vaddr;
	const struct rte_memzone *rz;
	struct enic *enic = (struct enic *)priv;
	struct enic_memzone_entry *mze;

	*dma_handle = 0;
	rz = rte_memzone_reserve_aligned((const char *)name,
					 size, SOCKET_ID_ANY, 0, ENIC_ALIGN);
	if (!rz) {
		pr_err("%s : Failed to allocate memory requested for %s\n",
		       __func__, name);
		return NULL;
	}

	vaddr = rz->addr;
	*dma_handle = (dma_addr_t)rz->iova;

	mze = rte_malloc("enic memzone entry",
			 sizeof(struct enic_memzone_entry), 0);
	if (!mze) {
		pr_err("%s : Failed to allocate memory for memzone list\n",
		       __func__);
		rte_memzone_free(rz);
		return NULL;
	}

	mze->rz = rz;

	rte_spinlock_lock(&enic->memzone_list_lock);
	LIST_INSERT_HEAD(&enic->memzone_list, mze, entries);
	rte_spinlock_unlock(&enic->memzone_list_lock);

	return vaddr;
}

static void
enic_free_consistent(void *priv,
		     __rte_unused size_t size,
		     void *vaddr,
		     dma_addr_t dma_handle)
{
	struct enic_memzone_entry *mze;
	struct enic *enic = (struct enic *)priv;

	rte_spinlock_lock(&enic->memzone_list_lock);
	LIST_FOREACH(mze, &enic->memzone_list, entries) {
		if (mze->rz->addr == vaddr &&
		    mze->rz->iova == dma_handle)
			break;
	}
	if (mze == NULL) {
		rte_spinlock_unlock(&enic->memzone_list_lock);
		dev_warning(enic,
			"Tried to free memory, but couldn't find it in the memzone list\n");
		return;
	}
	LIST_REMOVE(mze, entries);
	rte_spinlock_unlock(&enic->memzone_list_lock);
	rte_memzone_free(mze->rz);
	rte_free(mze);
}

int enic_link_update(struct enic *enic)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	link.link_status = enic_get_link_status(enic);
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = vnic_dev_port_speed(enic->vdev);

	return rte_eth_linkstatus_set(eth_dev, &link);
}

static void
enic_intr_handler(void *arg)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
	struct enic *enic = pmd_priv(dev);

	vnic_intr_return_all_credits(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);

	enic_link_update(enic);
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	enic_log_q_error(enic);
}

static int enic_rxq_intr_init(struct enic *enic)
{
	struct rte_intr_handle *intr_handle;
	uint32_t rxq_intr_count, i;
	int err;

	intr_handle = enic->rte_dev->intr_handle;
	if (!enic->rte_dev->data->dev_conf.intr_conf.rxq)
		return 0;
	/*
	 * Rx queue interrupts only work when we have MSI-X interrupts,
	 * one per queue. Sharing one interrupt is technically
	 * possible with VIC, but it is not worth the complications it brings.
	 */
	if (!rte_intr_cap_multiple(intr_handle)) {
		dev_err(enic, "Rx queue interrupts require MSI-X interrupts"
			" (vfio-pci driver)\n");
		return -ENOTSUP;
	}
	rxq_intr_count = enic->intr_count - ENICPMD_RXQ_INTR_OFFSET;
	err = rte_intr_efd_enable(intr_handle, rxq_intr_count);
	if (err) {
		dev_err(enic, "Failed to enable event fds for Rx queue"
			" interrupts\n");
		return err;
	}
	intr_handle->intr_vec = rte_zmalloc("enic_intr_vec",
					    rxq_intr_count * sizeof(int), 0);
	if (intr_handle->intr_vec == NULL) {
		dev_err(enic, "Failed to allocate intr_vec\n");
		return -ENOMEM;
	}
	for (i = 0; i < rxq_intr_count; i++)
		intr_handle->intr_vec[i] = i + ENICPMD_RXQ_INTR_OFFSET;
	return 0;
}

static void enic_rxq_intr_deinit(struct enic *enic)
{
	struct rte_intr_handle *intr_handle;

	intr_handle = enic->rte_dev->intr_handle;
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}

int enic_enable(struct enic *enic)
{
	unsigned int index;
	int err;
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* vnic notification of link status has already been turned on in
	 * enic_dev_init() which is called during probe time. Here we are
	 * just turning on interrupt vector 0 if needed.
	 */
	if (eth_dev->data->dev_conf.intr_conf.lsc)
		vnic_dev_notify_set(enic->vdev, 0);

	err = enic_rxq_intr_init(enic);
	if (err)
		return err;
	if (enic_clsf_init(enic))
		dev_warning(enic, "Init of hash table for clsf failed."
			    " Flow director feature will not work\n");

	for (index = 0; index < enic->rq_count; index++) {
		err = enic_alloc_rx_queue_mbufs(enic,
			&enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
		if (err) {
			dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
			return err;
		}
		err = enic_alloc_rx_queue_mbufs(enic,
			&enic->rq[enic_rte_rq_idx_to_data_idx(index)]);
		if (err) {
			/* release the allocated mbufs for the sop rq */
			enic_rxmbuf_queue_release(enic,
				&enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);

			dev_err(enic, "Failed to alloc data RX queue mbufs\n");
			return err;
		}
	}

	for (index = 0; index < enic->wq_count; index++)
		enic_start_wq(enic, index);
	for (index = 0; index < enic->rq_count; index++)
		enic_start_rq(enic, index);

	vnic_dev_add_addr(enic->vdev, enic->mac_addr);

	vnic_dev_enable_wait(enic->vdev);

	/* Register and enable error interrupt */
	rte_intr_callback_register(&(enic->pdev->intr_handle),
		enic_intr_handler, (void *)enic->rte_dev);

	rte_intr_enable(&(enic->pdev->intr_handle));
	/* Unmask LSC interrupt */
	vnic_intr_unmask(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);

	return 0;
}

int enic_alloc_intr_resources(struct enic *enic)
{
	int err;
	unsigned int i;

	dev_info(enic, "vNIC resources used: "
		 "wq %d rq %d cq %d intr %d\n",
		 enic->wq_count, enic_vnic_rq_count(enic),
		 enic->cq_count, enic->intr_count);

	for (i = 0; i < enic->intr_count; i++) {
		err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
		if (err) {
			enic_free_vnic_resources(enic);
			return err;
		}
	}
	return 0;
}

void enic_free_rq(void *rxq)
{
	struct vnic_rq *rq_sop, *rq_data;
	struct enic *enic;

	if (rxq == NULL)
		return;

	rq_sop = (struct vnic_rq *)rxq;
	enic = vnic_dev_priv(rq_sop->vdev);
	rq_data = &enic->rq[rq_sop->data_queue_idx];

	enic_rxmbuf_queue_release(enic, rq_sop);
	if (rq_data->in_use)
		enic_rxmbuf_queue_release(enic, rq_data);

	rte_free(rq_sop->mbuf_ring);
	if (rq_data->in_use)
		rte_free(rq_data->mbuf_ring);

	rq_sop->mbuf_ring = NULL;
	rq_data->mbuf_ring = NULL;

	vnic_rq_free(rq_sop);
	if (rq_data->in_use)
		vnic_rq_free(rq_data);

	vnic_cq_free(&enic->cq[enic_sop_rq_idx_to_cq_idx(rq_sop->index)]);

	rq_sop->in_use = 0;
	rq_data->in_use = 0;
}

void enic_start_wq(struct enic *enic, uint16_t queue_idx)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	vnic_wq_enable(&enic->wq[queue_idx]);
	eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}

int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	int ret;

	ret = vnic_wq_disable(&enic->wq[queue_idx]);
	if (ret)
		return ret;

	eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

void enic_start_rq(struct enic *enic, uint16_t queue_idx)
{
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
	rq_data = &enic->rq[rq_sop->data_queue_idx];

	if (rq_data->in_use)
		vnic_rq_enable(rq_data);
	rte_mb();
	vnic_rq_enable(rq_sop);
	eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}

int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
{
	int ret1 = 0, ret2 = 0;
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;

	rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
	rq_data = &enic->rq[rq_sop->data_queue_idx];

	ret2 = vnic_rq_disable(rq_sop);
	rte_mb();
	if (rq_data->in_use)
		ret1 = vnic_rq_disable(rq_data);

	if (ret2)
		return ret2;
	else if (ret1)
		return ret1;

	eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}
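
/*
 * Allocate the vNIC resources for one eth_dev Rx queue: a start-of-packet
 * (sop) RQ, an optional data RQ used only in scatter mode, a completion
 * queue, and the mbuf rings that shadow the descriptor rings.
 */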
int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
	unsigned int socket_id, struct rte_mempool *mp,
	uint16_t nb_desc, uint16_t free_thresh)
{
	int rc;
	uint16_t sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
	uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx);
	struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
	struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
	unsigned int mbuf_size, mbufs_per_pkt;
	unsigned int nb_sop_desc, nb_data_desc;
	uint16_t min_sop, max_sop, min_data, max_data;
	uint32_t max_rx_pkt_len;

	rq_sop->is_sop = 1;
	rq_sop->data_queue_idx = data_queue_idx;
	rq_data->is_sop = 0;
	rq_data->data_queue_idx = 0;
	rq_sop->socket_id = socket_id;
	rq_sop->mp = mp;
	rq_data->socket_id = socket_id;
	rq_data->mp = mp;
	rq_sop->in_use = 1;
	rq_sop->rx_free_thresh = free_thresh;
	rq_data->rx_free_thresh = free_thresh;
	dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
		  free_thresh);

	mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
			       RTE_PKTMBUF_HEADROOM);
	/* max_rx_pkt_len includes the ethernet header and CRC. */
	max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;

	if (enic->rte_dev->data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_SCATTER) {
		dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
		/* ceil((max pkt len)/mbuf_size) */
		mbufs_per_pkt = (max_rx_pkt_len + mbuf_size - 1) / mbuf_size;
	} else {
		dev_info(enic, "Scatter rx mode disabled\n");
		mbufs_per_pkt = 1;
		if (max_rx_pkt_len > mbuf_size) {
			dev_warning(enic, "The maximum Rx packet size (%u) is"
				    " larger than the mbuf size (%u), and"
				    " scatter is disabled. Larger packets will"
				    " be truncated.\n",
				    max_rx_pkt_len, mbuf_size);
		}
	}

	if (mbufs_per_pkt > 1) {
		dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx);
		rq_sop->data_queue_enable = 1;
		rq_data->in_use = 1;
		/*
		 * HW does not directly support rxmode.max_rx_pkt_len. HW
		 * always receives packet sizes up to the "max" MTU.
		 * If not using scatter, we can achieve the effect of dropping
		 * larger packets by reducing the size of posted buffers.
		 * See enic_alloc_rx_queue_mbufs().
		 */
		if (max_rx_pkt_len <
		    enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu)) {
			dev_warning(enic, "rxmode.max_rx_pkt_len is ignored"
				    " when scatter rx mode is in use.\n");
		}
	} else {
		dev_info(enic, "Rq %u Scatter rx mode not being used\n",
			 queue_idx);
		rq_sop->data_queue_enable = 0;
		rq_data->in_use = 0;
	}

	/* The number of descriptors has to be a multiple of 32. */
	nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
	nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;
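	/* Illustrative example: with nb_desc = 512 and mbufs_per_pkt = 3
	 * (scatter), nb_sop_desc = (512 / 3) & ~0x1F = 160 and
	 * nb_data_desc = (512 - 160) & ~0x1F = 352.
	 */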

	rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
	rq_data->max_mbufs_per_pkt = mbufs_per_pkt;

	if (mbufs_per_pkt > 1) {
		min_sop = 64;
		max_sop = ((enic->config.rq_desc_count /
			    (mbufs_per_pkt - 1)) & ~0x1F);
		min_data = min_sop * (mbufs_per_pkt - 1);
		max_data = enic->config.rq_desc_count;
	} else {
		min_sop = 64;
		max_sop = enic->config.rq_desc_count;
		min_data = 0;
		max_data = 0;
	}

	if (nb_desc < (min_sop + min_data)) {
		dev_warning(enic,
			    "Number of rx descs too low, adjusting to minimum\n");
		nb_sop_desc = min_sop;
		nb_data_desc = min_data;
	} else if (nb_desc > (max_sop + max_data)) {
		dev_warning(enic,
			    "Number of rx_descs too high, adjusting to maximum\n");
		nb_sop_desc = max_sop;
		nb_data_desc = max_data;
	}
	if (mbufs_per_pkt > 1) {
		dev_info(enic, "For max packet size %u and mbuf size %u valid"
			 " rx descriptor range is %u to %u\n",
			 max_rx_pkt_len, mbuf_size, min_sop + min_data,
			 max_sop + max_data);
	}
	dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
		 nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc);

	/* Allocate sop queue resources */
	rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx,
			   nb_sop_desc, sizeof(struct rq_enet_desc));
	if (rc) {
		dev_err(enic, "error in allocation of sop rq\n");
		goto err_exit;
	}
	nb_sop_desc = rq_sop->ring.desc_count;

	if (rq_data->in_use) {
		/* Allocate data queue resources */
		rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx,
				   nb_data_desc,
				   sizeof(struct rq_enet_desc));
		if (rc) {
			dev_err(enic, "error in allocation of data rq\n");
			goto err_free_rq_sop;
		}
		nb_data_desc = rq_data->ring.desc_count;
	}
	rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
			   socket_id, nb_sop_desc + nb_data_desc,
			   sizeof(struct cq_enet_rq_desc));
	if (rc) {
		dev_err(enic, "error in allocation of cq for rq\n");
		goto err_free_rq_data;
	}
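
	/* Note: the sop and data RQs share this single CQ, which is why it
	 * is sized for the sum of both descriptor rings.
	 */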

	/* Allocate the mbuf rings */
	rq_sop->mbuf_ring = (struct rte_mbuf **)
		rte_zmalloc_socket("rq->mbuf_ring",
				   sizeof(struct rte_mbuf *) * nb_sop_desc,
				   RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
	if (rq_sop->mbuf_ring == NULL)
		goto err_free_cq;

	if (rq_data->in_use) {
		rq_data->mbuf_ring = (struct rte_mbuf **)
			rte_zmalloc_socket("rq->mbuf_ring",
				sizeof(struct rte_mbuf *) * nb_data_desc,
				RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
		if (rq_data->mbuf_ring == NULL)
			goto err_free_sop_mbuf;
	}

	rq_sop->tot_nb_desc = nb_desc; /* squirrel away for MTU update */

	return 0;

err_free_sop_mbuf:
	rte_free(rq_sop->mbuf_ring);
err_free_cq:
	/* cleanup on error */
	vnic_cq_free(&enic->cq[queue_idx]);
err_free_rq_data:
	if (rq_data->in_use)
		vnic_rq_free(rq_data);
err_free_rq_sop:
	vnic_rq_free(rq_sop);
err_exit:
	return -ENOMEM;
}

void enic_free_wq(void *txq)
{
	struct vnic_wq *wq;
	struct enic *enic;

	if (txq == NULL)
		return;

	wq = (struct vnic_wq *)txq;
	enic = vnic_dev_priv(wq->vdev);
	rte_memzone_free(wq->cqmsg_rz);
	vnic_wq_free(wq);
	vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
}

int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
	unsigned int socket_id, uint16_t nb_desc)
{
	int err;
	struct vnic_wq *wq = &enic->wq[queue_idx];
	unsigned int cq_index = enic_cq_wq(enic, queue_idx);
	char name[NAME_MAX];
	static int instance;

	wq->socket_id = socket_id;

	if (nb_desc > enic->config.wq_desc_count) {
		dev_warning(enic,
			    "WQ %d - number of tx desc in cmd line (%d) "
			    "is greater than that in the UCSM/CIMC adapter "
			    "policy. Applying the value in the adapter "
			    "policy (%d)\n",
			    queue_idx, nb_desc, enic->config.wq_desc_count);
	} else if (nb_desc != enic->config.wq_desc_count) {
		enic->config.wq_desc_count = nb_desc;
		dev_info(enic,
			 "TX Queues - effective number of descs:%d\n",
			 nb_desc);
	}

	/* Allocate queue resources */
	err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
			    enic->config.wq_desc_count,
			    sizeof(struct wq_enet_desc));
	if (err) {
		dev_err(enic, "error in allocation of wq\n");
		return err;
	}

	err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
			    socket_id, enic->config.wq_desc_count,
			    sizeof(struct cq_enet_wq_desc));
	if (err) {
		vnic_wq_free(wq);
		dev_err(enic, "error in allocation of cq for wq\n");
		return err;
	}

	/* set up CQ message */
	snprintf((char *)name, sizeof(name),
		 "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx,
		 instance++);

	wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
						   sizeof(uint32_t),
						   SOCKET_ID_ANY, 0,
						   ENIC_ALIGN);
	if (!wq->cqmsg_rz)
		return -ENOMEM;

	return err;
}
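
/*
 * Quiesce the vNIC: mask interrupts, disable all queues, then clean the
 * rings and release any buffers still posted to the hardware.
 */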
int enic_disable(struct enic *enic)
{
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]); /* flush write */
	}
	enic_rxq_intr_deinit(enic);
	rte_intr_disable(&enic->pdev->intr_handle);
	rte_intr_callback_unregister(&enic->pdev->intr_handle,
				     enic_intr_handler,
				     (void *)enic->rte_dev);

	vnic_dev_disable(enic->vdev);

	enic_clsf_destroy(enic);

	if (!enic_is_sriov_vf(enic))
		vnic_dev_del_addr(enic->vdev, enic->mac_addr);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic_vnic_rq_count(enic); i++) {
		if (enic->rq[i].in_use) {
			err = vnic_rq_disable(&enic->rq[i]);
			if (err)
				return err;
		}
	}

	/* If we were using interrupts, set the interrupt vector to -1
	 * to disable interrupts. We are not disabling link notifications,
	 * though, as we want the polling of link status to continue working.
	 */
	if (enic->rte_dev->data->dev_conf.intr_conf.lsc)
		vnic_dev_notify_set(enic->vdev, -1);

	vnic_dev_set_reset_flag(enic->vdev, 1);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);

	for (i = 0; i < enic_vnic_rq_count(enic); i++)
		if (enic->rq[i].in_use)
			vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}

static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	int done;
	int err;
	int i;

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max */
	for (i = 0; i < 2000; i++) {
		err = finished(vdev, &done);
		if (err)
			return err;
		if (done)
			return 0;
		usleep(1000);
	}
	return -ETIMEDOUT;
}

static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
			    vnic_dev_open_done, 0);
	if (err)
		dev_err(enic_get_dev(enic),
			"vNIC device open failed, err %d\n", err);

	return err;
}
static int enic_set_rsskey(struct enic *enic, uint8_t *user_key)
{
	dma_addr_t rss_key_buf_pa;
	union vnic_rss_key *rss_key_buf_va = NULL;
	int err, i;
	u8 name[NAME_MAX];

	RTE_ASSERT(user_key != NULL);
	snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name);
	rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
					       &rss_key_buf_pa, name);
	if (!rss_key_buf_va)
		return -ENOMEM;

	for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++)
		rss_key_buf_va->key[i / 10].b[i % 10] = user_key[i];

	err = enic_set_rss_key(enic,
			       rss_key_buf_pa,
			       sizeof(union vnic_rss_key));

	/* Save for later queries */
	if (!err)
		rte_memcpy(&enic->rss_key, rss_key_buf_va,
			   sizeof(union vnic_rss_key));
	enic_free_consistent(enic, sizeof(union vnic_rss_key),
			     rss_key_buf_va, rss_key_buf_pa);

	return err;
}

int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	int err;
	u8 name[NAME_MAX];

	snprintf((char *)name, NAME_MAX, "rss_cpu-%s", enic->bdf_name);
	rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
					       &rss_cpu_buf_pa, name);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

	rte_memcpy(rss_cpu_buf_va, rss_cpu, sizeof(union vnic_rss_cpu));

	err = enic_set_rss_cpu(enic,
			       rss_cpu_buf_pa,
			       sizeof(union vnic_rss_cpu));

	enic_free_consistent(enic, sizeof(union vnic_rss_cpu),
			     rss_cpu_buf_va, rss_cpu_buf_pa);

	/* Save for later queries */
	if (!err)
		rte_memcpy(&enic->rss_cpu, rss_cpu, sizeof(union vnic_rss_cpu));
	return err;
}

static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
	const u8 tso_ipid_split_en = 0;
	int err;

	err = enic_set_nic_cfg(enic,
			       rss_default_cpu, rss_hash_type,
			       rss_hash_bits, rss_base_cpu,
			       rss_enable, tso_ipid_split_en,
			       enic->ig_vlan_strip_en);

	return err;
}

/* Initialize RSS with defaults, called from dev_configure */
int enic_init_rss_nic_cfg(struct enic *enic)
{
	static uint8_t default_rss_key[] = {
		85, 67, 83, 97, 119, 101, 115, 111, 109, 101,
		80, 65, 76, 79, 117, 110, 105, 113, 117, 101,
		76, 73, 78, 85, 88, 114, 111, 99, 107, 115,
		69, 78, 73, 67, 105, 115, 99, 111, 111, 108,
	};
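	/* (The default key bytes are printable ASCII; each row of ten spells
	 * a phrase, e.g. the first row reads "UCSawesome".)
	 */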
	struct rte_eth_rss_conf rss_conf;
	union vnic_rss_cpu rss_cpu;
	int ret, i;

	rss_conf = enic->rte_dev->data->dev_conf.rx_adv_conf.rss_conf;
	/*
	 * If setting key for the first time, and the user gives us none, then
	 * push the default key to NIC.
	 */
	if (rss_conf.rss_key == NULL) {
		rss_conf.rss_key = default_rss_key;
		rss_conf.rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
	}
	ret = enic_set_rss_conf(enic, &rss_conf);
	if (ret) {
		dev_err(enic, "Failed to configure RSS\n");
		return ret;
	}
	if (enic->rss_enable) {
		/* If enabling RSS, use the default reta */
		for (i = 0; i < ENIC_RSS_RETA_SIZE; i++) {
			rss_cpu.cpu[i / 4].b[i % 4] =
				enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);
		}
		ret = enic_set_rss_reta(enic, &rss_cpu);
		if (ret)
			dev_err(enic, "Failed to set RSS indirection table\n");
	}
	return ret;
}

int enic_setup_finish(struct enic *enic)
{
	enic_init_soft_stats(enic);

	/* Default conf */
	vnic_dev_packet_filter(enic->vdev,
		1 /* directed  */,
		1 /* multicast */,
		1 /* broadcast */,
		0 /* promisc   */,
		1 /* allmulti  */);

	enic->promisc = 0;
	enic->allmulti = 1;

	return 0;
}

static int enic_rss_conf_valid(struct enic *enic,
			       struct rte_eth_rss_conf *rss_conf)
{
	/* RSS is disabled per VIC settings. Ignore rss_conf. */
	if (enic->flow_type_rss_offloads == 0)
		return 0;
	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len != ENIC_RSS_HASH_KEY_SIZE) {
		dev_err(enic, "Given rss_key is %d bytes, it must be %d\n",
			rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
		return -EINVAL;
	}
	if (rss_conf->rss_hf != 0 &&
	    (rss_conf->rss_hf & enic->flow_type_rss_offloads) == 0) {
		dev_err(enic, "Given rss_hf contains none of the supported"
			" types\n");
		return -EINVAL;
	}
	return 0;
}

/* Set hash type and key according to rss_conf */
int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *eth_dev;
	uint64_t rss_hf;
	u8 rss_hash_type;
	u8 rss_enable;
	int ret;

	RTE_ASSERT(rss_conf != NULL);
	ret = enic_rss_conf_valid(enic, rss_conf);
	if (ret) {
		dev_err(enic, "RSS configuration (rss_conf) is invalid\n");
		return ret;
	}

	eth_dev = enic->rte_dev;
	rss_hash_type = 0;
	rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads;
	if (enic->rq_count > 1 &&
	    (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) &&
	    rss_hf != 0) {
		rss_enable = 1;
		if (rss_hf & ETH_RSS_IPV4)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
		if (rss_hf & ETH_RSS_IPV6)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
		if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
		if (rss_hf & ETH_RSS_IPV6_EX)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6_EX;
		if (rss_hf & ETH_RSS_IPV6_TCP_EX)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX;
	} else {
		rss_enable = 0;
		rss_hf = 0;
	}

	/* Set the hash key if provided */
	if (rss_enable && rss_conf->rss_key) {
		ret = enic_set_rsskey(enic, rss_conf->rss_key);
		if (ret) {
			dev_err(enic, "Failed to set RSS key\n");
			return ret;
		}
	}

	ret = enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, rss_hash_type,
			      ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
			      rss_enable);
	if (!ret) {
		enic->rss_hf = rss_hf;
		enic->rss_hash_type = rss_hash_type;
		enic->rss_enable = rss_enable;
	}
	return ret;
}

int enic_set_vlan_strip(struct enic *enic)
{
	/*
	 * Unfortunately, VLAN strip on/off and RSS on/off are configured
	 * together. So, re-do niccfg, preserving the current RSS settings.
	 */
	return enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, enic->rss_hash_type,
			       ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
			       enic->rss_enable);
}

void enic_add_packet_filter(struct enic *enic)
{
	/* Args -> directed, multicast, broadcast, promisc, allmulti */
	vnic_dev_packet_filter(enic->vdev, 1, 1, 1,
		enic->promisc, enic->allmulti);
}

int enic_get_link_status(struct enic *enic)
{
	return vnic_dev_link_status(enic->vdev);
}

static void enic_dev_deinit(struct enic *enic)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	/* stop link status checking */
	vnic_dev_notify_unset(enic->vdev);

	rte_free(eth_dev->data->mac_addrs);
	rte_free(enic->cq);
	rte_free(enic->intr);
	rte_free(enic->rq);
	rte_free(enic->wq);
}

int enic_set_vnic_res(struct enic *enic)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	int rc = 0;
	unsigned int required_rq, required_wq, required_cq, required_intr;

	/* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */
	required_rq = eth_dev->data->nb_rx_queues * 2;
	required_wq = eth_dev->data->nb_tx_queues;
	required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues;
	required_intr = 1; /* 1 for LSC even if intr_conf.lsc is 0 */
	if (eth_dev->data->dev_conf.intr_conf.rxq)
		required_intr += eth_dev->data->nb_rx_queues;

	if (enic->conf_rq_count < required_rq) {
		dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
			eth_dev->data->nb_rx_queues,
			required_rq, enic->conf_rq_count);
		rc = -EINVAL;
	}
	if (enic->conf_wq_count < required_wq) {
		dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
			eth_dev->data->nb_tx_queues, enic->conf_wq_count);
		rc = -EINVAL;
	}

	if (enic->conf_cq_count < required_cq) {
		dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
			required_cq, enic->conf_cq_count);
		rc = -EINVAL;
	}
	if (enic->conf_intr_count < required_intr) {
		dev_err(dev, "Not enough Interrupts to support Rx queue"
			" interrupts. Required:%u, Configured:%u\n",
			required_intr, enic->conf_intr_count);
		rc = -EINVAL;
	}

	if (rc == 0) {
		enic->rq_count = eth_dev->data->nb_rx_queues;
		enic->wq_count = eth_dev->data->nb_tx_queues;
		enic->cq_count = enic->rq_count + enic->wq_count;
		enic->intr_count = required_intr;
	}

	return rc;
}

/* Initialize the completion queue for an RQ */
static int
enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
{
	struct vnic_rq *sop_rq, *data_rq;
	unsigned int cq_idx;
	int rc = 0;

	sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
	data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
	cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(rq_idx));

	vnic_cq_clean(&enic->cq[cq_idx]);
	vnic_cq_init(&enic->cq[cq_idx],
		     0 /* flow_control_enable */,
		     1 /* color_enable */,
		     0 /* cq_head */,
		     0 /* cq_tail */,
		     1 /* cq_tail_color */,
		     0 /* interrupt_enable */,
		     1 /* cq_entry_enable */,
		     0 /* cq_message_enable */,
		     0 /* interrupt offset */,
		     0 /* cq_message_addr */);

	vnic_rq_init_start(sop_rq, enic_cq_rq(enic,
			   enic_rte_rq_idx_to_sop_idx(rq_idx)), 0,
			   sop_rq->ring.desc_count - 1, 1, 0);
	if (data_rq->in_use) {
		vnic_rq_init_start(data_rq,
				   enic_cq_rq(enic,
				   enic_rte_rq_idx_to_data_idx(rq_idx)), 0,
				   data_rq->ring.desc_count - 1, 1, 0);
	}

	rc = enic_alloc_rx_queue_mbufs(enic, sop_rq);
	if (rc)
		return rc;

	if (data_rq->in_use) {
		rc = enic_alloc_rx_queue_mbufs(enic, data_rq);
		if (rc) {
			enic_rxmbuf_queue_release(enic, sop_rq);
			return rc;
		}
	}

	return 0;
}

/* The Cisco NIC can send and receive packets up to a max packet size
 * determined by the NIC type and firmware. There is also an MTU
 * configured into the NIC via the CIMC/UCSM management interface
 * which can be overridden by this function (up to the max packet size).
 * Depending on the network setup, doing so may cause packet drops
 * and unexpected behavior.
 */
int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
{
	unsigned int rq_idx;
	struct vnic_rq *rq;
	int rc = 0;
	uint16_t old_mtu;	/* previous setting */
	uint16_t config_mtu;	/* Value configured into NIC via CIMC/UCSM */
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	old_mtu = eth_dev->data->mtu;
	config_mtu = enic->config.mtu;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	if (new_mtu > enic->max_mtu) {
		dev_err(enic,
			"MTU not updated: requested (%u) greater than max (%u)\n",
			new_mtu, enic->max_mtu);
		return -EINVAL;
	}
	if (new_mtu < ENIC_MIN_MTU) {
		dev_info(enic,
			"MTU not updated: requested (%u) less than min (%u)\n",
			new_mtu, ENIC_MIN_MTU);
		return -EINVAL;
	}
	if (new_mtu > config_mtu)
		dev_warning(enic,
			"MTU (%u) is greater than value configured in NIC (%u)\n",
			new_mtu, config_mtu);

	/* The easy case is when scatter is disabled. However if the MTU
	 * becomes greater than the mbuf data size, packet drops will ensue.
	 */
	if (!(enic->rte_dev->data->dev_conf.rxmode.offloads &
	      DEV_RX_OFFLOAD_SCATTER)) {
		eth_dev->data->mtu = new_mtu;
		goto set_mtu_done;
	}

	/* Rx scatter is enabled so reconfigure the RQs on the fly. The point
	 * is to change Rx scatter mode if necessary for better performance,
	 * i.e. if the MTU was greater than the mbuf size and now it's less,
	 * scatter Rx doesn't have to be used, and vice versa.
	 */
	rte_spinlock_lock(&enic->mtu_lock);

	/* Stop traffic on all RQs */
	for (rq_idx = 0; rq_idx < enic->rq_count * 2; rq_idx++) {
		rq = &enic->rq[rq_idx];
		if (rq->is_sop && rq->in_use) {
			rc = enic_stop_rq(enic,
					  enic_sop_rq_idx_to_rte_idx(rq_idx));
			if (rc) {
				dev_err(enic, "Failed to stop Rq %u\n", rq_idx);
				goto set_mtu_done;
			}
		}
	}

	/* replace Rx function with a no-op to avoid getting stale pkts */
	eth_dev->rx_pkt_burst = enic_dummy_recv_pkts;
	rte_mb();

	/* Allow time for threads to exit the real Rx function. */
	usleep(100000);

	/* now it is safe to reconfigure the RQs */

	/* update the mtu */
	eth_dev->data->mtu = new_mtu;

	/* free and reallocate RQs with the new MTU */
	for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
		rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];

		enic_free_rq(rq);
		rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
				   rq->tot_nb_desc, rq->rx_free_thresh);
		if (rc) {
			dev_err(enic,
				"Fatal MTU alloc error- No traffic will pass\n");
			goto set_mtu_done;
		}

		rc = enic_reinit_rq(enic, rq_idx);
		if (rc) {
			dev_err(enic,
				"Fatal MTU RQ reinit- No traffic will pass\n");
			goto set_mtu_done;
		}
	}

	/* put back the real receive function */
	rte_mb();
	eth_dev->rx_pkt_burst = enic_recv_pkts;
	rte_mb();

	/* restart Rx traffic */
	for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
		rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
		if (rq->is_sop && rq->in_use)
			enic_start_rq(enic, rq_idx);
	}

set_mtu_done:
	dev_info(enic, "MTU changed from %u to %u\n", old_mtu, new_mtu);
	rte_spinlock_unlock(&enic->mtu_lock);
	return rc;
}
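
/*
 * Probe-time initialization: fetch the vNIC config, allocate the queue
 * and interrupt bookkeeping arrays, set up the MAC address table, and
 * enable link status notification.
 */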
static int enic_dev_init(struct enic *enic)
{
	int err;
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	vnic_dev_intr_coal_timer_info_default(enic->vdev);

	/* Get vNIC configuration */
	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts */
	enic_get_res_counts(enic);
	if (enic->conf_rq_count == 1) {
		dev_err(enic, "Running with only 1 RQ configured in the vNIC is not supported.\n");
		dev_err(enic, "Please configure 2 RQs in the vNIC for each Rx queue used by DPDK.\n");
		dev_err(enic, "See the ENIC PMD guide for more information.\n");
		return -EINVAL;
	}
	/* Queue counts may be zeros. rte_zmalloc returns NULL in that case. */
	enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) *
			       enic->conf_cq_count, 8);
	enic->intr = rte_zmalloc("enic_vnic_intr", sizeof(struct vnic_intr) *
				 enic->conf_intr_count, 8);
	enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) *
			       enic->conf_rq_count, 8);
	enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) *
			       enic->conf_wq_count, 8);
	if (enic->conf_cq_count > 0 && enic->cq == NULL) {
		dev_err(enic, "failed to allocate vnic_cq, aborting.\n");
		return -1;
	}
	if (enic->conf_intr_count > 0 && enic->intr == NULL) {
		dev_err(enic, "failed to allocate vnic_intr, aborting.\n");
		return -1;
	}
	if (enic->conf_rq_count > 0 && enic->rq == NULL) {
		dev_err(enic, "failed to allocate vnic_rq, aborting.\n");
		return -1;
	}
	if (enic->conf_wq_count > 0 && enic->wq == NULL) {
		dev_err(enic, "failed to allocate vnic_wq, aborting.\n");
		return -1;
	}

	/* Get the supported filters */
	enic_fdir_info(enic);

	eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN
					       * ENIC_MAX_MAC_ADDR, 0);
	if (!eth_dev->data->mac_addrs) {
		dev_err(enic, "mac addr storage alloc failed, aborting.\n");
		return -1;
	}
	ether_addr_copy((struct ether_addr *)enic->mac_addr,
			eth_dev->data->mac_addrs);

	vnic_dev_set_reset_flag(enic->vdev, 0);

	LIST_INIT(&enic->flows);
	rte_spinlock_init(&enic->flows_lock);

	/* set up link status checking */
	vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */

	return 0;
}

int enic_probe(struct enic *enic)
{
	struct rte_pci_device *pdev = enic->pdev;
	int err = -1;

	dev_debug(enic, "Initializing ENIC PMD\n");

	/* if this is a secondary process the hardware is already initialized */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
	enic->bar0.len = pdev->mem_resource[0].len;

	/* Register vNIC device */
	enic->vdev = vnic_dev_register(NULL, enic, enic->pdev, &enic->bar0, 1);
	if (!enic->vdev) {
		dev_err(enic, "vNIC registration failed, aborting\n");
		goto err_out;
	}

	LIST_INIT(&enic->memzone_list);
	rte_spinlock_init(&enic->memzone_list_lock);

	vnic_register_cbacks(enic->vdev,
		enic_alloc_consistent,
		enic_free_consistent);

	/*
	 * Allocate the consistent memory for stats upfront so both primary
	 * and secondary processes can dump stats.
	 */
	err = vnic_dev_alloc_stats_mem(enic->vdev);
	if (err) {
		dev_err(enic, "Failed to allocate cmd memory, aborting\n");
		goto err_out_unregister;
	}
	/* Issue device open to get device in known state */
	err = enic_dev_open(enic);
	if (err) {
		dev_err(enic, "vNIC dev open failed, aborting\n");
		goto err_out_unregister;
	}

	/* Set ingress vlan rewrite mode before vnic initialization */
	err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
		IG_VLAN_REWRITE_MODE_PASS_THRU);
	if (err) {
		dev_err(enic,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier. We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */
	err = vnic_dev_init(enic->vdev, 0);
	if (err) {
		dev_err(enic, "vNIC dev init failed, aborting\n");
		goto err_out_dev_close;
	}

	err = enic_dev_init(enic);
	if (err) {
		dev_err(enic, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}

	return 0;

err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_unregister:
	vnic_dev_unregister(enic->vdev);
err_out:
	return err;
}

void enic_remove(struct enic *enic)
{
	enic_dev_deinit(enic);
	vnic_dev_close(enic->vdev);
	vnic_dev_unregister(enic->vdev);
}