/*
 *   BSD LICENSE
 *
 *   Copyright (c) 2017 Cavium, Inc. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_alarm.h>
#include <rte_ether.h>

#include "lio_logs.h"
#include "lio_23xx_vf.h"
#include "lio_ethdev.h"
#include "lio_rxtx.h"

/* Default RSS key in use */
static uint8_t lio_rss_key[40] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
};
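/* The 40-byte key above is the widely used default Toeplitz hash key
 * (the same byte sequence Microsoft publishes for RSS verification);
 * lio_dev_rss_configure() falls back to it when the application does
 * not supply its own rss_key.
 */
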
static const struct rte_eth_desc_lim lio_rx_desc_lim = {
	.nb_max		= CN23XX_MAX_OQ_DESCRIPTORS,
	.nb_min		= CN23XX_MIN_OQ_DESCRIPTORS,
	.nb_align	= 1,
};

static const struct rte_eth_desc_lim lio_tx_desc_lim = {
	.nb_max		= CN23XX_MAX_IQ_DESCRIPTORS,
	.nb_min		= CN23XX_MIN_IQ_DESCRIPTORS,
	.nb_align	= 1,
};

/* Wait for control command to reach the NIC; returns nonzero on timeout. */
static uint16_t
lio_wait_for_ctrl_cmd(struct lio_device *lio_dev,
		      struct lio_dev_ctrl_cmd *ctrl_cmd)
{
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;

	while ((ctrl_cmd->cond == 0) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
		rte_delay_ms(1);
	}

	return !timeout;
}

/**
 * \brief Send Rx control command
 * @param eth_dev Pointer to the structure rte_eth_dev
 * @param start_stop whether to start or stop
 */
static int
lio_send_rx_ctrl_cmd(struct rte_eth_dev *eth_dev, int start_stop)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_RX_CTL;
	ctrl_pkt.ncmd.s.param1 = start_stop;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send RX Control message\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "RX Control command timed out\n");
		return -1;
	}

	return 0;
}

/* Store statistics names and their offsets in the stats structure */
struct rte_lio_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};
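/* The firmware returns RX and TX counters back to back inside a single
 * struct octeon_link_stats (struct octeon_rx_stats followed by struct
 * octeon_tx_stats), so the TX entries below bias their offsets by
 * sizeof(struct octeon_rx_stats).
 */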
static const struct rte_lio_xstats_name_off rte_lio_stats_strings[] = {
	{"rx_pkts", offsetof(struct octeon_rx_stats, total_rcvd)},
	{"rx_bytes", offsetof(struct octeon_rx_stats, bytes_rcvd)},
	{"rx_broadcast_pkts", offsetof(struct octeon_rx_stats, total_bcst)},
	{"rx_multicast_pkts", offsetof(struct octeon_rx_stats, total_mcst)},
	{"rx_flow_ctrl_pkts", offsetof(struct octeon_rx_stats, ctl_rcvd)},
	{"rx_fifo_err", offsetof(struct octeon_rx_stats, fifo_err)},
	{"rx_dmac_drop", offsetof(struct octeon_rx_stats, dmac_drop)},
	{"rx_fcs_err", offsetof(struct octeon_rx_stats, fcs_err)},
	{"rx_jabber_err", offsetof(struct octeon_rx_stats, jabber_err)},
	{"rx_l2_err", offsetof(struct octeon_rx_stats, l2_err)},
	{"rx_vxlan_pkts", offsetof(struct octeon_rx_stats, fw_rx_vxlan)},
	{"rx_vxlan_err", offsetof(struct octeon_rx_stats, fw_rx_vxlan_err)},
	{"rx_lro_pkts", offsetof(struct octeon_rx_stats, fw_lro_pkts)},
	{"tx_pkts", (offsetof(struct octeon_tx_stats, total_pkts_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_bytes", (offsetof(struct octeon_tx_stats, total_bytes_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_broadcast_pkts",
		(offsetof(struct octeon_tx_stats, bcast_pkts_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_multicast_pkts",
		(offsetof(struct octeon_tx_stats, mcast_pkts_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_flow_ctrl_pkts", (offsetof(struct octeon_tx_stats, ctl_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_fifo_err", (offsetof(struct octeon_tx_stats, fifo_err)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_total_collisions", (offsetof(struct octeon_tx_stats,
					  total_collisions)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_tso", (offsetof(struct octeon_tx_stats, fw_tso)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_vxlan_pkts", (offsetof(struct octeon_tx_stats, fw_tx_vxlan)) +
			sizeof(struct octeon_rx_stats)},
};

#define LIO_NB_XSTATS	RTE_DIM(rte_lio_stats_strings)

/* Get HW stats of the port */
static int
lio_dev_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats,
		   unsigned int n)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	struct octeon_link_stats *hw_stats;
	struct lio_link_stats_resp *resp;
	struct lio_soft_command *sc;
	uint32_t resp_size;
	unsigned int i;
	int retval;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	if (n < LIO_NB_XSTATS)
		return LIO_NB_XSTATS;

	resp_size = sizeof(struct lio_link_stats_resp);
	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
	if (sc == NULL)
		return -ENOMEM;

	resp = (struct lio_link_stats_resp *)sc->virtrptr;
	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
				 LIO_OPCODE_PORT_STATS, 0, 0, 0);

	/* Setting wait time in seconds */
	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;

	retval = lio_send_soft_command(lio_dev, sc);
	if (retval == LIO_IQ_SEND_FAILED) {
		lio_dev_err(lio_dev, "failed to get port stats from firmware. status: %x\n",
			    retval);
		goto get_stats_fail;
	}

	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
		lio_process_ordered_list(lio_dev);
		rte_delay_ms(1);
	}

	retval = resp->status;
	if (retval) {
		lio_dev_err(lio_dev, "failed to get port stats from firmware\n");
		goto get_stats_fail;
	}

	lio_swap_8B_data((uint64_t *)(&resp->link_stats),
			 sizeof(struct octeon_link_stats) >> 3);

	hw_stats = &resp->link_stats;

	for (i = 0; i < LIO_NB_XSTATS; i++) {
		xstats[i].id = i;
		xstats[i].value =
		    *(uint64_t *)(((char *)hw_stats) +
				  rte_lio_stats_strings[i].offset);
	}

	lio_free_soft_command(sc);

	return LIO_NB_XSTATS;

get_stats_fail:
	lio_free_soft_command(sc);

	return -1;
}

static int
lio_dev_xstats_get_names(struct rte_eth_dev *eth_dev,
			 struct rte_eth_xstat_name *xstats_names,
			 unsigned limit __rte_unused)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	unsigned int i;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	if (xstats_names == NULL)
		return LIO_NB_XSTATS;

	/* Note: limit checked in rte_eth_xstats_names() */

	for (i = 0; i < LIO_NB_XSTATS; i++) {
		snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
			 "%s", rte_lio_stats_strings[i].name);
	}

	return LIO_NB_XSTATS;
}

/* Reset HW stats for the port */
static void
lio_dev_xstats_reset(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down\n",
			    lio_dev->port_id);
		return;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_CLEAR_STATS;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send clear stats command\n");
		return;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Clear stats command timed out\n");
		return;
	}

	/* clear stored per queue stats */
	RTE_FUNC_PTR_OR_RET(*eth_dev->dev_ops->stats_reset);
	(*eth_dev->dev_ops->stats_reset)(eth_dev);
}

/* Retrieve the device statistics (# packets in/out, # bytes in/out, etc.) */
static void
lio_dev_stats_get(struct rte_eth_dev *eth_dev,
		  struct rte_eth_stats *stats)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_droq_stats *oq_stats;
	struct lio_iq_stats *iq_stats;
	struct lio_instr_queue *txq;
	struct lio_droq *droq;
	int i, iq_no, oq_no;
	uint64_t bytes = 0;
	uint64_t pkts = 0;
	uint64_t drop = 0;

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		iq_no = lio_dev->linfo.txpciq[i].s.q_no;
		txq = lio_dev->instr_queue[iq_no];
		if (txq != NULL) {
			iq_stats = &txq->stats;
			pkts += iq_stats->tx_done;
			drop += iq_stats->tx_dropped;
			bytes += iq_stats->tx_tot_bytes;
		}
	}

	stats->opackets = pkts;
	stats->obytes = bytes;
	stats->oerrors = drop;

	pkts = 0;
	drop = 0;
	bytes = 0;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
		droq = lio_dev->droq[oq_no];
		if (droq != NULL) {
			oq_stats = &droq->stats;
			pkts += oq_stats->rx_pkts_received;
			drop += (oq_stats->rx_dropped +
				 oq_stats->dropped_toomany +
				 oq_stats->dropped_nomem);
			bytes += oq_stats->rx_bytes_received;
		}
	}

	stats->ibytes = bytes;
	stats->ipackets = pkts;
	stats->ierrors = drop;
}

static void
lio_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_droq_stats *oq_stats;
	struct lio_iq_stats *iq_stats;
	struct lio_instr_queue *txq;
	struct lio_droq *droq;
	int i, iq_no, oq_no;

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		iq_no = lio_dev->linfo.txpciq[i].s.q_no;
		txq = lio_dev->instr_queue[iq_no];
		if (txq != NULL) {
			iq_stats = &txq->stats;
			memset(iq_stats, 0, sizeof(struct lio_iq_stats));
		}
	}

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
		droq = lio_dev->droq[oq_no];
		if (droq != NULL) {
			oq_stats = &droq->stats;
			memset(oq_stats, 0, sizeof(struct lio_droq_stats));
		}
	}
}

static void
lio_dev_info_get(struct rte_eth_dev *eth_dev,
		 struct rte_eth_dev_info *devinfo)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	devinfo->pci_dev = pci_dev;

	switch (pci_dev->id.subsystem_device_id) {
	/* CN23xx 10G cards */
	case PCI_SUBSYS_DEV_ID_CN2350_210:
	case PCI_SUBSYS_DEV_ID_CN2360_210:
		devinfo->speed_capa = ETH_LINK_SPEED_10G;
		break;
	/* CN23xx 25G cards */
	case PCI_SUBSYS_DEV_ID_CN2350_225:
	case PCI_SUBSYS_DEV_ID_CN2360_225:
		devinfo->speed_capa = ETH_LINK_SPEED_25G;
		break;
	default:
		lio_dev_err(lio_dev,
			    "Unknown CN23XX subsystem device id. Not setting speed capability.\n");
	}

	devinfo->max_rx_queues = lio_dev->max_rx_queues;
	devinfo->max_tx_queues = lio_dev->max_tx_queues;

	devinfo->min_rx_bufsize = LIO_MIN_RX_BUF_SIZE;
	devinfo->max_rx_pktlen = LIO_MAX_RX_PKTLEN;

	devinfo->max_mac_addrs = 1;

	devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
				    DEV_RX_OFFLOAD_UDP_CKSUM |
				    DEV_RX_OFFLOAD_TCP_CKSUM |
				    DEV_RX_OFFLOAD_VLAN_STRIP);
	devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM |
				    DEV_TX_OFFLOAD_UDP_CKSUM |
				    DEV_TX_OFFLOAD_TCP_CKSUM |
				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);

	devinfo->rx_desc_lim = lio_rx_desc_lim;
	devinfo->tx_desc_lim = lio_tx_desc_lim;

	devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ;
	devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ;
	devinfo->flow_type_rss_offloads = (ETH_RSS_IPV4 |
					   ETH_RSS_NONFRAG_IPV4_TCP |
					   ETH_RSS_IPV6 |
					   ETH_RSS_NONFRAG_IPV6_TCP |
					   ETH_RSS_IPV6_EX |
					   ETH_RSS_IPV6_TCP_EX);
}

static int
lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t pf_mtu = lio_dev->linfo.link.s.mtu;
	uint32_t frame_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	PMD_INIT_FUNC_TRACE();

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't set MTU\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	/* check if VF MTU is within allowed range.
	 * New value should not exceed PF MTU.
	 */
	if ((mtu < ETHER_MIN_MTU) || (mtu > pf_mtu)) {
		lio_dev_err(lio_dev, "VF MTU should be >= %d and <= %d\n",
			    ETHER_MIN_MTU, pf_mtu);
		return -EINVAL;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_MTU;
	ctrl_pkt.ncmd.s.param1 = mtu;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send command to change MTU\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Command to change MTU timed out\n");
		return -1;
	}

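	/* Firmware accepted the MTU change; update the rxmode view so the
	 * jumbo flag and max frame length stay consistent with the new MTU.
	 */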
	if (frame_len > ETHER_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;

	eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len;
	eth_dev->data->mtu = mtu;

	return 0;
}

static int
lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct lio_rss_set *rss_param;
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;
	int i, j, index;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't update reta\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
		lio_dev_err(lio_dev,
			    "The size of hash lookup table configured (%d) doesn't match the number hardware can support (%d)\n",
			    reta_size, LIO_RSS_MAX_TABLE_SZ);
		return -EINVAL;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
	ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	rss_param->param.flags = 0xF;
	rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED;
	rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ;

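	/* reta_conf[] is an array of 64-entry groups: group i carries table
	 * entries i * RTE_RETA_GROUP_SIZE .. i * RTE_RETA_GROUP_SIZE + 63,
	 * and only entries whose bit is set in reta_conf[i].mask are updated.
	 */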
	for (i = 0; i < (reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
			if ((reta_conf[i].mask) & ((uint64_t)1 << j)) {
				index = (i * RTE_RETA_GROUP_SIZE) + j;
				rss_state->itable[index] = reta_conf[i].reta[j];
			}
		}
	}

	rss_state->itable_size = LIO_RSS_MAX_TABLE_SZ;
	memcpy(rss_param->itable, rss_state->itable, rss_state->itable_size);

	lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to set rss hash\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Set rss hash timed out\n");
		return -1;
	}

	return 0;
}

static int
lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	int i, num;

	if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
		lio_dev_err(lio_dev,
			    "The size of hash lookup table configured (%d) doesn't match the number hardware can support (%d)\n",
			    reta_size, LIO_RSS_MAX_TABLE_SZ);
		return -EINVAL;
	}

	num = reta_size / RTE_RETA_GROUP_SIZE;

	for (i = 0; i < num; i++) {
		memcpy(reta_conf->reta,
		       &rss_state->itable[i * RTE_RETA_GROUP_SIZE],
		       RTE_RETA_GROUP_SIZE);
		reta_conf++;
	}

	return 0;
}

static int
lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	uint8_t *hash_key = NULL;
	uint64_t rss_hf = 0;

	if (rss_state->hash_disable) {
		lio_dev_info(lio_dev, "RSS disabled in nic\n");
		rss_conf->rss_hf = 0;
		return 0;
	}

	/* Get key value */
	hash_key = rss_conf->rss_key;
	if (hash_key != NULL)
		memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size);

	if (rss_state->ip)
		rss_hf |= ETH_RSS_IPV4;
	if (rss_state->tcp_hash)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
	if (rss_state->ipv6)
		rss_hf |= ETH_RSS_IPV6;
	if (rss_state->ipv6_tcp_hash)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
	if (rss_state->ipv6_ex)
		rss_hf |= ETH_RSS_IPV6_EX;
	if (rss_state->ipv6_tcp_ex_hash)
		rss_hf |= ETH_RSS_IPV6_TCP_EX;

	rss_conf->rss_hf = rss_hf;

	return 0;
}

static int
lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct lio_rss_set *rss_param;
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't update hash\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
	ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	rss_param->param.flags = 0xF;

	if (rss_conf->rss_key) {
		rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_KEY_UNCHANGED;
		rss_state->hash_key_size = LIO_RSS_MAX_KEY_SZ;
		rss_param->param.hashkeysize = LIO_RSS_MAX_KEY_SZ;
		memcpy(rss_state->hash_key, rss_conf->rss_key,
		       rss_state->hash_key_size);
		memcpy(rss_param->key, rss_state->hash_key,
		       rss_state->hash_key_size);
	}

	if ((rss_conf->rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
		/* Can't disable rss through hash flags,
		 * if it is enabled by default during init
		 */
		if (!rss_state->hash_disable)
			return -EINVAL;

		/* This is for --disable-rss during testpmd launch */
		rss_param->param.flags |= LIO_RSS_PARAM_DISABLE_RSS;
	} else {
		uint32_t hashinfo = 0;

		/* Can't enable rss if disabled by default during init */
		if (rss_state->hash_disable)
			return -EINVAL;

		if (rss_conf->rss_hf & ETH_RSS_IPV4) {
			hashinfo |= LIO_RSS_HASH_IPV4;
			rss_state->ip = 1;
		} else {
			rss_state->ip = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
			hashinfo |= LIO_RSS_HASH_TCP_IPV4;
			rss_state->tcp_hash = 1;
		} else {
			rss_state->tcp_hash = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_IPV6) {
			hashinfo |= LIO_RSS_HASH_IPV6;
			rss_state->ipv6 = 1;
		} else {
			rss_state->ipv6 = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
			hashinfo |= LIO_RSS_HASH_TCP_IPV6;
			rss_state->ipv6_tcp_hash = 1;
		} else {
			rss_state->ipv6_tcp_hash = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) {
			hashinfo |= LIO_RSS_HASH_IPV6_EX;
			rss_state->ipv6_ex = 1;
		} else {
			rss_state->ipv6_ex = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) {
			hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX;
			rss_state->ipv6_tcp_ex_hash = 1;
		} else {
			rss_state->ipv6_tcp_ex_hash = 0;
		}

		rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_INFO_UNCHANGED;
		rss_param->param.hashinfo = hashinfo;
	}

	lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to set rss hash\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Set rss hash timed out\n");
		return -1;
	}

	return 0;
}

/**
 * Add vxlan dest udp port for an interface.
 *
 * @param eth_dev
 *  Pointer to the structure rte_eth_dev
 * @param udp_tnl
 *  udp tunnel configuration
 *
 * @return
 *  On success return 0
 *  On failure return -1
 */
static int
lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev,
		       struct rte_eth_udp_tunnel *udp_tnl)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (udp_tnl == NULL)
		return -EINVAL;

	if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
		lio_dev_err(lio_dev, "Unsupported tunnel type\n");
		return -1;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
	ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
	ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_ADD;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_ADD command\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "VXLAN_PORT_ADD command timed out\n");
		return -1;
	}

	return 0;
}

/**
 * Remove vxlan dest udp port for an interface.
 *
 * @param eth_dev
 *  Pointer to the structure rte_eth_dev
 * @param udp_tnl
 *  udp tunnel configuration
 *
 * @return
 *  On success return 0
 *  On failure return -1
 */
static int
lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev,
		       struct rte_eth_udp_tunnel *udp_tnl)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (udp_tnl == NULL)
		return -EINVAL;

	if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
		lio_dev_err(lio_dev, "Unsupported tunnel type\n");
		return -1;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
	ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
	ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_DEL;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_DEL command\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "VXLAN_PORT_DEL command timed out\n");
		return -1;
	}

	return 0;
}

static int
lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (lio_dev->linfo.vlan_is_admin_assigned)
		return -EPERM;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = on ?
			LIO_CMD_ADD_VLAN_FILTER : LIO_CMD_DEL_VLAN_FILTER;
	ctrl_pkt.ncmd.s.param1 = vlan_id;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to %s VLAN port\n",
			    on ? "add" : "remove");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Command to %s VLAN port timed out\n",
			    on ? "add" : "remove");
		return -1;
	}

	return 0;
}

/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param eth_dev
 *   Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   Pointer to the link status to be saved into the device structure.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
lio_dev_atomic_write_link_status(struct rte_eth_dev *eth_dev,
				 struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &eth_dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

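/* Population count (number of set bits) of a 64-bit word, computed with
 * the classic SWAR bit-twiddling reduction; e.g. lio_hweight64(0xF0) == 4.
 * Used by lio_dev_configure() to count the queues the firmware reports in
 * its iq/oq masks.
 */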
static uint64_t
lio_hweight64(uint64_t w)
{
	uint64_t res = w - ((w >> 1) & 0x5555555555555555ul);

	res =
	    (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
	res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
	res = res + (res >> 8);
	res = res + (res >> 16);

	return (res + (res >> 32)) & 0x00000000000000FFul;
}

static int
lio_dev_link_update(struct rte_eth_dev *eth_dev,
		    int wait_to_complete __rte_unused)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct rte_eth_link link, old;

	/* Initialize */
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = ETH_SPEED_NUM_NONE;
	link.link_duplex = ETH_LINK_HALF_DUPLEX;
	link.link_autoneg = ETH_LINK_AUTONEG;
	memset(&old, 0, sizeof(old));

	/* Return what we found */
	if (lio_dev->linfo.link.s.link_up == 0) {
		/* Interface is down */
		if (lio_dev_atomic_write_link_status(eth_dev, &link))
			return -1;
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	link.link_status = ETH_LINK_UP; /* Interface is up */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	switch (lio_dev->linfo.link.s.speed) {
	case LIO_LINK_SPEED_10000:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case LIO_LINK_SPEED_25000:
		link.link_speed = ETH_SPEED_NUM_25G;
		break;
	default:
		link.link_speed = ETH_SPEED_NUM_NONE;
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
	}

	if (lio_dev_atomic_write_link_status(eth_dev, &link))
		return -1;

	if (link.link_status == old.link_status)
		return -1;

	return 0;
}

/**
 * \brief Enable or disable allmulticast for the net device
 * @param eth_dev Pointer to the structure rte_eth_dev
 */
static void
lio_change_dev_flag(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	/* Create a ctrl pkt command to be sent to core app. */
	ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS;
	ctrl_pkt.ncmd.s.param1 = lio_dev->ifflags;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send change flag message\n");
		return;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
		lio_dev_err(lio_dev, "Change dev flag command timed out\n");
}

static void
lio_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't enable multicast\n",
			    lio_dev->port_id);
		return;
	}

	lio_dev->ifflags |= LIO_IFFLAG_ALLMULTI;
	lio_change_dev_flag(eth_dev);
}

static void
lio_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't disable multicast\n",
			    lio_dev->port_id);
		return;
	}

	lio_dev->ifflags &= ~LIO_IFFLAG_ALLMULTI;
	lio_change_dev_flag(eth_dev);
}

static void
lio_dev_rss_configure(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct rte_eth_rss_reta_entry64 reta_conf[8];
	struct rte_eth_rss_conf rss_conf;
	uint16_t i;

	/* Configure the RSS key and the RSS protocols used to compute
	 * the RSS hash of input packets.
	 */
	rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	if ((rss_conf.rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
		rss_state->hash_disable = 1;
		lio_dev_rss_hash_update(eth_dev, &rss_conf);
		return;
	}

	if (rss_conf.rss_key == NULL)
		rss_conf.rss_key = lio_rss_key; /* Default hash key */

	lio_dev_rss_hash_update(eth_dev, &rss_conf);

	memset(reta_conf, 0, sizeof(reta_conf));
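	/* Distribute the redirection-table entries round-robin across the
	 * configured Rx queues, setting each group's mask bit so that every
	 * entry gets written.
	 */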
	for (i = 0; i < LIO_RSS_MAX_TABLE_SZ; i++) {
		uint8_t q_idx, conf_idx, reta_idx;

		q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ?
				  i % eth_dev->data->nb_rx_queues : 0);
		conf_idx = i / RTE_RETA_GROUP_SIZE;
		reta_idx = i % RTE_RETA_GROUP_SIZE;
		reta_conf[conf_idx].reta[reta_idx] = q_idx;
		reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx);
	}

	lio_dev_rss_reta_update(eth_dev, reta_conf, LIO_RSS_MAX_TABLE_SZ);
}

static void
lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct rte_eth_rss_conf rss_conf;

	switch (eth_dev->data->dev_conf.rxmode.mq_mode) {
	case ETH_MQ_RX_RSS:
		lio_dev_rss_configure(eth_dev);
		break;
	case ETH_MQ_RX_NONE:
	/* if mq_mode is none, disable rss mode. */
	default:
		memset(&rss_conf, 0, sizeof(rss_conf));
		rss_state->hash_disable = 1;
		lio_dev_rss_hash_update(eth_dev, &rss_conf);
	}
}

/**
 * Setup our receive queue/ringbuffer. This is the
 * queue the Octeon uses to send us packets and
 * responses. We are given a memory pool for our
 * packet buffers that are used to populate the receive
 * queue.
 *
 * @param eth_dev
 *    Pointer to the structure rte_eth_dev
 * @param q_no
 *    Queue number
 * @param num_rx_descs
 *    Number of entries in the queue
 * @param socket_id
 *    Where to allocate memory
 * @param rx_conf
 *    Pointer to the structure rte_eth_rxconf
 * @param mp
 *    Pointer to the packet pool
 *
 * @return
 *    - On success, return 0
 *    - On failure, return -1
 */
static int
lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		       uint16_t num_rx_descs, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf __rte_unused,
		       struct rte_mempool *mp)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint32_t fw_mapped_oq;
	uint16_t buf_size;

	if (q_no >= lio_dev->nb_rx_queues) {
		lio_dev_err(lio_dev, "Invalid rx queue number %u\n", q_no);
		return -EINVAL;
	}

	lio_dev_dbg(lio_dev, "setting up rx queue %u\n", q_no);

	fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no;

	if ((lio_dev->droq[fw_mapped_oq]) &&
	    (num_rx_descs != lio_dev->droq[fw_mapped_oq]->max_count)) {
		lio_dev_err(lio_dev,
			    "Reconfiguring Rx descs not supported. Configure descs to same value %u or restart application\n",
			    lio_dev->droq[fw_mapped_oq]->max_count);
		return -ENOTSUP;
	}

	mbp_priv = rte_mempool_get_priv(mp);
	buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

	if (lio_setup_droq(lio_dev, fw_mapped_oq, num_rx_descs, buf_size, mp,
			   socket_id)) {
		lio_dev_err(lio_dev, "droq allocation failed\n");
		return -1;
	}

	eth_dev->data->rx_queues[q_no] = lio_dev->droq[fw_mapped_oq];

	return 0;
}

/**
 * Release the receive queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param rxq
 *    Opaque pointer to the receive queue to release
 *
 * @return
 *    - nothing
 */
static void
lio_dev_rx_queue_release(void *rxq)
{
	struct lio_droq *droq = rxq;
	int oq_no;

	if (droq) {
		/* Run time queue deletion not supported */
		if (droq->lio_dev->port_configured)
			return;

		oq_no = droq->q_no;
		lio_delete_droq_queue(droq->lio_dev, oq_no);
	}
}

/**
 * Allocate and initialize SW ring. Initialize associated HW registers.
 *
 * @param eth_dev
 *   Pointer to structure rte_eth_dev
 *
 * @param q_no
 *   Queue number
 *
 * @param num_tx_descs
 *   Number of ringbuffer descriptors
 *
 * @param socket_id
 *   NUMA socket id, used for memory allocations
 *
 * @param tx_conf
 *   Pointer to the structure rte_eth_txconf
 *
 * @return
 *   - On success, return 0
 *   - On failure, return -errno value
 */
static int
lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		       uint16_t num_tx_descs, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	int fw_mapped_iq = lio_dev->linfo.txpciq[q_no].s.q_no;
	int retval;

	if (q_no >= lio_dev->nb_tx_queues) {
		lio_dev_err(lio_dev, "Invalid tx queue number %u\n", q_no);
		return -EINVAL;
	}

	lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no);

	if ((lio_dev->instr_queue[fw_mapped_iq] != NULL) &&
	    (num_tx_descs != lio_dev->instr_queue[fw_mapped_iq]->max_count)) {
		lio_dev_err(lio_dev,
			    "Reconfiguring Tx descs not supported. Configure descs to same value %u or restart application\n",
			    lio_dev->instr_queue[fw_mapped_iq]->max_count);
		return -ENOTSUP;
	}

	retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no],
			      num_tx_descs, lio_dev, socket_id);
	if (retval) {
		lio_dev_err(lio_dev, "Runtime IQ(TxQ) creation failed.\n");
		return retval;
	}

	retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq,
				   lio_dev->instr_queue[fw_mapped_iq]->max_count,
				   socket_id);
	if (retval) {
		lio_delete_instruction_queue(lio_dev, fw_mapped_iq);
		return retval;
	}

	eth_dev->data->tx_queues[q_no] = lio_dev->instr_queue[fw_mapped_iq];

	return 0;
}

/**
 * Release the transmit queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param txq
 *    Opaque pointer to the transmit queue to release
 *
 * @return
 *    - nothing
 */
static void
lio_dev_tx_queue_release(void *txq)
{
	struct lio_instr_queue *tq = txq;
	uint32_t fw_mapped_iq_no;

	if (tq) {
		/* Run time queue deletion not supported */
		if (tq->lio_dev->port_configured)
			return;

		/* Free the scatter-gather list */
		lio_delete_sglist(tq);

		fw_mapped_iq_no = tq->txpciq.s.q_no;
		lio_delete_instruction_queue(tq->lio_dev, fw_mapped_iq_no);
	}
}

/**
 * API to check link state.
 */
static void
lio_dev_get_link_status(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	struct lio_link_status_resp *resp;
	union octeon_link_status *ls;
	struct lio_soft_command *sc;
	uint32_t resp_size;

	if (!lio_dev->intf_open)
		return;

	resp_size = sizeof(struct lio_link_status_resp);
	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
	if (sc == NULL)
		return;

	resp = (struct lio_link_status_resp *)sc->virtrptr;
	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
				 LIO_OPCODE_INFO, 0, 0, 0);

	/* Setting wait time in seconds */
	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;

	if (lio_send_soft_command(lio_dev, sc) == LIO_IQ_SEND_FAILED)
		goto get_status_fail;

	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
		rte_delay_ms(1);
	}

	if (resp->status)
		goto get_status_fail;

	ls = &resp->link_info.link;

	lio_swap_8B_data((uint64_t *)ls, sizeof(union octeon_link_status) >> 3);

	if (lio_dev->linfo.link.link_status64 != ls->link_status64) {
		if (ls->s.mtu < eth_dev->data->mtu) {
			lio_dev_info(lio_dev, "Lowered VF MTU to %d as PF MTU dropped\n",
				     ls->s.mtu);
			eth_dev->data->mtu = ls->s.mtu;
		}
		lio_dev->linfo.link.link_status64 = ls->link_status64;
		lio_dev_link_update(eth_dev, 0);
	}

	lio_free_soft_command(sc);

	return;

get_status_fail:
	lio_free_soft_command(sc);
}

/* This function will be invoked every LIO_LSC_TIMEOUT us (100 ms)
 * and will update the link state if it changes.
 */
static void
lio_sync_link_state_check(void *eth_dev)
{
	struct lio_device *lio_dev =
		(((struct rte_eth_dev *)eth_dev)->data->dev_private);

	if (lio_dev->port_configured)
		lio_dev_get_link_status(eth_dev);

	/* Schedule periodic link status check.
	 * Stop the check if the interface is closed, and start it again
	 * when the interface is opened.
	 */
	if (lio_dev->intf_open)
		rte_eal_alarm_set(LIO_LSC_TIMEOUT, lio_sync_link_state_check,
				  eth_dev);
}

static int
lio_dev_start(struct rte_eth_dev *eth_dev)
{
	uint16_t mtu;
	uint32_t frame_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	int ret = 0;

	lio_dev_info(lio_dev, "Starting port %d\n", eth_dev->data->port_id);

	if (lio_dev->fn_list.enable_io_queues(lio_dev))
		return -1;

	if (lio_send_rx_ctrl_cmd(eth_dev, 1))
		return -1;

	/* Ready for link status updates */
	lio_dev->intf_open = 1;
	rte_mb();

	/* Configure RSS if device configured with multiple RX queues. */
	lio_dev_mq_rx_configure(eth_dev);

	/* start polling for lsc */
	ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT,
				lio_sync_link_state_check,
				eth_dev);
	if (ret) {
		lio_dev_err(lio_dev,
			    "link state check handler creation failed\n");
		goto dev_lsc_handle_error;
	}

	while ((lio_dev->linfo.link.link_status64 == 0) && (--timeout))
		rte_delay_ms(1);

	if (lio_dev->linfo.link.link_status64 == 0) {
		ret = -1;
		goto dev_mtu_set_error;
	}

	mtu = (uint16_t)(frame_len - ETHER_HDR_LEN - ETHER_CRC_LEN);
	if (mtu < ETHER_MIN_MTU)
		mtu = ETHER_MIN_MTU;

	if (eth_dev->data->mtu != mtu) {
		ret = lio_dev_mtu_set(eth_dev, mtu);
		if (ret)
			goto dev_mtu_set_error;
	}

	return 0;

dev_mtu_set_error:
	rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);

dev_lsc_handle_error:
	lio_dev->intf_open = 0;
	lio_send_rx_ctrl_cmd(eth_dev, 0);

	return ret;
}

/* Stop device and disable input/output functions */
static void
lio_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	lio_dev_info(lio_dev, "Stopping port %d\n", eth_dev->data->port_id);
	lio_dev->intf_open = 0;
	rte_mb();

	/* Cancel callback if still running. */
	rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);

	lio_send_rx_ctrl_cmd(eth_dev, 0);

	/* Clear recorded link status */
	lio_dev->linfo.link.link_status64 = 0;
}

static int
lio_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_info(lio_dev, "Port is stopped, Start the port first\n");
		return 0;
	}

	if (lio_dev->linfo.link.s.link_up) {
		lio_dev_info(lio_dev, "Link is already UP\n");
		return 0;
	}

	if (lio_send_rx_ctrl_cmd(eth_dev, 1)) {
		lio_dev_err(lio_dev, "Unable to set Link UP\n");
		return -1;
	}

	lio_dev->linfo.link.s.link_up = 1;
	eth_dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}

static int
lio_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_info(lio_dev, "Port is stopped, Start the port first\n");
		return 0;
	}

	if (!lio_dev->linfo.link.s.link_up) {
		lio_dev_info(lio_dev, "Link is already DOWN\n");
		return 0;
	}

	lio_dev->linfo.link.s.link_up = 0;
	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

	if (lio_send_rx_ctrl_cmd(eth_dev, 0)) {
		lio_dev->linfo.link.s.link_up = 1;
		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
		lio_dev_err(lio_dev, "Unable to set Link Down\n");
		return -1;
	}

	return 0;
}

/**
 * Reset and stop the device. This occurs on the first
 * call to this routine. Subsequent calls will simply
 * return. NB: This will require the NIC to be rebooted.
 *
 * @param eth_dev
 *    Pointer to the structure rte_eth_dev
 *
 * @return
 *    - nothing
 */
static void
lio_dev_close(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint32_t i;

	lio_dev_info(lio_dev, "closing port %d\n", eth_dev->data->port_id);

	if (lio_dev->intf_open)
		lio_dev_stop(eth_dev);

	lio_wait_for_instr_fetch(lio_dev);

	lio_dev->fn_list.disable_io_queues(lio_dev);

	cn23xx_vf_set_io_queues_off(lio_dev);

	/* Reset iq regs (IQ_DBELL).
	 * Clear sli_pktx_cnts (OQ_PKTS_SENT).
	 */
	for (i = 0; i < lio_dev->nb_rx_queues; i++) {
		struct lio_droq *droq = lio_dev->droq[i];

		if (droq == NULL)
			continue;

		uint32_t pkt_count = rte_read32(droq->pkts_sent_reg);

		lio_dev_dbg(lio_dev,
			    "pending oq count %u\n", pkt_count);
		rte_write32(pkt_count, droq->pkts_sent_reg);
	}

	/* Release the mailbox */
	lio_dev->fn_list.free_mbox(lio_dev);

	/* Free glist resources */
	rte_free(lio_dev->glist_head);
	rte_free(lio_dev->glist_lock);
	lio_dev->glist_head = NULL;
	lio_dev->glist_lock = NULL;

	lio_dev->port_configured = 0;

	/* Delete all queues */
	lio_dev_clear_queues(eth_dev);
}

/**
 * Enable tunnel rx checksum verification from firmware.
 */
static void
lio_enable_hw_tunnel_rx_checksum(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_RX_CSUM_CTL;
	ctrl_pkt.ncmd.s.param1 = LIO_CMD_RXCSUM_ENABLE;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send TNL_RX_CSUM command\n");
		return;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
		lio_dev_err(lio_dev, "TNL_RX_CSUM command timed out\n");
}

/**
 * Enable checksum calculation for inner packet in a tunnel.
 */
static void
lio_enable_hw_tunnel_tx_checksum(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_TX_CSUM_CTL;
	ctrl_pkt.ncmd.s.param1 = LIO_CMD_TXCSUM_ENABLE;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send TNL_TX_CSUM command\n");
		return;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
		lio_dev_err(lio_dev, "TNL_TX_CSUM command timed out\n");
}

static int lio_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	int retval, num_iqueues, num_oqueues;
	uint8_t mac[ETHER_ADDR_LEN], i;
	struct lio_if_cfg_resp *resp;
	struct lio_soft_command *sc;
	union lio_if_cfg if_cfg;
	uint32_t resp_size;

	PMD_INIT_FUNC_TRACE();

	/* Re-configuring firmware not supported.
	 * Can't change tx/rx queues per port from initial value.
	 */
	if (lio_dev->port_configured) {
		if ((lio_dev->nb_rx_queues != eth_dev->data->nb_rx_queues) ||
		    (lio_dev->nb_tx_queues != eth_dev->data->nb_tx_queues)) {
			lio_dev_err(lio_dev,
				    "rxq/txq re-conf not supported. Restart application with new value.\n");
			return -ENOTSUP;
		}
		return 0;
	}

	lio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues;
	lio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues;

	resp_size = sizeof(struct lio_if_cfg_resp);
	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
	if (sc == NULL)
		return -ENOMEM;

	resp = (struct lio_if_cfg_resp *)sc->virtrptr;

	/* Firmware doesn't have the capability to reconfigure the queues;
	 * claim all queues, and use as many as required.
	 */
	if_cfg.if_cfg64 = 0;
	if_cfg.s.num_iqueues = lio_dev->nb_tx_queues;
	if_cfg.s.num_oqueues = lio_dev->nb_rx_queues;
	if_cfg.s.base_queue = 0;

	if_cfg.s.gmx_port_id = lio_dev->pf_num;

	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
				 LIO_OPCODE_IF_CFG, 0,
				 if_cfg.if_cfg64, 0);

	/* Setting wait time in seconds */
	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;

	retval = lio_send_soft_command(lio_dev, sc);
	if (retval == LIO_IQ_SEND_FAILED) {
		lio_dev_err(lio_dev, "iq/oq config failed status: %x\n",
			    retval);
		/* Soft instr is freed by driver in case of failure. */
		goto nic_config_fail;
	}

	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed-out.
	 */
	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
		lio_process_ordered_list(lio_dev);
		rte_delay_ms(1);
	}

	retval = resp->status;
	if (retval) {
		lio_dev_err(lio_dev, "iq/oq config failed\n");
		goto nic_config_fail;
	}

	lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
			 sizeof(struct octeon_if_cfg_info) >> 3);

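	/* Each bit set in iqmask/oqmask marks a queue the firmware actually
	 * allocated, so a popcount yields the usable queue counts.
	 */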
	num_iqueues = lio_hweight64(resp->cfg_info.iqmask);
	num_oqueues = lio_hweight64(resp->cfg_info.oqmask);

	if (!(num_iqueues) || !(num_oqueues)) {
		lio_dev_err(lio_dev,
			    "Got bad iqueues (%016lx) or oqueues (%016lx) from firmware.\n",
			    (unsigned long)resp->cfg_info.iqmask,
			    (unsigned long)resp->cfg_info.oqmask);
		goto nic_config_fail;
	}

	lio_dev_dbg(lio_dev,
		    "interface %d, iqmask %016lx, oqmask %016lx, numiqueues %d, numoqueues %d\n",
		    eth_dev->data->port_id,
		    (unsigned long)resp->cfg_info.iqmask,
		    (unsigned long)resp->cfg_info.oqmask,
		    num_iqueues, num_oqueues);

	lio_dev->linfo.num_rxpciq = num_oqueues;
	lio_dev->linfo.num_txpciq = num_iqueues;

	for (i = 0; i < num_oqueues; i++) {
		lio_dev->linfo.rxpciq[i].rxpciq64 =
		    resp->cfg_info.linfo.rxpciq[i].rxpciq64;
		lio_dev_dbg(lio_dev, "index %d OQ %d\n",
			    i, lio_dev->linfo.rxpciq[i].s.q_no);
	}

	for (i = 0; i < num_iqueues; i++) {
		lio_dev->linfo.txpciq[i].txpciq64 =
		    resp->cfg_info.linfo.txpciq[i].txpciq64;
		lio_dev_dbg(lio_dev, "index %d IQ %d\n",
			    i, lio_dev->linfo.txpciq[i].s.q_no);
	}

	lio_dev->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
	lio_dev->linfo.gmxport = resp->cfg_info.linfo.gmxport;
	lio_dev->linfo.link.link_status64 =
	    resp->cfg_info.linfo.link.link_status64;

	/* 64-bit swap required on LE machines */
	lio_swap_8B_data(&lio_dev->linfo.hw_addr, 1);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mac[i] = *((uint8_t *)(((uint8_t *)&lio_dev->linfo.hw_addr) +
				       2 + i));

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)mac, &eth_dev->data->mac_addrs[0]);

	/* enable firmware checksum support for tunnel packets */
	lio_enable_hw_tunnel_rx_checksum(eth_dev);
	lio_enable_hw_tunnel_tx_checksum(eth_dev);

	lio_dev->glist_lock =
	    rte_zmalloc(NULL, sizeof(*lio_dev->glist_lock) * num_iqueues, 0);
	if (lio_dev->glist_lock == NULL)
		return -ENOMEM;

	lio_dev->glist_head =
		rte_zmalloc(NULL, sizeof(*lio_dev->glist_head) * num_iqueues,
			    0);
	if (lio_dev->glist_head == NULL) {
		rte_free(lio_dev->glist_lock);
		lio_dev->glist_lock = NULL;
		return -ENOMEM;
	}

	lio_dev_link_update(eth_dev, 0);

	lio_dev->port_configured = 1;

	lio_free_soft_command(sc);

	/* Disable iq_0 for reconf */
	lio_dev->fn_list.disable_io_queues(lio_dev);

	/* Reset ioq regs */
	lio_dev->fn_list.setup_device_regs(lio_dev);

	/* Free iq_0 used during init */
	lio_free_instr_queue0(lio_dev);

	return 0;

nic_config_fail:
	lio_dev_err(lio_dev, "Failed retval %d\n", retval);
	lio_free_soft_command(sc);
	lio_free_instr_queue0(lio_dev);

	return -ENODEV;
}

/* Define our ethernet device operations */
static const struct eth_dev_ops liovf_eth_dev_ops = {
	.dev_configure		= lio_dev_configure,
	.dev_start		= lio_dev_start,
	.dev_stop		= lio_dev_stop,
	.dev_set_link_up	= lio_dev_set_link_up,
	.dev_set_link_down	= lio_dev_set_link_down,
	.dev_close		= lio_dev_close,
	.allmulticast_enable	= lio_dev_allmulticast_enable,
	.allmulticast_disable	= lio_dev_allmulticast_disable,
	.link_update		= lio_dev_link_update,
	.stats_get		= lio_dev_stats_get,
	.xstats_get		= lio_dev_xstats_get,
	.xstats_get_names	= lio_dev_xstats_get_names,
	.stats_reset		= lio_dev_stats_reset,
	.xstats_reset		= lio_dev_xstats_reset,
	.dev_infos_get		= lio_dev_info_get,
	.vlan_filter_set	= lio_dev_vlan_filter_set,
	.rx_queue_setup		= lio_dev_rx_queue_setup,
	.rx_queue_release	= lio_dev_rx_queue_release,
	.tx_queue_setup		= lio_dev_tx_queue_setup,
	.tx_queue_release	= lio_dev_tx_queue_release,
	.reta_update		= lio_dev_rss_reta_update,
	.reta_query		= lio_dev_rss_reta_query,
	.rss_hash_conf_get	= lio_dev_rss_hash_conf_get,
	.rss_hash_update	= lio_dev_rss_hash_update,
	.udp_tunnel_port_add	= lio_dev_udp_tunnel_add,
	.udp_tunnel_port_del	= lio_dev_udp_tunnel_del,
	.mtu_set		= lio_dev_mtu_set,
};

static void
lio_check_pf_hs_response(void *lio_dev)
{
	struct lio_device *dev = lio_dev;

	/* check till response arrives */
	if (dev->pfvf_hsword.coproc_tics_per_us)
		return;

	cn23xx_vf_handle_mbox(dev);

	rte_eal_alarm_set(1, lio_check_pf_hs_response, lio_dev);
}

/**
 * \brief Identify the LIO device and map the BAR address space
 * @param lio_dev lio device
 */
static int
lio_chip_specific_setup(struct lio_device *lio_dev)
{
	struct rte_pci_device *pdev = lio_dev->pci_dev;
	uint32_t dev_id = pdev->id.device_id;
	const char *s;
	int ret = 1;

	switch (dev_id) {
	case LIO_CN23XX_VF_VID:
		lio_dev->chip_id = LIO_CN23XX_VF_VID;
		ret = cn23xx_vf_setup_device(lio_dev);
		s = "CN23XX VF";
		break;
	default:
		s = "?";
		lio_dev_err(lio_dev, "Unsupported Chip\n");
	}

	if (!ret)
		lio_dev_info(lio_dev, "DEVICE : %s\n", s);

	return ret;
}

static int
lio_first_time_init(struct lio_device *lio_dev,
		    struct rte_pci_device *pdev)
{
	int dpdk_queues;

	PMD_INIT_FUNC_TRACE();

	/* set dpdk specific pci device pointer */
	lio_dev->pci_dev = pdev;

	/* Identify the LIO type and set device ops */
	if (lio_chip_specific_setup(lio_dev)) {
		lio_dev_err(lio_dev, "Chip specific setup failed\n");
		return -1;
	}

	/* Initialize soft command buffer pool */
	if (lio_setup_sc_buffer_pool(lio_dev)) {
		lio_dev_err(lio_dev, "sc buffer pool allocation failed\n");
		return -1;
	}

	/* Initialize lists to manage the requests of different types that
	 * arrive from applications for this lio device.
	 */
	lio_setup_response_list(lio_dev);

	if (lio_dev->fn_list.setup_mbox(lio_dev)) {
		lio_dev_err(lio_dev, "Mailbox setup failed\n");
		goto error;
	}

	/* Check PF response */
	lio_check_pf_hs_response((void *)lio_dev);

	/* Do handshake and exit if incompatible PF driver */
	if (cn23xx_pfvf_handshake(lio_dev))
		goto error;

	if (cn23xx_vf_set_io_queues_off(lio_dev)) {
		lio_dev_err(lio_dev, "Setting io queues off failed\n");
		goto error;
	}

	if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
		lio_dev_err(lio_dev, "Failed to configure device registers\n");
		goto error;
	}

	if (lio_setup_instr_queue0(lio_dev)) {
		lio_dev_err(lio_dev, "Failed to setup instruction queue 0\n");
		goto error;
	}

	dpdk_queues = (int)lio_dev->sriov_info.rings_per_vf;

	lio_dev->max_tx_queues = dpdk_queues;
	lio_dev->max_rx_queues = dpdk_queues;

	/* Enable input and output queues for this device */
	if (lio_dev->fn_list.enable_io_queues(lio_dev))
		goto error;

	return 0;

error:
	lio_free_sc_buffer_pool(lio_dev);
	if (lio_dev->mbox[0])
		lio_dev->fn_list.free_mbox(lio_dev);
	if (lio_dev->instr_queue[0])
		lio_free_instr_queue0(lio_dev);

	return -1;
}

static int
lio_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	/* lio_free_sc_buffer_pool */
	lio_free_sc_buffer_pool(lio_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}

static int
lio_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	PMD_INIT_FUNC_TRACE();

	eth_dev->rx_pkt_burst = &lio_dev_recv_pkts;
	eth_dev->tx_pkt_burst = &lio_dev_xmit_pkts;

	/* Primary does the initialization. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pdev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	if (pdev->mem_resource[0].addr) {
		lio_dev->hw_addr = pdev->mem_resource[0].addr;
	} else {
		PMD_INIT_LOG(ERR, "ERROR: Failed to map BAR0\n");
		return -ENODEV;
	}

	lio_dev->eth_dev = eth_dev;
	/* set lio device print string */
	snprintf(lio_dev->dev_string, sizeof(lio_dev->dev_string),
		 "%s[%02x:%02x.%x]", pdev->driver->driver.name,
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);

	lio_dev->port_id = eth_dev->data->port_id;

	if (lio_first_time_init(lio_dev, pdev)) {
		lio_dev_err(lio_dev, "Device init failed\n");
		return -EINVAL;
	}

	eth_dev->dev_ops = &liovf_eth_dev_ops;
	eth_dev->data->mac_addrs = rte_zmalloc("lio", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		lio_dev_err(lio_dev,
			    "MAC addresses memory allocation failed\n");
		eth_dev->dev_ops = NULL;
		eth_dev->rx_pkt_burst = NULL;
		eth_dev->tx_pkt_burst = NULL;
		return -ENOMEM;
	}

	rte_atomic64_set(&lio_dev->status, LIO_DEV_RUNNING);
	rte_wmb();

	lio_dev->port_configured = 0;
	/* Always allow unicast packets */
	lio_dev->ifflags |= LIO_IFFLAG_UNICAST;

	return 0;
}

static int
lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		      struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	int ret;

	eth_dev = rte_eth_dev_pci_allocate(pci_dev,
					   sizeof(struct lio_device));
	if (eth_dev == NULL)
		return -ENOMEM;

	ret = lio_eth_dev_init(eth_dev);
	if (ret)
		rte_eth_dev_pci_release(eth_dev);

	return ret;
}

static int
lio_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev,
					      lio_eth_dev_uninit);
}

/* Set of PCI devices this driver supports */
static const struct rte_pci_id pci_id_liovf_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_VF_VID) },
	{ .vendor_id = 0, /* sentinel */ }
};

static struct rte_pci_driver rte_liovf_pmd = {
	.id_table	= pci_id_liovf_map,
	.drv_flags	= RTE_PCI_DRV_NEED_MAPPING,
	.probe		= lio_eth_dev_pci_probe,
	.remove		= lio_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio-pci");