1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Cavium, Inc
5 #include <rte_string_fns.h>
6 #include <ethdev_driver.h>
7 #include <ethdev_pci.h>
8 #include <rte_cycles.h>
9 #include <rte_malloc.h>
10 #include <rte_alarm.h>
11 #include <rte_ether.h>
14 #include "lio_23xx_vf.h"
15 #include "lio_ethdev.h"
18 /* Default RSS key in use */
19 static uint8_t lio_rss_key[40] = {
20 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
21 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
22 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
23 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
24 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
27 static const struct rte_eth_desc_lim lio_rx_desc_lim = {
28 .nb_max = CN23XX_MAX_OQ_DESCRIPTORS,
29 .nb_min = CN23XX_MIN_OQ_DESCRIPTORS,
33 static const struct rte_eth_desc_lim lio_tx_desc_lim = {
34 .nb_max = CN23XX_MAX_IQ_DESCRIPTORS,
35 .nb_min = CN23XX_MIN_IQ_DESCRIPTORS,
39 /* Wait for the control command to reach the NIC. */
41 lio_wait_for_ctrl_cmd(struct lio_device *lio_dev,
42 struct lio_dev_ctrl_cmd *ctrl_cmd)
44 uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
46 while ((ctrl_cmd->cond == 0) && --timeout) {
47 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
55 * \brief Send Rx control command
56 * @param eth_dev Pointer to the structure rte_eth_dev
57 * @param start_stop whether to start or stop
60 lio_send_rx_ctrl_cmd(struct rte_eth_dev *eth_dev, int start_stop)
62 struct lio_device *lio_dev = LIO_DEV(eth_dev);
63 struct lio_dev_ctrl_cmd ctrl_cmd;
64 struct lio_ctrl_pkt ctrl_pkt;
66 /* flush added to prevent cmd failure
67  * in case the queue is full
69 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
71 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
72 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
74 ctrl_cmd.eth_dev = eth_dev;
77 ctrl_pkt.ncmd.s.cmd = LIO_CMD_RX_CTL;
78 ctrl_pkt.ncmd.s.param1 = start_stop;
79 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
81 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
82 lio_dev_err(lio_dev, "Failed to send RX Control message\n");
86 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
87 lio_dev_err(lio_dev, "RX Control command timed out\n");
94 /* store statistics names and their offsets in the stats structure */
95 struct rte_lio_xstats_name_off {
96 char name[RTE_ETH_XSTATS_NAME_SIZE];
100 static const struct rte_lio_xstats_name_off rte_lio_stats_strings[] = {
101 {"rx_pkts", offsetof(struct octeon_rx_stats, total_rcvd)},
102 {"rx_bytes", offsetof(struct octeon_rx_stats, bytes_rcvd)},
103 {"rx_broadcast_pkts", offsetof(struct octeon_rx_stats, total_bcst)},
104 {"rx_multicast_pkts", offsetof(struct octeon_rx_stats, total_mcst)},
105 {"rx_flow_ctrl_pkts", offsetof(struct octeon_rx_stats, ctl_rcvd)},
106 {"rx_fifo_err", offsetof(struct octeon_rx_stats, fifo_err)},
107 {"rx_dmac_drop", offsetof(struct octeon_rx_stats, dmac_drop)},
108 {"rx_fcs_err", offsetof(struct octeon_rx_stats, fcs_err)},
109 {"rx_jabber_err", offsetof(struct octeon_rx_stats, jabber_err)},
110 {"rx_l2_err", offsetof(struct octeon_rx_stats, l2_err)},
111 {"rx_vxlan_pkts", offsetof(struct octeon_rx_stats, fw_rx_vxlan)},
112 {"rx_vxlan_err", offsetof(struct octeon_rx_stats, fw_rx_vxlan_err)},
113 {"rx_lro_pkts", offsetof(struct octeon_rx_stats, fw_lro_pkts)},
114 {"tx_pkts", (offsetof(struct octeon_tx_stats, total_pkts_sent)) +
115 sizeof(struct octeon_rx_stats)},
116 {"tx_bytes", (offsetof(struct octeon_tx_stats, total_bytes_sent)) +
117 sizeof(struct octeon_rx_stats)},
118 {"tx_broadcast_pkts",
119 (offsetof(struct octeon_tx_stats, bcast_pkts_sent)) +
120 sizeof(struct octeon_rx_stats)},
121 {"tx_multicast_pkts",
122 (offsetof(struct octeon_tx_stats, mcast_pkts_sent)) +
123 sizeof(struct octeon_rx_stats)},
124 {"tx_flow_ctrl_pkts", (offsetof(struct octeon_tx_stats, ctl_sent)) +
125 sizeof(struct octeon_rx_stats)},
126 {"tx_fifo_err", (offsetof(struct octeon_tx_stats, fifo_err)) +
127 sizeof(struct octeon_rx_stats)},
128 {"tx_total_collisions", (offsetof(struct octeon_tx_stats,
130 sizeof(struct octeon_rx_stats)},
131 {"tx_tso", (offsetof(struct octeon_tx_stats, fw_tso)) +
132 sizeof(struct octeon_rx_stats)},
133 {"tx_vxlan_pkts", (offsetof(struct octeon_tx_stats, fw_tx_vxlan)) +
134 sizeof(struct octeon_rx_stats)},
137 #define LIO_NB_XSTATS RTE_DIM(rte_lio_stats_strings)
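/* A note on the table above (a reading of the offsets, not new behavior):
 * the firmware returns a single struct octeon_link_stats in which the rx
 * block (struct octeon_rx_stats) precedes the tx block (struct
 * octeon_tx_stats), which is why every tx entry is biased by
 * sizeof(struct octeon_rx_stats). lio_dev_xstats_get() below can then read
 * any counter generically, roughly:
 *
 *   xstats[i].value = *(uint64_t *)((char *)hw_stats +
 *                                   rte_lio_stats_strings[i].offset);
 */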
139 /* Get hw stats of the port */
141 lio_dev_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats,
144 struct lio_device *lio_dev = LIO_DEV(eth_dev);
145 uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
146 struct octeon_link_stats *hw_stats;
147 struct lio_link_stats_resp *resp;
148 struct lio_soft_command *sc;
153 if (!lio_dev->intf_open) {
154 lio_dev_err(lio_dev, "Port %d down\n",
159 if (n < LIO_NB_XSTATS)
160 return LIO_NB_XSTATS;
162 resp_size = sizeof(struct lio_link_stats_resp);
163 sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
167 resp = (struct lio_link_stats_resp *)sc->virtrptr;
168 lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
169 LIO_OPCODE_PORT_STATS, 0, 0, 0);
171 /* Setting wait time in seconds */
172 sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;
174 retval = lio_send_soft_command(lio_dev, sc);
175 if (retval == LIO_IQ_SEND_FAILED) {
176 lio_dev_err(lio_dev, "failed to get port stats from firmware. status: %x\n",
181 while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
182 lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
183 lio_process_ordered_list(lio_dev);
187 retval = resp->status;
189 lio_dev_err(lio_dev, "failed to get port stats from firmware\n");
193 lio_swap_8B_data((uint64_t *)(&resp->link_stats),
194 sizeof(struct octeon_link_stats) >> 3);
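	/* lio_swap_8B_data() operates on 64-bit words, so the ">> 3" above just
	 * converts a byte count into an 8-byte word count (e.g. a hypothetical
	 * 200-byte structure would be swapped as 25 words). The swap is needed
	 * because the firmware response is byte-swapped with respect to
	 * little-endian hosts; see the "64-bit swap required on LE machines"
	 * note later in this file.
	 */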
196 hw_stats = &resp->link_stats;
198 for (i = 0; i < LIO_NB_XSTATS; i++) {
201 *(uint64_t *)(((char *)hw_stats) +
202 rte_lio_stats_strings[i].offset);
205 lio_free_soft_command(sc);
207 return LIO_NB_XSTATS;
210 lio_free_soft_command(sc);
216 lio_dev_xstats_get_names(struct rte_eth_dev *eth_dev,
217 struct rte_eth_xstat_name *xstats_names,
218 unsigned limit __rte_unused)
220 struct lio_device *lio_dev = LIO_DEV(eth_dev);
223 if (!lio_dev->intf_open) {
224 lio_dev_err(lio_dev, "Port %d down\n",
229 if (xstats_names == NULL)
230 return LIO_NB_XSTATS;
232 	/* Note: limit checked in rte_eth_xstats_get_names() */
234 for (i = 0; i < LIO_NB_XSTATS; i++) {
235 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
236 "%s", rte_lio_stats_strings[i].name);
239 return LIO_NB_XSTATS;
242 /* Reset hw stats for the port */
244 lio_dev_xstats_reset(struct rte_eth_dev *eth_dev)
246 struct lio_device *lio_dev = LIO_DEV(eth_dev);
247 struct lio_dev_ctrl_cmd ctrl_cmd;
248 struct lio_ctrl_pkt ctrl_pkt;
251 if (!lio_dev->intf_open) {
252 lio_dev_err(lio_dev, "Port %d down\n",
257 /* flush added to prevent cmd failure
258 	 * in case the queue is full
260 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
262 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
263 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
265 ctrl_cmd.eth_dev = eth_dev;
268 ctrl_pkt.ncmd.s.cmd = LIO_CMD_CLEAR_STATS;
269 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
271 ret = lio_send_ctrl_pkt(lio_dev, &ctrl_pkt);
273 lio_dev_err(lio_dev, "Failed to send clear stats command\n");
277 ret = lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd);
279 lio_dev_err(lio_dev, "Clear stats command timed out\n");
283 /* clear stored per queue stats */
284 RTE_FUNC_PTR_OR_ERR_RET(*eth_dev->dev_ops->stats_reset, 0);
285 return (*eth_dev->dev_ops->stats_reset)(eth_dev);
288 /* Retrieve the device statistics (# packets in/out, # bytes in/out, etc.) */
290 lio_dev_stats_get(struct rte_eth_dev *eth_dev,
291 struct rte_eth_stats *stats)
293 struct lio_device *lio_dev = LIO_DEV(eth_dev);
294 struct lio_droq_stats *oq_stats;
295 struct lio_iq_stats *iq_stats;
296 struct lio_instr_queue *txq;
297 struct lio_droq *droq;
303 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
304 iq_no = lio_dev->linfo.txpciq[i].s.q_no;
305 txq = lio_dev->instr_queue[iq_no];
307 iq_stats = &txq->stats;
308 pkts += iq_stats->tx_done;
309 drop += iq_stats->tx_dropped;
310 bytes += iq_stats->tx_tot_bytes;
314 stats->opackets = pkts;
315 stats->obytes = bytes;
316 stats->oerrors = drop;
322 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
323 oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
324 droq = lio_dev->droq[oq_no];
326 oq_stats = &droq->stats;
327 pkts += oq_stats->rx_pkts_received;
328 drop += (oq_stats->rx_dropped +
329 oq_stats->dropped_toomany +
330 oq_stats->dropped_nomem);
331 bytes += oq_stats->rx_bytes_received;
334 stats->ibytes = bytes;
335 stats->ipackets = pkts;
336 stats->ierrors = drop;
342 lio_dev_stats_reset(struct rte_eth_dev *eth_dev)
344 struct lio_device *lio_dev = LIO_DEV(eth_dev);
345 struct lio_droq_stats *oq_stats;
346 struct lio_iq_stats *iq_stats;
347 struct lio_instr_queue *txq;
348 struct lio_droq *droq;
351 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
352 iq_no = lio_dev->linfo.txpciq[i].s.q_no;
353 txq = lio_dev->instr_queue[iq_no];
355 iq_stats = &txq->stats;
356 memset(iq_stats, 0, sizeof(struct lio_iq_stats));
360 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
361 oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
362 droq = lio_dev->droq[oq_no];
364 oq_stats = &droq->stats;
365 memset(oq_stats, 0, sizeof(struct lio_droq_stats));
373 lio_dev_info_get(struct rte_eth_dev *eth_dev,
374 struct rte_eth_dev_info *devinfo)
376 struct lio_device *lio_dev = LIO_DEV(eth_dev);
377 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
379 switch (pci_dev->id.subsystem_device_id) {
380 /* CN23xx 10G cards */
381 case PCI_SUBSYS_DEV_ID_CN2350_210:
382 case PCI_SUBSYS_DEV_ID_CN2360_210:
383 case PCI_SUBSYS_DEV_ID_CN2350_210SVPN3:
384 case PCI_SUBSYS_DEV_ID_CN2360_210SVPN3:
385 case PCI_SUBSYS_DEV_ID_CN2350_210SVPT:
386 case PCI_SUBSYS_DEV_ID_CN2360_210SVPT:
387 devinfo->speed_capa = ETH_LINK_SPEED_10G;
389 /* CN23xx 25G cards */
390 case PCI_SUBSYS_DEV_ID_CN2350_225:
391 case PCI_SUBSYS_DEV_ID_CN2360_225:
392 devinfo->speed_capa = ETH_LINK_SPEED_25G;
395 devinfo->speed_capa = ETH_LINK_SPEED_10G;
397 "Unknown CN23XX subsystem device id. Setting 10G as default link speed.\n");
401 devinfo->max_rx_queues = lio_dev->max_rx_queues;
402 devinfo->max_tx_queues = lio_dev->max_tx_queues;
404 devinfo->min_rx_bufsize = LIO_MIN_RX_BUF_SIZE;
405 devinfo->max_rx_pktlen = LIO_MAX_RX_PKTLEN;
407 devinfo->max_mac_addrs = 1;
409 devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
410 DEV_RX_OFFLOAD_UDP_CKSUM |
411 DEV_RX_OFFLOAD_TCP_CKSUM |
412 DEV_RX_OFFLOAD_VLAN_STRIP |
413 DEV_RX_OFFLOAD_RSS_HASH);
414 devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM |
415 DEV_TX_OFFLOAD_UDP_CKSUM |
416 DEV_TX_OFFLOAD_TCP_CKSUM |
417 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);
419 devinfo->rx_desc_lim = lio_rx_desc_lim;
420 devinfo->tx_desc_lim = lio_tx_desc_lim;
422 devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ;
423 devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ;
424 devinfo->flow_type_rss_offloads = (ETH_RSS_IPV4 |
425 ETH_RSS_NONFRAG_IPV4_TCP |
427 ETH_RSS_NONFRAG_IPV6_TCP |
429 ETH_RSS_IPV6_TCP_EX);
434 lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
436 struct lio_device *lio_dev = LIO_DEV(eth_dev);
437 uint16_t pf_mtu = lio_dev->linfo.link.s.mtu;
438 struct lio_dev_ctrl_cmd ctrl_cmd;
439 struct lio_ctrl_pkt ctrl_pkt;
441 PMD_INIT_FUNC_TRACE();
443 if (!lio_dev->intf_open) {
444 lio_dev_err(lio_dev, "Port %d down, can't set MTU\n",
449 /* check if VF MTU is within allowed range.
450 * New value should not exceed PF MTU.
452 if (mtu < RTE_ETHER_MIN_MTU || mtu > pf_mtu) {
453 lio_dev_err(lio_dev, "VF MTU should be >= %d and <= %d\n",
454 RTE_ETHER_MIN_MTU, pf_mtu);
458 /* flush added to prevent cmd failure
459 	 * in case the queue is full
461 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
463 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
464 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
466 ctrl_cmd.eth_dev = eth_dev;
469 ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_MTU;
470 ctrl_pkt.ncmd.s.param1 = mtu;
471 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
473 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
474 lio_dev_err(lio_dev, "Failed to send command to change MTU\n");
478 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
479 lio_dev_err(lio_dev, "Command to change MTU timed out\n");
483 if (mtu > RTE_ETHER_MTU)
484 eth_dev->data->dev_conf.rxmode.offloads |=
485 DEV_RX_OFFLOAD_JUMBO_FRAME;
487 eth_dev->data->dev_conf.rxmode.offloads &=
488 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
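	/* Worked example of the toggle above: RTE_ETHER_MTU is 1500, so setting
	 * an MTU of 9000 turns on DEV_RX_OFFLOAD_JUMBO_FRAME in the recorded
	 * rxmode offloads, while setting it back to 1500 (or lower) clears the
	 * flag.
	 */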
494 lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
495 struct rte_eth_rss_reta_entry64 *reta_conf,
498 struct lio_device *lio_dev = LIO_DEV(eth_dev);
499 struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
500 struct lio_rss_set *rss_param;
501 struct lio_dev_ctrl_cmd ctrl_cmd;
502 struct lio_ctrl_pkt ctrl_pkt;
505 if (!lio_dev->intf_open) {
506 lio_dev_err(lio_dev, "Port %d down, can't update reta\n",
511 if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
513 			    "The size of hash lookup table configured (%d) doesn't match the number the hardware can support (%d)\n",
514 reta_size, LIO_RSS_MAX_TABLE_SZ);
518 /* flush added to prevent cmd failure
519 	 * in case the queue is full
521 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
523 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
524 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
526 rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];
528 ctrl_cmd.eth_dev = eth_dev;
531 ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
532 ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
533 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
535 rss_param->param.flags = 0xF;
536 rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED;
537 rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ;
539 for (i = 0; i < (reta_size / RTE_RETA_GROUP_SIZE); i++) {
540 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
541 if ((reta_conf[i].mask) & ((uint64_t)1 << j)) {
542 index = (i * RTE_RETA_GROUP_SIZE) + j;
543 rss_state->itable[index] = reta_conf[i].reta[j];
548 rss_state->itable_size = LIO_RSS_MAX_TABLE_SZ;
549 memcpy(rss_param->itable, rss_state->itable, rss_state->itable_size);
551 lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);
553 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
554 lio_dev_err(lio_dev, "Failed to set rss hash\n");
558 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
559 lio_dev_err(lio_dev, "Set rss hash timed out\n");
567 lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
568 struct rte_eth_rss_reta_entry64 *reta_conf,
571 struct lio_device *lio_dev = LIO_DEV(eth_dev);
572 struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
575 if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
577 			    "The size of hash lookup table configured (%d) doesn't match the number the hardware can support (%d)\n",
578 reta_size, LIO_RSS_MAX_TABLE_SZ);
582 num = reta_size / RTE_RETA_GROUP_SIZE;
584 for (i = 0; i < num; i++) {
585 memcpy(reta_conf->reta,
586 &rss_state->itable[i * RTE_RETA_GROUP_SIZE],
587 RTE_RETA_GROUP_SIZE);
595 lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
596 struct rte_eth_rss_conf *rss_conf)
598 struct lio_device *lio_dev = LIO_DEV(eth_dev);
599 struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
600 uint8_t *hash_key = NULL;
603 if (rss_state->hash_disable) {
604 lio_dev_info(lio_dev, "RSS disabled in nic\n");
605 rss_conf->rss_hf = 0;
610 hash_key = rss_conf->rss_key;
611 if (hash_key != NULL)
612 memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size);
615 rss_hf |= ETH_RSS_IPV4;
616 if (rss_state->tcp_hash)
617 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
619 rss_hf |= ETH_RSS_IPV6;
620 if (rss_state->ipv6_tcp_hash)
621 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
622 if (rss_state->ipv6_ex)
623 rss_hf |= ETH_RSS_IPV6_EX;
624 if (rss_state->ipv6_tcp_ex_hash)
625 rss_hf |= ETH_RSS_IPV6_TCP_EX;
627 rss_conf->rss_hf = rss_hf;
633 lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
634 struct rte_eth_rss_conf *rss_conf)
636 struct lio_device *lio_dev = LIO_DEV(eth_dev);
637 struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
638 struct lio_rss_set *rss_param;
639 struct lio_dev_ctrl_cmd ctrl_cmd;
640 struct lio_ctrl_pkt ctrl_pkt;
642 if (!lio_dev->intf_open) {
643 lio_dev_err(lio_dev, "Port %d down, can't update hash\n",
648 /* flush added to prevent cmd failure
649 	 * in case the queue is full
651 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
653 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
654 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
656 rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];
658 ctrl_cmd.eth_dev = eth_dev;
661 ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
662 ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
663 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
665 rss_param->param.flags = 0xF;
667 if (rss_conf->rss_key) {
668 rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_KEY_UNCHANGED;
669 rss_state->hash_key_size = LIO_RSS_MAX_KEY_SZ;
670 rss_param->param.hashkeysize = LIO_RSS_MAX_KEY_SZ;
671 memcpy(rss_state->hash_key, rss_conf->rss_key,
672 rss_state->hash_key_size);
673 memcpy(rss_param->key, rss_state->hash_key,
674 rss_state->hash_key_size);
677 if ((rss_conf->rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
678 /* Can't disable rss through hash flags,
679 * if it is enabled by default during init
681 if (!rss_state->hash_disable)
684 /* This is for --disable-rss during testpmd launch */
685 rss_param->param.flags |= LIO_RSS_PARAM_DISABLE_RSS;
687 uint32_t hashinfo = 0;
689 /* Can't enable rss if disabled by default during init */
690 if (rss_state->hash_disable)
693 if (rss_conf->rss_hf & ETH_RSS_IPV4) {
694 hashinfo |= LIO_RSS_HASH_IPV4;
700 if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
701 hashinfo |= LIO_RSS_HASH_TCP_IPV4;
702 rss_state->tcp_hash = 1;
704 rss_state->tcp_hash = 0;
707 if (rss_conf->rss_hf & ETH_RSS_IPV6) {
708 hashinfo |= LIO_RSS_HASH_IPV6;
714 if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
715 hashinfo |= LIO_RSS_HASH_TCP_IPV6;
716 rss_state->ipv6_tcp_hash = 1;
718 rss_state->ipv6_tcp_hash = 0;
721 if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) {
722 hashinfo |= LIO_RSS_HASH_IPV6_EX;
723 rss_state->ipv6_ex = 1;
725 rss_state->ipv6_ex = 0;
728 if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) {
729 hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX;
730 rss_state->ipv6_tcp_ex_hash = 1;
732 rss_state->ipv6_tcp_ex_hash = 0;
735 rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_INFO_UNCHANGED;
736 rss_param->param.hashinfo = hashinfo;
739 lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);
741 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
742 lio_dev_err(lio_dev, "Failed to set rss hash\n");
746 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
747 lio_dev_err(lio_dev, "Set rss hash timed out\n");
755  * Add VXLAN destination UDP port for an interface.
758 * Pointer to the structure rte_eth_dev
763 * On success return 0
764 * On failure return -1
767 lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev,
768 struct rte_eth_udp_tunnel *udp_tnl)
770 struct lio_device *lio_dev = LIO_DEV(eth_dev);
771 struct lio_dev_ctrl_cmd ctrl_cmd;
772 struct lio_ctrl_pkt ctrl_pkt;
777 if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
778 lio_dev_err(lio_dev, "Unsupported tunnel type\n");
782 /* flush added to prevent cmd failure
783 	 * in case the queue is full
785 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
787 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
788 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
790 ctrl_cmd.eth_dev = eth_dev;
793 ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
794 ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
795 ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_ADD;
796 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
798 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
799 lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_ADD command\n");
803 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
804 lio_dev_err(lio_dev, "VXLAN_PORT_ADD command timed out\n");
812  * Remove VXLAN destination UDP port for an interface.
815 * Pointer to the structure rte_eth_dev
820 * On success return 0
821 * On failure return -1
824 lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev,
825 struct rte_eth_udp_tunnel *udp_tnl)
827 struct lio_device *lio_dev = LIO_DEV(eth_dev);
828 struct lio_dev_ctrl_cmd ctrl_cmd;
829 struct lio_ctrl_pkt ctrl_pkt;
834 if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
835 lio_dev_err(lio_dev, "Unsupported tunnel type\n");
839 /* flush added to prevent cmd failure
840 	 * in case the queue is full
842 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
844 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
845 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
847 ctrl_cmd.eth_dev = eth_dev;
850 ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
851 ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
852 ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_DEL;
853 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
855 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
856 lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_DEL command\n");
860 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
861 lio_dev_err(lio_dev, "VXLAN_PORT_DEL command timed out\n");
869 lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on)
871 struct lio_device *lio_dev = LIO_DEV(eth_dev);
872 struct lio_dev_ctrl_cmd ctrl_cmd;
873 struct lio_ctrl_pkt ctrl_pkt;
875 if (lio_dev->linfo.vlan_is_admin_assigned)
878 /* flush added to prevent cmd failure
879 	 * in case the queue is full
881 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
883 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
884 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
886 ctrl_cmd.eth_dev = eth_dev;
889 ctrl_pkt.ncmd.s.cmd = on ?
890 LIO_CMD_ADD_VLAN_FILTER : LIO_CMD_DEL_VLAN_FILTER;
891 ctrl_pkt.ncmd.s.param1 = vlan_id;
892 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
894 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
895 lio_dev_err(lio_dev, "Failed to %s VLAN port\n",
896 on ? "add" : "remove");
900 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
901 lio_dev_err(lio_dev, "Command to %s VLAN port timed out\n",
902 on ? "add" : "remove");
910 lio_hweight64(uint64_t w)
912 uint64_t res = w - ((w >> 1) & 0x5555555555555555ul);
915 (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
916 res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
917 res = res + (res >> 8);
918 res = res + (res >> 16);
920 return (res + (res >> 32)) & 0x00000000000000FFul;
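	/* lio_hweight64() above is the classic SWAR population count: each step
	 * accumulates bit counts in parallel over pairs, nibbles and bytes of
	 * the 64-bit word. Illustrative trace for w = 0xF0:
	 *   pair step   -> 0xA0 (each 2-bit field holds its own popcount)
	 *   nibble step -> 0x40 (the high nibble now holds 4)
	 *   byte folds  -> 4, the number of set bits in 0xF0.
	 * lio_dev_configure() uses it to count the queues enabled in the
	 * firmware's iqmask/oqmask bitmaps.
	 */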
924 lio_dev_link_update(struct rte_eth_dev *eth_dev,
925 int wait_to_complete __rte_unused)
927 struct lio_device *lio_dev = LIO_DEV(eth_dev);
928 struct rte_eth_link link;
931 memset(&link, 0, sizeof(link));
932 link.link_status = ETH_LINK_DOWN;
933 link.link_speed = ETH_SPEED_NUM_NONE;
934 link.link_duplex = ETH_LINK_HALF_DUPLEX;
935 link.link_autoneg = ETH_LINK_AUTONEG;
937 /* Return what we found */
938 if (lio_dev->linfo.link.s.link_up == 0) {
939 /* Interface is down */
940 return rte_eth_linkstatus_set(eth_dev, &link);
943 link.link_status = ETH_LINK_UP; /* Interface is up */
944 link.link_duplex = ETH_LINK_FULL_DUPLEX;
945 switch (lio_dev->linfo.link.s.speed) {
946 case LIO_LINK_SPEED_10000:
947 link.link_speed = ETH_SPEED_NUM_10G;
949 case LIO_LINK_SPEED_25000:
950 link.link_speed = ETH_SPEED_NUM_25G;
953 link.link_speed = ETH_SPEED_NUM_NONE;
954 link.link_duplex = ETH_LINK_HALF_DUPLEX;
957 return rte_eth_linkstatus_set(eth_dev, &link);
961  * \brief Enable or disable allmulticast for the net device
962 * @param eth_dev Pointer to the structure rte_eth_dev
965 * On success return 0
966 * On failure return negative errno
969 lio_change_dev_flag(struct rte_eth_dev *eth_dev)
971 struct lio_device *lio_dev = LIO_DEV(eth_dev);
972 struct lio_dev_ctrl_cmd ctrl_cmd;
973 struct lio_ctrl_pkt ctrl_pkt;
975 /* flush added to prevent cmd failure
976 	 * in case the queue is full
978 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
980 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
981 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
983 ctrl_cmd.eth_dev = eth_dev;
986 /* Create a ctrl pkt command to be sent to core app. */
987 ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS;
988 ctrl_pkt.ncmd.s.param1 = lio_dev->ifflags;
989 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
991 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
992 lio_dev_err(lio_dev, "Failed to send change flag message\n");
996 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
997 lio_dev_err(lio_dev, "Change dev flag command timed out\n");
1005 lio_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
1007 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1009 if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
1010 lio_dev_err(lio_dev, "Require firmware version >= %s\n",
1011 LIO_VF_TRUST_MIN_VERSION);
1015 if (!lio_dev->intf_open) {
1016 lio_dev_err(lio_dev, "Port %d down, can't enable promiscuous\n",
1021 lio_dev->ifflags |= LIO_IFFLAG_PROMISC;
1022 return lio_change_dev_flag(eth_dev);
1026 lio_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
1028 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1030 if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
1031 lio_dev_err(lio_dev, "Require firmware version >= %s\n",
1032 LIO_VF_TRUST_MIN_VERSION);
1036 if (!lio_dev->intf_open) {
1037 lio_dev_err(lio_dev, "Port %d down, can't disable promiscuous\n",
1042 lio_dev->ifflags &= ~LIO_IFFLAG_PROMISC;
1043 return lio_change_dev_flag(eth_dev);
1047 lio_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
1049 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1051 if (!lio_dev->intf_open) {
1052 lio_dev_err(lio_dev, "Port %d down, can't enable multicast\n",
1057 lio_dev->ifflags |= LIO_IFFLAG_ALLMULTI;
1058 return lio_change_dev_flag(eth_dev);
1062 lio_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
1064 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1066 if (!lio_dev->intf_open) {
1067 lio_dev_err(lio_dev, "Port %d down, can't disable multicast\n",
1072 lio_dev->ifflags &= ~LIO_IFFLAG_ALLMULTI;
1073 return lio_change_dev_flag(eth_dev);
1077 lio_dev_rss_configure(struct rte_eth_dev *eth_dev)
1079 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1080 struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
1081 struct rte_eth_rss_reta_entry64 reta_conf[8];
1082 struct rte_eth_rss_conf rss_conf;
1085 /* Configure the RSS key and the RSS protocols used to compute
1086 * the RSS hash of input packets.
1088 rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
1089 if ((rss_conf.rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
1090 rss_state->hash_disable = 1;
1091 lio_dev_rss_hash_update(eth_dev, &rss_conf);
1095 if (rss_conf.rss_key == NULL)
1096 rss_conf.rss_key = lio_rss_key; /* Default hash key */
1098 lio_dev_rss_hash_update(eth_dev, &rss_conf);
1100 memset(reta_conf, 0, sizeof(reta_conf));
1101 for (i = 0; i < LIO_RSS_MAX_TABLE_SZ; i++) {
1102 uint8_t q_idx, conf_idx, reta_idx;
1104 q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ?
1105 i % eth_dev->data->nb_rx_queues : 0);
1106 conf_idx = i / RTE_RETA_GROUP_SIZE;
1107 reta_idx = i % RTE_RETA_GROUP_SIZE;
1108 reta_conf[conf_idx].reta[reta_idx] = q_idx;
1109 reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx);
1112 lio_dev_rss_reta_update(eth_dev, reta_conf, LIO_RSS_MAX_TABLE_SZ);
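	/* Worked example of the fill loop above, assuming 4 RX queues: with
	 * RTE_RETA_GROUP_SIZE == 64, table entry i = 70 maps to
	 * q_idx = 70 % 4 = 2 and lands in group conf_idx = 70 / 64 = 1 at
	 * reta_idx = 70 % 64 = 6, i.e. reta_conf[1].reta[6] = 2 with bit 6 of
	 * reta_conf[1].mask set.
	 */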
1116 lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev)
1118 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1119 struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
1120 struct rte_eth_rss_conf rss_conf;
1122 switch (eth_dev->data->dev_conf.rxmode.mq_mode) {
1124 lio_dev_rss_configure(eth_dev);
1126 case ETH_MQ_RX_NONE:
1127 /* if mq_mode is none, disable rss mode. */
1129 memset(&rss_conf, 0, sizeof(rss_conf));
1130 rss_state->hash_disable = 1;
1131 lio_dev_rss_hash_update(eth_dev, &rss_conf);
1136 * Setup our receive queue/ringbuffer. This is the
1137 * queue the Octeon uses to send us packets and
1138 * responses. We are given a memory pool for our
1139 * packet buffers that are used to populate the receive
1143 * Pointer to the structure rte_eth_dev
1146 * @param num_rx_descs
1147 * Number of entries in the queue
1149 * Where to allocate memory
1151  *   Pointer to the structure rte_eth_rxconf
1153 * Pointer to the packet pool
1156 * - On success, return 0
1157 * - On failure, return -1
1160 lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
1161 uint16_t num_rx_descs, unsigned int socket_id,
1162 const struct rte_eth_rxconf *rx_conf __rte_unused,
1163 struct rte_mempool *mp)
1165 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1166 struct rte_pktmbuf_pool_private *mbp_priv;
1167 uint32_t fw_mapped_oq;
1170 if (q_no >= lio_dev->nb_rx_queues) {
1171 lio_dev_err(lio_dev, "Invalid rx queue number %u\n", q_no);
1175 lio_dev_dbg(lio_dev, "setting up rx queue %u\n", q_no);
1177 fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no;
1179 /* Free previous allocation if any */
1180 if (eth_dev->data->rx_queues[q_no] != NULL) {
1181 lio_dev_rx_queue_release(eth_dev, q_no);
1182 eth_dev->data->rx_queues[q_no] = NULL;
1185 mbp_priv = rte_mempool_get_priv(mp);
1186 buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
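	/* Example of the arithmetic above, assuming the common defaults: a pool
	 * created with RTE_MBUF_DEFAULT_BUF_SIZE has a data room of 2176 bytes,
	 * so with the default 128-byte RTE_PKTMBUF_HEADROOM the buf_size handed
	 * to lio_setup_droq() is 2048 bytes per receive buffer.
	 */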
1188 if (lio_setup_droq(lio_dev, fw_mapped_oq, num_rx_descs, buf_size, mp,
1190 lio_dev_err(lio_dev, "droq allocation failed\n");
1194 eth_dev->data->rx_queues[q_no] = lio_dev->droq[fw_mapped_oq];
1200 * Release the receive queue/ringbuffer. Called by
1204 * Pointer to Ethernet device structure.
1206 * Receive queue index.
1212 lio_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
1214 struct lio_droq *droq = dev->data->rx_queues[q_no];
1219 lio_delete_droq_queue(droq->lio_dev, oq_no);
1224 * Allocate and initialize SW ring. Initialize associated HW registers.
1227 * Pointer to structure rte_eth_dev
1232 * @param num_tx_descs
1233 * Number of ringbuffer descriptors
1236 * NUMA socket id, used for memory allocations
1239 * Pointer to the structure rte_eth_txconf
1242 * - On success, return 0
1243 * - On failure, return -errno value
1246 lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
1247 uint16_t num_tx_descs, unsigned int socket_id,
1248 const struct rte_eth_txconf *tx_conf __rte_unused)
1250 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1251 int fw_mapped_iq = lio_dev->linfo.txpciq[q_no].s.q_no;
1254 if (q_no >= lio_dev->nb_tx_queues) {
1255 lio_dev_err(lio_dev, "Invalid tx queue number %u\n", q_no);
1259 lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no);
1261 /* Free previous allocation if any */
1262 if (eth_dev->data->tx_queues[q_no] != NULL) {
1263 lio_dev_tx_queue_release(eth_dev, q_no);
1264 eth_dev->data->tx_queues[q_no] = NULL;
1267 retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no],
1268 num_tx_descs, lio_dev, socket_id);
1271 lio_dev_err(lio_dev, "Runtime IQ(TxQ) creation failed.\n");
1275 retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq,
1276 lio_dev->instr_queue[fw_mapped_iq]->nb_desc,
1280 lio_delete_instruction_queue(lio_dev, fw_mapped_iq);
1284 eth_dev->data->tx_queues[q_no] = lio_dev->instr_queue[fw_mapped_iq];
1290 * Release the transmit queue/ringbuffer. Called by
1294 * Pointer to Ethernet device structure.
1296 * Transmit queue index.
1302 lio_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
1304 struct lio_instr_queue *tq = dev->data->tx_queues[q_no];
1305 uint32_t fw_mapped_iq_no;
1310 lio_delete_sglist(tq);
1312 fw_mapped_iq_no = tq->txpciq.s.q_no;
1313 lio_delete_instruction_queue(tq->lio_dev, fw_mapped_iq_no);
1318  * API to check link state.
1321 lio_dev_get_link_status(struct rte_eth_dev *eth_dev)
1323 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1324 uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
1325 struct lio_link_status_resp *resp;
1326 union octeon_link_status *ls;
1327 struct lio_soft_command *sc;
1330 if (!lio_dev->intf_open)
1333 resp_size = sizeof(struct lio_link_status_resp);
1334 sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
1338 resp = (struct lio_link_status_resp *)sc->virtrptr;
1339 lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
1340 LIO_OPCODE_INFO, 0, 0, 0);
1342 /* Setting wait time in seconds */
1343 sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;
1345 if (lio_send_soft_command(lio_dev, sc) == LIO_IQ_SEND_FAILED)
1346 goto get_status_fail;
1348 while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
1349 lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
1354 goto get_status_fail;
1356 ls = &resp->link_info.link;
1358 lio_swap_8B_data((uint64_t *)ls, sizeof(union octeon_link_status) >> 3);
1360 if (lio_dev->linfo.link.link_status64 != ls->link_status64) {
1361 if (ls->s.mtu < eth_dev->data->mtu) {
1362 lio_dev_info(lio_dev, "Lowered VF MTU to %d as PF MTU dropped\n",
1364 eth_dev->data->mtu = ls->s.mtu;
1366 lio_dev->linfo.link.link_status64 = ls->link_status64;
1367 lio_dev_link_update(eth_dev, 0);
1370 lio_free_soft_command(sc);
1375 lio_free_soft_command(sc);
1378 /* This function will be invoked every LSC_TIMEOUT us (100 ms)
1379 * and will update link state if it changes.
1382 lio_sync_link_state_check(void *eth_dev)
1384 struct lio_device *lio_dev =
1385 (((struct rte_eth_dev *)eth_dev)->data->dev_private);
1387 if (lio_dev->port_configured)
1388 lio_dev_get_link_status(eth_dev);
1390 /* Schedule periodic link status check.
1391  * Stop the check when the interface is closed and start it again when it is opened.
1393 if (lio_dev->intf_open)
1394 rte_eal_alarm_set(LIO_LSC_TIMEOUT, lio_sync_link_state_check,
1399 lio_dev_start(struct rte_eth_dev *eth_dev)
1401 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1402 uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
1405 lio_dev_info(lio_dev, "Starting port %d\n", eth_dev->data->port_id);
1407 if (lio_dev->fn_list.enable_io_queues(lio_dev))
1410 if (lio_send_rx_ctrl_cmd(eth_dev, 1))
1413 /* Ready for link status updates */
1414 lio_dev->intf_open = 1;
1417 	/* Configure RSS if the device is configured with multiple RX queues. */
1418 lio_dev_mq_rx_configure(eth_dev);
1420 	/* Before updating the link info,
1421 	 * linfo.link.link_status64 must be set to 0.
1423 lio_dev->linfo.link.link_status64 = 0;
1425 /* start polling for lsc */
1426 ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT,
1427 lio_sync_link_state_check,
1430 lio_dev_err(lio_dev,
1431 "link state check handler creation failed\n");
1432 goto dev_lsc_handle_error;
1435 while ((lio_dev->linfo.link.link_status64 == 0) && (--timeout))
1438 if (lio_dev->linfo.link.link_status64 == 0) {
1440 goto dev_mtu_set_error;
1443 ret = lio_dev_mtu_set(eth_dev, eth_dev->data->mtu);
1445 goto dev_mtu_set_error;
1450 rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);
1452 dev_lsc_handle_error:
1453 lio_dev->intf_open = 0;
1454 lio_send_rx_ctrl_cmd(eth_dev, 0);
1459 /* Stop device and disable input/output functions */
1461 lio_dev_stop(struct rte_eth_dev *eth_dev)
1463 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1465 lio_dev_info(lio_dev, "Stopping port %d\n", eth_dev->data->port_id);
1466 eth_dev->data->dev_started = 0;
1467 lio_dev->intf_open = 0;
1470 /* Cancel callback if still running. */
1471 rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);
1473 lio_send_rx_ctrl_cmd(eth_dev, 0);
1475 lio_wait_for_instr_fetch(lio_dev);
1477 /* Clear recorded link status */
1478 lio_dev->linfo.link.link_status64 = 0;
1484 lio_dev_set_link_up(struct rte_eth_dev *eth_dev)
1486 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1488 if (!lio_dev->intf_open) {
1489 		lio_dev_info(lio_dev, "Port is stopped; start the port first\n");
1493 if (lio_dev->linfo.link.s.link_up) {
1494 lio_dev_info(lio_dev, "Link is already UP\n");
1498 if (lio_send_rx_ctrl_cmd(eth_dev, 1)) {
1499 lio_dev_err(lio_dev, "Unable to set Link UP\n");
1503 lio_dev->linfo.link.s.link_up = 1;
1504 eth_dev->data->dev_link.link_status = ETH_LINK_UP;
1510 lio_dev_set_link_down(struct rte_eth_dev *eth_dev)
1512 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1514 if (!lio_dev->intf_open) {
1515 		lio_dev_info(lio_dev, "Port is stopped; start the port first\n");
1519 if (!lio_dev->linfo.link.s.link_up) {
1520 lio_dev_info(lio_dev, "Link is already DOWN\n");
1524 lio_dev->linfo.link.s.link_up = 0;
1525 eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
1527 if (lio_send_rx_ctrl_cmd(eth_dev, 0)) {
1528 lio_dev->linfo.link.s.link_up = 1;
1529 eth_dev->data->dev_link.link_status = ETH_LINK_UP;
1530 lio_dev_err(lio_dev, "Unable to set Link Down\n");
1538 * Reset and stop the device. This occurs on the first
1539 * call to this routine. Subsequent calls will simply
1540 * return. NB: This will require the NIC to be rebooted.
1543 * Pointer to the structure rte_eth_dev
1549 lio_dev_close(struct rte_eth_dev *eth_dev)
1551 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1554 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1557 lio_dev_info(lio_dev, "closing port %d\n", eth_dev->data->port_id);
1559 if (lio_dev->intf_open)
1560 ret = lio_dev_stop(eth_dev);
1562 /* Reset ioq regs */
1563 lio_dev->fn_list.setup_device_regs(lio_dev);
1565 if (lio_dev->pci_dev->kdrv == RTE_PCI_KDRV_IGB_UIO) {
1566 cn23xx_vf_ask_pf_to_do_flr(lio_dev);
1567 rte_delay_ms(LIO_PCI_FLR_WAIT);
1571 lio_dev->fn_list.free_mbox(lio_dev);
1573 /* Free glist resources */
1574 rte_free(lio_dev->glist_head);
1575 rte_free(lio_dev->glist_lock);
1576 lio_dev->glist_head = NULL;
1577 lio_dev->glist_lock = NULL;
1579 lio_dev->port_configured = 0;
1581 /* Delete all queues */
1582 lio_dev_clear_queues(eth_dev);
1588 * Enable tunnel rx checksum verification from firmware.
1591 lio_enable_hw_tunnel_rx_checksum(struct rte_eth_dev *eth_dev)
1593 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1594 struct lio_dev_ctrl_cmd ctrl_cmd;
1595 struct lio_ctrl_pkt ctrl_pkt;
1597 /* flush added to prevent cmd failure
1598 	 * in case the queue is full
1600 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
1602 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
1603 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
1605 ctrl_cmd.eth_dev = eth_dev;
1608 ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_RX_CSUM_CTL;
1609 ctrl_pkt.ncmd.s.param1 = LIO_CMD_RXCSUM_ENABLE;
1610 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
1612 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
1613 lio_dev_err(lio_dev, "Failed to send TNL_RX_CSUM command\n");
1617 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
1618 lio_dev_err(lio_dev, "TNL_RX_CSUM command timed out\n");
1622 * Enable checksum calculation for inner packet in a tunnel.
1625 lio_enable_hw_tunnel_tx_checksum(struct rte_eth_dev *eth_dev)
1627 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1628 struct lio_dev_ctrl_cmd ctrl_cmd;
1629 struct lio_ctrl_pkt ctrl_pkt;
1631 /* flush added to prevent cmd failure
1632 	 * in case the queue is full
1634 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
1636 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
1637 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
1639 ctrl_cmd.eth_dev = eth_dev;
1642 ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_TX_CSUM_CTL;
1643 ctrl_pkt.ncmd.s.param1 = LIO_CMD_TXCSUM_ENABLE;
1644 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
1646 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
1647 lio_dev_err(lio_dev, "Failed to send TNL_TX_CSUM command\n");
1651 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
1652 lio_dev_err(lio_dev, "TNL_TX_CSUM command timed out\n");
1656 lio_send_queue_count_update(struct rte_eth_dev *eth_dev, int num_txq,
1659 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1660 struct lio_dev_ctrl_cmd ctrl_cmd;
1661 struct lio_ctrl_pkt ctrl_pkt;
1663 if (strcmp(lio_dev->firmware_version, LIO_Q_RECONF_MIN_VERSION) < 0) {
1664 lio_dev_err(lio_dev, "Require firmware version >= %s\n",
1665 LIO_Q_RECONF_MIN_VERSION);
1669 /* flush added to prevent cmd failure
1670 	 * in case the queue is full
1672 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
1674 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
1675 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
1677 ctrl_cmd.eth_dev = eth_dev;
1680 ctrl_pkt.ncmd.s.cmd = LIO_CMD_QUEUE_COUNT_CTL;
1681 ctrl_pkt.ncmd.s.param1 = num_txq;
1682 ctrl_pkt.ncmd.s.param2 = num_rxq;
1683 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
1685 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
1686 lio_dev_err(lio_dev, "Failed to send queue count control command\n");
1690 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
1691 lio_dev_err(lio_dev, "Queue count control command timed out\n");
1699 lio_reconf_queues(struct rte_eth_dev *eth_dev, int num_txq, int num_rxq)
1701 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1704 if (lio_dev->nb_rx_queues != num_rxq ||
1705 lio_dev->nb_tx_queues != num_txq) {
1706 if (lio_send_queue_count_update(eth_dev, num_txq, num_rxq))
1708 lio_dev->nb_rx_queues = num_rxq;
1709 lio_dev->nb_tx_queues = num_txq;
1712 if (lio_dev->intf_open) {
1713 ret = lio_dev_stop(eth_dev);
1718 /* Reset ioq registers */
1719 if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
1720 lio_dev_err(lio_dev, "Failed to configure device registers\n");
1728 lio_dev_configure(struct rte_eth_dev *eth_dev)
1730 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1731 uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
1732 int retval, num_iqueues, num_oqueues;
1733 uint8_t mac[RTE_ETHER_ADDR_LEN], i;
1734 struct lio_if_cfg_resp *resp;
1735 struct lio_soft_command *sc;
1736 union lio_if_cfg if_cfg;
1739 PMD_INIT_FUNC_TRACE();
1741 if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1742 eth_dev->data->dev_conf.rxmode.offloads |=
1743 DEV_RX_OFFLOAD_RSS_HASH;
1745 /* Inform firmware about change in number of queues to use.
1746 * Disable IO queues and reset registers for re-configuration.
1748 if (lio_dev->port_configured)
1749 return lio_reconf_queues(eth_dev,
1750 eth_dev->data->nb_tx_queues,
1751 eth_dev->data->nb_rx_queues);
1753 lio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues;
1754 lio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues;
1756 /* Set max number of queues which can be re-configured. */
1757 lio_dev->max_rx_queues = eth_dev->data->nb_rx_queues;
1758 lio_dev->max_tx_queues = eth_dev->data->nb_tx_queues;
1760 resp_size = sizeof(struct lio_if_cfg_resp);
1761 sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
1765 resp = (struct lio_if_cfg_resp *)sc->virtrptr;
1767 	/* Firmware doesn't have the capability to reconfigure the queues,
1768 	 * so claim all queues and use as many as required.
1770 if_cfg.if_cfg64 = 0;
1771 if_cfg.s.num_iqueues = lio_dev->nb_tx_queues;
1772 if_cfg.s.num_oqueues = lio_dev->nb_rx_queues;
1773 if_cfg.s.base_queue = 0;
1775 if_cfg.s.gmx_port_id = lio_dev->pf_num;
1777 lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
1778 LIO_OPCODE_IF_CFG, 0,
1779 if_cfg.if_cfg64, 0);
1781 /* Setting wait time in seconds */
1782 sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;
1784 retval = lio_send_soft_command(lio_dev, sc);
1785 if (retval == LIO_IQ_SEND_FAILED) {
1786 lio_dev_err(lio_dev, "iq/oq config failed status: %x\n",
1788 /* Soft instr is freed by driver in case of failure. */
1789 goto nic_config_fail;
1792 	/* Sleep on a wait queue until the cond flag indicates that the
1793 	 * response arrived or the command timed out.
1795 while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
1796 lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
1797 lio_process_ordered_list(lio_dev);
1801 retval = resp->status;
1803 lio_dev_err(lio_dev, "iq/oq config failed\n");
1804 goto nic_config_fail;
1807 strlcpy(lio_dev->firmware_version,
1808 resp->cfg_info.lio_firmware_version, LIO_FW_VERSION_LENGTH);
1810 lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
1811 sizeof(struct octeon_if_cfg_info) >> 3);
1813 num_iqueues = lio_hweight64(resp->cfg_info.iqmask);
1814 num_oqueues = lio_hweight64(resp->cfg_info.oqmask);
1816 if (!(num_iqueues) || !(num_oqueues)) {
1817 lio_dev_err(lio_dev,
1818 "Got bad iqueues (%016lx) or oqueues (%016lx) from firmware.\n",
1819 (unsigned long)resp->cfg_info.iqmask,
1820 (unsigned long)resp->cfg_info.oqmask);
1821 goto nic_config_fail;
1824 lio_dev_dbg(lio_dev,
1825 "interface %d, iqmask %016lx, oqmask %016lx, numiqueues %d, numoqueues %d\n",
1826 eth_dev->data->port_id,
1827 (unsigned long)resp->cfg_info.iqmask,
1828 (unsigned long)resp->cfg_info.oqmask,
1829 num_iqueues, num_oqueues);
1831 lio_dev->linfo.num_rxpciq = num_oqueues;
1832 lio_dev->linfo.num_txpciq = num_iqueues;
1834 for (i = 0; i < num_oqueues; i++) {
1835 lio_dev->linfo.rxpciq[i].rxpciq64 =
1836 resp->cfg_info.linfo.rxpciq[i].rxpciq64;
1837 lio_dev_dbg(lio_dev, "index %d OQ %d\n",
1838 i, lio_dev->linfo.rxpciq[i].s.q_no);
1841 for (i = 0; i < num_iqueues; i++) {
1842 lio_dev->linfo.txpciq[i].txpciq64 =
1843 resp->cfg_info.linfo.txpciq[i].txpciq64;
1844 lio_dev_dbg(lio_dev, "index %d IQ %d\n",
1845 i, lio_dev->linfo.txpciq[i].s.q_no);
1848 lio_dev->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
1849 lio_dev->linfo.gmxport = resp->cfg_info.linfo.gmxport;
1850 lio_dev->linfo.link.link_status64 =
1851 resp->cfg_info.linfo.link.link_status64;
1853 /* 64-bit swap required on LE machines */
1854 lio_swap_8B_data(&lio_dev->linfo.hw_addr, 1);
1855 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
1856 mac[i] = *((uint8_t *)(((uint8_t *)&lio_dev->linfo.hw_addr) +
1859 /* Copy the permanent MAC address */
1860 rte_ether_addr_copy((struct rte_ether_addr *)mac,
1861 			    &eth_dev->data->mac_addrs[0]);
1863 /* enable firmware checksum support for tunnel packets */
1864 lio_enable_hw_tunnel_rx_checksum(eth_dev);
1865 lio_enable_hw_tunnel_tx_checksum(eth_dev);
1867 lio_dev->glist_lock =
1868 rte_zmalloc(NULL, sizeof(*lio_dev->glist_lock) * num_iqueues, 0);
1869 if (lio_dev->glist_lock == NULL)
1872 lio_dev->glist_head =
1873 rte_zmalloc(NULL, sizeof(*lio_dev->glist_head) * num_iqueues,
1875 if (lio_dev->glist_head == NULL) {
1876 rte_free(lio_dev->glist_lock);
1877 lio_dev->glist_lock = NULL;
1881 lio_dev_link_update(eth_dev, 0);
1883 lio_dev->port_configured = 1;
1885 lio_free_soft_command(sc);
1887 /* Reset ioq regs */
1888 lio_dev->fn_list.setup_device_regs(lio_dev);
1890 /* Free iq_0 used during init */
1891 lio_free_instr_queue0(lio_dev);
1896 lio_dev_err(lio_dev, "Failed retval %d\n", retval);
1897 lio_free_soft_command(sc);
1898 lio_free_instr_queue0(lio_dev);
1903 /* Define our Ethernet device operations */
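/* Applications never call these callbacks directly; the generic ethdev API
 * dispatches through this table, e.g. rte_eth_dev_configure() ends up in
 * lio_dev_configure() and rte_eth_dev_start() in lio_dev_start().
 */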
1904 static const struct eth_dev_ops liovf_eth_dev_ops = {
1905 .dev_configure = lio_dev_configure,
1906 .dev_start = lio_dev_start,
1907 .dev_stop = lio_dev_stop,
1908 .dev_set_link_up = lio_dev_set_link_up,
1909 .dev_set_link_down = lio_dev_set_link_down,
1910 .dev_close = lio_dev_close,
1911 .promiscuous_enable = lio_dev_promiscuous_enable,
1912 .promiscuous_disable = lio_dev_promiscuous_disable,
1913 .allmulticast_enable = lio_dev_allmulticast_enable,
1914 .allmulticast_disable = lio_dev_allmulticast_disable,
1915 .link_update = lio_dev_link_update,
1916 .stats_get = lio_dev_stats_get,
1917 .xstats_get = lio_dev_xstats_get,
1918 .xstats_get_names = lio_dev_xstats_get_names,
1919 .stats_reset = lio_dev_stats_reset,
1920 .xstats_reset = lio_dev_xstats_reset,
1921 .dev_infos_get = lio_dev_info_get,
1922 .vlan_filter_set = lio_dev_vlan_filter_set,
1923 .rx_queue_setup = lio_dev_rx_queue_setup,
1924 .rx_queue_release = lio_dev_rx_queue_release,
1925 .tx_queue_setup = lio_dev_tx_queue_setup,
1926 .tx_queue_release = lio_dev_tx_queue_release,
1927 .reta_update = lio_dev_rss_reta_update,
1928 .reta_query = lio_dev_rss_reta_query,
1929 .rss_hash_conf_get = lio_dev_rss_hash_conf_get,
1930 .rss_hash_update = lio_dev_rss_hash_update,
1931 .udp_tunnel_port_add = lio_dev_udp_tunnel_add,
1932 .udp_tunnel_port_del = lio_dev_udp_tunnel_del,
1933 .mtu_set = lio_dev_mtu_set,
1937 lio_check_pf_hs_response(void *lio_dev)
1939 struct lio_device *dev = lio_dev;
1941 	/* check until the response arrives */
1942 if (dev->pfvf_hsword.coproc_tics_per_us)
1945 cn23xx_vf_handle_mbox(dev);
1947 rte_eal_alarm_set(1, lio_check_pf_hs_response, lio_dev);
1951  * \brief Identify the LIO device and map the BAR address space
1952 * @param lio_dev lio device
1955 lio_chip_specific_setup(struct lio_device *lio_dev)
1957 struct rte_pci_device *pdev = lio_dev->pci_dev;
1958 uint32_t dev_id = pdev->id.device_id;
1963 case LIO_CN23XX_VF_VID:
1964 lio_dev->chip_id = LIO_CN23XX_VF_VID;
1965 ret = cn23xx_vf_setup_device(lio_dev);
1970 lio_dev_err(lio_dev, "Unsupported Chip\n");
1974 lio_dev_info(lio_dev, "DEVICE : %s\n", s);
1980 lio_first_time_init(struct lio_device *lio_dev,
1981 struct rte_pci_device *pdev)
1985 PMD_INIT_FUNC_TRACE();
1987 /* set dpdk specific pci device pointer */
1988 lio_dev->pci_dev = pdev;
1990 /* Identify the LIO type and set device ops */
1991 if (lio_chip_specific_setup(lio_dev)) {
1992 lio_dev_err(lio_dev, "Chip specific setup failed\n");
1996 /* Initialize soft command buffer pool */
1997 if (lio_setup_sc_buffer_pool(lio_dev)) {
1998 lio_dev_err(lio_dev, "sc buffer pool allocation failed\n");
2002 /* Initialize lists to manage the requests of different types that
2003 * arrive from applications for this lio device.
2005 lio_setup_response_list(lio_dev);
2007 if (lio_dev->fn_list.setup_mbox(lio_dev)) {
2008 lio_dev_err(lio_dev, "Mailbox setup failed\n");
2012 /* Check PF response */
2013 lio_check_pf_hs_response((void *)lio_dev);
2015 /* Do handshake and exit if incompatible PF driver */
2016 if (cn23xx_pfvf_handshake(lio_dev))
2019 /* Request and wait for device reset. */
2020 if (pdev->kdrv == RTE_PCI_KDRV_IGB_UIO) {
2021 cn23xx_vf_ask_pf_to_do_flr(lio_dev);
2022 /* FLR wait time doubled as a precaution. */
2023 rte_delay_ms(LIO_PCI_FLR_WAIT * 2);
2026 if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
2027 lio_dev_err(lio_dev, "Failed to configure device registers\n");
2031 if (lio_setup_instr_queue0(lio_dev)) {
2032 lio_dev_err(lio_dev, "Failed to setup instruction queue 0\n");
2036 dpdk_queues = (int)lio_dev->sriov_info.rings_per_vf;
2038 lio_dev->max_tx_queues = dpdk_queues;
2039 lio_dev->max_rx_queues = dpdk_queues;
2041 /* Enable input and output queues for this device */
2042 if (lio_dev->fn_list.enable_io_queues(lio_dev))
2048 lio_free_sc_buffer_pool(lio_dev);
2049 if (lio_dev->mbox[0])
2050 lio_dev->fn_list.free_mbox(lio_dev);
2051 if (lio_dev->instr_queue[0])
2052 lio_free_instr_queue0(lio_dev);
2058 lio_eth_dev_uninit(struct rte_eth_dev *eth_dev)
2060 struct lio_device *lio_dev = LIO_DEV(eth_dev);
2062 PMD_INIT_FUNC_TRACE();
2064 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2067 	/* Free the soft command buffer pool */
2068 lio_free_sc_buffer_pool(lio_dev);
2074 lio_eth_dev_init(struct rte_eth_dev *eth_dev)
2076 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
2077 struct lio_device *lio_dev = LIO_DEV(eth_dev);
2079 PMD_INIT_FUNC_TRACE();
2081 eth_dev->rx_pkt_burst = &lio_dev_recv_pkts;
2082 eth_dev->tx_pkt_burst = &lio_dev_xmit_pkts;
2084 /* Primary does the initialization. */
2085 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2088 rte_eth_copy_pci_info(eth_dev, pdev);
2090 if (pdev->mem_resource[0].addr) {
2091 lio_dev->hw_addr = pdev->mem_resource[0].addr;
2093 PMD_INIT_LOG(ERR, "ERROR: Failed to map BAR0\n");
2097 lio_dev->eth_dev = eth_dev;
2098 /* set lio device print string */
2099 snprintf(lio_dev->dev_string, sizeof(lio_dev->dev_string),
2100 "%s[%02x:%02x.%x]", pdev->driver->driver.name,
2101 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
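	/* Illustrative output of the snprintf above (hypothetical address): a
	 * VF at PCI address 0000:02:01.0 yields the device string
	 * "net_liovf[02:01.0]", used to identify the device in log output.
	 */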
2103 lio_dev->port_id = eth_dev->data->port_id;
2105 if (lio_first_time_init(lio_dev, pdev)) {
2106 lio_dev_err(lio_dev, "Device init failed\n");
2110 eth_dev->dev_ops = &liovf_eth_dev_ops;
2111 eth_dev->data->mac_addrs = rte_zmalloc("lio", RTE_ETHER_ADDR_LEN, 0);
2112 if (eth_dev->data->mac_addrs == NULL) {
2113 lio_dev_err(lio_dev,
2114 "MAC addresses memory allocation failed\n");
2115 eth_dev->dev_ops = NULL;
2116 eth_dev->rx_pkt_burst = NULL;
2117 eth_dev->tx_pkt_burst = NULL;
2121 rte_atomic64_set(&lio_dev->status, LIO_DEV_RUNNING);
2124 lio_dev->port_configured = 0;
2125 /* Always allow unicast packets */
2126 lio_dev->ifflags |= LIO_IFFLAG_UNICAST;
2132 lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2133 struct rte_pci_device *pci_dev)
2135 return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct lio_device),
2140 lio_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
2142 return rte_eth_dev_pci_generic_remove(pci_dev,
2143 lio_eth_dev_uninit);
2146 /* Set of PCI devices this driver supports */
2147 static const struct rte_pci_id pci_id_liovf_map[] = {
2148 { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_VF_VID) },
2149 { .vendor_id = 0, /* sentinel */ }
2152 static struct rte_pci_driver rte_liovf_pmd = {
2153 .id_table = pci_id_liovf_map,
2154 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
2155 .probe = lio_eth_dev_pci_probe,
2156 .remove = lio_eth_dev_pci_remove,
2159 RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd);
2160 RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);
2161 RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio-pci");
2162 RTE_LOG_REGISTER_SUFFIX(lio_logtype_init, init, NOTICE);
2163 RTE_LOG_REGISTER_SUFFIX(lio_logtype_driver, driver, NOTICE);